def VioNet_densenet(config, home_path):
    device = config.device
    ft_begin_idx = config.ft_begin_idx
    sample_size = config.sample_size[0]
    sample_duration = config.sample_duration

    model = densenet121(num_classes=2,
                        sample_size=sample_size,
                        sample_duration=sample_duration).to(device)

    # state_dict = torch.load(g_path + '/VioNet/' + 'weights/DenseNet_Kinetics.pth')
    state_dict = torch.load(
        os.path.join(home_path, VIONET_WEIGHTS, 'DenseNet_Kinetics.pth'))
    model.load_state_dict(state_dict)

    params = dn.get_fine_tuning_params(model, ft_begin_idx)

    return model, params
def CreatNet(opt):
    name = opt.model_name
    label_num = opt.label
    if name == 'lstm':
        net = lstm.lstm(100, 27, num_classes=label_num)
    elif name == 'cnn_1d':
        net = cnn_1d.cnn(opt.input_nc, num_classes=label_num)
    elif name == 'resnet18_1d':
        net = resnet_1d.resnet18()
        net.conv1 = nn.Conv1d(opt.input_nc, 64, 7, 2, 3, bias=False)
        net.fc = nn.Linear(512, label_num)
    elif name == 'resnet34_1d':
        net = resnet_1d.resnet34()
        net.conv1 = nn.Conv1d(opt.input_nc, 64, 7, 2, 3, bias=False)
        net.fc = nn.Linear(512, label_num)
    elif name == 'multi_scale_resnet_1d':
        net = multi_scale_resnet_1d.Multi_Scale_ResNet(inchannel=opt.input_nc, num_classes=label_num)
    elif name == 'micro_multi_scale_resnet_1d':
        net = micro_multi_scale_resnet_1d.Multi_Scale_ResNet(inchannel=opt.input_nc, num_classes=label_num)
    elif name == 'multi_scale_resnet':
        net = multi_scale_resnet.Multi_Scale_ResNet(inchannel=opt.input_nc, num_classes=label_num)
    elif name == 'dfcnn':
        net = dfcnn.dfcnn(num_classes=label_num)
    elif name in ['resnet101', 'resnet50', 'resnet18']:
        if name == 'resnet101':
            net = resnet.resnet101(pretrained=False)
            net.fc = nn.Linear(2048, label_num)
        elif name == 'resnet50':
            net = resnet.resnet50(pretrained=False)
            net.fc = nn.Linear(2048, label_num)
        elif name == 'resnet18':
            net = resnet.resnet18(pretrained=False)
            net.fc = nn.Linear(512, label_num)
        net.conv1 = nn.Conv2d(opt.input_nc, 64, 7, 2, 3, bias=False)
    elif 'densenet' in name:
        if name == 'densenet121':
            net = densenet.densenet121(pretrained=False, num_classes=label_num)
        elif name == 'densenet201':
            net = densenet.densenet201(pretrained=False, num_classes=label_num)
    elif name == 'squeezenet':
        net = squeezenet.squeezenet1_1(pretrained=False, num_classes=label_num, inchannel=1)
    else:
        # Fail fast instead of hitting an UnboundLocalError on `return net`
        raise ValueError('unsupported model_name: {}'.format(name))
    return net
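# A minimal usage sketch for CreatNet above, assuming the repository's `models`
# packages are importable. The option fields (model_name, label, input_nc) are
# inferred from how CreatNet reads them; the real project likely builds `opt`
# with argparse, so treat this as illustrative only.
from types import SimpleNamespace

opt = SimpleNamespace(model_name='densenet121',  # which branch of CreatNet to take
                      label=5,                   # number of output classes
                      input_nc=1)                # input channels for the 1-D / 2-D variants
net = CreatNet(opt)
print(net)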
def save_checkpoint():
    """
    Save checkpoint with correct input and output names
    Inputs: None
    Outputs: None
    """
    config = XRAYconfig()
    images = tf.placeholder(tf.float32, shape=[None, 224, 224, 3], name='inputs')
    labels = tf.placeholder(tf.int32, shape=[None], name='labels')

    with tf.variable_scope('densenet121') as densenet_scope:
        processed_image = imagenet_preprocessing(images)
        with slim.arg_scope(
                densenet.densenet_arg_scope(
                    weight_decay=config.l2_reg,
                    batch_norm_decay=config.batch_norm_decay,
                    batch_norm_epsilon=config.batch_norm_epsilon)):
            target_logits, _ = densenet.densenet121(
                inputs=processed_image,
                num_classes=config.output_shape,
                is_training=False,
                scope=densenet_scope)
        oh_enc = tf.one_hot(labels, config.output_shape)
        masked_logits = tf.multiply(target_logits, oh_enc, name='masked_logits')
        prob = tf.nn.softmax(target_logits, name='probability')

    saver = tf.train.Saver()
    restorer = tf.train.Saver()
    sess = tf.Session()
    sess.run(
        tf.group(tf.global_variables_initializer(),
                 tf.local_variables_initializer()))
    restorer.restore(sess, config.restore_checkpoint)
    save_path = saver.save(sess, "./model_to_freeze.ckpt")
    print("Model saved in path: %s" % save_path)
    sess.close()
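# The checkpoint written above is named "model_to_freeze", so a natural next step is to
# freeze it into a single GraphDef. This is a hedged sketch of that step using the
# TensorFlow 1.x freezing API and the same `tf` import as save_checkpoint(); the output
# node names ('densenet121/masked_logits', 'densenet121/probability') are assumptions
# based on the variable_scope and op names above and should be verified against the
# actual graph before use.
def freeze_saved_checkpoint(ckpt_path='./model_to_freeze.ckpt',
                            frozen_path='./frozen_model.pb'):
    with tf.Session() as sess:
        saver = tf.train.import_meta_graph(ckpt_path + '.meta')
        saver.restore(sess, ckpt_path)
        frozen_graph = tf.graph_util.convert_variables_to_constants(
            sess, sess.graph_def,
            ['densenet121/masked_logits', 'densenet121/probability'])
        with tf.gfile.GFile(frozen_path, 'wb') as f:
            f.write(frozen_graph.SerializeToString())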
def CreatNet(name):
    if name == 'lstm':
        net = lstm.lstm(100, 27, num_classes=5)
    elif name == 'cnn_1d':
        net = cnn_1d.cnn(1, num_classes=5)
    elif name == 'resnet18_1d':
        net = resnet_1d.resnet18()
        net.conv1 = nn.Conv1d(1, 64, 7, 2, 3, bias=False)
        net.fc = nn.Linear(512, 5)
    elif name == 'multi_scale_resnet_1d':
        net = multi_scale_resnet_1d.Multi_Scale_ResNet(inchannel=1, num_classes=5)
    elif name == 'multi_scale_resnet':
        net = multi_scale_resnet.Multi_Scale_ResNet(inchannel=1, num_classes=5)
    elif name == 'dfcnn':
        net = dfcnn.dfcnn(num_classes=5)
    elif name in ['resnet101', 'resnet50', 'resnet18']:
        if name == 'resnet101':
            net = resnet.resnet101(pretrained=False)
            net.fc = nn.Linear(2048, 5)
        elif name == 'resnet50':
            net = resnet.resnet50(pretrained=False)
            net.fc = nn.Linear(2048, 5)
        elif name == 'resnet18':
            net = resnet.resnet18(pretrained=False)
            net.fc = nn.Linear(512, 5)
        net.conv1 = nn.Conv2d(1, 64, 7, 2, 3, bias=False)
    elif 'densenet' in name:
        if name == 'densenet121':
            net = densenet.densenet121(pretrained=False, num_classes=5)
        elif name == 'densenet201':
            net = densenet.densenet201(pretrained=False, num_classes=5)
    elif name == 'squeezenet':
        net = squeezenet.squeezenet1_1(pretrained=False, num_classes=5, inchannel=1)
    return net
def generate_model(opt):
    assert opt.mode in ['score', 'feature']
    if opt.mode == 'score':
        last_fc = True
    elif opt.mode == 'feature':
        last_fc = False

    assert opt.model_name in ['resnet', 'preresnet', 'wideresnet', 'resnext', 'densenet']

    # NOTE: fine-tuning policies are only produced for the 'resnet' family below; for the
    # other architectures fall back to all parameters so the final return cannot raise a
    # NameError (in the original, `polices` was undefined outside the resnet branch).
    polices = None

    if opt.model_name == 'resnet':
        assert opt.model_depth in [10, 18, 34, 50, 101, 152, 200]
        if opt.model_depth == 10:
            model = resnet.resnet10(
                num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut,
                sample_size=opt.sample_size, sample_duration=opt.sample_duration,
                last_fc=last_fc)
        elif opt.model_depth == 18:
            model = resnet.resnet18(
                num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut,
                sample_size=opt.sample_size, sample_duration=opt.sample_duration,
                last_fc=last_fc)
        elif opt.model_depth == 34:
            model = resnet.resnet34(
                num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut,
                sample_size=opt.sample_size, sample_duration=opt.sample_duration,
                last_fc=last_fc)
        elif opt.model_depth == 50:
            model = resnet.resnet50(
                num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut,
                sample_size=opt.sample_size, sample_duration=opt.sample_duration,
                last_fc=last_fc)
        elif opt.model_depth == 101:
            model = resnet.resnet101(
                num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut,
                sample_size=opt.sample_size, sample_duration=opt.sample_duration,
                last_fc=last_fc)
        elif opt.model_depth == 152:
            model = resnet.resnet152(
                num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut,
                sample_size=opt.sample_size, sample_duration=opt.sample_duration,
                last_fc=last_fc)
        elif opt.model_depth == 200:
            model = resnet.resnet200(
                num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut,
                sample_size=opt.sample_size, sample_duration=opt.sample_duration,
                last_fc=last_fc)
        polices = resnet.get_fine_tuning_parameters(model, opt.ft_begin_index)
    elif opt.model_name == 'wideresnet':
        assert opt.model_depth in [50]
        if opt.model_depth == 50:
            model = wide_resnet.resnet50(
                num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut,
                k=opt.wide_resnet_k, sample_size=opt.sample_size,
                sample_duration=opt.sample_duration, last_fc=last_fc)
    elif opt.model_name == 'resnext':
        assert opt.model_depth in [50, 101, 152]
        if opt.model_depth == 50:
            model = resnext.resnet50(
                num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut,
                cardinality=opt.resnext_cardinality, sample_size=opt.sample_size,
                sample_duration=opt.sample_duration, last_fc=last_fc)
        elif opt.model_depth == 101:
            model = resnext.resnet101(
                num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut,
                cardinality=opt.resnext_cardinality, sample_size=opt.sample_size,
                sample_duration=opt.sample_duration, last_fc=last_fc)
        elif opt.model_depth == 152:
            model = resnext.resnet152(
                num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut,
                cardinality=opt.resnext_cardinality, sample_size=opt.sample_size,
                sample_duration=opt.sample_duration, last_fc=last_fc)
    elif opt.model_name == 'preresnet':
        assert opt.model_depth in [18, 34, 50, 101, 152, 200]
        if opt.model_depth == 18:
            model = pre_act_resnet.resnet18(
                num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut,
                sample_size=opt.sample_size, sample_duration=opt.sample_duration,
                last_fc=last_fc)
        elif opt.model_depth == 34:
            model = pre_act_resnet.resnet34(
                num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut,
                sample_size=opt.sample_size, sample_duration=opt.sample_duration,
                last_fc=last_fc)
        elif opt.model_depth == 50:
            model = pre_act_resnet.resnet50(
                num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut,
                sample_size=opt.sample_size, sample_duration=opt.sample_duration,
                last_fc=last_fc)
        elif opt.model_depth == 101:
            model = pre_act_resnet.resnet101(
                num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut,
                sample_size=opt.sample_size, sample_duration=opt.sample_duration,
                last_fc=last_fc)
        elif opt.model_depth == 152:
            model = pre_act_resnet.resnet152(
                num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut,
                sample_size=opt.sample_size, sample_duration=opt.sample_duration,
                last_fc=last_fc)
        elif opt.model_depth == 200:
            model = pre_act_resnet.resnet200(
                num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut,
                sample_size=opt.sample_size, sample_duration=opt.sample_duration,
                last_fc=last_fc)
    elif opt.model_name == 'densenet':
        assert opt.model_depth in [121, 169, 201, 264]
        if opt.model_depth == 121:
            model = densenet.densenet121(
                num_classes=opt.n_classes, sample_size=opt.sample_size,
                sample_duration=opt.sample_duration, last_fc=last_fc)
        elif opt.model_depth == 169:
            model = densenet.densenet169(
                num_classes=opt.n_classes, sample_size=opt.sample_size,
                sample_duration=opt.sample_duration, last_fc=last_fc)
        elif opt.model_depth == 201:
            model = densenet.densenet201(
                num_classes=opt.n_classes, sample_size=opt.sample_size,
                sample_duration=opt.sample_duration, last_fc=last_fc)
        elif opt.model_depth == 264:
            model = densenet.densenet264(
                num_classes=opt.n_classes, sample_size=opt.sample_size,
                sample_duration=opt.sample_duration, last_fc=last_fc)

    if polices is None:
        polices = model.parameters()

    return model, polices
def generate_model(opt): assert opt.model in [ 'resnet', 'preresnet', 'wideresnet', 'resnext', 'densenet' ] if opt.model == 'resnet': assert opt.model_depth in [10, 18, 34, 50, 101, 152, 200] from models.resnet import get_fine_tuning_parameters args = { "num_classes": opt.n_classes, "shortcut_type": opt.resnet_shortcut, "sample_size": opt.sample_size, "sample_duration": opt.sample_duration } if opt.model_depth == 10: model = resnet.resnet10(**args) elif opt.model_depth == 18: model = resnet.resnet18(**args) elif opt.model_depth == 34: model = resnet.resnet34(**args) elif opt.model_depth == 50: model = resnet.resnet50(**args) elif opt.model_depth == 101: model = resnet.resnet101(**args) elif opt.model_depth == 152: model = resnet.resnet152(**args) elif opt.model_depth == 200: model = resnet.resnet200(**args) elif opt.model == 'wideresnet': assert opt.model_depth in [50] from models.wide_resnet import get_fine_tuning_parameters if opt.model_depth == 50: model = wide_resnet.resnet50(num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, k=opt.wide_resnet_k, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model == 'resnext': assert opt.model_depth in [50, 101, 152] from models.resnext import get_fine_tuning_parameters args = { "num_classes": opt.n_classes, "shortcut_type": opt.resnet_shortcut, "cardinality": opt.resnext_cardinality, "sample_size": opt.sample_size, "sample_duration": opt.sample_duration } if opt.model_depth == 50: model = resnext.resnet50(**args) elif opt.model_depth == 101: model = resnext.resnet101(**args) elif opt.model_depth == 152: model = resnext.resnet152(**args) elif opt.model == 'preresnet': assert opt.model_depth in [18, 34, 50, 101, 152, 200] from models.pre_act_resnet import get_fine_tuning_parameters args = { "num_classes": opt.n_classes, "shortcut_type": opt.resnet_shortcut, "sample_size": opt.sample_size, "sample_duration": opt.sample_duration } if opt.model_depth == 18: model = pre_act_resnet.resnet18(**args) elif opt.model_depth == 34: model = pre_act_resnet.resnet34(**args) elif opt.model_depth == 50: model = pre_act_resnet.resnet50(**args) elif opt.model_depth == 101: model = pre_act_resnet.resnet101(**args) elif opt.model_depth == 152: model = pre_act_resnet.resnet152(**args) elif opt.model_depth == 200: model = pre_act_resnet.resnet200(**args) elif opt.model == 'densenet': assert opt.model_depth in [121, 169, 201, 264] from models.densenet import get_fine_tuning_parameters args = { "num_classes": opt.n_classes, "sample_size": opt.sample_size, "sample_duration": opt.sample_duration } if opt.model_depth == 121: model = densenet.densenet121(**args) elif opt.model_depth == 169: model = densenet.densenet169(**args) elif opt.model_depth == 201: model = densenet.densenet201(**args) elif opt.model_depth == 264: model = densenet.densenet264(**args) if opt.no_cuda: device = 'cpu' else: device = 'cuda' model = model.to(device) model = nn.DataParallel(model, device_ids=None) if opt.pretrain_path: print('loading pretrained model {}'.format(opt.pretrain_path)) pretrain = torch.load(opt.pretrain_path, map_location=device) assert opt.arch == pretrain['arch'] model.load_state_dict(pretrain['state_dict']) if opt.model == 'densenet': model.module.classifier = nn.Linear( model.module.classifier.in_features, opt.n_finetune_classes) model.module.classifier = model.module.classifier.to(device) else: model.module.fc = nn.Linear(model.module.fc.in_features, opt.n_finetune_classes) model.module.fc = model.module.fc.to(device) parameters = 
get_fine_tuning_parameters(model, opt.ft_begin_index) return model, parameters return model, model.parameters()
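# For context: the get_fine_tuning_parameters() helpers imported per-branch above live in
# the respective model modules and are not shown in this collection. The sketch below is
# only an illustration of the usual idea (train everything from `ft_begin_index` onwards,
# freeze the earlier layers); it is NOT the repository's implementation.
def fine_tuning_parameters_sketch(model, ft_begin_index):
    if ft_begin_index == 0:
        return model.parameters()
    # ResNet-style stage names plus the classifier head (an assumption for illustration)
    ft_module_names = ['layer{}'.format(i) for i in range(ft_begin_index, 5)] + ['fc']
    parameters = []
    for name, param in model.named_parameters():
        if any(m in name for m in ft_module_names):
            parameters.append(param)
        else:
            param.requires_grad = False  # freeze earlier layers
    return parameters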
# -*- coding:utf-8 -*-
# @time   : 2019.09.07
# @IDE    : pycharm
# @author : lxztju
# @github : https://github.com/lxztju

import torch
from models.densenet import densenet121
import cfg
from load_data import val_dataloader, val_datasets

# Build the model skeleton
model = densenet121(num_classes=cfg.NUM_CLASSES)
print(model)

# Run the model on the GPU if one is available
if torch.cuda.is_available():
    model.cuda()

# Read the trained network's parameter key/value pairs
trained_model = cfg.TRAINED_MODEL
state_dict = torch.load(trained_model)

# create new OrderedDict that does not contain `module.`
# The model was trained on multiple GPUs (nn.DataParallel), so every key in the saved
# state_dict carries a 'module.' prefix; strip it and rebuild a dict whose keys match
# the single-GPU model defined above.
from collections import OrderedDict
new_state_dict = OrderedDict()
for k, v in state_dict.items():
    head = k[:7]
    if head == 'module.':
        name = k[7:]  # remove `module.`
    else:
        name = k
    new_state_dict[name] = v
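# Continuation sketch (the original snippet is cut off above): load the re-keyed weights
# and put the network into inference mode. A dict comprehension such as
#   {k[7:] if k.startswith('module.') else k: v for k, v in state_dict.items()}
# is an equivalent, more compact way to strip the 'module.' prefix.
model.load_state_dict(new_state_dict)
model.eval()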
def test(file, class_names, data_dir, results_dir):
    import platform
    print(platform.platform())
    import sys
    print('Python ', sys.version)
    import pydicom
    print('pydicom ', pydicom.__version__)

    # Sets device to GPU if available, else CPU
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')  # pylint: disable=no-member
    print('Using device:', device)

    # Additional info about the GPU
    if device.type == 'cuda':
        print(torch.cuda.get_device_name(0))
        print('Memory Usage:')
        print('Allocated:', round(torch.cuda.memory_allocated(0) / 1024**3, 1), 'GB')
        print('Cached:   ', round(torch.cuda.memory_cached(0) / 1024**3, 1), 'GB')

    # Optimize the run
    cudnn.benchmark = True

    # Dataset with the transformations specified earlier
    adni_dataset = NumpyADNI_Dataset(data_dir=data_dir)

    # Training and test split
    train_size = int(0.75 * len(adni_dataset))
    test_size = len(adni_dataset) - train_size
    _, test_dataset = torch.utils.data.random_split(adni_dataset, [train_size, test_size])
    test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=1, num_workers=4)
    print('%d MRI images in testing loader...' % (test_size))

    # Initialize, load and run the model
    model = densenet121(channels=1, num_classes=len(class_names), drop_rate=0.7).cuda()
    model = torch.nn.DataParallel(model).to(device)
    model.load_state_dict(torch.load(results_dir + '/' + file))
    model.eval()

    test = []
    predicted = []
    with torch.no_grad():
        for data in test_loader:
            # get the inputs; data is a list of [inputs, labels]
            inputs, labels = data
            labels = labels.to(device)
            _, label = torch.max(labels, 1)  # pylint: disable=no-member
            test.append(label)

            outputs = model(inputs)
            _, predicted_value = torch.max(outputs.data, 1)  # pylint: disable=no-member
            predicted.append(predicted_value)

    test = [x.item() for x in test]
    predicted = [x.item() for x in predicted]

    # Print statistics and plots
    print_info_and_plots(test, predicted, class_names)
def get_network(args):
    """ return given network """
    if args.net == 'vgg11':
        from models.vgg import vgg11_bn
        net = vgg11_bn()
    elif args.net == 'vgg13':
        from models.vgg import vgg13_bn
        net = vgg13_bn()
    elif args.net == 'vgg16':
        from models.vgg import vgg16_bn
        net = vgg16_bn()
    elif args.net == 'vgg19':
        from models.vgg import vgg19_bn
        net = vgg19_bn()
    elif args.net == 'googlenet':
        from models.googLeNet import GoogLeNet
        net = GoogLeNet()
    elif args.net == 'inceptionv3':
        from models.inceptionv3 import Inceptionv3
        net = Inceptionv3()
    elif args.net == 'resnet18':
        from models.resnet import resnet18
        net = resnet18()
    elif args.net == 'resnet34':
        from models.resnet import resnet34
        net = resnet34()
    elif args.net == 'resnet50':
        from models.resnet import resnet50
        net = resnet50()
    elif args.net == 'resnet101':
        from models.resnet import resnet101
        net = resnet101()
    elif args.net == 'resnet152':
        from models.resnet import resnet152
        net = resnet152()
    elif args.net == 'wrn':
        from models.wideresnet import wideresnet
        net = wideresnet()
    elif args.net == 'densenet121':
        from models.densenet import densenet121
        net = densenet121()
    elif args.net == 'densenet161':
        from models.densenet import densenet161
        net = densenet161()
    elif args.net == 'densenet169':
        from models.densenet import densenet169
        net = densenet169()
    elif args.net == 'densenet201':
        from models.densenet import densenet201
        net = densenet201()
    else:
        print('the network name you have entered is not supported yet')
        sys.exit()

    if args.gpu:
        print("use gpu")
        net = net.cuda()

    return net
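# Usage sketch for get_network above. The attributes on `args` mirror the command-line
# flags the function reads (net, gpu); in the original project they come from argparse,
# so this namespace is only a stand-in for illustration.
import argparse
import torch

args = argparse.Namespace(net='densenet121', gpu=torch.cuda.is_available())
net = get_network(args)
print(sum(p.numel() for p in net.parameters()), 'parameters')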
def generate_model(opt): assert opt.mode in ['score', 'feature'] if opt.mode == 'score': last_fc = True elif opt.mode == 'feature': last_fc = False assert opt.model_name in [ 'resnet', 'preresnet', 'wideresnet', 'resnext', 'densenet' ] if opt.model_name == 'resnet': assert opt.model_depth in [10, 18, 34, 50, 101, 152, 200] if opt.model_depth == 10: model = resnet.resnet10(num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, sample_size=opt.sample_size, sample_duration=opt.sample_duration, last_fc=last_fc) elif opt.model_depth == 18: model = resnet.resnet18(num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, sample_size=opt.sample_size, sample_duration=opt.sample_duration, last_fc=last_fc) elif opt.model_depth == 34: model = resnet.resnet34(num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, sample_size=opt.sample_size, sample_duration=opt.sample_duration, last_fc=last_fc) elif opt.model_depth == 50: model = resnet.resnet50(num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, sample_size=opt.sample_size, sample_duration=opt.sample_duration, last_fc=last_fc) elif opt.model_depth == 101: model = resnet.resnet101(num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, sample_size=opt.sample_size, sample_duration=opt.sample_duration, last_fc=last_fc) elif opt.model_depth == 152: model = resnet.resnet152(num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, sample_size=opt.sample_size, sample_duration=opt.sample_duration, last_fc=last_fc) elif opt.model_depth == 200: model = resnet.resnet200(num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, sample_size=opt.sample_size, sample_duration=opt.sample_duration, last_fc=last_fc) elif opt.model_name == 'wideresnet': assert opt.model_depth in [50] if opt.model_depth == 50: model = wide_resnet.resnet50(num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, k=opt.wide_resnet_k, sample_size=opt.sample_size, sample_duration=opt.sample_duration, last_fc=last_fc) elif opt.model_name == 'resnext': assert opt.model_depth in [50, 101, 152] if opt.model_depth == 50: model = resnext.resnet50(num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, cardinality=opt.resnext_cardinality, sample_size=opt.sample_size, sample_duration=opt.sample_duration, last_fc=last_fc) elif opt.model_depth == 101: model = resnext.resnet101(num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, cardinality=opt.resnext_cardinality, sample_size=opt.sample_size, sample_duration=opt.sample_duration, last_fc=last_fc) elif opt.model_depth == 152: model = resnext.resnet152(num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, cardinality=opt.resnext_cardinality, sample_size=opt.sample_size, sample_duration=opt.sample_duration, last_fc=last_fc) elif opt.model_name == 'preresnet': assert opt.model_depth in [18, 34, 50, 101, 152, 200] if opt.model_depth == 18: model = pre_act_resnet.resnet18( num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, sample_size=opt.sample_size, sample_duration=opt.sample_duration, last_fc=last_fc) elif opt.model_depth == 34: model = pre_act_resnet.resnet34( num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, sample_size=opt.sample_size, sample_duration=opt.sample_duration, last_fc=last_fc) elif opt.model_depth == 50: model = pre_act_resnet.resnet50( num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, sample_size=opt.sample_size, sample_duration=opt.sample_duration, last_fc=last_fc) elif opt.model_depth == 101: model = pre_act_resnet.resnet101( 
num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, sample_size=opt.sample_size, sample_duration=opt.sample_duration, last_fc=last_fc) elif opt.model_depth == 152: model = pre_act_resnet.resnet152( num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, sample_size=opt.sample_size, sample_duration=opt.sample_duration, last_fc=last_fc) elif opt.model_depth == 200: model = pre_act_resnet.resnet200( num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, sample_size=opt.sample_size, sample_duration=opt.sample_duration, last_fc=last_fc) elif opt.model_name == 'densenet': assert opt.model_depth in [121, 169, 201, 264] if opt.model_depth == 121: model = densenet.densenet121(num_classes=opt.n_classes, sample_size=opt.sample_size, sample_duration=opt.sample_duration, last_fc=last_fc) elif opt.model_depth == 169: model = densenet.densenet169(num_classes=opt.n_classes, sample_size=opt.sample_size, sample_duration=opt.sample_duration, last_fc=last_fc) elif opt.model_depth == 201: model = densenet.densenet201(num_classes=opt.n_classes, sample_size=opt.sample_size, sample_duration=opt.sample_duration, last_fc=last_fc) elif opt.model_depth == 264: model = densenet.densenet264(num_classes=opt.n_classes, sample_size=opt.sample_size, sample_duration=opt.sample_duration, last_fc=last_fc) if not opt.no_cuda: model = model.cuda() model = nn.DataParallel(model, device_ids=None) return model
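# Quick sanity check for the 3-D model returned by the generate_model() variant above
# (a sketch; the clip layout (N, C, T, H, W) with opt.sample_duration frames of
# opt.sample_size x opt.sample_size pixels follows the constructor arguments, but the
# exact expectations of each backbone should be verified).
import torch

model = generate_model(opt)
dummy_clip = torch.randn(1, 3, opt.sample_duration, opt.sample_size, opt.sample_size)
if not opt.no_cuda:
    dummy_clip = dummy_clip.cuda()
with torch.no_grad():
    out = model(dummy_clip)
print(out.shape)  # expected: (1, opt.n_classes) when last_fc=True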
def generate_C3D_model(opt): assert opt.mode in ['score', 'feature'] if opt.mode == 'score': last_fc = True elif opt.mode == 'feature': last_fc = False assert opt.c3d_model_name in ['resnet', 'preresnet', 'wideresnet', 'resnext', 'densenet'] if opt.c3d_model_name == 'resnet': assert opt.c3d_model_depth in [10, 18, 34, 50, 101, 152, 200] if opt.c3d_model_depth == 10: model = resnet.resnet10(num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, sample_size=opt.sample_size, sample_duration=opt.sample_duration, last_fc=last_fc) elif opt.c3d_model_depth == 18: model = resnet.resnet18(num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, sample_size=opt.sample_size, sample_duration=opt.sample_duration, last_fc=last_fc) elif opt.c3d_model_depth == 34: model = resnet.resnet34(num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, sample_size=opt.sample_size, sample_duration=opt.sample_duration, last_fc=last_fc) elif opt.c3d_model_depth == 50: model = resnet.resnet50(num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, sample_size=opt.sample_size, sample_duration=opt.sample_duration, last_fc=last_fc) elif opt.c3d_model_depth == 101: model = resnet.resnet101(num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, sample_size=opt.sample_size, sample_duration=opt.sample_duration, last_fc=last_fc) elif opt.c3d_model_depth == 152: model = resnet.resnet152(num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, sample_size=opt.sample_size, sample_duration=opt.sample_duration, last_fc=last_fc) elif opt.c3d_model_depth == 200: model = resnet.resnet200(num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, sample_size=opt.sample_size, sample_duration=opt.sample_duration, last_fc=last_fc) elif opt.c3d_model_name == 'wideresnet': assert opt.c3d_model_depth in [50] if opt.c3d_model_depth == 50: model = wide_resnet.resnet50(num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, k=opt.wide_resnet_k, sample_size=opt.sample_size, sample_duration=opt.sample_duration, last_fc=last_fc) elif opt.c3d_model_name == 'resnext': assert opt.c3d_model_depth in [50, 101, 152] if opt.c3d_model_depth == 50: model = resnext.resnet50(num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, cardinality=opt.resnext_cardinality, sample_size=opt.sample_size, sample_duration=opt.sample_duration, last_fc=last_fc) elif opt.c3d_model_depth == 101: model = resnext.resnet101(num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, cardinality=opt.resnext_cardinality, sample_size=opt.sample_size, sample_duration=opt.sample_duration, last_fc=last_fc) elif opt.c3d_model_depth == 152: model = resnext.resnet152(num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, cardinality=opt.resnext_cardinality, sample_size=opt.sample_size, sample_duration=opt.sample_duration, last_fc=last_fc) elif opt.c3d_model_name == 'preresnet': assert opt.c3d_model_depth in [18, 34, 50, 101, 152, 200] if opt.c3d_model_depth == 18: model = pre_act_resnet.resnet18(num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, sample_size=opt.sample_size, sample_duration=opt.sample_duration, last_fc=last_fc) elif opt.c3d_model_depth == 34: model = pre_act_resnet.resnet34(num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, sample_size=opt.sample_size, sample_duration=opt.sample_duration, last_fc=last_fc) elif opt.c3d_model_depth == 50: model = pre_act_resnet.resnet50(num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, sample_size=opt.sample_size, 
sample_duration=opt.sample_duration, last_fc=last_fc) elif opt.c3d_model_depth == 101: model = pre_act_resnet.resnet101(num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, sample_size=opt.sample_size, sample_duration=opt.sample_duration, last_fc=last_fc) elif opt.c3d_model_depth == 152: model = pre_act_resnet.resnet152(num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, sample_size=opt.sample_size, sample_duration=opt.sample_duration, last_fc=last_fc) elif opt.c3d_model_depth == 200: model = pre_act_resnet.resnet200(num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, sample_size=opt.sample_size, sample_duration=opt.sample_duration, last_fc=last_fc) elif opt.c3d_model_name == 'densenet': assert opt.c3d_model_depth in [121, 169, 201, 264] if opt.c3d_model_depth == 121: model = densenet.densenet121(num_classes=opt.n_classes, sample_size=opt.sample_size, sample_duration=opt.sample_duration, last_fc=last_fc) elif opt.c3d_model_depth == 169: model = densenet.densenet169(num_classes=opt.n_classes, sample_size=opt.sample_size, sample_duration=opt.sample_duration, last_fc=last_fc) elif opt.c3d_model_depth == 201: model = densenet.densenet201(num_classes=opt.n_classes, sample_size=opt.sample_size, sample_duration=opt.sample_duration, last_fc=last_fc) elif opt.c3d_model_depth == 264: model = densenet.densenet264(num_classes=opt.n_classes, sample_size=opt.sample_size, sample_duration=opt.sample_duration, last_fc=last_fc) # print(model) print('loading c3d model from: {}'.format(opt.c3d_model_checkpoint)) model_data = torch.load(opt.c3d_model_checkpoint) print(model_data['arch']) assert opt.arch == model_data['arch'] model_state_dict = {} for k, v in model_data['state_dict'].items(): model_state_dict[k[k.index('.') + 1:]] = v model.load_state_dict(model_state_dict) if not opt.no_cuda: model = model.to(opt.device) # model = nn.DataParallel(model, device_ids=None) # print(model) return model
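# Note on the key rewriting in generate_C3D_model above: k[k.index('.') + 1:] drops
# everything up to and including the first dot, which is how the 'module.' prefix added
# by nn.DataParallel is removed before loading the checkpoint into a non-wrapped model.
# A quick illustration:
k = 'module.features.conv0.weight'
print(k[k.index('.') + 1:])  # -> 'features.conv0.weight'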
def get_model(config): assert config.model in [ 'i3d', 'resnet', 'preresnet', 'wideresnet', 'resnext', 'densenet' ] print('Initializing {} model (num_classes={})...'.format( config.model, config.num_classes)) if config.model == 'i3d': from models.i3d import get_fine_tuning_parameters model = InceptionI3D(num_classes=config.num_classes, spatial_squeeze=True, final_endpoint='logits', in_channels=3, dropout_keep_prob=config.dropout_keep_prob) elif config.model == 'resnet': assert config.model_depth in [10, 18, 34, 50, 101, 152, 200] from models.resnet import get_fine_tuning_parameters if config.model_depth == 10: model = resnet.resnet10(num_classes=config.num_classes, shortcut_type=config.resnet_shortcut, spatial_size=config.spatial_size, sample_duration=config.sample_duration) elif config.model_depth == 18: model = resnet.resnet18(num_classes=config.num_classes, shortcut_type=config.resnet_shortcut, spatial_size=config.spatial_size, sample_duration=config.sample_duration) elif config.model_depth == 34: model = resnet.resnet34(num_classes=config.num_classes, shortcut_type=config.resnet_shortcut, spatial_size=config.spatial_size, sample_duration=config.sample_duration) elif config.model_depth == 50: model = resnet.resnet50(num_classes=config.num_classes, shortcut_type=config.resnet_shortcut, spatial_size=config.spatial_size, sample_duration=config.sample_duration) elif config.model_depth == 101: model = resnet.resnet101(num_classes=config.num_classes, shortcut_type=config.resnet_shortcut, spatial_size=config.spatial_size, sample_duration=config.sample_duration) elif config.model_depth == 152: model = resnet.resnet152(num_classes=config.num_classes, shortcut_type=config.resnet_shortcut, spatial_size=config.spatial_size, sample_duration=config.sample_duration) elif config.model_depth == 200: model = resnet.resnet200(num_classes=config.num_classes, shortcut_type=config.resnet_shortcut, spatial_size=config.spatial_size, sample_duration=config.sample_duration) elif config.model == 'wideresnet': assert config.model_depth in [50] from models.wide_resnet import get_fine_tuning_parameters if config.model_depth == 50: model = wide_resnet.resnet50( num_classes=config.num_classes, shortcut_type=config.resnet_shortcut, k=config.wide_resnet_k, spatial_size=config.spatial_size, sample_duration=config.sample_duration) elif config.model == 'resnext': assert config.model_depth in [50, 101, 152] from models.resnext import get_fine_tuning_parameters if config.model_depth == 50: model = resnext.resnet50(num_classes=config.num_classes, shortcut_type=config.resnet_shortcut, cardinality=config.resnext_cardinality, spatial_size=config.spatial_size, sample_duration=config.sample_duration) elif config.model_depth == 101: model = resnext.resnet101(num_classes=config.num_classes, shortcut_type=config.resnet_shortcut, cardinality=config.resnext_cardinality, spatial_size=config.spatial_size, sample_duration=config.sample_duration) elif config.model_depth == 152: model = resnext.resnet152(num_classes=config.num_classes, shortcut_type=config.resnet_shortcut, cardinality=config.resnext_cardinality, spatial_size=config.spatial_size, sample_duration=config.sample_duration) elif config.model == 'densenet': assert config.model_depth in [121, 169, 201, 264] from models.densenet import get_fine_tuning_parameters if config.model_depth == 121: model = densenet.densenet121( num_classes=config.num_classes, spatial_size=config.spatial_size, sample_duration=config.sample_duration) elif config.model_depth == 169: model = 
densenet.densenet169( num_classes=config.num_classes, spatial_size=config.spatial_size, sample_duration=config.sample_duration) elif config.model_depth == 201: model = densenet.densenet201( num_classes=config.num_classes, spatial_size=config.spatial_size, sample_duration=config.sample_duration) elif config.model_depth == 264: model = densenet.densenet264( num_classes=config.num_classes, spatial_size=config.spatial_size, sample_duration=config.sample_duration) if 'cuda' in config.device: print('Moving model to CUDA device...') # Move model to the GPU model = model.cuda() if config.model != 'i3d': model = nn.DataParallel(model, device_ids=None) if config.checkpoint_path: print('Loading pretrained model {}'.format(config.checkpoint_path)) assert os.path.isfile(config.checkpoint_path) checkpoint = torch.load(config.checkpoint_path) if config.model == 'i3d': pretrained_weights = checkpoint else: pretrained_weights = checkpoint['state_dict'] model.load_state_dict(pretrained_weights) # Setup finetuning layer for different number of classes # Note: the DataParallel adds 'module' dict to complicate things... print('Replacing model logits with {} output classes.'.format( config.finetune_num_classes)) if config.model == 'i3d': model.replace_logits(config.finetune_num_classes) elif config.model == 'densenet': model.module.classifier = nn.Linear( model.module.classifier.in_features, config.finetune_num_classes) model.module.classifier = model.module.classifier.cuda() else: model.module.fc = nn.Linear(model.module.fc.in_features, config.finetune_num_classes) model.module.fc = model.module.fc.cuda() # Setup which layers to train assert config.model in ( 'i3d', 'resnet'), 'finetune params not implemented...' finetune_criterion = config.finetune_prefixes if config.model in ( 'i3d', 'resnet') else config.finetune_begin_index parameters_to_train = get_fine_tuning_parameters( model, finetune_criterion) return model, parameters_to_train else: raise ValueError('CPU training not supported.') return model, model.parameters()
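# Typical downstream use of the (model, parameters) pair returned by get_model() above,
# given a populated `config` object (a sketch; the optimizer type and hyper-parameters
# here are assumptions, not the project's actual training configuration).
import torch

model, parameters_to_train = get_model(config)
optimizer = torch.optim.SGD(parameters_to_train, lr=0.001, momentum=0.9, weight_decay=1e-5)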
def get_network(args): """ return given network """ if args.net == 'vgg16': from models.vgg import vgg16_bn net = vgg16_bn() elif args.net == 'vgg13': from models.vgg import vgg13_bn net = vgg13_bn() elif args.net == 'vgg11': from models.vgg import vgg11_bn net = vgg11_bn() elif args.net == 'vgg19': from models.vgg import vgg19_bn net = vgg19_bn() elif args.net == 'densenet121': from models.densenet import densenet121 net = densenet121() elif args.net == 'densenet161': from models.densenet import densenet161 net = densenet161() elif args.net == 'densenet169': from models.densenet import densenet169 net = densenet169() elif args.net == 'densenet201': from models.densenet import densenet201 net = densenet201() elif args.net == 'googlenet': from models.googlenet import googlenet net = googlenet() elif args.net == 'inceptionv3': from models.inceptionv3 import inceptionv3 net = inceptionv3() elif args.net == 'inceptionv4': from models.inceptionv4 import inceptionv4 net = inceptionv4() elif args.net == 'inceptionresnetv2': from models.inceptionv4 import inception_resnet_v2 net = inception_resnet_v2() elif args.net == 'xception': from models.xception import xception net = xception() elif args.net == 'resnet18': from models.resnet import resnet18 net = resnet18() elif args.net == 'resnet34': from models.resnet import resnet34 net = resnet34() elif args.net == 'resnet50': from models.resnet import resnet50 net = resnet50() elif args.net == 'resnet101': from models.resnet import resnet101 net = resnet101() elif args.net == 'resnet152': from models.resnet import resnet152 net = resnet152() elif args.net == 'preactresnet18': from models.preactresnet import preactresnet18 net = preactresnet18() elif args.net == 'preactresnet34': from models.preactresnet import preactresnet34 net = preactresnet34() elif args.net == 'preactresnet50': from models.preactresnet import preactresnet50 net = preactresnet50() elif args.net == 'preactresnet101': from models.preactresnet import preactresnet101 net = preactresnet101() elif args.net == 'preactresnet152': from models.preactresnet import preactresnet152 net = preactresnet152() elif args.net == 'resnext50': from models.resnext import resnext50 net = resnext50() elif args.net == 'resnext101': from models.resnext import resnext101 net = resnext101() elif args.net == 'resnext152': from models.resnext import resnext152 net = resnext152() elif args.net == 'shufflenet': from models.shufflenet import shufflenet net = shufflenet() elif args.net == 'shufflenetv2': from models.shufflenetv2 import shufflenetv2 net = shufflenetv2() elif args.net == 'squeezenet': from models.squeezenet import squeezenet net = squeezenet() elif args.net == 'mobilenet': from models.mobilenet import mobilenet net = mobilenet() elif args.net == 'mobilenetv2': from models.mobilenetv2 import mobilenetv2 net = mobilenetv2() elif args.net == 'nasnet': from models.nasnet import nasnet net = nasnet() elif args.net == 'attention56': from models.attention import attention56 net = attention56() elif args.net == 'attention92': from models.attention import attention92 net = attention92() elif args.net == 'seresnet18': from models.senet import seresnet18 net = seresnet18() elif args.net == 'seresnet34': from models.senet import seresnet34 net = seresnet34() elif args.net == 'seresnet50': from models.senet import seresnet50 net = seresnet50() elif args.net == 'seresnet101': from models.senet import seresnet101 net = seresnet101() elif args.net == 'seresnet152': from models.senet import seresnet152 net = 
seresnet152() elif args.net == 'wideresnet': from models.wideresidual import wideresnet net = wideresnet() elif args.net == 'stochasticdepth18': from models.stochasticdepth import stochastic_depth_resnet18 net = stochastic_depth_resnet18() elif args.net == 'stochasticdepth34': from models.stochasticdepth import stochastic_depth_resnet34 net = stochastic_depth_resnet34() elif args.net == 'stochasticdepth50': from models.stochasticdepth import stochastic_depth_resnet50 net = stochastic_depth_resnet50() elif args.net == 'stochasticdepth101': from models.stochasticdepth import stochastic_depth_resnet101 net = stochastic_depth_resnet101() elif args.net == 'normal_resnet': from models.normal_resnet import resnet18 net = resnet18() elif args.net == 'hyper_resnet': from models.hypernet_main import Hypernet_Main net = Hypernet_Main( encoder="resnet18", hypernet_params={'vqvae_dict_size': args.dict_size}) elif args.net == 'normal_resnet_wo_bn': from models.normal_resnet_wo_bn import resnet18 net = resnet18() elif args.net == 'hyper_resnet_wo_bn': from models.hypernet_main import Hypernet_Main net = Hypernet_Main( encoder="resnet18_wobn", hypernet_params={'vqvae_dict_size': args.dict_size}) else: print('the network name you have entered is not supported yet') sys.exit() if args.gpu: #use_gpu net = net.cuda() return net
def generate_model(opt): assert opt.model in [ 'resnet', 'resnet_skeleton', 'preresnet', 'wideresnet', 'resnext', 'densenet' ] if opt.model == 'resnet': assert opt.model_depth in [10, 18, 34, 50, 101, 152, 200] from models.resnet import get_fine_tuning_parameters if opt.model_depth == 10: model = resnet.resnet10(num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model_depth == 18: model = resnet.resnet18(num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model_depth == 34: model = resnet.resnet34(num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model_depth == 50: model = resnet.resnet50(num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model_depth == 101: model = resnet.resnet101(num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model_depth == 152: model = resnet.resnet152(num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model_depth == 200: model = resnet.resnet200(num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model == 'resnet_skeleton': assert opt.model_depth in [10, 18, 34, 50, 101, 152, 200] from models.resnet_skeleton import get_fine_tuning_parameters if opt.model_depth == 10: model = resnet_skeleton.resnet_skeleton10( num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model_depth == 18: model = resnet_skeleton.resnet_skeleton18( num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model_depth == 34: model = resnet_skeleton.resnet_skeleton34( num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model_depth == 50: model = resnet_skeleton.resnet_skeleton50( num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model_depth == 101: model = resnet_skeleton.resnet_skeleton101( num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model_depth == 152: model = resnet_skeleton.resnet_skeleton152( num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model_depth == 200: model = resnet_skeleton.resnet_skeleton200( num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model == 'wideresnet': assert opt.model_depth in [50] from models.wide_resnet import get_fine_tuning_parameters if opt.model_depth == 50: model = wide_resnet.resnet50(num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, k=opt.wide_resnet_k, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model == 'resnext': assert opt.model_depth in [50, 101, 152] from models.resnext import get_fine_tuning_parameters if opt.model_depth == 
50: model = resnext.resnet50(num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, cardinality=opt.resnext_cardinality, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model_depth == 101: model = resnext.resnet101(num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, cardinality=opt.resnext_cardinality, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model_depth == 152: model = resnext.resnet152(num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, cardinality=opt.resnext_cardinality, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model == 'preresnet': assert opt.model_depth in [18, 34, 50, 101, 152, 200] from models.pre_act_resnet import get_fine_tuning_parameters if opt.model_depth == 18: model = pre_act_resnet.resnet18( num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model_depth == 34: model = pre_act_resnet.resnet34( num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model_depth == 50: model = pre_act_resnet.resnet50( num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model_depth == 101: model = pre_act_resnet.resnet101( num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model_depth == 152: model = pre_act_resnet.resnet152( num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model_depth == 200: model = pre_act_resnet.resnet200( num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model == 'densenet': assert opt.model_depth in [121, 169, 201, 264] from models.densenet import get_fine_tuning_parameters if opt.model_depth == 121: model = densenet.densenet121(num_classes=opt.n_classes, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model_depth == 169: model = densenet.densenet169(num_classes=opt.n_classes, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model_depth == 201: model = densenet.densenet201(num_classes=opt.n_classes, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model_depth == 264: model = densenet.densenet264(num_classes=opt.n_classes, sample_size=opt.sample_size, sample_duration=opt.sample_duration) if not opt.no_cuda: if opt.cuda_id is None: model = model.cuda() else: model = model.cuda(opt.cuda_id) # model = nn.DataParallel(model, device_ids=None) if opt.cuda_id is None: model = nn.DataParallel(model, device_ids=None) else: model = nn.DataParallel(model, device_ids=[opt.cuda_id]) if opt.pretrain_path: print(' loading pretrained model {}'.format(opt.pretrain_path)) pretrain = torch.load(opt.pretrain_path) if opt.model == 'resnet_skeleton': pretrained_dict = pretrain['state_dict'] model_dict = model.state_dict() # print('----------------') # for k, v in pretrained_dict.items(): # if k in model_dict: # print(k) # print('----------------') # pretrained_dict = {k: v for k, v in pretrained_dict.items() if k in model_dict} pretrained_dict = { k: v for k, v in pretrained_dict.items() if k in model_dict and 'fc' not in k } ## for concatenate model_dict.update(pretrained_dict) 
model.load_state_dict(model_dict) else: assert opt.arch == pretrain['arch'] model.load_state_dict(pretrain['state_dict']) if opt.model == 'densenet': model.module.classifier = nn.Linear( model.module.classifier.in_features, opt.n_finetune_classes) if opt.cuda_id is None: model.module.classifier = model.module.classifier.cuda() else: model.module.classifier = model.module.classifier.cuda( opt.cuda_id) else: model.module.fc = nn.Linear(model.module.fc.in_features, opt.n_finetune_classes) if opt.cuda_id is None: model.module.fc = model.module.fc.cuda() else: model.module.fc = model.module.fc.cuda(opt.cuda_id) parameters = get_fine_tuning_parameters(model, opt.ft_begin_index) return model, parameters else: if opt.pretrain_path: print('loading pretrained model {}'.format(opt.pretrain_path)) pretrain = torch.load(opt.pretrain_path) assert opt.arch == pretrain['arch'] model.load_state_dict(pretrain['state_dict']) if opt.model == 'densenet': model.classifier = nn.Linear(model.classifier.in_features, opt.n_finetune_classes) else: model.fc = nn.Linear(model.fc.in_features, opt.n_finetune_classes) parameters = get_fine_tuning_parameters(model, opt.ft_begin_index) return model, parameters return model, model.parameters()
def generate_model(opt): assert opt.model in [ 'resnet', 'preresnet', 'wideresnet', 'resnext', 'densenet' ] if opt.model == 'resnet': assert opt.model_depth in [10, 18, 34, 50, 101, 152, 200] from models.resnet import get_fine_tuning_parameters if opt.model_depth == 10: model = resnet.resnet10(num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model_depth == 18: model = resnet.resnet18(num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model_depth == 34: model = resnet.resnet34(num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model_depth == 50: model = resnet.resnet50(num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model_depth == 101: model = resnet.resnet101(num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model_depth == 152: model = resnet.resnet152(num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model_depth == 200: model = resnet.resnet200(num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model == 'wideresnet': assert opt.model_depth in [50] from models.wide_resnet import get_fine_tuning_parameters if opt.model_depth == 50: model = wide_resnet.resnet50(num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, k=opt.wide_resnet_k, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model == 'resnext': assert opt.model_depth in [50, 101, 152] from models.resnext import get_fine_tuning_parameters if opt.model_depth == 50: model = resnext.resnet50(num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, cardinality=opt.resnext_cardinality, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model_depth == 101: model = resnext.resnet101(num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, cardinality=opt.resnext_cardinality, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model_depth == 152: model = resnext.resnet152(num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, cardinality=opt.resnext_cardinality, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model == 'preresnet': assert opt.model_depth in [18, 34, 50, 101, 152, 200] from models.pre_act_resnet import get_fine_tuning_parameters if opt.model_depth == 18: model = pre_act_resnet.resnet18( num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model_depth == 34: model = pre_act_resnet.resnet34( num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model_depth == 50: model = pre_act_resnet.resnet50( num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model_depth == 101: model = pre_act_resnet.resnet101( num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model_depth == 152: model = 
pre_act_resnet.resnet152( num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model_depth == 200: model = pre_act_resnet.resnet200( num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model == 'densenet': assert opt.model_depth in [121, 169, 201, 264] from models.densenet import get_fine_tuning_parameters if opt.model_depth == 121: model = densenet.densenet121(num_classes=opt.n_classes, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model_depth == 169: model = densenet.densenet169(num_classes=opt.n_classes, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model_depth == 201: model = densenet.densenet201(num_classes=opt.n_classes, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model_depth == 264: model = densenet.densenet264(num_classes=opt.n_classes, sample_size=opt.sample_size, sample_duration=opt.sample_duration) if not opt.no_cuda: model = model.cuda() model = nn.DataParallel(model, device_ids=None) if opt.pretrain_path: print('loading pretrained model {}'.format(opt.pretrain_path)) pretrain = torch.load(opt.pretrain_path) assert opt.arch == pretrain['arch'] model.load_state_dict(pretrain['state_dict']) if opt.model == 'densenet': model.module.classifier = nn.Linear( model.module.classifier.in_features, opt.n_finetune_classes) model.module.classifier = model.module.classifier.cuda() else: model.module.fc = nn.Linear(model.module.fc.in_features, opt.n_finetune_classes) model.module.fc = model.module.fc.cuda() parameters = get_fine_tuning_parameters(model, opt.ft_begin_index) return model, parameters else: if opt.pretrain_path: print('loading pretrained model {}'.format(opt.pretrain_path)) pretrain = torch.load(opt.pretrain_path) # strip off the 'module.' for each module; this get's added when a model is saved using nn.DataParallel pretrain['state_dict'] = { k[7:]: v for k, v in pretrain['state_dict'].items() } assert opt.arch == pretrain['arch'] model.load_state_dict(pretrain['state_dict']) if opt.model == 'densenet': model.classifier = nn.Linear(model.classifier.in_features, opt.n_finetune_classes) else: model.fc = nn.Linear(model.fc.in_features, opt.n_finetune_classes) parameters = get_fine_tuning_parameters(model, opt.ft_begin_index) return model, parameters return model, model.parameters()
def train_and_val(args): # logging.basicConfig(filename='example.log', filemode='w', level=logging.DEBUG) # step 1: create a logger logger = logging.getLogger('logger') logger.setLevel(logging.DEBUG) # master switch for the log level # step 2: create a handler that writes to the log file rq = time.strftime('%Y%m%d%H%M', time.localtime(time.time())) log_path = os.path.dirname( os.getcwd()) + '/Diabetic_Reinopathy_Detection/Logs/' log_name = log_path + rq + '.log' logfile = log_name handler = logging.FileHandler(logfile, 'a') handler.setLevel(logging.DEBUG) # log level for the file handler # step 3: define the handler's output format formatter = logging.Formatter( "%(asctime)s - %(filename)s[line:%(lineno)d] - %(levelname)s: %(message)s" ) handler.setFormatter(formatter) # step 4: attach the handler to the logger logger.addHandler(handler) # writer = SummaryWriter(comment='') train_transform = transforms.Compose([ # transforms.RandomResizedCrop(224), # randomly crop a (224, 224) patch from the original image # transforms.RandomHorizontalFlip(), # horizontal flip with probability 0.5 transforms.RandomVerticalFlip(), # vertical flip with probability 0.5 transforms.ColorJitter(0.05, 0.05, 0.05, 0.05), # jitter brightness, contrast, saturation and hue transforms.RandomAffine(45), transforms.RandomGrayscale(), transforms.RandomRotation(10), # rotate within (-10, 10) degrees transforms.Resize((width, height), interpolation=2), transforms.ToTensor(), # transforms.Normalize((.5, .5, .5), (.5, .5, .5)) ]) val_transform = transforms.Compose([ transforms.Resize((width, height), interpolation=2), transforms.ToTensor(), # transforms.Normalize((.5, .5, .5), (.5, .5, .5)) ]) train_ds = DRDataLoader('csv/train_gan.csv', '/root/lg/dr_datasets_1024/', transform=train_transform, train=True) train_loader = torch.utils.data.DataLoader(train_ds, batch_size=args.batch_size, sampler=RandomSampler(train_ds)) val_ds = DRDataLoader('csv/test.csv', '/root/lg/dr_datasets_1024/', transform=val_transform, train=False) val_loader = torch.utils.data.DataLoader(val_ds, batch_size=args.test_batch_size) model = densenet121(pretrained=False) # fc_features = model.fc.in_features 138448 model.classifier = nn.Linear(model.classifier.in_features, 2, bias=True) # print(model) # model = DataParallel(model) criterion = nn.CrossEntropyLoss(size_average=True) # # optimizer = torch.optim.Adam(model.parameters(), lr=args.lr, weight_decay=5e-4) # Decay LR by a factor of 0.1 every 5 epochs if not args.disable_cuda and torch.cuda.is_available(): model.cuda() criterion.cuda() # Observe that all parameters are being optimized if args.resume: if os.path.isfile(os.path.join('./checkpoint', args.resume)): print("loading checkpoint '{}'".format(args.resume)) checkpoint = torch.load(os.path.join('./checkpoint', args.resume)) args.start_epoch = checkpoint['epoch'] args.acc = checkpoint['acc'] print('epoch', args.start_epoch, 'acc =', args.acc) model.load_state_dict(checkpoint['state_dict']) # optimizer.load_state_dict(checkpoint['optimizer']) # pretrained_dict = checkpoint['state_dict'] # model_dict = model.state_dict() # pretrained_dict = {k: v for k, v in pretrained_dict.items() if k in model_dict} # pretrained_dict.pop('classifier.weight') # pretrained_dict.pop('classifier.bias') # 2. overwrite entries in the existing state dict # model_dict.update(pretrained_dict) # 3. load the new state dict # model.load_state_dict(model_dict) logger.debug("loaded checkpoint '{}' (epoch {})".format( args.resume, checkpoint['epoch'])) else: print("no checkpoint found at '{}'".format(args.resume)) logger.debug("no checkpoint found at '{}'".format(args.resume)) else: logger.debug('checkpoint is none \n') logger.debug("no checkpoint to resume from ('{}')".format(args.resume)) # for i in enumerate(model.modules()): # print(i) # for para in list(model.parameters())[:-3]: # # print(para) # para.requires_grad = False optimizer = torch.optim.SGD(model.parameters(), lr=args.lr, momentum=args.momentum, weight_decay=5e-4) exp_lr_scheduler = lr_scheduler.StepLR(optimizer, step_size=5, gamma=0.1) for epoch in range(args.start_epoch + 1, args.epochs): start = time.time() model.train() exp_lr_scheduler.step() train_loss = [] train_correct = 0 train_total = 0 # Iterate over data. for idx, (inputs, target, _) in enumerate(train_loader): if not args.disable_cuda and torch.cuda.is_available(): inputs = inputs.cuda() target = target.cuda() inputs = Variable(inputs) target = Variable(target) # zero the parameter gradients optimizer.zero_grad() outputs = model(inputs) loss = criterion(outputs, target) loss.backward() optimizer.step() # import numpy as np # aa = np.log(np.sum(np.exp(outputs.data.cpu().numpy()), axis=1)) # myloss = -outputs.data.cpu().numpy()[0, target.data.cpu().numpy()] + aa _, preds = torch.max(outputs.data, 1) # statistics train_loss.append(loss.data[0]) train_total += inputs.size()[0] train_correct += (preds == target.data).sum() # ave_loss = ave_loss * 0.9 + loss.data[0] * 0.1 if idx % args.interval == 0: print( 'Train Epoch: {} [{}/{} ({:.0f}%)] Loss: {:.5f} Acc: {:.3f}' .format(epoch, idx * len(inputs), len(train_loader.dataset), 100. * idx / len(train_loader), loss.data[0], train_correct * 1.0 / train_total)) s = 'Train Epoch: {} [{}/{} ({:.0f}%)] Loss: {:.5f} Acc: {:.3f}'.format( epoch, idx * len(inputs), len(train_loader.dataset), 100. * idx / len(train_loader), loss.data[0], train_correct * 1.0 / train_total) logger.debug(s) train_epoch_loss = sum(train_loss) / len(train_loss) train_epoch_acc = 1.0 * train_correct / train_total # writer.add_scalar('train' + '/epoch_loss', epoch_loss, epoch) # writer.add_scalar('train' + '/epoch_acc', epoch_acc, epoch) print('{} Loss: {:.4f} Acc: {:.4f}'.format('Train', train_epoch_loss, train_epoch_acc)) logger.debug('{} Loss: {:.4f} Acc: {:.4f}'.format( 'Train', train_epoch_loss, train_epoch_acc)) end = time.time() training_time = end - start print('The training time is {:.0f}m {:.0f}s'.format( training_time // 60, training_time % 60)) model.eval() val_loss = [] val_correct = 0 val_total = 0 for idx, (inputs, target, _) in enumerate(val_loader): if not args.disable_cuda and torch.cuda.is_available(): inputs = inputs.cuda() target = target.cuda() inputs = Variable(inputs) target = Variable(target) outputs = model(inputs) loss = criterion(outputs, target) _, preds = torch.max(outputs.data, 1) # statistics val_loss.append(loss.data[0]) val_total += inputs.size()[0] val_correct += (preds == target.data).sum() print(val_correct, '====', val_total) val_epoch_loss = sum(val_loss) / len(val_loss) val_epoch_acc = 1.0 * val_correct / val_total # logger.debug('*********************************************') print('{} Loss: {:.4f} Acc: {:.4f}'.format('Validate', val_epoch_loss, val_epoch_acc)) logger.debug('{} Loss: {:.4f} Acc: {:.4f}'.format( 'Validate', val_epoch_loss, val_epoch_acc)) if epoch % 5 == 0 or val_epoch_acc > args.acc: # if val_epoch_acc > args.acc and train_epoch_acc > 0.5: logger.debug('Saving...' + str(epoch)) state = { # 'net': net.module if not args.cuda else net, 'acc': val_epoch_acc, 'epoch': epoch, 'state_dict': model.state_dict(), 'optimizer': optimizer.state_dict() } if not os.path.isdir('./checkpoint'): os.mkdir('./checkpoint') resume = 'epoch_' + str(epoch) + '_' + args.resume torch.save(state, os.path.join('./checkpoint', resume)) args.acc = val_epoch_acc print('Saving...' + str(epoch))
def test(args): val_transform = transforms.Compose([ transforms.Resize((width, height), interpolation=2), transforms.ToTensor(), # transforms.Normalize((.5, .5, .5), (.5, .5, .5)) ]) # '/dev/shm/dr_datasets_test/' val_ds = DRDataLoader('csv/test_gan_350.csv', '/root/lg/Fake/', transform=val_transform, train=False) val_loader = torch.utils.data.DataLoader(val_ds, batch_size=args.test_batch_size) model = densenet121(pretrained=False) model.classifier = nn.Linear(model.classifier.in_features, 2, bias=True) criterion = nn.CrossEntropyLoss(size_average=True) if not args.disable_cuda and torch.cuda.is_available(): model.cuda() criterion.cuda() if args.resume: if os.path.isfile(os.path.join('./checkpoint', args.resume)): print("loading checkpoint '{}'".format(args.resume)) checkpoint = torch.load(os.path.join('./checkpoint', args.resume)) args.start_epoch = checkpoint['epoch'] args.acc = checkpoint['acc'] print('epoch', args.start_epoch, 'acc =', args.acc) model.load_state_dict(checkpoint['state_dict']) else: print("no checkpoint found at '{}'".format(args.resume)) import csv start = time.time() model.eval() val_loss = [] val_correct = 0 val_total = 0 csvoutPath = './csv/test_gan_350_out_epoch_121.csv' with open(csvoutPath, 'w', newline='') as f: f_csv = csv.writer(f) f_csv.writerow(['file', 'groundtruth', 'predict', 'prob0', 'prob1']) # note: the per-sample rows written below assume args.test_batch_size == 1 for idx, (inputs, target, image_name) in enumerate(val_loader): if not args.disable_cuda and torch.cuda.is_available(): inputs = inputs.cuda() target = target.cuda() inputs = Variable(inputs) target = Variable(target) outputs = model(inputs) loss = criterion(outputs, target) _, preds = torch.max(outputs.data, 1) # statistics val_loss.append(loss.data[0]) val_total += inputs.size()[0] val_correct += (preds == target.data).sum() h_x = F.softmax(outputs, dim=1).data.squeeze() # probs, idx = h_x.sort(0, True) prob, predict = torch.max(outputs.data, 1) f_csv.writerow([ str(image_name[0]), target.data.cpu().numpy()[0], predict.cpu().numpy()[0], h_x.cpu().numpy()[0], h_x.cpu().numpy()[1] ]) print(idx) print(val_correct, '====', val_total) val_epoch_loss = sum(val_loss) / len(val_loss) val_epoch_acc = 1.0 * val_correct / val_total # logger.debug('*********************************************') print('{} Loss: {:.4f} Acc: {:.4f}'.format('Validate', val_epoch_loss, val_epoch_acc)) # logger.debug('{} Loss: {:.4f} Acc: {:.4f}'.format( # 'Validate', val_epoch_loss, val_epoch_acc)) end = time.time() testing_time = end - start print('The testing time is {:.0f}m {:.0f}s'.format( testing_time // 60, testing_time % 60))
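A minimal driver sketch for the two routines above; the flag names mirror the attributes that train_and_val() and test() actually read, but every default value below is an assumption rather than a project setting.

import argparse

def build_args():
    # Hypothetical argument parser: names match what train_and_val()/test() read;
    # defaults are illustrative only.
    parser = argparse.ArgumentParser(description='DenseNet-121 DR training/testing')
    parser.add_argument('--batch_size', type=int, default=16)
    parser.add_argument('--test_batch_size', type=int, default=1)  # CSV export above assumes 1
    parser.add_argument('--epochs', type=int, default=60)
    parser.add_argument('--start_epoch', type=int, default=0)
    parser.add_argument('--lr', type=float, default=0.01)
    parser.add_argument('--momentum', type=float, default=0.9)
    parser.add_argument('--interval', type=int, default=10)
    parser.add_argument('--acc', type=float, default=0.0)
    parser.add_argument('--resume', type=str, default='densenet121_dr.pth')
    parser.add_argument('--disable_cuda', action='store_true')
    return parser.parse_args()

if __name__ == '__main__':
    args = build_args()
    train_and_val(args)  # or test(args) to score a saved checkpoint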
def generate_model(opt): assert opt.model in [ 'resnet', 'preresnet', 'wideresnet', 'resnext', 'densenet' ] if opt.model == 'resnet': assert opt.model_depth in [10, 18, 34, 50, 101, 152, 200] from models.resnet import get_fine_tuning_parameters if opt.model_depth == 10: model = resnet.resnet10(num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model_depth == 18: model = resnet.resnet18(num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model_depth == 34: model = resnet.resnet34(num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model_depth == 50: model = resnet.resnet50(num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model_depth == 101: model = resnet.resnet101(num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model_depth == 152: model = resnet.resnet152(num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model_depth == 200: model = resnet.resnet200(num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model == 'wideresnet': assert opt.model_depth in [50] from models.wide_resnet import get_fine_tuning_parameters if opt.model_depth == 50: model = wide_resnet.resnet50(num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, k=opt.wide_resnet_k, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model == 'resnext': assert opt.model_depth in [50, 101, 152] from models.resnext import get_fine_tuning_parameters if opt.model_depth == 50: model = resnext.resnet50(num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, cardinality=opt.resnext_cardinality, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model_depth == 101: model = resnext.resnet101(num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, cardinality=opt.resnext_cardinality, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model_depth == 152: model = resnext.resnet152(num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, cardinality=opt.resnext_cardinality, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model == 'preresnet': assert opt.model_depth in [18, 34, 50, 101, 152, 200] from models.pre_act_resnet import get_fine_tuning_parameters if opt.model_depth == 18: model = pre_act_resnet.resnet18( num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model_depth == 34: model = pre_act_resnet.resnet34( num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model_depth == 50: model = pre_act_resnet.resnet50( num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model_depth == 101: model = pre_act_resnet.resnet101( num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model_depth == 152: model = 
pre_act_resnet.resnet152( num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model_depth == 200: model = pre_act_resnet.resnet200( num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model == 'densenet': assert opt.model_depth in [121, 169, 201, 264] from models.densenet import get_fine_tuning_parameters if opt.model_depth == 121: model = densenet.densenet121(num_classes=opt.n_classes, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model_depth == 169: model = densenet.densenet169(num_classes=opt.n_classes, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model_depth == 201: model = densenet.densenet201(num_classes=opt.n_classes, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model_depth == 264: model = densenet.densenet264(num_classes=opt.n_classes, sample_size=opt.sample_size, sample_duration=opt.sample_duration) if not opt.no_cuda: model = model.cuda() model = nn.DataParallel(model, device_ids=None) if opt.pretrain_path: print('loading pretrained model {}'.format(opt.pretrain_path)) pretrain = torch.load(opt.pretrain_path) assert opt.arch == pretrain['arch'] model.load_state_dict(pretrain['state_dict']) if opt.model == 'densenet': model.module.classifier = nn.Linear( model.module.classifier.in_features, opt.n_finetune_classes) model.module.classifier = model.module.classifier.cuda() else: model.module.fc = nn.Linear(model.module.fc.in_features, opt.n_finetune_classes) model.module.fc = model.module.fc.cuda() parameters = get_fine_tuning_parameters(model, opt.ft_begin_index) return model, parameters else: if opt.pretrain_path: print('loading pretrained model {}'.format(opt.pretrain_path)) pretrain = torch.load(opt.pretrain_path, map_location='cpu') assert opt.arch == pretrain['arch'] from collections import OrderedDict new_state_dict = OrderedDict() for k, v in pretrain['state_dict'].items(): name = k[7:] # remove `module.` new_state_dict[name] = v # load params model.load_state_dict(new_state_dict) #model.load_state_dict(pretrain['state_dict']) if opt.model == 'densenet': model.classifier = nn.Linear(model.classifier.in_features, opt.n_finetune_classes) else: model.fc = nn.Linear(model.fc.in_features, opt.n_finetune_classes) parameters = get_fine_tuning_parameters(model, opt.ft_begin_index) return model, parameters return model, model.parameters()
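A hedged sketch of the option object this 3D-model factory consumes; all attribute values here are assumptions chosen to exercise the DenseNet-121 branch without a pretrained checkpoint, not defaults taken from the project.

from types import SimpleNamespace

# Illustrative only: none of these values are project defaults.
opt = SimpleNamespace(
    model='densenet', model_depth=121,
    n_classes=400, n_finetune_classes=2,
    sample_size=112, sample_duration=16,
    resnet_shortcut='B', wide_resnet_k=2, resnext_cardinality=32,
    no_cuda=True, pretrain_path='', ft_begin_index=0, arch='densenet-121')

# With an empty pretrain_path the factory falls through to the last return
# and hands back the full parameter set.
model, parameters = generate_model(opt)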
def main(df): # parser = argparse.ArgumentParser() # parser.add_argument('--batchSz', type=int, default=64) # parser.add_argument('--nEpochs', type=int, default=300) # parser.add_argument('--cuda', default=True) # parser.add_argument('--save') # parser.add_argument('--seed', type=int, default=1) # parser.add_argument('--opt', type=str, default='sgd', # choices=('sgd', 'adam', 'rmsprop')) # args = parser.parse_args() # args.cuda = not args.no_cuda and torch.cuda.is_available() # args.save = args.save or 'work/densenet.base' setproctitle.setproctitle(args.save) torch.manual_seed(args.seed) if args.cuda: torch.cuda.manual_seed(args.seed) if os.path.exists(args.save): shutil.rmtree(args.save) os.makedirs(args.save, exist_ok=True) normMean = [0.49139968, 0.48215827, 0.44653124] normStd = [0.24703233, 0.24348505, 0.26158768] normTransform = transforms.Normalize(normMean, normStd) trainTransform = transforms.Compose([ transforms.ToPILImage(), transforms.RandomCrop(224), transforms.RandomHorizontalFlip(), transforms.ToTensor(), normTransform ]) testTransform = transforms.Compose([ transforms.ToPILImage(), transforms.RandomCrop(224), transforms.ToTensor(), normTransform ]) df = df.sample(frac=1).reset_index(drop=True) # df = df[:int(len(df)*0.1)] train_df = df[:int(len(df) * 0.7)] test_df = df[int(len(df) * 0.7):].reset_index(drop=True) kwargs = {'num_workers': 0, 'pin_memory': True} if args.cuda else {} trainset = CustomDataset( df=train_df, root_dir=r'F:\FDAI\NVH\NVH_data\AI_Exterior_Windnoise_image', transform=trainTransform) testset = CustomDataset( df=test_df, root_dir=r'F:\FDAI\NVH\NVH_data\AI_Exterior_Windnoise_image', transform=testTransform) trainLoader = DataLoader(trainset, batch_size=args.batchSz, shuffle=True, **kwargs) testLoader = DataLoader(testset, batch_size=args.batchSz, shuffle=False, **kwargs) net = densenet.densenet121() net = nn.DataParallel(net) print(' + Number of params: {}'.format( sum([p.data.nelement() for p in net.parameters()]))) if args.cuda: net = net.cuda() if args.opt == 'sgd': optimizer = optim.SGD(net.parameters(), lr=1e-1, momentum=0.9, weight_decay=1e-4) elif args.opt == 'adam': optimizer = optim.Adam(net.parameters(), weight_decay=1e-4) elif args.opt == 'rmsprop': optimizer = optim.RMSprop(net.parameters(), weight_decay=1e-4) trainF = open(os.path.join(args.save, 'train.csv'), 'w') testF = open(os.path.join(args.save, 'test.csv'), 'w') for epoch in range(1, args.nEpochs + 1): adjust_opt(args.opt, optimizer, epoch) train(args, epoch, net, trainLoader, optimizer, trainF) test(args, epoch, net, testLoader, optimizer, testF) torch.save(net, os.path.join(args.save, 'latest.pth')) os.system('./plot.py {} &'.format(args.save)) trainF.close() testF.close()
def generate_model(opt): assert opt.model in [ 'resnet', 'preresnet', 'wideresnet', 'resnext', 'densenet' ] if opt.model == 'resnet': assert opt.model_depth in [10, 18, 34, 50, 101, 152, 200] from models.my_resnet_v2 import get_fine_tuning_parameters if opt.model_depth == 10: model = resnet.resnet10( num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model_depth == 18: model = resnet.resnet18( num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model_depth == 34: model = resnet.resnet34( num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model_depth == 50: model = resnet.resnet50( num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model_depth == 101: model = resnet.resnet101( num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model_depth == 152: model = resnet.resnet152( num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model_depth == 200: model = resnet.resnet200( num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model == 'wideresnet': assert opt.model_depth in [50] from models.wide_resnet import get_fine_tuning_parameters if opt.model_depth == 50: model = wide_resnet.resnet50( num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, k=opt.wide_resnet_k, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model == 'resnext': assert opt.model_depth in [50, 101, 152] from models.resnext import get_fine_tuning_parameters if opt.model_depth == 50: model = resnext.resnet50( num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, cardinality=opt.resnext_cardinality, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model_depth == 101: model = resnext.resnet101( num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, cardinality=opt.resnext_cardinality, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model_depth == 152: model = resnext.resnet152( num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, cardinality=opt.resnext_cardinality, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model == 'preresnet': assert opt.model_depth in [18, 34, 50, 101, 152, 200] from models.pre_act_resnet import get_fine_tuning_parameters if opt.model_depth == 18: model = pre_act_resnet.resnet18( num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model_depth == 34: model = pre_act_resnet.resnet34( num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model_depth == 50: model = pre_act_resnet.resnet50( num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model_depth == 101: model = pre_act_resnet.resnet101( num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model_depth 
== 152: model = pre_act_resnet.resnet152( num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model_depth == 200: model = pre_act_resnet.resnet200( num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model == 'densenet': assert opt.model_depth in [121, 169, 201, 264] from models.densenet import get_fine_tuning_parameters if opt.model_depth == 121: model = densenet.densenet121( num_classes=opt.n_classes, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model_depth == 169: model = densenet.densenet169( num_classes=opt.n_classes, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model_depth == 201: model = densenet.densenet201( num_classes=opt.n_classes, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model_depth == 264: model = densenet.densenet264( num_classes=opt.n_classes, sample_size=opt.sample_size, sample_duration=opt.sample_duration) if not opt.no_cuda: model = model.cuda() model = nn.DataParallel(model, device_ids=None) if opt.pretrain_path: print('loading pretrained model {}'.format(opt.pretrain_path)) pretrain = torch.load(opt.pretrain_path) assert opt.arch == pretrain['arch'] pretrain_dict = pretrain['state_dict'] model_dict = model.state_dict() pretrain_dict = {k: v for k, v in pretrain_dict.items() if k in model_dict} # print(pretrain_dict.keys()) model_dict.update(pretrain_dict) model.load_state_dict(model_dict) # model.load_state_dict(pretrain['state_dict']) # if opt.model == 'densenet': # model.module.classifier = nn.Linear( # model.module.classifier.in_features, opt.n_finetune_classes) # model.module.classifier = model.module.classifier.cuda() # else: # model.module.fc = nn.Linear(model.module.fc.in_features, # opt.n_finetune_classes) # model.module.fc = model.module.fc.cuda() parameters = get_fine_tuning_parameters(model, opt.ft_begin_index) return model, parameters else: if opt.pretrain_path: print('loading pretrained model {}'.format(opt.pretrain_path)) pretrain = torch.load(opt.pretrain_path) assert opt.arch == pretrain['arch'] pretrain_dict = pretrain['state_dict'] model_dict = model.state_dict() pretrain_dict = {k: v for k, v in pretrain_dict.items() if k in model_dict} model_dict.update(pretrain_dict) model.load_state_dict(model_dict) # model.load_state_dict(pretrain['state_dict']) # if opt.model == 'densenet': # model.classifier = nn.Linear( # model.classifier.in_features, opt.n_finetune_classes) # else: # model.fc = nn.Linear(model.fc.in_features, # opt.n_finetune_classes) parameters = get_fine_tuning_parameters(model, opt.ft_begin_index) return model, parameters return model, model.parameters()  # if there is no pretrain_path, return all of the model's parameters
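Unlike the earlier variant, this one loads pretrained weights by filtering keys rather than rebuilding the classifier. A stand-alone sketch of that pattern; the checkpoint layout with a 'state_dict' entry follows the snippets above, while the helper name and the added shape check are assumptions.

import torch

def load_matching_weights(model, checkpoint_path):
    # Copy only those pretrained tensors whose names and shapes also exist
    # in the current model, leaving any mismatched head untouched.
    pretrain_dict = torch.load(checkpoint_path, map_location='cpu')['state_dict']
    model_dict = model.state_dict()
    matched = {k: v for k, v in pretrain_dict.items()
               if k in model_dict and v.shape == model_dict[k].shape}
    model_dict.update(matched)
    model.load_state_dict(model_dict)
    return model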
def generate_model(opt): assert opt.model in [ 'resnet', 'preresnet', 'wideresnet', 'resnext', 'densenet' ] ################################################################### # ResNet ################################################################### if opt.model == 'resnet': assert opt.model_depth in [10, 18, 34, 50, 101, 152, 200] from models.resnet import get_fine_tuning_parameters if opt.model_depth == 10: model = resnet.resnet10( num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model_depth == 18: model = resnet.resnet18( num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model_depth == 34: model = resnet.resnet34( num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model_depth == 50: model = resnet.resnet50( num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model_depth == 101: model = resnet.resnet101( num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model_depth == 152: model = resnet.resnet152( num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model_depth == 200: model = resnet.resnet200( num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, sample_size=opt.sample_size, sample_duration=opt.sample_duration) ################################################################### # Wider ResNet ################################################################### elif opt.model == 'wideresnet': assert opt.model_depth in [50] from models.wide_resnet import get_fine_tuning_parameters if opt.model_depth == 50: model = wide_resnet.resnet50( num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, k=opt.wide_resnet_k, sample_size=opt.sample_size, sample_duration=opt.sample_duration) ################################################################### # ResNext ################################################################### elif opt.model == 'resnext': assert opt.model_depth in [50, 101, 152] from models.resnext import get_fine_tuning_parameters if opt.model_depth == 50: model = resnext.resnet50( num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, cardinality=opt.resnext_cardinality, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model_depth == 101: model = resnext.resnet101( num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, cardinality=opt.resnext_cardinality, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model_depth == 152: model = resnext.resnet152( num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, cardinality=opt.resnext_cardinality, sample_size=opt.sample_size, sample_duration=opt.sample_duration) ################################################################### # Pre-ResNet ################################################################### elif opt.model == 'preresnet': assert opt.model_depth in [18, 34, 50, 101, 152, 200] from models.pre_act_resnet import get_fine_tuning_parameters if opt.model_depth == 18: model = pre_act_resnet.resnet18( num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif 
opt.model_depth == 34: model = pre_act_resnet.resnet34( num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model_depth == 50: model = pre_act_resnet.resnet50( num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model_depth == 101: model = pre_act_resnet.resnet101( num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model_depth == 152: model = pre_act_resnet.resnet152( num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model_depth == 200: model = pre_act_resnet.resnet200( num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, sample_size=opt.sample_size, sample_duration=opt.sample_duration) ################################################################### # DenseNet ################################################################### elif opt.model == 'densenet': assert opt.model_depth in [121, 169, 201, 264] from models.densenet import get_fine_tuning_parameters if opt.model_depth == 121: model = densenet.densenet121( num_classes=opt.n_classes, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model_depth == 169: model = densenet.densenet169( num_classes=opt.n_classes, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model_depth == 201: model = densenet.densenet201( num_classes=opt.n_classes, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model_depth == 264: model = densenet.densenet264( num_classes=opt.n_classes, sample_size=opt.sample_size, sample_duration=opt.sample_duration) ################################################################### # Finalizing the model ################################################################### if not opt.no_cuda: model = model.cuda() model = nn.DataParallel(model, device_ids=opt.device_ids) if opt.pretrain_path: print('loading pretrained model {}'.format(opt.pretrain_path)) pretrain = torch.load(opt.pretrain_path) assert opt.arch == pretrain['arch'] # ensure that pretrain model is the same architecture model.load_state_dict(pretrain['state_dict']) # change the fc layer output size if opt.model == 'densenet': model.module.classifier = nn.Linear( model.module.classifier.in_features, opt.n_finetune_classes) model.module.classifier = model.module.classifier.cuda() else: model.module.fc = nn.Linear(model.module.fc.in_features, opt.n_finetune_classes) model.module.fc = model.module.fc.cuda() parameters = get_fine_tuning_parameters(model, opt.ft_begin_index) return model, parameters else: if opt.pretrain_path: print('loading pretrained model {}'.format(opt.pretrain_path)) pretrain = torch.load(opt.pretrain_path) assert opt.arch == pretrain['arch'] model.load_state_dict(pretrain['state_dict']) if opt.model == 'densenet': model.classifier = nn.Linear( model.classifier.in_features, opt.n_finetune_classes) else: model.fc = nn.Linear(model.fc.in_features, opt.n_finetune_classes) parameters = get_fine_tuning_parameters(model, opt.ft_begin_index) return model, parameters return model, model.parameters()
def generate_model(opt): assert opt.model in [ 'resnet', 'preresnet', 'wideresnet', 'resnext', 'resnext_fa', 'densenet', 'p3d' ] if opt.model == 'resnet': assert opt.model_depth in [10, 18, 34, 50, 101, 152, 200] from models.resnet import get_fine_tuning_parameters if opt.model_depth == 10: model = resnet.resnet10( num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model_depth == 18: model = resnet.resnet18( num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model_depth == 34: model = resnet.resnet34( num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model_depth == 50: model = resnet.resnet50( num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model_depth == 101: model = resnet.resnet101( num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model_depth == 152: model = resnet.resnet152( num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model_depth == 200: model = resnet.resnet200( num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model == 'wideresnet': assert opt.model_depth in [50] from models.wide_resnet import get_fine_tuning_parameters if opt.model_depth == 50: model = wide_resnet.resnet50( num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, k=opt.wide_resnet_k, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model == 'p3d': assert opt.model_depth in [50, 101, 152] if opt.model_depth == 50: model = p3d.P3D63(num_classes=opt.n_classes) elif opt.model_depth == 101: model = p3d.P3D131(num_classes=opt.n_classes) elif opt.model_depth == 152: model = p3d.P3D199(num_classes=opt.n_classes) elif opt.model == 'resnext': assert opt.model_depth in [50, 101, 152] from models.resnext_fa import get_fine_tuning_parameters if opt.model_depth == 50: model = resnext.resnet50( num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, cardinality=opt.resnext_cardinality, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model_depth == 101: model = resnext.resnet101( num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, cardinality=opt.resnext_cardinality, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model_depth == 152: model = resnext.resnet152( num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, cardinality=opt.resnext_cardinality, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model == 'resnext_fa': assert opt.model_depth in [50, 101, 152] from models.resnext_fa import get_fine_tuning_parameters, get_fine_tuning_parameters_fa if opt.model_depth == 50: model = resnext_fa.resnet50( num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, cardinality=opt.resnext_cardinality, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model_depth == 101: model = resnext_fa.resnet101( num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, cardinality=opt.resnext_cardinality, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif 
opt.model_depth == 152: model = resnext_fa.resnet152( num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, cardinality=opt.resnext_cardinality, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model == 'preresnet': assert opt.model_depth in [18, 34, 50, 101, 152, 200] from models.pre_act_resnet import get_fine_tuning_parameters if opt.model_depth == 18: model = pre_act_resnet.resnet18( num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model_depth == 34: model = pre_act_resnet.resnet34( num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model_depth == 50: model = pre_act_resnet.resnet50( num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model_depth == 101: model = pre_act_resnet.resnet101( num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model_depth == 152: model = pre_act_resnet.resnet152( num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model_depth == 200: model = pre_act_resnet.resnet200( num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model == 'densenet': assert opt.model_depth in [121, 169, 201, 264] from models.densenet import get_fine_tuning_parameters if opt.model_depth == 121: model = densenet.densenet121( num_classes=opt.n_classes, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model_depth == 169: model = densenet.densenet169( num_classes=opt.n_classes, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model_depth == 201: model = densenet.densenet201( num_classes=opt.n_classes, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model_depth == 264: model = densenet.densenet264( num_classes=opt.n_classes, sample_size=opt.sample_size, sample_duration=opt.sample_duration) if not opt.no_cuda: model = model.cuda() model = nn.DataParallel(model, device_ids=None) if opt.pretrain_path: print('loading pretrained model {}'.format(opt.pretrain_path)) pretrain = torch.load(opt.pretrain_path) #pdb.set_trace(); #assert opt.arch == pretrain['arch'] model_dict = model.state_dict(); #pdb.set_trace(); model_dict.update(pretrain['state_dict']); model.load_state_dict(model_dict); #model.load_state_dict(pretrain['state_dict']) if opt.model == 'densenet': model.module.classifier = nn.Linear( model.module.classifier.in_features, opt.n_finetune_classes) model.module.classifier = model.module.classifier.cuda() # do not need to add new fc layer when finetuning model has the same class num elif (opt.n_classes != opt.n_finetune_classes): model.module.fc = nn.Linear(model.module.fc.in_features, opt.n_finetune_classes) model.module.fc = model.module.fc.cuda() if (opt.model == 'resnext_fa'): parameters = get_fine_tuning_parameters_fa(model, opt.learning_rate) else: parameters = get_fine_tuning_parameters(model, opt.ft_begin_index) return model, parameters else: if opt.pretrain_path: print('loading pretrained model {}'.format(opt.pretrain_path)) pretrain = torch.load(opt.pretrain_path) assert opt.arch == pretrain['arch'] model.load_state_dict(pretrain['state_dict']) if opt.model == 
'densenet': model.classifier = nn.Linear( model.classifier.in_features, opt.n_finetune_classes) else: model.fc = nn.Linear(model.fc.in_features, opt.n_finetune_classes) parameters = get_fine_tuning_parameters(model, opt.ft_begin_index) return model, parameters return model, model.parameters()
def generate_model(opt): assert opt.model in [ 'resnet', 'preresnet', 'wideresnet', 'resnext', 'densenet', 'mobilenet', 'mobilenetv2' ] if opt.model == 'resnet': assert opt.model_depth in [10, 18, 34, 50, 101, 152, 200] from models.resnet import get_fine_tuning_parameters if opt.model_depth == 10: model = resnet.resnet10(num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model_depth == 18: model = resnet.resnet18(num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model_depth == 34: model = resnet.resnet34(num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model_depth == 50: model = resnet.resnet50(num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model_depth == 101: model = resnet.resnet101(num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model_depth == 152: model = resnet.resnet152(num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model_depth == 200: model = resnet.resnet200(num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model == 'wideresnet': assert opt.model_depth in [50] from models.wide_resnet import get_fine_tuning_parameters if opt.model_depth == 50: model = wide_resnet.resnet50(num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, k=opt.wide_resnet_k, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model == 'resnext': assert opt.model_depth in [50, 101, 152] from models.resnext import get_fine_tuning_parameters if opt.model_depth == 50: model = resnext.resnet50(num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, cardinality=opt.resnext_cardinality, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model_depth == 101: model = resnext.resnet101(num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, cardinality=opt.resnext_cardinality, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model_depth == 152: model = resnext.resnet152(num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, cardinality=opt.resnext_cardinality, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model == 'preresnet': assert opt.model_depth in [18, 34, 50, 101, 152, 200] from models.pre_act_resnet import get_fine_tuning_parameters if opt.model_depth == 18: model = pre_act_resnet.resnet18( num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model_depth == 34: model = pre_act_resnet.resnet34( num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model_depth == 50: model = pre_act_resnet.resnet50( num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model_depth == 101: model = pre_act_resnet.resnet101( num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif 
opt.model_depth == 152: model = pre_act_resnet.resnet152( num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model_depth == 200: model = pre_act_resnet.resnet200( num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model == 'densenet': assert opt.model_depth in [121, 169, 201, 264] from models.densenet import get_fine_tuning_parameters if opt.model_depth == 121: model = densenet.densenet121(num_classes=opt.n_classes, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model_depth == 169: model = densenet.densenet169(num_classes=opt.n_classes, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model_depth == 201: model = densenet.densenet201(num_classes=opt.n_classes, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model_depth == 264: model = densenet.densenet264(num_classes=opt.n_classes, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model == 'mobilenet': from models.mobilenet import get_fine_tuning_parameters model = mobilenet.get_model(num_classes=opt.n_classes, sample_size=opt.sample_size, width_mult=opt.width_mult) elif opt.model == 'mobilenetv2': from models.mobilenetv2 import get_fine_tuning_parameters model = mobilenetv2.get_model(num_classes=opt.n_classes, sample_size=opt.sample_size, width_mult=opt.width_mult) if not opt.no_cuda: if not opt.no_cuda_predict: model = model.cuda() model = nn.DataParallel(model, device_ids=None) if opt.pretrain_path: print('loading pretrained model {}'.format(opt.pretrain_path)) pretrain = torch.load(opt.pretrain_path) print("Pretrain arch", pretrain['arch']) assert opt.arch == pretrain['arch'] model.load_state_dict(pretrain['state_dict']) ft_begin_index = opt.ft_begin_index if opt.model in [ 'mobilenet', 'mobilenetv2', 'shufflenet', 'shufflenetv2' ]: model.module.classifier = nn.Sequential( nn.Dropout(0.9), nn.Linear(model.module.classifier[1].in_features, opt.n_finetune_classes)) model.module.classifier = model.module.classifier.cuda() ft_begin_index = 'complete' if ft_begin_index == 0 else 'last_layer' elif opt.model == 'densenet': model.module.classifier = nn.Linear( model.module.classifier.in_features, opt.n_finetune_classes) model.module.classifier = model.module.classifier.cuda() else: model.module.fc = nn.Linear(model.module.fc.in_features, opt.n_finetune_classes) model.module.fc = model.module.fc.cuda() print("Finetuning at:", ft_begin_index) parameters = get_fine_tuning_parameters(model, ft_begin_index) return model, parameters else: if opt.pretrain_path: print('loading pretrained model {}'.format(opt.pretrain_path)) pretrain = torch.load(opt.pretrain_path) assert opt.arch == pretrain['arch'] model.load_state_dict(pretrain['state_dict']) ft_begin_index = opt.ft_begin_index if opt.model in [ 'mobilenet', 'mobilenetv2', 'shufflenet', 'shufflenetv2' ]: model.module.classifier = nn.Sequential( nn.Dropout(0.9), nn.Linear(model.module.classifier[1].in_features, opt.n_finetune_classes)) model.module.classifier = model.module.classifier.cuda() ft_begin_index = 'complete' if ft_begin_index == 0 else 'last_layer' elif opt.model == 'densenet': model.classifier = nn.Linear(model.classifier.in_features, opt.n_finetune_classes) else: model.fc = nn.Linear(model.fc.in_features, opt.n_finetune_classes) print("Finetuning at:", ft_begin_index) parameters = get_fine_tuning_parameters(model, 
ft_begin_index) return model, parameters return model, model.parameters()
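The MobileNet branches above collapse ft_begin_index into the strings 'complete' or 'last_layer'. A generic sketch, not the project's get_fine_tuning_parameters, of what a 'last_layer' policy amounts to: freeze the backbone and optimize only the freshly created classifier head.

def last_layer_parameters(model):
    # Freeze everything, then re-enable gradients for the replaced head only.
    for p in model.parameters():
        p.requires_grad = False
    head = model.module.classifier if hasattr(model, 'module') else model.classifier
    for p in head.parameters():
        p.requires_grad = True
    return [p for p in model.parameters() if p.requires_grad]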
def get_network(args, use_gpu=True): """ return given network """ if args.net == 'vgg16': from models.vgg import vgg16_bn net = vgg16_bn() elif args.net == 'vgg13': from models.vgg import vgg13_bn net = vgg13_bn() elif args.net == 'vgg11': from models.vgg import vgg11_bn net = vgg11_bn() elif args.net == 'vgg19': from models.vgg import vgg19_bn net = vgg19_bn() elif args.net == 'densenet121': from models.densenet import densenet121 net = densenet121() elif args.net == 'densenet161': from models.densenet import densenet161 net = densenet161() elif args.net == 'densenet169': from models.densenet import densenet169 net = densenet169() elif args.net == 'densenet201': from models.densenet import densenet201 net = densenet201() elif args.net == 'googlenet': from models.googlenet import googlenet net = googlenet() elif args.net == 'inceptionv3': from models.inceptionv3 import inceptionv3 net = inceptionv3() elif args.net == 'inceptionv4': from models.inceptionv4 import inceptionv4 net = inceptionv4() elif args.net == 'inceptionresnetv2': from models.inceptionv4 import inception_resnet_v2 net = inception_resnet_v2() elif args.net == 'xception': from models.xception import xception net = xception() elif args.net == 'resnet18': from models.resnet import resnet18 net = resnet18() elif args.net == 'resnet34': from models.resnet import resnet34 net = resnet34() elif args.net == 'resnet50': from models.resnet import resnet50 net = resnet50() elif args.net == 'resnet101': from models.resnet import resnet101 net = resnet101() elif args.net == 'resnet152': from models.resnet import resnet152 net = resnet152() elif args.net == 'preactresnet18': from models.preactresnet import preactresnet18 net = preactresnet18() elif args.net == 'preactresnet34': from models.preactresnet import preactresnet34 net = preactresnet34() elif args.net == 'preactresnet50': from models.preactresnet import preactresnet50 net = preactresnet50() elif args.net == 'preactresnet101': from models.preactresnet import preactresnet101 net = preactresnet101() elif args.net == 'preactresnet152': from models.preactresnet import preactresnet152 net = preactresnet152() elif args.net == 'resnext50': from models.resnext import resnext50 net = resnext50() elif args.net == 'resnext101': from models.resnext import resnext101 net = resnext101() elif args.net == 'resnext152': from models.resnext import resnext152 net = resnext152() elif args.net == 'shufflenet': from models.shufflenet import shufflenet net = shufflenet() elif args.net == 'shufflenetv2': from models.shufflenetv2 import shufflenetv2 net = shufflenetv2() elif args.net == 'squeezenet': from models.squeezenet import squeezenet net = squeezenet() elif args.net == 'mobilenet': from models.mobilenet import mobilenet net = mobilenet() elif args.net == 'mobilenetv2': from models.mobilenetv2 import mobilenetv2 net = mobilenetv2() elif args.net == 'nasnet': from models.nasnet import nasnet net = nasnet() elif args.net == 'attention56': from models.attention import attention56 net = attention56() elif args.net == 'attention92': from models.attention import attention92 net = attention92() elif args.net == 'seresnet18': from models.senet import seresnet18 net = seresnet18() elif args.net == 'seresnet34': from models.senet import seresnet34 net = seresnet34() elif args.net == 'seresnet50': from models.senet import seresnet50 net = seresnet50() elif args.net == 'seresnet101': from models.senet import seresnet101 net = seresnet101() elif args.net == 'seresnet152': from models.senet import seresnet152 
net = seresnet152() else: print('the network name you have entered is not supported yet') sys.exit() if use_gpu: net = net.cuda() return net
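A small usage sketch for this factory: only args.net is read, so a bare namespace is enough; the chosen network name and the parameter-count print are illustrative assumptions.

from types import SimpleNamespace

args = SimpleNamespace(net='densenet121')
net = get_network(args, use_gpu=False)
print(sum(p.numel() for p in net.parameters()), 'parameters')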
def get_network(netname, use_gpu=True): """ return given network """ if netname == 'vgg16': from models.vgg import vgg16_bn #! net = vgg16_bn() elif netname == 'vgg16_cbn': from models.vgg_nobn import vgg16_cbn #! net = vgg16_cbn() elif netname == 'vgg11': from models.vgg import vgg11_bn #! net = vgg11_bn() elif netname == 'vgg11_cbn': from models.vgg_nobn import vgg11_cbn #! net = vgg11_cbn() elif netname == 'vgg11_nobn': from models.vgg_nobn import vgg11_nobn #! net = vgg11_nobn() elif netname == 'vgg16_nobn': from models.vgg_nobn import vgg16_nobn #! net = vgg16_nobn() elif netname == 'resnet18': from models.resnet import resnet18 net = resnet18() elif netname == 'resnet50': from models.resnet import resnet50 net = resnet50() elif netname == 'resnet101': from models.resnet import resnet101 net = resnet101() elif netname == 'resnet18_nobn': from models.resnet_nobn import resnet18_nobn net = resnet18_nobn() elif netname == 'resnet18_fixup': from models.resnet_fixup import resnet18 net = resnet18() elif netname == 'resnet50_fixup': from models.resnet_fixup import resnet50 net = resnet50() elif netname == 'resnet18_cbn': from models.resnet_nobn import resnet18_cbn net = resnet18_cbn() elif netname == 'resnet50_cbn': from models.resnet_nobn import resnet50_cbn net = resnet50_cbn() elif netname == 'resnet50_nobn': from models.resnet_nobn import resnet50_nobn net = resnet50_nobn() elif netname == 'resnet101_cbn': from models.resnet_nobn import resnet101_cbn net = resnet101_cbn() elif netname == 'densenet121': from models.densenet import densenet121 net = densenet121() elif netname == 'densenet121_cbn': from models.densenet_nobn import densenet121 net = densenet121() elif netname == 'shufflenetv2': from models.shufflenetv2 import shufflenetv2 net = shufflenetv2() elif netname == 'shufflenetv2_cbn': from models.shufflenetv2_nobn import shufflenetv2_cbn net = shufflenetv2_cbn() elif netname == 'shufflenetv2_nobn': from models.shufflenetv2_nobn import shufflenetv2_nobn net = shufflenetv2_nobn() elif netname == 'squeezenet': from models.squeezenet import squeezenet net = squeezenet() elif netname == 'squeezenet_nobn': from models.squeezenet_nobn import squeezenet_nobn net = squeezenet_nobn() elif netname == 'squeezenet_cbn': from models.squeezenet_nobn import squeezenet_cbn net = squeezenet_cbn() elif netname == 'seresnet18': from models.senet import seresnet18 net = seresnet18() elif netname == 'seresnet50': from models.senet import seresnet50 net = seresnet50() elif netname == 'seresnet18_cbn': from models.senet_nobn import seresnet18 net = seresnet18() elif netname == 'seresnet50_cbn': from models.senet_nobn import seresnet50 net = seresnet50() elif netname == 'fixup_cbn': from models.fixup_resnet_cifar import fixup_resnet56 net = fixup_resnet56(cbn=True) elif netname == 'fixup': from models.fixup_resnet_cifar import fixup_resnet56 net = fixup_resnet56() elif netname == 'mobilenetv2': from models.mobilenetv2 import mobilenetv2 net = mobilenetv2() elif netname == 'mobilenetv2_cbn': from models.mobilenetv2_nobn import mobilenetv2 net = mobilenetv2() else: print(netname) print('the network name you have entered is not supported yet') sys.exit() if use_gpu: # net = torch.nn.parallel.DataParallel(net) net = net.cuda() return net
def generate_model(opt): assert opt.model in [ 'resnet', 'preresnet', 'wideresnet', 'resnext', 'densenet', 'i3d', 'i3dv2' ] if opt.model == 'resnet': assert opt.model_depth in [10, 18, 34, 50, 101, 152, 200] from models.resnet import get_fine_tuning_parameters if opt.model_depth == 10: model = resnet.resnet10(num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model_depth == 18: model = resnet.resnet18(num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model_depth == 34: model = resnet.resnet34(num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model_depth == 50: model = resnet.resnet50(num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model_depth == 101: model = resnet.resnet101(num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model_depth == 152: model = resnet.resnet152(num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model_depth == 200: model = resnet.resnet200(num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model == 'wideresnet': assert opt.model_depth in [50] from models.wide_resnet import get_fine_tuning_parameters if opt.model_depth == 50: model = wide_resnet.resnet50(num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, k=opt.wide_resnet_k, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model == 'resnext': assert opt.model_depth in [50, 101, 152] from models.resnext import get_fine_tuning_parameters if opt.model_depth == 50: model = resnext.resnet50(num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, cardinality=opt.resnext_cardinality, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model_depth == 101: model = resnext.resnet101(num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, cardinality=opt.resnext_cardinality, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model_depth == 152: model = resnext.resnet152(num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, cardinality=opt.resnext_cardinality, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model == 'preresnet': assert opt.model_depth in [18, 34, 50, 101, 152, 200] from models.pre_act_resnet import get_fine_tuning_parameters if opt.model_depth == 18: model = pre_act_resnet.resnet18( num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model_depth == 34: model = pre_act_resnet.resnet34( num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model_depth == 50: model = pre_act_resnet.resnet50( num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model_depth == 101: model = pre_act_resnet.resnet101( num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model_depth 
== 152: model = pre_act_resnet.resnet152( num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model_depth == 200: model = pre_act_resnet.resnet200( num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model == 'densenet': assert opt.model_depth in [121, 169, 201, 264] from models.densenet import get_fine_tuning_parameters if opt.model_depth == 121: model = densenet.densenet121(num_classes=opt.n_classes, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model_depth == 169: model = densenet.densenet169(num_classes=opt.n_classes, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model_depth == 201: model = densenet.densenet201(num_classes=opt.n_classes, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model_depth == 264: model = densenet.densenet264(num_classes=opt.n_classes, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model == "i3d": from models.i3dpt import get_fine_tuning_parameters model = i3dpt.I3D(num_classes=opt.n_classes, dropout_prob=0.5) elif opt.model == "i3dv2": from models.I3D_Pytorch import get_fine_tuning_parameters model = I3D_Pytorch.I3D(num_classes=opt.n_classes, dropout_keep_prob=0.5) if not opt.no_cuda: model = model.cuda() model = nn.DataParallel(model, device_ids=None) if opt.pretrain_path: print('loading pretrained model {}'.format(opt.pretrain_path)) pretrain = torch.load(opt.pretrain_path) if opt.model != "i3d" and opt.model != "i3dv2": assert opt.arch == pretrain['arch'] model.load_state_dict(pretrain['state_dict']) else: pretrain = {"module." + k: v for k, v in pretrain.items()} model_dict = model.state_dict() model_dict.update(pretrain) model.load_state_dict(model_dict) if opt.model == 'densenet': model.module.classifier = nn.Linear( model.module.classifier.in_features, opt.n_finetune_classes) model.module.classifier = model.module.classifier.cuda() else: model.module.fc = nn.Linear(model.module.fc.in_features, opt.n_finetune_classes) model.module.fc = model.module.fc.cuda() parameters = get_fine_tuning_parameters(model, opt.ft_begin_index) return model, parameters else: if opt.pretrain_path: print('loading pretrained model {}'.format(opt.pretrain_path)) pretrain = torch.load(opt.pretrain_path) assert opt.arch == pretrain['arch'] model.load_state_dict(pretrain['state_dict']) if opt.model == 'densenet': model.classifier = nn.Linear(model.classifier.in_features, opt.n_finetune_classes) else: model.fc = nn.Linear(model.fc.in_features, opt.n_finetune_classes) parameters = get_fine_tuning_parameters(model, opt.ft_begin_index) return model, parameters return model, model.parameters()
def get_network(args, use_gpu=True):
    """ return given network
    """

    if args.net == 'mobilenet':
        from models.mobilenet import mobilenet
        net = mobilenet(args)
    elif args.net == 'mobilenetv2':
        from models.mobilenetv2 import mobilenetv2
        net = mobilenetv2(args)
    elif args.net == 'vgg13':
        from models.vgg import vgg13_bn
        net = vgg13_bn(args)
    elif args.net == 'vgg11':
        from models.vgg import vgg11_bn
        net = vgg11_bn(args)
    elif args.net == 'vgg19':
        # from models.vgg import vgg19_bn
        # net = vgg19_bn(args)
        from torchvision.models import vgg19_bn
        import torch.nn as nn
        net = vgg19_bn(pretrained=True)
        net.classifier[6] = nn.Linear(4096, args.nc)
    elif args.net == 'densenet121':
        from models.densenet import densenet121
        net = densenet121(args)
    elif args.net == 'densenet161':
        from models.densenet import densenet161
        net = densenet161(args)
    elif args.net == 'densenet169':
        from models.densenet import densenet169
        net = densenet169(args)
    elif args.net == 'densenet201':
        from models.densenet import densenet201
        net = densenet201(args)
    elif args.net == 'googlenet':
        from models.googlenet import googlenet
        net = googlenet(args)
    elif args.net == 'inceptionv3':
        from models.inceptionv3 import inceptionv3
        net = inceptionv3(args)
    elif args.net == 'inceptionv4':
        from models.inceptionv4 import inceptionv4
        net = inceptionv4(args)
    elif args.net == 'inceptionresnetv2':
        from models.inceptionv4 import inception_resnet_v2
        net = inception_resnet_v2(args)
    elif args.net == 'xception':
        from models.xception import xception
        net = xception(args)
    elif args.net == 'resnet18':
        # from models.resnet import resnet18
        # net = resnet18(args)
        from torchvision.models import resnet18
        import torch.nn as nn
        net = resnet18(pretrained=True)
        net.fc = nn.Linear(512, args.nc)
    elif args.net == 'resnet34':
        from models.resnet import resnet34
        net = resnet34(args)
    elif args.net == 'resnet50':
        from models.resnet import resnet50
        net = resnet50(args)
    elif args.net == 'resnet101':
        from models.resnet import resnet101
        net = resnet101(args)
    elif args.net == 'resnet152':
        from models.resnet import resnet152
        net = resnet152(args)
    elif args.net == 'preactresnet18':
        from models.preactresnet import preactresnet18
        net = preactresnet18(args)
    elif args.net == 'preactresnet34':
        from models.preactresnet import preactresnet34
        net = preactresnet34(args)
    elif args.net == 'preactresnet50':
        from models.preactresnet import preactresnet50
        net = preactresnet50(args)
    elif args.net == 'preactresnet101':
        from models.preactresnet import preactresnet101
        net = preactresnet101(args)
    elif args.net == 'preactresnet152':
        from models.preactresnet import preactresnet152
        net = preactresnet152(args)
    elif args.net == 'resnext50':
        from models.resnext import resnext50
        net = resnext50(args)
    elif args.net == 'resnext101':
        from models.resnext import resnext101
        net = resnext101(args)
    elif args.net == 'resnext152':
        from models.resnext import resnext152
        net = resnext152(args)
    elif args.net == 'shufflenet':
        from models.shufflenet import shufflenet
        net = shufflenet(args)
    elif args.net == 'shufflenetv2':
        from models.shufflenetv2 import shufflenetv2
        net = shufflenetv2(args)
    elif args.net == 'squeezenet':
        from models.squeezenet import squeezenet
        net = squeezenet(args)
    elif args.net == 'mobilenet':
        # duplicate of the first 'mobilenet' branch above; never reached
        from models.mobilenet import mobilenet
        net = mobilenet(args)
    elif args.net == 'mobilenetv2':
        # duplicate of the 'mobilenetv2' branch above; never reached
        from models.mobilenetv2 import mobilenetv2
        net = mobilenetv2(args)
    elif args.net == 'mobilenetv3':
        from models.mobilenetv3 import mobileNetv3
        net = mobileNetv3(args)
    elif args.net == 'mobilenetv3_l':
        from models.mobilenetv3 import mobileNetv3
        net = mobileNetv3(args, mode='large')
    elif args.net == 'mobilenetv3_s':
        from models.mobilenetv3 import mobileNetv3
        net = mobileNetv3(args, mode='small')
    elif args.net == 'nasnet':
        from models.nasnet import nasnetalarge
        net = nasnetalarge(args)
    elif args.net == 'attention56':
        from models.attention import attention56
        net = attention56(args)
    elif args.net == 'attention92':
        from models.attention import attention92
        net = attention92(args)
    elif args.net == 'seresnet18':
        from models.senet import seresnet18
        net = seresnet18(args)
    elif args.net == 'seresnet34':
        from models.senet import seresnet34
        net = seresnet34(args)
    elif args.net == 'seresnet50':
        from models.senet import seresnet50
        net = seresnet50(args)
    elif args.net == 'seresnet101':
        from models.senet import seresnet101
        net = seresnet101(args)
    elif args.net == 'seresnet152':
        from models.senet import seresnet152
        net = seresnet152(args)
    elif args.net.lower() == 'sqnxt_23_1x':
        from models.SqueezeNext import SqNxt_23_1x
        net = SqNxt_23_1x(args)
    elif args.net.lower() == 'sqnxt_23_1xv5':
        from models.SqueezeNext import SqNxt_23_1x_v5
        net = SqNxt_23_1x_v5(args)
    elif args.net.lower() == 'sqnxt_23_2x':
        from models.SqueezeNext import SqNxt_23_2x
        net = SqNxt_23_2x(args)
    elif args.net.lower() == 'sqnxt_23_2xv5':
        from models.SqueezeNext import SqNxt_23_2x_v5
        net = SqNxt_23_2x_v5(args)
    elif args.net.lower() == 'mnasnet':
        # from models.MnasNet import mnasnet
        # net = mnasnet(args)
        from models.nasnet_mobile import nasnet_Mobile
        net = nasnet_Mobile(args)
    elif args.net == 'efficientnet_b0':
        from models.efficientnet import efficientnet_b0
        net = efficientnet_b0(args)
    elif args.net == 'efficientnet_b1':
        from models.efficientnet import efficientnet_b1
        net = efficientnet_b1(args)
    elif args.net == 'efficientnet_b2':
        from models.efficientnet import efficientnet_b2
        net = efficientnet_b2(args)
    elif args.net == 'efficientnet_b3':
        from models.efficientnet import efficientnet_b3
        net = efficientnet_b3(args)
    elif args.net == 'efficientnet_b4':
        from models.efficientnet import efficientnet_b4
        net = efficientnet_b4(args)
    elif args.net == 'efficientnet_b5':
        from models.efficientnet import efficientnet_b5
        net = efficientnet_b5(args)
    elif args.net == 'efficientnet_b6':
        from models.efficientnet import efficientnet_b6
        net = efficientnet_b6(args)
    elif args.net == 'efficientnet_b7':
        from models.efficientnet import efficientnet_b7
        net = efficientnet_b7(args)
    elif args.net == 'mlp':
        from models.mlp import MLPClassifier
        net = MLPClassifier(args)
    elif args.net == 'alexnet':
        from torchvision.models import alexnet
        import torch.nn as nn
        net = alexnet(pretrained=True)
        net.classifier[6] = nn.Linear(4096, args.nc)
    elif args.net == 'lambda18':
        from models._lambda import LambdaResnet18
        net = LambdaResnet18(num_classes=args.nc, channels=args.cs)
    elif args.net == 'lambda34':
        from models._lambda import LambdaResnet34
        net = LambdaResnet34(num_classes=args.nc, channels=args.cs)
    elif args.net == 'lambda50':
        from models._lambda import LambdaResnet50
        net = LambdaResnet50(num_classes=args.nc, channels=args.cs)
    elif args.net == 'lambda101':
        from models._lambda import LambdaResnet101
        net = LambdaResnet101(num_classes=args.nc)
    elif args.net == 'lambda152':
        from models._lambda import LambdaResnet152
        net = LambdaResnet152(num_classes=args.nc, channels=args.cs)
    else:
        print('the network name you have entered is not supported yet')
        sys.exit()

    if use_gpu:
        net = net.cuda()

    return net
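# Illustrative usage sketch (not part of the original sources): get_network() only reads
# attributes such as args.net, args.nc (number of classes) and, for the lambda variants,
# args.cs, so a plain argparse Namespace is enough to drive it. The attribute values below
# are placeholders.
from argparse import Namespace

if __name__ == '__main__':
    args = Namespace(net='resnet18', nc=10, cs=3)
    net = get_network(args, use_gpu=False)  # set use_gpu=True on a CUDA machine
    print(net.fc)  # Linear(in_features=512, out_features=10, bias=True)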
def get_model(class_num):
    if (MODEL_TYPE == 'alexnet'):
        model = alexnet.alexnet(pretrained=FINETUNE)
    elif (MODEL_TYPE == 'vgg'):
        if (MODEL_DEPTH_OR_VERSION == 11):
            model = vgg.vgg11(pretrained=FINETUNE)
        elif (MODEL_DEPTH_OR_VERSION == 13):
            model = vgg.vgg13(pretrained=FINETUNE)
        elif (MODEL_DEPTH_OR_VERSION == 16):
            model = vgg.vgg16(pretrained=FINETUNE)
        elif (MODEL_DEPTH_OR_VERSION == 19):
            model = vgg.vgg19(pretrained=FINETUNE)
        else:
            print('Error : VGG should have depth of either [11, 13, 16, 19]')
            sys.exit(1)
    elif (MODEL_TYPE == 'squeezenet'):
        if (MODEL_DEPTH_OR_VERSION == 0 or MODEL_DEPTH_OR_VERSION == 'v0'):
            model = squeezenet.squeezenet1_0(pretrained=FINETUNE)
        elif (MODEL_DEPTH_OR_VERSION == 1 or MODEL_DEPTH_OR_VERSION == 'v1'):
            model = squeezenet.squeezenet1_1(pretrained=FINETUNE)
        else:
            print('Error : Squeezenet should have version of either [0, 1]')
            sys.exit(1)
    elif (MODEL_TYPE == 'resnet'):
        if (MODEL_DEPTH_OR_VERSION == 18):
            model = resnet.resnet18(pretrained=FINETUNE)
        elif (MODEL_DEPTH_OR_VERSION == 34):
            model = resnet.resnet34(pretrained=FINETUNE)
        elif (MODEL_DEPTH_OR_VERSION == 50):
            model = resnet.resnet50(pretrained=FINETUNE)
        elif (MODEL_DEPTH_OR_VERSION == 101):
            model = resnet.resnet101(pretrained=FINETUNE)
        elif (MODEL_DEPTH_OR_VERSION == 152):
            model = resnet.resnet152(pretrained=FINETUNE)
        else:
            print('Error : Resnet should have depth of either [18, 34, 50, 101, 152]')
            sys.exit(1)
    elif (MODEL_TYPE == 'densenet'):
        if (MODEL_DEPTH_OR_VERSION == 121):
            model = densenet.densenet121(pretrained=FINETUNE)
        elif (MODEL_DEPTH_OR_VERSION == 169):
            model = densenet.densenet169(pretrained=FINETUNE)
        elif (MODEL_DEPTH_OR_VERSION == 161):
            model = densenet.densenet161(pretrained=FINETUNE)
        elif (MODEL_DEPTH_OR_VERSION == 201):
            model = densenet.densenet201(pretrained=FINETUNE)
        else:
            print('Error : Densenet should have depth of either [121, 169, 161, 201]')
            sys.exit(1)
    elif (MODEL_TYPE == 'inception'):
        if (MODEL_DEPTH_OR_VERSION == 3 or MODEL_DEPTH_OR_VERSION == 'v3'):
            model = inception.inception_v3(pretrained=FINETUNE)
        else:
            print('Error : Inception should have version of either [3, ]')
            sys.exit(1)
    else:
        print('Error : Network should be either [alexnet / squeezenet / vgg / resnet / densenet / inception]')
        sys.exit(1)

    # Replace the final classification layer so it matches class_num outputs.
    if (MODEL_TYPE == 'alexnet' or MODEL_TYPE == 'vgg'):
        num_ftrs = model.classifier[6].in_features
        feature_model = list(model.classifier.children())
        feature_model.pop()
        feature_model.append(nn.Linear(num_ftrs, class_num))
        model.classifier = nn.Sequential(*feature_model)
    elif (MODEL_TYPE == 'resnet' or MODEL_TYPE == 'inception'):
        num_ftrs = model.fc.in_features
        model.fc = nn.Linear(num_ftrs, class_num)
    elif (MODEL_TYPE == 'densenet'):
        num_ftrs = model.classifier.in_features
        model.classifier = nn.Linear(num_ftrs, class_num)

    return model
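# Illustrative sketch (not part of the original sources): get_model() is driven by the
# module-level globals MODEL_TYPE, MODEL_DEPTH_OR_VERSION and FINETUNE rather than by
# arguments. For example, with MODEL_TYPE = 'densenet', MODEL_DEPTH_OR_VERSION = 121 and
# FINETUNE = True it builds an ImageNet-pretrained DenseNet-121 and swaps its classifier
# for the requested number of classes:
#
#     model = get_model(class_num=14)
#     print(model.classifier)   # Linear(in_features=1024, out_features=14, bias=True)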
import copy

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# alexnet = models.alexnet(pretrained=True)
# resnet18 = models.resnet18(pretrained=True).to(device)
# resnet18.device = device
# vgg16 = models.vgg16(pretrained=True)
# densenet = models.densenet161(pretrained=True)
# squeezenet = models.squeezenet1_0(pretrained=True)

resnet18 = resnet.resnet18(pretrained=True).to(device)
resnet18.device = device
resnet18.name = "resnet18"

densenet = densenet.densenet121(pretrained=True).to(device)
densenet.device = device
densenet.name = "densenet"

vgg16 = vgg.vgg16_bn(pretrained=True).to(device)
vgg16.device = device
vgg16.name = "vgg16"

net_list = [resnet18, densenet, vgg16]

normalize = transforms.Normalize(mean=[0.4914, 0.4822, 0.4465],
                                 std=[0.2023, 0.1994, 0.2010])
transform = transforms.Compose([transforms.ToTensor(), normalize])
testset = datasets.CIFAR10(root="./data", train=False, transform=transform)
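# Illustrative sketch (not part of the original sources): one way the networks collected in
# net_list above could be run over the CIFAR-10 test split. The batch size and the accuracy
# bookkeeping are assumptions, not taken from the original code.
testloader = torch.utils.data.DataLoader(testset, batch_size=128, shuffle=False)

for net in net_list:
    net.eval()
    correct, total = 0, 0
    with torch.no_grad():
        for images, labels in testloader:
            images, labels = images.to(net.device), labels.to(net.device)
            outputs = net(images)
            predicted = outputs.argmax(dim=1)
            total += labels.size(0)
            correct += (predicted == labels).sum().item()
    print(f"{net.name}: {100.0 * correct / total:.2f}% top-1 on the CIFAR-10 test set")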
def generate_model(opt):
    assert opt.model in [
        'resnet', 'preresnet', 'wideresnet', 'resnext', 'densenet'
    ]

    if opt.model == 'resnet':
        assert opt.model_depth in [10, 18, 34, 50, 101, 152, 200]

        from models.resnet import get_fine_tuning_parameters

        if opt.model_depth == 10:
            model = resnet.resnet10(num_classes=opt.n_classes,
                                    shortcut_type=opt.resnet_shortcut,
                                    sample_size=opt.sample_size,
                                    sample_duration=opt.sample_duration)
        elif opt.model_depth == 18:
            model = resnet.resnet18(num_classes=opt.n_classes,
                                    shortcut_type=opt.resnet_shortcut,
                                    sample_size=opt.sample_size,
                                    sample_duration=opt.sample_duration)
        elif opt.model_depth == 34:
            model = resnet.resnet34(num_classes=opt.n_classes,
                                    shortcut_type=opt.resnet_shortcut,
                                    sample_size=opt.sample_size,
                                    sample_duration=opt.sample_duration)
        elif opt.model_depth == 50:
            model = resnet.resnet50(num_classes=opt.n_classes,
                                    shortcut_type=opt.resnet_shortcut,
                                    sample_size=opt.sample_size,
                                    sample_duration=opt.sample_duration,
                                    model_type=opt.model_type)
        elif opt.model_depth == 101:
            model = resnet.resnet101(num_classes=opt.n_classes,
                                     shortcut_type=opt.resnet_shortcut,
                                     sample_size=opt.sample_size,
                                     sample_duration=opt.sample_duration,
                                     model_type=opt.model_type)
        elif opt.model_depth == 152:
            model = resnet.resnet152(num_classes=opt.n_classes,
                                     shortcut_type=opt.resnet_shortcut,
                                     sample_size=opt.sample_size,
                                     sample_duration=opt.sample_duration,
                                     model_type=opt.model_type)
        elif opt.model_depth == 200:
            model = resnet.resnet200(num_classes=opt.n_classes,
                                     shortcut_type=opt.resnet_shortcut,
                                     sample_size=opt.sample_size,
                                     sample_duration=opt.sample_duration,
                                     model_type=opt.model_type)
    elif opt.model == 'wideresnet':
        assert opt.model_depth in [50]

        from models.wide_resnet import get_fine_tuning_parameters

        if opt.model_depth == 50:
            model = wide_resnet.resnet50(num_classes=opt.n_classes,
                                         shortcut_type=opt.resnet_shortcut,
                                         k=opt.wide_resnet_k,
                                         sample_size=opt.sample_size,
                                         sample_duration=opt.sample_duration)
    elif opt.model == 'resnext':
        assert opt.model_depth in [50, 101, 152]

        from models.resnext import get_fine_tuning_parameters

        if opt.model_depth == 50:
            model = resnext.resnet50(num_classes=opt.n_classes,
                                     shortcut_type=opt.resnet_shortcut,
                                     cardinality=opt.resnext_cardinality,
                                     sample_size=opt.sample_size,
                                     sample_duration=opt.sample_duration)
        elif opt.model_depth == 101:
            model = resnext.resnet101(num_classes=opt.n_classes,
                                      shortcut_type=opt.resnet_shortcut,
                                      cardinality=opt.resnext_cardinality,
                                      sample_size=opt.sample_size,
                                      sample_duration=opt.sample_duration)
        elif opt.model_depth == 152:
            model = resnext.resnet152(num_classes=opt.n_classes,
                                      shortcut_type=opt.resnet_shortcut,
                                      cardinality=opt.resnext_cardinality,
                                      sample_size=opt.sample_size,
                                      sample_duration=opt.sample_duration)
    elif opt.model == 'preresnet':
        assert opt.model_depth in [18, 34, 50, 101, 152, 200]

        from models.pre_act_resnet import get_fine_tuning_parameters

        if opt.model_depth == 18:
            model = pre_act_resnet.resnet18(
                num_classes=opt.n_classes,
                shortcut_type=opt.resnet_shortcut,
                sample_size=opt.sample_size,
                sample_duration=opt.sample_duration)
        elif opt.model_depth == 34:
            model = pre_act_resnet.resnet34(
                num_classes=opt.n_classes,
                shortcut_type=opt.resnet_shortcut,
                sample_size=opt.sample_size,
                sample_duration=opt.sample_duration)
        elif opt.model_depth == 50:
            model = pre_act_resnet.resnet50(
                num_classes=opt.n_classes,
                shortcut_type=opt.resnet_shortcut,
                sample_size=opt.sample_size,
                sample_duration=opt.sample_duration)
        elif opt.model_depth == 101:
            model = pre_act_resnet.resnet101(
                num_classes=opt.n_classes,
                shortcut_type=opt.resnet_shortcut,
                sample_size=opt.sample_size,
                sample_duration=opt.sample_duration)
        elif opt.model_depth == 152:
            model = pre_act_resnet.resnet152(
                num_classes=opt.n_classes,
                shortcut_type=opt.resnet_shortcut,
                sample_size=opt.sample_size,
                sample_duration=opt.sample_duration)
        elif opt.model_depth == 200:
            model = pre_act_resnet.resnet200(
                num_classes=opt.n_classes,
                shortcut_type=opt.resnet_shortcut,
                sample_size=opt.sample_size,
                sample_duration=opt.sample_duration)
    elif opt.model == 'densenet':
        assert opt.model_depth in [121, 169, 201, 264]

        from models.densenet import get_fine_tuning_parameters

        if opt.model_depth == 121:
            model = densenet.densenet121(num_classes=opt.n_classes,
                                         sample_size=opt.sample_size,
                                         sample_duration=opt.sample_duration)
        elif opt.model_depth == 169:
            model = densenet.densenet169(num_classes=opt.n_classes,
                                         sample_size=opt.sample_size,
                                         sample_duration=opt.sample_duration)
        elif opt.model_depth == 201:
            model = densenet.densenet201(num_classes=opt.n_classes,
                                         sample_size=opt.sample_size,
                                         sample_duration=opt.sample_duration)
        elif opt.model_depth == 264:
            model = densenet.densenet264(num_classes=opt.n_classes,
                                         sample_size=opt.sample_size,
                                         sample_duration=opt.sample_duration)

    if not opt.no_cuda:
        import os
        # os.environ['CUDA_VISIBLE_DEVICES'] = f'{opt.cuda_id}'
        model = model.cuda(device=opt.cuda_id)
        model = nn.DataParallel(model, device_ids=[0])  # CUDA change

        if opt.pretrain_path:
            print('loading pretrained model {}'.format(opt.pretrain_path))
            pretrain = torch.load(opt.pretrain_path)
            print(pretrain['arch'])
            arch = f'{opt.model}-{opt.model_depth}'
            # arch = opt.model + '-' + opt.model_depth
            print(arch)
            assert arch == pretrain['arch']
            model.load_state_dict(pretrain['state_dict'])

            if opt.model == 'densenet':
                model.module.classifier = nn.Linear(
                    model.module.classifier.in_features, opt.n_finetune_classes)
                model.module.classifier = model.module.classifier.cuda(
                    device=opt.cuda_id)
            # elif opt.use_quadriplet:
            #     model = EmbeddingModel(model, opt.n_finetune_classes, not opt.no_cuda, opt.cuda_id)
            else:
                model.module.fc = nn.Sequential(
                    nn.Dropout(0.4),
                    nn.Linear(model.module.fc.in_features, 512),
                    nn.ReLU6(),
                    nn.Dropout(0.4),
                    nn.Linear(512, 128),
                    nn.ReLU6(),
                    nn.Linear(128, opt.n_finetune_classes)).cuda(device=opt.cuda_id)
                # model.module.fc = nn.Linear(model.module.fc.in_features,
                #                             opt.n_finetune_classes)
                # model.module.fc = model.module.fc.cuda(device=opt.cuda_id)

            parameters = get_fine_tuning_parameters(model, opt.ft_begin_index)
            print(len(list(parameters)), 'params to fine tune')
            return model, parameters
    else:
        if opt.pretrain_path:
            print('loading pretrained model {}'.format(opt.pretrain_path))
            pretrain = torch.load(opt.pretrain_path)
            assert opt.arch == pretrain['arch']
            model.load_state_dict(pretrain['state_dict'])

            if opt.model == 'densenet':
                model.classifier = nn.Linear(model.classifier.in_features,
                                             opt.n_finetune_classes)
            else:
                model.fc = nn.Linear(model.fc.in_features,
                                     opt.n_finetune_classes)

            parameters = get_fine_tuning_parameters(model, opt.ft_begin_index)
            return model, parameters

    return model, model.parameters()
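# Illustrative sketch (not part of the original sources): an alternative to the
# get_fine_tuning_parameters() selection used in the variant above is to freeze the whole
# backbone and optimise only the replacement head. 'model' stands for the DataParallel-wrapped
# network returned by that variant of generate_model(); the hyperparameters are placeholders.
for p in model.parameters():
    p.requires_grad = False
for p in model.module.fc.parameters():  # the nn.Sequential head built above
    p.requires_grad = True

optimizer = torch.optim.SGD(
    (p for p in model.parameters() if p.requires_grad),
    lr=1e-3, momentum=0.9)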
def generate_model(opt):
    assert opt.model in [
        'resnet', 'preresnet', 'wideresnet', 'resnext', 'densenet'
    ]

    if opt.model == 'resnet':
        assert opt.model_depth in [10, 18, 34, 50, 101, 152, 200]

        from models.resnet import get_fine_tuning_parameters

        if opt.model_depth == 10:
            model = resnet.resnet10(
                num_classes=opt.n_classes,
                shortcut_type=opt.resnet_shortcut,
                sample_size=opt.sample_size,
                sample_duration=opt.sample_duration)
        elif opt.model_depth == 18:
            model = resnet.resnet18(
                num_classes=opt.n_classes,
                shortcut_type=opt.resnet_shortcut,
                sample_size=opt.sample_size,
                sample_duration=opt.sample_duration)
        elif opt.model_depth == 34:
            model = resnet.resnet34(
                num_classes=opt.n_classes,
                shortcut_type=opt.resnet_shortcut,
                sample_size=opt.sample_size,
                sample_duration=opt.sample_duration)
        elif opt.model_depth == 50:
            model = resnet.resnet50(
                num_classes=opt.n_classes,
                shortcut_type=opt.resnet_shortcut,
                sample_size=opt.sample_size,
                sample_duration=opt.sample_duration)
        elif opt.model_depth == 101:
            model = resnet.resnet101(
                num_classes=opt.n_classes,
                shortcut_type=opt.resnet_shortcut,
                sample_size=opt.sample_size,
                sample_duration=opt.sample_duration)
        elif opt.model_depth == 152:
            model = resnet.resnet152(
                num_classes=opt.n_classes,
                shortcut_type=opt.resnet_shortcut,
                sample_size=opt.sample_size,
                sample_duration=opt.sample_duration)
        elif opt.model_depth == 200:
            model = resnet.resnet200(
                num_classes=opt.n_classes,
                shortcut_type=opt.resnet_shortcut,
                sample_size=opt.sample_size,
                sample_duration=opt.sample_duration)
    elif opt.model == 'wideresnet':
        assert opt.model_depth in [50]

        from models.wide_resnet import get_fine_tuning_parameters

        if opt.model_depth == 50:
            model = wide_resnet.resnet50(
                num_classes=opt.n_classes,
                shortcut_type=opt.resnet_shortcut,
                k=opt.wide_resnet_k,
                sample_size=opt.sample_size,
                sample_duration=opt.sample_duration)
    elif opt.model == 'resnext':
        assert opt.model_depth in [50, 101, 152]

        from models.resnext import get_fine_tuning_parameters

        if opt.model_depth == 50:
            model = resnext.resnet50(
                num_classes=opt.n_classes,
                shortcut_type=opt.resnet_shortcut,
                cardinality=opt.resnext_cardinality,
                sample_size=opt.sample_size,
                sample_duration=opt.sample_duration)
        elif opt.model_depth == 101:
            model = resnext.resnet101(
                num_classes=opt.n_classes,
                shortcut_type=opt.resnet_shortcut,
                cardinality=opt.resnext_cardinality,
                sample_size=opt.sample_size,
                sample_duration=opt.sample_duration)
        elif opt.model_depth == 152:
            model = resnext.resnet152(
                num_classes=opt.n_classes,
                shortcut_type=opt.resnet_shortcut,
                cardinality=opt.resnext_cardinality,
                sample_size=opt.sample_size,
                sample_duration=opt.sample_duration)
    elif opt.model == 'preresnet':
        assert opt.model_depth in [18, 34, 50, 101, 152, 200]

        from models.pre_act_resnet import get_fine_tuning_parameters

        if opt.model_depth == 18:
            model = pre_act_resnet.resnet18(
                num_classes=opt.n_classes,
                shortcut_type=opt.resnet_shortcut,
                sample_size=opt.sample_size,
                sample_duration=opt.sample_duration)
        elif opt.model_depth == 34:
            model = pre_act_resnet.resnet34(
                num_classes=opt.n_classes,
                shortcut_type=opt.resnet_shortcut,
                sample_size=opt.sample_size,
                sample_duration=opt.sample_duration)
        elif opt.model_depth == 50:
            model = pre_act_resnet.resnet50(
                num_classes=opt.n_classes,
                shortcut_type=opt.resnet_shortcut,
                sample_size=opt.sample_size,
                sample_duration=opt.sample_duration)
        elif opt.model_depth == 101:
            model = pre_act_resnet.resnet101(
                num_classes=opt.n_classes,
                shortcut_type=opt.resnet_shortcut,
                sample_size=opt.sample_size,
                sample_duration=opt.sample_duration)
        elif opt.model_depth == 152:
            model = pre_act_resnet.resnet152(
                num_classes=opt.n_classes,
                shortcut_type=opt.resnet_shortcut,
                sample_size=opt.sample_size,
                sample_duration=opt.sample_duration)
        elif opt.model_depth == 200:
            model = pre_act_resnet.resnet200(
                num_classes=opt.n_classes,
                shortcut_type=opt.resnet_shortcut,
                sample_size=opt.sample_size,
                sample_duration=opt.sample_duration)
    elif opt.model == 'densenet':
        assert opt.model_depth in [121, 169, 201, 264]

        from models.densenet import get_fine_tuning_parameters

        if opt.model_depth == 121:
            model = densenet.densenet121(
                num_classes=opt.n_classes,
                sample_size=opt.sample_size,
                sample_duration=opt.sample_duration)
        elif opt.model_depth == 169:
            model = densenet.densenet169(
                num_classes=opt.n_classes,
                sample_size=opt.sample_size,
                sample_duration=opt.sample_duration)
        elif opt.model_depth == 201:
            model = densenet.densenet201(
                num_classes=opt.n_classes,
                sample_size=opt.sample_size,
                sample_duration=opt.sample_duration)
        elif opt.model_depth == 264:
            model = densenet.densenet264(
                num_classes=opt.n_classes,
                sample_size=opt.sample_size,
                sample_duration=opt.sample_duration)

    if not opt.no_cuda:
        model = model.cuda()
        model = nn.DataParallel(model, device_ids=None)

        if opt.pretrain_path:
            print('loading pretrained model {}'.format(opt.pretrain_path))
            pretrain = torch.load(opt.pretrain_path)
            assert opt.arch == pretrain['arch']
            model.load_state_dict(pretrain['state_dict'])

            if opt.model == 'densenet':
                model.module.classifier = nn.Linear(
                    model.module.classifier.in_features, opt.n_finetune_classes)
                model.module.classifier = model.module.classifier.cuda()
            else:
                model.module.fc = nn.Linear(model.module.fc.in_features,
                                            opt.n_finetune_classes)
                model.module.fc = model.module.fc.cuda()

            parameters = get_fine_tuning_parameters(model, opt.ft_begin_index)
            return model, parameters
    else:
        if opt.pretrain_path:
            print('loading pretrained model {}'.format(opt.pretrain_path))
            pretrain = torch.load(opt.pretrain_path)
            assert opt.arch == pretrain['arch']
            model.load_state_dict(pretrain['state_dict'])

            if opt.model == 'densenet':
                model.classifier = nn.Linear(model.classifier.in_features,
                                             opt.n_finetune_classes)
            else:
                model.fc = nn.Linear(model.fc.in_features,
                                     opt.n_finetune_classes)

            parameters = get_fine_tuning_parameters(model, opt.ft_begin_index)
            return model, parameters

    return model, model.parameters()
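# Illustrative usage sketch (not part of the original sources): generate_model() expects an
# argparse-style options object; the attribute values below are placeholders chosen so the
# CPU branch is taken and no pretrained checkpoint is loaded.
from types import SimpleNamespace

opt = SimpleNamespace(
    model='densenet', model_depth=121, n_classes=400, n_finetune_classes=2,
    resnet_shortcut='B', sample_size=112, sample_duration=16,
    no_cuda=True, pretrain_path='', ft_begin_index=0, arch='densenet-121')
model, params = generate_model(opt)  # 3D DenseNet-121 and its trainable parameters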