def generate_model(opt):
    assert opt.model in ['resnext']
    assert opt.model_depth in [101]
    from models.resnext import get_fine_tuning_parameters

    model = resnext.resnet101(num_classes=opt.n_classes,
                              shortcut_type=opt.resnet_shortcut,
                              cardinality=opt.resnext_cardinality,
                              sample_size=opt.sample_size,
                              sample_duration=opt.sample_duration,
                              input_channels=opt.input_channels,
                              output_layers=opt.output_layers)
    model = model.cuda()
    model = nn.DataParallel(model)

    if opt.pretrain_path:
        print('loading pretrained model {}'.format(opt.pretrain_path))
        pretrain = torch.load(opt.pretrain_path)
        assert opt.arch == pretrain['arch']
        model.load_state_dict(pretrain['state_dict'])

        model.module.fc = nn.Linear(model.module.fc.in_features,
                                    opt.n_finetune_classes)
        model.module.fc = model.module.fc.cuda()

        parameters = get_fine_tuning_parameters(model, opt.ft_begin_index)
        return model, parameters

    return model, model.parameters()
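# A minimal usage sketch (not part of the original snippet) for the generate_model()
# variant above, assuming parse_opts() exposes the fields it references (model,
# model_depth, n_classes, pretrain_path, ft_begin_index, ...). The optimizer
# hyper-parameters and the helper name build_for_finetuning are illustrative only.
import torch
import torch.nn as nn
from opts import parse_opts  # assumed repo layout

def build_for_finetuning():
    opt = parse_opts()
    model, parameters = generate_model(opt)
    # `parameters` is either the fine-tuning parameter groups or model.parameters(),
    # and both forms can be handed directly to an optimizer.
    optimizer = torch.optim.SGD(parameters, lr=0.001, momentum=0.9,
                                weight_decay=1e-5)
    criterion = nn.CrossEntropyLoss().cuda()
    return model, optimizer, criterion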
def main():
    # torch.cuda.set_device(1)
    os.environ["CUDA_VISIBLE_DEVICES"] = "1"
    opt = parse_opts()
    model = resnext.resnet101(
        num_classes=opt.n_finetune_classes,
        shortcut_type=opt.resnet_shortcut,
        cardinality=opt.resnext_cardinality,
        sample_size=opt.sample_size,
        sample_duration=opt.sample_duration)
    model.cuda()
    # print(model.cuda())
    model = nn.DataParallel(model, device_ids=None)
    checkpoint = torch.load('trained_models/best-4-1.pth.tar')
    model.load_state_dict(checkpoint['state_dict'])
    best_score = checkpoint['best_prec1']
    print(best_score)
    # for param_group in optimizer['param_groups']:
    #     print(param_group)
    # model.cpu()
    # model.cuda()
    model.eval()

    rgb_mean = [0.485, 0.456, 0.406]
    rgb_std = [0.229, 0.224, 0.225]
    opt.scales = [1]
    transform_val = Compose([
        MultiScaleCornerCrop(opt.scales, opt.sample_size, crop_positions=['c']),
        ToTensor(),
        Normalize(rgb_mean, rgb_std),
    ])
def generate_model(opt):
    assert opt.model in ['mfnet', 'resnext', 'resnet']
    opt.cbam = 0
    if opt.model == 'mfnet':
        if opt.mult_loss:
            model = MFNET_3D_T(opt)
        elif opt.time_focus:
            model = MFNET_3D_C(opt)
        # elif opt.mv:
        #     model = MFNET_MV(opt)
        elif opt.small:
            model = MFNET_3D_S(opt)
            pretrain = torch.load(opt.pretrain_path)
            compressed_dict = compress_dict(pretrain, opt)
            model.cuda()
            model = nn.DataParallel(model)
            model.load_state_dict(compressed_dict, strict=True)
        else:
            model = MFNET_3D(opt.n_classes)
    elif opt.model == 'resnext':
        model = resnext.resnet101(opt)
    elif opt.model == 'resnet':
        model = resnet.resnet101(opt)

    if opt.mult_loss:
        param = model.student_model.parameters()
    else:
        param = model.parameters()
    return model, param
def build_resnext():
    model = resnext.resnet101(num_classes=400,
                              shortcut_type='B',
                              cardinality=32,
                              sample_size=112,
                              sample_duration=16,
                              last_fc=False)
    model = model.cuda()
    model = nn.DataParallel(model, device_ids=None)
    assert os.path.exists('data/preprocess/pretrained/resnext-101-kinetics.pth')
    model_data = torch.load('data/preprocess/pretrained/resnext-101-kinetics.pth',
                            map_location='cpu')
    model.load_state_dict(model_data['state_dict'])
    model.eval()
    return model
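# Sketch of pushing one clip through the extractor returned by build_resnext() above.
# The (3, 16, 112, 112) shape follows the sample_duration / sample_size arguments used
# there; whether the output is a pooled feature vector or logits depends on the repo's
# resnext implementation (last_fc=False suggests features). Helper name is ours.
import torch

def extract_features(model, clip):
    # clip: float tensor of shape (batch, 3, 16, 112, 112), normalized as in training
    with torch.no_grad():
        features = model(clip.cuda())
    return features.cpu()

# example with a dummy clip:
# model = build_resnext()
# feats = extract_features(model, torch.randn(1, 3, 16, 112, 112))
# print(feats.shape)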
def generate_model(opt):
    assert opt.model in ['resnext']
    assert opt.model_depth in [101]
    model = resnext.resnet101(num_classes=opt.n_classes,
                              shortcut_type=opt.resnet_shortcut,
                              cardinality=opt.resnext_cardinality,
                              sample_size=opt.sample_size,
                              sample_duration=opt.sample_duration,
                              input_channels=opt.input_channels,
                              output_layers=opt.output_layers)

    if opt.MARS_premodel_path != '' and opt.input_channels == 3:
        print('loading pretrained model {}'.format(opt.MARS_premodel_path))
        para_dict, _ = fluid.dygraph.load_dygraph(opt.MARS_premodel_path)
        # set the network parameters to the loaded pretrained weights
        model.set_dict(para_dict)
        model.fc = fluid.dygraph.Linear(
            model.lastfeature_size,
            opt.n_finetune_classes,
            param_attr=fluid.ParamAttr(
                initializer=fluid.initializer.MSRAInitializer(uniform=True)),
            bias_attr=paddle.fluid.ParamAttr(initializer=None),
            act="softmax")
        parameters = get_fine_tuning_parameters(model, opt.ft_begin_index)
        return model, parameters
    elif opt.Flow_premodel_path != '' and opt.input_channels == 2:
        print('loading pretrained model {}'.format(opt.Flow_premodel_path))
        para_dict, _ = fluid.dygraph.load_dygraph(opt.Flow_premodel_path)
        # set the network parameters to the loaded pretrained weights
        model.set_dict(para_dict)
        model.fc = fluid.dygraph.Linear(
            model.lastfeature_size,
            opt.n_finetune_classes,
            param_attr=fluid.ParamAttr(
                initializer=fluid.initializer.MSRAInitializer(uniform=True)),
            bias_attr=paddle.fluid.ParamAttr(initializer=None),
            act="softmax")
        parameters = get_fine_tuning_parameters(model, opt.ft_begin_index)
        return model, parameters
    elif opt.RGB_premodel_path != '' and opt.input_channels == 3:
        print('loading pretrained model {}'.format(opt.RGB_premodel_path))
        para_dict, _ = fluid.dygraph.load_dygraph(opt.RGB_premodel_path)
        # set the network parameters to the loaded pretrained weights
        model.set_dict(para_dict)
        model.fc = fluid.dygraph.Linear(
            model.lastfeature_size,
            opt.n_finetune_classes,
            param_attr=fluid.ParamAttr(
                initializer=fluid.initializer.MSRAInitializer(uniform=True)),
            bias_attr=paddle.fluid.ParamAttr(initializer=None),
            act="softmax")
        parameters = get_fine_tuning_parameters(model, opt.ft_begin_index)
        return model, parameters

    return model, model.parameters()
def test_on_testset():
    os.environ["CUDA_VISIBLE_DEVICES"] = "1"
    args = parse_opts()
    data_loader = get_iterator(args, isTrain=False)
    acc_meter = tnt.meter.AverageValueMeter()
    model = resnext.resnet101(num_classes=args.n_finetune_classes,
                              shortcut_type=args.resnet_shortcut,
                              cardinality=args.resnext_cardinality,
                              sample_size=args.sample_size,
                              sample_duration=args.sample_duration)
    model.cuda()
    model = nn.DataParallel(model, device_ids=None)
    model.load_state_dict(
        torch.load('trained_models/checkpoint.pth.tar')['state_dict'])
    model.eval()
    total = 0
    result = {}
    with torch.no_grad():
        for data in data_loader:
            input = data[0].cuda()
            label = data[1].cuda()
            video_id = data[2]
            output = torch.sigmoid(model(input))
            label_indexes = (label == 1)
            acc, bt, _ = calculate_accuracy(output, label, video_id=video_id,
                                            thresh_hold=0.4)
            for i, vid in enumerate(video_id):
                if vid not in result:
                    result[vid] = []
                result[vid].append(
                    label_indexes[i].nonzero().squeeze(1).tolist())
                if sum(output[i] > 0.4) > 0:
                    indexes = (output[i] > 0.4)
                else:
                    indexes = (output[i] >= output[i].max(0)[0])
                tmp_index = indexes.nonzero()
                if len(tmp_index) <= 0:
                    indx = []
                else:
                    indx = indexes.nonzero().squeeze(1).tolist()
                result[vid].append(indx)
            total += bt
            acc_meter.add(acc, bt)
            print('Now tested %d samples, batch average acc is %.4f, '
                  'average acc is %.4f' % (total, acc / bt, acc_meter.value()[0]))
    # print(result)
    torch.save(result, './result14-0.4-max.pkl')
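# The loop above keeps, per clip, every class whose sigmoid score exceeds 0.4 and falls
# back to the single highest-scoring class when nothing clears the threshold. A
# self-contained sketch of that decision rule on dummy scores (helper name is ours):
import torch

def decode_multilabel(scores, threshold=0.4):
    # scores: 1-D tensor of per-class sigmoid outputs for one clip
    mask = scores > threshold
    if mask.sum() == 0:
        mask = scores >= scores.max()
    return mask.nonzero(as_tuple=False).squeeze(1).tolist()

print(decode_multilabel(torch.tensor([0.1, 0.7, 0.05, 0.45])))  # -> [1, 3]
print(decode_multilabel(torch.tensor([0.1, 0.2, 0.05])))        # -> [1]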
def generate_model():
    from models.resnext import get_fine_tuning_parameters
    model = resnext.resnet101(num_classes=51,
                              shortcut_type='B',
                              cardinality=32,
                              sample_size=112,
                              sample_duration=64,
                              input_channels=3)
    model = model.cuda()
    model = nn.DataParallel(model)
    return model
def main():
    opt = parse_opts()
    model = resnext.resnet101(num_classes=opt.n_classes,
                              shortcut_type=opt.resnet_shortcut,
                              cardinality=opt.resnext_cardinality,
                              sample_size=opt.sample_size,
                              sample_duration=opt.sample_duration)
    model.load_state_dict(torch.load('./trained_models/best.pth.tar'))
    model = model.cuda()
    model = nn.DataParallel(model, device_ids=[0])
    clip = video_loader(
        root_path='/home/zengh/Dataset/AIChallenger/test/group0/1000007124',
        frame_indices=range(16))
    indexes = test(clip, model)
    print(indexes)
def main(args):
    device = 'cuda'
    print('Loading ResNext101 model...')
    model = nn.DataParallel(resnet101(sample_duration=16).cuda())
    model.load_state_dict(torch.load('resnext-101-kinetics.pth')['state_dict'])

    print('Loading video paths...')
    if args.dataset == 'uva':
        files = glob.glob(args.data_path + '/*.mp4')
        data_type = 'video'
    else:
        raise NotImplementedError

    mu, sigma = fid.calculate_activation_statistics(files, data_type, model,
                                                    args.batch_size, args.size,
                                                    args.length, args.dims,
                                                    device)
    np.savez_compressed('./stats/' + args.dataset + '.npz', mu=mu, sigma=sigma)
    print('finished')
def calculate_fid_given_paths(paths, batch_size, size, length, dims, device):
    """Calculates the FID between two paths."""
    for p in paths:
        if not os.path.exists(p):
            raise RuntimeError('Invalid path: %s' % p)

    model = nn.DataParallel(resnet101(sample_duration=16).cuda())
    model.load_state_dict(torch.load('resnext-101-kinetics.pth')['state_dict'])

    m1, s1 = _compute_statistics_of_path(paths[0], model, batch_size, size,
                                         length, dims, device)
    m2, s2 = _compute_statistics_of_path(paths[1], model, batch_size, size,
                                         length, dims, device)
    fid_value = calculate_frechet_distance(m1, s1, m2, s2)
    return fid_value
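# Hypothetical invocation of calculate_fid_given_paths() above; the directory names,
# batch size and feature dimensionality (2048 for the ResNeXt-101 penultimate layer)
# are illustrative values, not taken from the original script.
if __name__ == '__main__':
    fid_value = calculate_fid_given_paths(
        paths=['data/real_videos', 'data/generated_videos'],
        batch_size=8,
        size=112,      # spatial resolution expected by the 3D ResNeXt
        length=16,     # frames per clip (matches sample_duration=16)
        dims=2048,
        device='cuda')
    print('FID: %.2f' % fid_value)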
def initial_model():
    opt = parse_opts()
    model = resnext.resnet101(num_classes=opt.n_finetune_classes,
                              shortcut_type=opt.resnet_shortcut,
                              cardinality=opt.resnext_cardinality,
                              sample_size=opt.sample_size,
                              sample_duration=opt.sample_duration)
    model = model.cuda()
    model = nn.DataParallel(model, device_ids=None)
    model_path = './trained_models/best.pth10.tar'
    if not os.path.exists(model_path):
        print("model path does not exist: {}".format(model_path))
        return
    model.load_state_dict(torch.load(model_path)['state_dict'])
    model.eval()
    return model
def get_model():
    # os.environ["CUDA_VISIBLE_DEVICES"] = "1"
    opt = parse_opts()
    model = resnext.resnet101(
        num_classes=opt.n_finetune_classes,
        shortcut_type=opt.resnet_shortcut,
        cardinality=opt.resnext_cardinality,
        sample_size=opt.sample_size,
        sample_duration=opt.sample_duration)
    model.cuda()
    # print(model.cuda())
    model = nn.DataParallel(model, device_ids=None)
    model.load_state_dict(
        torch.load('./trained_models/best.pth10.tar')['state_dict'])
    model.cpu()
    # model.cuda()
    model.eval()

    rgb_mean = [0.485, 0.456, 0.406]
    rgb_std = [0.229, 0.224, 0.225]
    opt.scales = [1]
    return model
def generate_model(opt):
    load_device = 'cuda' if torch.cuda.is_available() else 'cpu'
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    assert opt.model in ['resnext']
    assert opt.model_depth in [101]

    #########################
    # Define the model
    #########################
    model = resnext.resnet101(
        num_classes=opt.n_classes,
        shortcut_type=opt.resnet_shortcut,
        cardinality=opt.resnext_cardinality,  # resnext cardinality
        sample_size=opt.sample_size,
        sample_duration=opt.sample_duration,
        input_channels=opt.input_channels,
        output_layers=opt.output_layers)
    model = model.to(device)
    model = nn.DataParallel(model)

    # If a pretrained checkpoint is given, load it and replace the classifier
    if opt.pretrain_path:
        from models.resnext import get_fine_tuning_parameters
        print('loading pretrained model {}'.format(opt.pretrain_path))
        pretrain = torch.load(opt.pretrain_path, map_location=load_device)
        assert opt.arch == pretrain['arch']
        model.load_state_dict(pretrain['state_dict'])

        model.module.fc = nn.Linear(model.module.fc.in_features,
                                    opt.n_finetune_classes)
        model.module.fc = model.module.fc.to(device)

        parameters = get_fine_tuning_parameters(model, opt.ft_begin_index)
        return model, parameters

    # print("!"*50, '\n', model, '\n', "!"*50)
    return model, model.parameters()
def predict(model, sindex):
    start_time = time.time()
    opt = parse_opts()
    model = resnext.resnet101(num_classes=opt.n_finetune_classes,
                              shortcut_type=opt.resnet_shortcut,
                              cardinality=opt.resnext_cardinality,
                              sample_size=opt.sample_size,
                              sample_duration=opt.sample_duration)
    model = model.cuda()
    model = nn.DataParallel(model, device_ids=None)
    model.load_state_dict(
        torch.load('./trained_models/best.pth.tar')['state_dict'])
    duration = (time.time() - start_time) * 1000
    print('restore time %.3f ms' % duration)
    model.eval()

    rgb_mean = [0.485, 0.456, 0.406]
    rgb_std = [0.229, 0.224, 0.225]
    opt.scales = [1]
    transform_val = Compose([
        MultiScaleCornerCrop(opt.scales, opt.sample_size, crop_positions=['c']),
        ToTensor(),
        Normalize(rgb_mean, rgb_std),
    ])

    start_time = time.time()
    clip = video_loader(
        root_path='/home/zengh/Dataset/AIChallenger/train/group5/567700300',
        frame_indices=range(3, 19),
        transform=transform_val)
    clip = clip.unsqueeze(0)
    # print("clip", clip)
    duration = (time.time() - start_time) * 1000
    print('pic time %.3f ms' % duration)
    # print("clip", clip.shape)

    start_time = time.time()
    indexes = test(clip, model)
    duration = (time.time() - start_time) * 1000
    print('pre time %.3f ms' % duration)
def generate_model(opt):
    assert opt.model in ['resnet', 'resnext']

    if opt.model == 'resnet':
        assert opt.model_depth in [10, 18, 34, 50, 101, 152, 200]
        from models.resnet import get_fine_tuning_parameters
        if opt.model_depth == 10:
            model = resnet.resnet10(opt=opt)
        elif opt.model_depth == 18:
            model = resnet.resnet18(opt=opt)
        elif opt.model_depth == 34:
            model = resnet.resnet34(opt=opt)
        elif opt.model_depth == 50:
            model = resnet.resnet50(opt=opt)
        elif opt.model_depth == 101:
            model = resnet.resnet101(opt=opt)
        elif opt.model_depth == 152:
            model = resnet.resnet152(opt=opt)
        elif opt.model_depth == 200:
            model = resnet.resnet200(opt=opt)
    elif opt.model == 'resnext':
        assert opt.model_depth in [50, 101, 152]
        from models.resnext import get_fine_tuning_parameters
        if opt.model_depth == 50:
            model = resnext.resnet50(opt=opt)
        elif opt.model_depth == 101:
            model = resnext.resnet101(opt=opt)
        elif opt.model_depth == 152:
            model = resnext.resnet152(opt=opt)

    if not opt.no_cuda:
        model = model.cuda()

    return model, model.parameters()
def load_pretrained_resnet101(opt):
    # construct the model architecture
    model = resnext.resnet101(num_classes=opt.n_classes,
                              shortcut_type=opt.resnet_shortcut,
                              cardinality=opt.resnext_cardinality,
                              sample_size=opt.sample_size,
                              sample_duration=opt.sample_duration)
    model = model.cuda()
    # wrap the model in nn.DataParallel so the 'module.' keys in the checkpoint
    # match / alternatively we could strip the 'module.' prefix from the keys
    model = nn.DataParallel(model, device_ids=None)
    # Note: please customize the pretrained model path
    pretrain = torch.load(opt.pretrained_model_path)
    pretrain_dict = pretrain['state_dict']
    # do not load the last (classification) layer
    pretrain_dict.pop('module.fc.weight')
    pretrain_dict.pop('module.fc.bias')
    model_dict = model.state_dict()
    model_dict.update(pretrain_dict)
    model.load_state_dict(model_dict)
    return model
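# The function above loads a checkpoint while discarding the classifier weights so a
# different number of classes can be trained. A generic sketch of that pattern (helper
# name is ours), assuming the checkpoint stores its weights under 'state_dict' with
# nn.DataParallel 'module.' prefixes:
import torch

def load_partial_state_dict(model, checkpoint_path,
                            skip_keys=('module.fc.weight', 'module.fc.bias')):
    checkpoint = torch.load(checkpoint_path, map_location='cpu')
    pretrained = {k: v for k, v in checkpoint['state_dict'].items()
                  if k not in skip_keys}
    state = model.state_dict()
    state.update(pretrained)   # the classifier keeps its random initialization
    model.load_state_dict(state)
    return model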
import paddle.fluid as fluid
from collections import OrderedDict

# change this to the location of the PyTorch weights you downloaded!!
torch_weight = torch.load('RGB_Kinetics_16f.pth',
                          map_location=torch.device('cpu'))
for torch_key in torch_weight['state_dict'].keys():
    print(torch_key)

from models import resnext

with fluid.dygraph.guard():
    # The example arguments below convert a Kinetics-400 pretrained model; the three
    # commented calls correspond to the RGB stream, the Flow stream and the MARS stream.
    # paddle_model = resnext.resnet101(num_classes=400, shortcut_type='B', cardinality=32,
    #     sample_size=112, sample_duration=16, input_channels=3, output_layers=[], curr_mode='RGB')
    # paddle_model = resnext.resnet101(num_classes=400, shortcut_type='B', cardinality=32,
    #     sample_size=112, sample_duration=16, input_channels=2, output_layers=[], curr_mode='Flow')
    # paddle_model = resnext.resnet101(num_classes=400, shortcut_type='B', cardinality=32,
    #     sample_size=112, sample_duration=16, input_channels=3, output_layers=[], curr_mode='MARS')
    paddle_model = resnext.resnet101(parameter)  # set the arguments to match the ones used during training
    paddle_weight = paddle_model.state_dict()
    for paddle_key in paddle_weight:
        print(paddle_key)

    paddle_weight = paddle_model.state_dict()
    new_weight_dict = OrderedDict()
    for torch_key, paddle_key in zip(torch_weight['state_dict'].keys(),
                                     paddle_weight.keys()):
        if torch_key.find('fc') > -1:
            # Paddle stores the fc weight transposed relative to PyTorch, so transpose it
            new_weight_dict[paddle_key] = torch_weight['state_dict'][torch_key].detach().numpy().T
        else:
            new_weight_dict[paddle_key] = torch_weight['state_dict'][torch_key].detach().numpy()
    paddle_model.set_dict(new_weight_dict)
    fluid.dygraph.save_dygraph(paddle_model.state_dict(),
                               "RGB_Kinetics_16f")  # change to your own pretrained model name
print('OK!!!')
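# An optional sanity check for the conversion above: the zip() pairing silently assumes
# both state dicts enumerate layers in the same order, so comparing shapes key-by-key
# (with the fc transpose taken into account) catches misalignment early. This sketch
# only inspects the dictionaries produced above; it does not run either model, and the
# helper name is ours.
def check_converted_shapes(torch_state_dict, new_weight_dict):
    for torch_key, paddle_key in zip(torch_state_dict.keys(), new_weight_dict.keys()):
        src = tuple(torch_state_dict[torch_key].shape)
        dst = tuple(new_weight_dict[paddle_key].shape)
        if torch_key.find('fc') > -1:
            src = src[::-1]  # the fc weight was transposed during conversion
        if src != dst:
            print('shape mismatch: {} {} -> {} {}'.format(torch_key, src,
                                                          paddle_key, dst))

# check_converted_shapes(torch_weight['state_dict'], new_weight_dict)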
def generate_model(opt): assert opt.model in [ 'resnet', 'preresnet', 'wideresnet', 'resnext', 'resnext_fa', 'densenet', 'p3d' ] if opt.model == 'resnet': assert opt.model_depth in [10, 18, 34, 50, 101, 152, 200] from models.resnet import get_fine_tuning_parameters if opt.model_depth == 10: model = resnet.resnet10( num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model_depth == 18: model = resnet.resnet18( num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model_depth == 34: model = resnet.resnet34( num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model_depth == 50: model = resnet.resnet50( num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model_depth == 101: model = resnet.resnet101( num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model_depth == 152: model = resnet.resnet152( num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model_depth == 200: model = resnet.resnet200( num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model == 'wideresnet': assert opt.model_depth in [50] from models.wide_resnet import get_fine_tuning_parameters if opt.model_depth == 50: model = wide_resnet.resnet50( num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, k=opt.wide_resnet_k, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model == 'p3d': assert opt.model_depth in [50, 101, 152] if opt.model_depth == 50: model = p3d.P3D63(num_classes=opt.n_classes) elif opt.model_depth == 101: model = p3d.P3D131(num_classes=opt.n_classes) elif opt.model_depth == 152: model = p3d.P3D199(num_classes=opt.n_classes) elif opt.model == 'resnext': assert opt.model_depth in [50, 101, 152] from models.resnext_fa import get_fine_tuning_parameters if opt.model_depth == 50: model = resnext.resnet50( num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, cardinality=opt.resnext_cardinality, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model_depth == 101: model = resnext.resnet101( num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, cardinality=opt.resnext_cardinality, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model_depth == 152: model = resnext.resnet152( num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, cardinality=opt.resnext_cardinality, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model == 'resnext_fa': assert opt.model_depth in [50, 101, 152] from models.resnext_fa import get_fine_tuning_parameters, get_fine_tuning_parameters_fa if opt.model_depth == 50: model = resnext_fa.resnet50( num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, cardinality=opt.resnext_cardinality, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model_depth == 101: model = resnext_fa.resnet101( num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, cardinality=opt.resnext_cardinality, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif 
opt.model_depth == 152: model = resnext_fa.resnet152( num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, cardinality=opt.resnext_cardinality, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model == 'preresnet': assert opt.model_depth in [18, 34, 50, 101, 152, 200] from models.pre_act_resnet import get_fine_tuning_parameters if opt.model_depth == 18: model = pre_act_resnet.resnet18( num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model_depth == 34: model = pre_act_resnet.resnet34( num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model_depth == 50: model = pre_act_resnet.resnet50( num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model_depth == 101: model = pre_act_resnet.resnet101( num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model_depth == 152: model = pre_act_resnet.resnet152( num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model_depth == 200: model = pre_act_resnet.resnet200( num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model == 'densenet': assert opt.model_depth in [121, 169, 201, 264] from models.densenet import get_fine_tuning_parameters if opt.model_depth == 121: model = densenet.densenet121( num_classes=opt.n_classes, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model_depth == 169: model = densenet.densenet169( num_classes=opt.n_classes, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model_depth == 201: model = densenet.densenet201( num_classes=opt.n_classes, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model_depth == 264: model = densenet.densenet264( num_classes=opt.n_classes, sample_size=opt.sample_size, sample_duration=opt.sample_duration) if not opt.no_cuda: model = model.cuda() model = nn.DataParallel(model, device_ids=None) if opt.pretrain_path: print('loading pretrained model {}'.format(opt.pretrain_path)) pretrain = torch.load(opt.pretrain_path) #pdb.set_trace(); #assert opt.arch == pretrain['arch'] model_dict = model.state_dict(); #pdb.set_trace(); model_dict.update(pretrain['state_dict']); model.load_state_dict(model_dict); #model.load_state_dict(pretrain['state_dict']) if opt.model == 'densenet': model.module.classifier = nn.Linear( model.module.classifier.in_features, opt.n_finetune_classes) model.module.classifier = model.module.classifier.cuda() # do not need to add new fc layer when finetuning model has the same class num elif (opt.n_classes != opt.n_finetune_classes): model.module.fc = nn.Linear(model.module.fc.in_features, opt.n_finetune_classes) model.module.fc = model.module.fc.cuda() if (opt.model == 'resnext_fa'): parameters = get_fine_tuning_parameters_fa(model, opt.learning_rate) else: parameters = get_fine_tuning_parameters(model, opt.ft_begin_index) return model, parameters else: if opt.pretrain_path: print('loading pretrained model {}'.format(opt.pretrain_path)) pretrain = torch.load(opt.pretrain_path) assert opt.arch == pretrain['arch'] model.load_state_dict(pretrain['state_dict']) if opt.model == 
'densenet': model.classifier = nn.Linear( model.classifier.in_features, opt.n_finetune_classes) else: model.fc = nn.Linear(model.fc.in_features, opt.n_finetune_classes) parameters = get_fine_tuning_parameters(model, opt.ft_begin_index) return model, parameters return model, model.parameters()
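# The branches above swap the final layer differently for DenseNet ('classifier') and the
# ResNet-style models ('fc'), and have to go through .module when the model is wrapped in
# nn.DataParallel. A small standalone helper capturing that logic (names assumed from the
# snippet above, not part of the original code):
import torch.nn as nn

def replace_final_layer(model, n_finetune_classes, model_name, use_cuda=True):
    net = model.module if isinstance(model, nn.DataParallel) else model
    if model_name == 'densenet':
        net.classifier = nn.Linear(net.classifier.in_features, n_finetune_classes)
        layer = net.classifier
    else:
        net.fc = nn.Linear(net.fc.in_features, n_finetune_classes)
        layer = net.fc
    if use_cuda:
        layer.cuda()
    return model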
def get_model(config): assert config.model in [ 'i3d', 'resnet', 'preresnet', 'wideresnet', 'resnext', 'densenet' ] print('Initializing {} model (num_classes={})...'.format( config.model, config.num_classes)) if config.model == 'i3d': from models.i3d import get_fine_tuning_parameters model = InceptionI3D(num_classes=config.num_classes, spatial_squeeze=True, final_endpoint='logits', in_channels=3, dropout_keep_prob=config.dropout_keep_prob) elif config.model == 'resnet': assert config.model_depth in [10, 18, 34, 50, 101, 152, 200] from models.resnet import get_fine_tuning_parameters if config.model_depth == 10: model = resnet.resnet10(num_classes=config.num_classes, shortcut_type=config.resnet_shortcut, spatial_size=config.spatial_size, sample_duration=config.sample_duration) elif config.model_depth == 18: model = resnet.resnet18(num_classes=config.num_classes, shortcut_type=config.resnet_shortcut, spatial_size=config.spatial_size, sample_duration=config.sample_duration) elif config.model_depth == 34: model = resnet.resnet34(num_classes=config.num_classes, shortcut_type=config.resnet_shortcut, spatial_size=config.spatial_size, sample_duration=config.sample_duration) elif config.model_depth == 50: model = resnet.resnet50(num_classes=config.num_classes, shortcut_type=config.resnet_shortcut, spatial_size=config.spatial_size, sample_duration=config.sample_duration) elif config.model_depth == 101: model = resnet.resnet101(num_classes=config.num_classes, shortcut_type=config.resnet_shortcut, spatial_size=config.spatial_size, sample_duration=config.sample_duration) elif config.model_depth == 152: model = resnet.resnet152(num_classes=config.num_classes, shortcut_type=config.resnet_shortcut, spatial_size=config.spatial_size, sample_duration=config.sample_duration) elif config.model_depth == 200: model = resnet.resnet200(num_classes=config.num_classes, shortcut_type=config.resnet_shortcut, spatial_size=config.spatial_size, sample_duration=config.sample_duration) elif config.model == 'wideresnet': assert config.model_depth in [50] from models.wide_resnet import get_fine_tuning_parameters if config.model_depth == 50: model = wide_resnet.resnet50( num_classes=config.num_classes, shortcut_type=config.resnet_shortcut, k=config.wide_resnet_k, spatial_size=config.spatial_size, sample_duration=config.sample_duration) elif config.model == 'resnext': assert config.model_depth in [50, 101, 152] from models.resnext import get_fine_tuning_parameters if config.model_depth == 50: model = resnext.resnet50(num_classes=config.num_classes, shortcut_type=config.resnet_shortcut, cardinality=config.resnext_cardinality, spatial_size=config.spatial_size, sample_duration=config.sample_duration) elif config.model_depth == 101: model = resnext.resnet101(num_classes=config.num_classes, shortcut_type=config.resnet_shortcut, cardinality=config.resnext_cardinality, spatial_size=config.spatial_size, sample_duration=config.sample_duration) elif config.model_depth == 152: model = resnext.resnet152(num_classes=config.num_classes, shortcut_type=config.resnet_shortcut, cardinality=config.resnext_cardinality, spatial_size=config.spatial_size, sample_duration=config.sample_duration) elif config.model == 'densenet': assert config.model_depth in [121, 169, 201, 264] from models.densenet import get_fine_tuning_parameters if config.model_depth == 121: model = densenet.densenet121( num_classes=config.num_classes, spatial_size=config.spatial_size, sample_duration=config.sample_duration) elif config.model_depth == 169: model = 
densenet.densenet169( num_classes=config.num_classes, spatial_size=config.spatial_size, sample_duration=config.sample_duration) elif config.model_depth == 201: model = densenet.densenet201( num_classes=config.num_classes, spatial_size=config.spatial_size, sample_duration=config.sample_duration) elif config.model_depth == 264: model = densenet.densenet264( num_classes=config.num_classes, spatial_size=config.spatial_size, sample_duration=config.sample_duration) if 'cuda' in config.device: print('Moving model to CUDA device...') # Move model to the GPU model = model.cuda() if config.model != 'i3d': model = nn.DataParallel(model, device_ids=None) if config.checkpoint_path: print('Loading pretrained model {}'.format(config.checkpoint_path)) assert os.path.isfile(config.checkpoint_path) checkpoint = torch.load(config.checkpoint_path) if config.model == 'i3d': pretrained_weights = checkpoint else: pretrained_weights = checkpoint['state_dict'] model.load_state_dict(pretrained_weights) # Setup finetuning layer for different number of classes # Note: the DataParallel adds 'module' dict to complicate things... print('Replacing model logits with {} output classes.'.format( config.finetune_num_classes)) if config.model == 'i3d': model.replace_logits(config.finetune_num_classes) elif config.model == 'densenet': model.module.classifier = nn.Linear( model.module.classifier.in_features, config.finetune_num_classes) model.module.classifier = model.module.classifier.cuda() else: model.module.fc = nn.Linear(model.module.fc.in_features, config.finetune_num_classes) model.module.fc = model.module.fc.cuda() # Setup which layers to train assert config.model in ( 'i3d', 'resnet'), 'finetune params not implemented...' finetune_criterion = config.finetune_prefixes if config.model in ( 'i3d', 'resnet') else config.finetune_begin_index parameters_to_train = get_fine_tuning_parameters( model, finetune_criterion) return model, parameters_to_train else: raise ValueError('CPU training not supported.') return model, model.parameters()
def generate_model(opt): assert opt.model in [ 'resnet', 'preresnet', 'wideresnet', 'resnext', 'densenet' ] if opt.model == 'resnet': assert opt.model_depth in [10, 18, 34, 50, 101, 152, 200] from models.my_resnet_v2 import get_fine_tuning_parameters if opt.model_depth == 10: model = resnet.resnet10( num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model_depth == 18: model = resnet.resnet18( num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model_depth == 34: model = resnet.resnet34( num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model_depth == 50: model = resnet.resnet50( num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model_depth == 101: model = resnet.resnet101( num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model_depth == 152: model = resnet.resnet152( num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model_depth == 200: model = resnet.resnet200( num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model == 'wideresnet': assert opt.model_depth in [50] from models.wide_resnet import get_fine_tuning_parameters if opt.model_depth == 50: model = wide_resnet.resnet50( num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, k=opt.wide_resnet_k, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model == 'resnext': assert opt.model_depth in [50, 101, 152] from models.resnext import get_fine_tuning_parameters if opt.model_depth == 50: model = resnext.resnet50( num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, cardinality=opt.resnext_cardinality, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model_depth == 101: model = resnext.resnet101( num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, cardinality=opt.resnext_cardinality, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model_depth == 152: model = resnext.resnet152( num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, cardinality=opt.resnext_cardinality, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model == 'preresnet': assert opt.model_depth in [18, 34, 50, 101, 152, 200] from models.pre_act_resnet import get_fine_tuning_parameters if opt.model_depth == 18: model = pre_act_resnet.resnet18( num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model_depth == 34: model = pre_act_resnet.resnet34( num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model_depth == 50: model = pre_act_resnet.resnet50( num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model_depth == 101: model = pre_act_resnet.resnet101( num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model_depth 
== 152: model = pre_act_resnet.resnet152( num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model_depth == 200: model = pre_act_resnet.resnet200( num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model == 'densenet': assert opt.model_depth in [121, 169, 201, 264] from models.densenet import get_fine_tuning_parameters if opt.model_depth == 121: model = densenet.densenet121( num_classes=opt.n_classes, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model_depth == 169: model = densenet.densenet169( num_classes=opt.n_classes, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model_depth == 201: model = densenet.densenet201( num_classes=opt.n_classes, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model_depth == 264: model = densenet.densenet264( num_classes=opt.n_classes, sample_size=opt.sample_size, sample_duration=opt.sample_duration) if not opt.no_cuda: model = model.cuda() model = nn.DataParallel(model, device_ids=None) if opt.pretrain_path: print('loading pretrained model {}'.format(opt.pretrain_path)) pretrain = torch.load(opt.pretrain_path) assert opt.arch == pretrain['arch'] pretrain_dict = pretrain['state_dict'] model_dict = model.state_dict() pretrain_dict = {k: v for k, v in pretrain_dict.items() if k in model_dict} # print(pretrain_dict.keys()) model_dict.update(pretrain_dict) model.load_state_dict(model_dict) # model.load_state_dict(pretrain['state_dict'] # if opt.model == 'densenet': # model.module.classifier = nn.Linear( # model.module.classifier.in_features, opt.n_finetune_classes) # model.module.classifier = model.module.classifier.cuda() # else: # model.module.fc = nn.Linear(model.module.fc.in_features, # opt.n_finetune_classes) # model.module.fc = model.module.fc.cuda() parameters = get_fine_tuning_parameters(model, opt.ft_begin_index) return model, parameters else: if opt.pretrain_path: print('loading pretrained model {}'.format(opt.pretrain_path)) pretrain = torch.load(opt.pretrain_path) assert opt.arch == pretrain['arch'] pretrain_dict = pretrain['state_dict'] model_dict = model.state_dict() pretrain_dict = {k: v for k, v in pretrain_dict.items() if k in model_dict} model_dict.update(pretrain_dict) model.load_state_dict(model_dict) # model.load_state_dict(pretrain['state_dict'] # if opt.model == 'densenet': # model.classifier = nn.Linear( # model.classifier.in_features, opt.n_finetune_classes) # else: # model.fc = nn.Linear(model.fc.in_features, # opt.n_finetune_classes) parameters = get_fine_tuning_parameters(model, opt.ft_begin_index) return model, parameters return model, model.parameters()#如果没有pretrain_path就输出模型的所有参数
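# The pretrained-loading code above keeps only checkpoint entries whose keys exist in the
# current model. A slightly stricter variant (our assumption, not in the original) also
# checks that tensor shapes agree, which avoids load errors when the classifier size
# differs between the checkpoint and the fine-tuning setup:
def filter_matching_weights(model, checkpoint_state_dict):
    model_dict = model.state_dict()
    return {k: v for k, v in checkpoint_state_dict.items()
            if k in model_dict and v.shape == model_dict[k].shape}

# pretrain_dict = filter_matching_weights(model, pretrain['state_dict'])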
def generate_C3D_model(opt): assert opt.mode in ['score', 'feature'] if opt.mode == 'score': last_fc = True elif opt.mode == 'feature': last_fc = False assert opt.c3d_model_name in ['resnet', 'preresnet', 'wideresnet', 'resnext', 'densenet'] if opt.c3d_model_name == 'resnet': assert opt.c3d_model_depth in [10, 18, 34, 50, 101, 152, 200] if opt.c3d_model_depth == 10: model = resnet.resnet10(num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, sample_size=opt.sample_size, sample_duration=opt.sample_duration, last_fc=last_fc) elif opt.c3d_model_depth == 18: model = resnet.resnet18(num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, sample_size=opt.sample_size, sample_duration=opt.sample_duration, last_fc=last_fc) elif opt.c3d_model_depth == 34: model = resnet.resnet34(num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, sample_size=opt.sample_size, sample_duration=opt.sample_duration, last_fc=last_fc) elif opt.c3d_model_depth == 50: model = resnet.resnet50(num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, sample_size=opt.sample_size, sample_duration=opt.sample_duration, last_fc=last_fc) elif opt.c3d_model_depth == 101: model = resnet.resnet101(num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, sample_size=opt.sample_size, sample_duration=opt.sample_duration, last_fc=last_fc) elif opt.c3d_model_depth == 152: model = resnet.resnet152(num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, sample_size=opt.sample_size, sample_duration=opt.sample_duration, last_fc=last_fc) elif opt.c3d_model_depth == 200: model = resnet.resnet200(num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, sample_size=opt.sample_size, sample_duration=opt.sample_duration, last_fc=last_fc) elif opt.c3d_model_name == 'wideresnet': assert opt.c3d_model_depth in [50] if opt.c3d_model_depth == 50: model = wide_resnet.resnet50(num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, k=opt.wide_resnet_k, sample_size=opt.sample_size, sample_duration=opt.sample_duration, last_fc=last_fc) elif opt.c3d_model_name == 'resnext': assert opt.c3d_model_depth in [50, 101, 152] if opt.c3d_model_depth == 50: model = resnext.resnet50(num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, cardinality=opt.resnext_cardinality, sample_size=opt.sample_size, sample_duration=opt.sample_duration, last_fc=last_fc) elif opt.c3d_model_depth == 101: model = resnext.resnet101(num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, cardinality=opt.resnext_cardinality, sample_size=opt.sample_size, sample_duration=opt.sample_duration, last_fc=last_fc) elif opt.c3d_model_depth == 152: model = resnext.resnet152(num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, cardinality=opt.resnext_cardinality, sample_size=opt.sample_size, sample_duration=opt.sample_duration, last_fc=last_fc) elif opt.c3d_model_name == 'preresnet': assert opt.c3d_model_depth in [18, 34, 50, 101, 152, 200] if opt.c3d_model_depth == 18: model = pre_act_resnet.resnet18(num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, sample_size=opt.sample_size, sample_duration=opt.sample_duration, last_fc=last_fc) elif opt.c3d_model_depth == 34: model = pre_act_resnet.resnet34(num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, sample_size=opt.sample_size, sample_duration=opt.sample_duration, last_fc=last_fc) elif opt.c3d_model_depth == 50: model = pre_act_resnet.resnet50(num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, sample_size=opt.sample_size, 
sample_duration=opt.sample_duration, last_fc=last_fc) elif opt.c3d_model_depth == 101: model = pre_act_resnet.resnet101(num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, sample_size=opt.sample_size, sample_duration=opt.sample_duration, last_fc=last_fc) elif opt.c3d_model_depth == 152: model = pre_act_resnet.resnet152(num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, sample_size=opt.sample_size, sample_duration=opt.sample_duration, last_fc=last_fc) elif opt.c3d_model_depth == 200: model = pre_act_resnet.resnet200(num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, sample_size=opt.sample_size, sample_duration=opt.sample_duration, last_fc=last_fc) elif opt.c3d_model_name == 'densenet': assert opt.c3d_model_depth in [121, 169, 201, 264] if opt.c3d_model_depth == 121: model = densenet.densenet121(num_classes=opt.n_classes, sample_size=opt.sample_size, sample_duration=opt.sample_duration, last_fc=last_fc) elif opt.c3d_model_depth == 169: model = densenet.densenet169(num_classes=opt.n_classes, sample_size=opt.sample_size, sample_duration=opt.sample_duration, last_fc=last_fc) elif opt.c3d_model_depth == 201: model = densenet.densenet201(num_classes=opt.n_classes, sample_size=opt.sample_size, sample_duration=opt.sample_duration, last_fc=last_fc) elif opt.c3d_model_depth == 264: model = densenet.densenet264(num_classes=opt.n_classes, sample_size=opt.sample_size, sample_duration=opt.sample_duration, last_fc=last_fc) # print(model) print('loading c3d model from: {}'.format(opt.c3d_model_checkpoint)) model_data = torch.load(opt.c3d_model_checkpoint) print(model_data['arch']) assert opt.arch == model_data['arch'] model_state_dict = {} for k, v in model_data['state_dict'].items(): model_state_dict[k[k.index('.') + 1:]] = v model.load_state_dict(model_state_dict) if not opt.no_cuda: model = model.to(opt.device) # model = nn.DataParallel(model, device_ids=None) # print(model) return model
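# The checkpoint loading above strips the leading 'module.' that nn.DataParallel adds to
# every key, because the checkpoint was saved from a wrapped model but is loaded into an
# unwrapped one here. A reusable sketch of that prefix handling (helper name is ours):
def strip_dataparallel_prefix(state_dict, prefix='module.'):
    return {(k[len(prefix):] if k.startswith(prefix) else k): v
            for k, v in state_dict.items()}

# model.load_state_dict(strip_dataparallel_prefix(model_data['state_dict']))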
def generate_model(opt): assert opt.model in [ 'resnet', 'preresnet', 'wideresnet', 'resnext', 'densenet' ] if opt.model == 'resnet': assert opt.model_depth in [10, 18, 34, 50, 101, 152, 200] from models.resnet import get_fine_tuning_parameters if opt.model_depth == 10: model = resnet.resnet10(num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model_depth == 18: model = resnet.resnet18(num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model_depth == 34: model = resnet.resnet34(num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model_depth == 50: model = resnet.resnet50(num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model_depth == 101: model = resnet.resnet101(num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model_depth == 152: model = resnet.resnet152(num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model_depth == 200: model = resnet.resnet200(num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model == 'wideresnet': assert opt.model_depth in [50] from models.wide_resnet import get_fine_tuning_parameters if opt.model_depth == 50: model = wide_resnet.resnet50(num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, k=opt.wide_resnet_k, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model == 'resnext': assert opt.model_depth in [50, 101, 152] from models.resnext import get_fine_tuning_parameters if opt.model_depth == 50: model = resnext.resnet50(num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, cardinality=opt.resnext_cardinality, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model_depth == 101: model = resnext.resnet101(num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, cardinality=opt.resnext_cardinality, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model_depth == 152: model = resnext.resnet152(num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, cardinality=opt.resnext_cardinality, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model == 'preresnet': assert opt.model_depth in [18, 34, 50, 101, 152, 200] from models.pre_act_resnet import get_fine_tuning_parameters if opt.model_depth == 18: model = pre_act_resnet.resnet18( num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model_depth == 34: model = pre_act_resnet.resnet34( num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model_depth == 50: model = pre_act_resnet.resnet50( num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model_depth == 101: model = pre_act_resnet.resnet101( num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model_depth == 152: model = 
pre_act_resnet.resnet152( num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model_depth == 200: model = pre_act_resnet.resnet200( num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model == 'densenet': assert opt.model_depth in [121, 169, 201, 264] from models.densenet import get_fine_tuning_parameters if opt.model_depth == 121: model = densenet.densenet121(num_classes=opt.n_classes, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model_depth == 169: model = densenet.densenet169(num_classes=opt.n_classes, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model_depth == 201: model = densenet.densenet201(num_classes=opt.n_classes, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model_depth == 264: model = densenet.densenet264(num_classes=opt.n_classes, sample_size=opt.sample_size, sample_duration=opt.sample_duration) if not opt.no_cuda: model = model.cuda() model = nn.DataParallel(model, device_ids=None) if opt.pretrain_path: print('loading pretrained model {}'.format(opt.pretrain_path)) pretrain = torch.load(opt.pretrain_path) assert opt.arch == pretrain['arch'] model.load_state_dict(pretrain['state_dict']) if opt.model == 'densenet': model.module.classifier = nn.Linear( model.module.classifier.in_features, opt.n_finetune_classes) model.module.classifier = model.module.classifier.cuda() else: model.module.fc = nn.Linear(model.module.fc.in_features, opt.n_finetune_classes) model.module.fc = model.module.fc.cuda() parameters = get_fine_tuning_parameters(model, opt.ft_begin_index) return model, parameters else: if opt.pretrain_path: print('loading pretrained model {}'.format(opt.pretrain_path)) pretrain = torch.load(opt.pretrain_path, map_location='cpu') assert opt.arch == pretrain['arch'] from collections import OrderedDict new_state_dict = OrderedDict() for k, v in pretrain['state_dict'].items(): name = k[7:] # remove `module.` new_state_dict[name] = v # load params model.load_state_dict(new_state_dict) #model.load_state_dict(pretrain['state_dict']) if opt.model == 'densenet': model.classifier = nn.Linear(model.classifier.in_features, opt.n_finetune_classes) else: model.fc = nn.Linear(model.fc.in_features, opt.n_finetune_classes) parameters = get_fine_tuning_parameters(model, opt.ft_begin_index) return model, parameters return model, model.parameters()
def generate_model(opt): assert opt.model in ['resnet', 'resnetl', 'resnext', 'c3d'] if opt.model == 'resnet': assert opt.model_depth in [10] from models.resnet import get_fine_tuning_parameters if opt.model_depth == 10: model = resnet.resnet10(num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model == 'resnetl': assert opt.model_depth in [10] from models.resnetl import get_fine_tuning_parameters if opt.model_depth == 10: model = resnetl.resnetl10(num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model == 'resnext': assert opt.model_depth in [101] from models.resnext import get_fine_tuning_parameters if opt.model_depth == 101: model = resnext.resnet101(num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, cardinality=opt.resnext_cardinality, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model == 'c3d': assert opt.model_depth in [10] from models.c3d import get_fine_tuning_parameters if opt.model_depth == 10: model = c3d.c3d_v1(sample_size=opt.sample_size, sample_duration=opt.sample_duration, num_classes=opt.n_classes) if not opt.no_cuda: model = model.cuda() model = nn.DataParallel(model, device_ids=None) if opt.pretrain_path: print('loading pretrained model {}'.format(opt.pretrain_path)) pretrain = torch.load(opt.pretrain_path) assert opt.arch == pretrain['arch'] model.load_state_dict(pretrain['state_dict']) if opt.model == 'c3d': # CHECK HERE model.module.fc = nn.Linear(model.module.fc[0].in_features, opt.n_finetune_classes) else: model.module.fc = nn.Linear(model.module.fc.in_features, opt.n_finetune_classes) model.module.fc = model.module.fc.cuda() if opt.modality == 'RGB' and opt.model != 'c3d': print("[INFO]: RGB model is used for init model") model = _modify_first_conv_layer( model, 3, 3) ##### Check models trained (3,7,7) or (7,7,7) elif opt.modality == 'Depth': print( "[INFO]: Converting the pretrained model to Depth init model") model = _construct_depth_model(model) print("[INFO]: Done. Flow model ready.") elif opt.modality == 'RGB-D': print( "[INFO]: Converting the pretrained model to RGB+D init model") model = _construct_rgbdepth_model(model) print("[INFO]: Done. RGB-D model ready.") modules = list(model.modules()) first_conv_idx = list( filter(lambda x: isinstance(modules[x], nn.Conv3d), list(range(len(modules)))))[0] conv_layer = modules[first_conv_idx] if conv_layer.kernel_size[0] > opt.sample_duration: model = _modify_first_conv_layer(model, int(opt.sample_duration / 2), 1) parameters = get_fine_tuning_parameters(model, opt.ft_begin_index) return model, parameters else: if opt.pretrain_path: print('loading pretrained model {}'.format(opt.pretrain_path)) pretrain = torch.load(opt.pretrain_path) assert opt.arch == pretrain['arch'] model.load_state_dict(pretrain['state_dict']) if opt.modality == 'RGB' and opt.model != 'c3d': print("[INFO]: RGB model is used for init model") model = _modify_first_conv_layer(model, 3, 3) elif opt.modality == 'Depth': print( "[INFO]: Converting the pretrained model to Depth init model") model = _construct_depth_model(model) print("[INFO]: Deoth model ready.") elif opt.modality == 'RGB-D': print( "[INFO]: Converting the pretrained model to RGB-D init model") model = _construct_rgbdepth_model(model) print("[INFO]: Done. 
RGB-D model ready.") modules = list(model.modules()) first_conv_idx = list( filter(lambda x: isinstance(modules[x], nn.Conv3d), list(range(len(modules)))))[0] conv_layer = modules[first_conv_idx] if conv_layer.kernel_size[0] > opt.sample_duration: print("[INFO]: RGB model is used for init model") model = _modify_first_conv_layer(model, int(opt.sample_duration / 2), 1) if opt.model == 'c3d': # CHECK HERE model.fc = nn.Linear(model.fc[0].in_features, model.fc[0].out_features) else: model.fc = nn.Linear(model.fc.in_features, opt.n_finetune_classes) parameters = get_fine_tuning_parameters(model, opt.ft_begin_index) return model, parameters
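# The code above locates the first Conv3d in the network to decide whether its temporal
# kernel is larger than the clip length. A standalone sketch of that lookup, assuming
# only that the model contains at least one nn.Conv3d (helper name is ours):
import torch.nn as nn

def first_conv3d(model):
    modules = list(model.modules())
    idx = [i for i, m in enumerate(modules) if isinstance(m, nn.Conv3d)][0]
    return modules[idx]

# conv = first_conv3d(model)
# if conv.kernel_size[0] > opt.sample_duration:
#     model = _modify_first_conv_layer(model, int(opt.sample_duration / 2), 1)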
def generate_model(opt): assert opt.model in [ 'c3d', 'squeezenet', 'mobilenet', 'resnext', 'resnet', 'resnetl', 'shufflenet', 'mobilenetv2', 'shufflenetv2' ] if opt.model == 'c3d': from models.c3d import get_fine_tuning_parameters model = c3d.get_model(num_classes=opt.n_classes, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model == 'squeezenet': from models.squeezenet import get_fine_tuning_parameters model = squeezenet.get_model(version=opt.version, num_classes=opt.n_classes, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model == 'shufflenet': from models.shufflenet import get_fine_tuning_parameters model = shufflenet.get_model(groups=opt.groups, width_mult=opt.width_mult, num_classes=opt.n_classes) elif opt.model == 'shufflenetv2': from models.shufflenetv2 import get_fine_tuning_parameters model = shufflenetv2.get_model(num_classes=opt.n_classes, sample_size=opt.sample_size, width_mult=opt.width_mult) elif opt.model == 'mobilenet': from models.mobilenet import get_fine_tuning_parameters model = mobilenet.get_model(num_classes=opt.n_classes, sample_size=opt.sample_size, width_mult=opt.width_mult) elif opt.model == 'mobilenetv2': from models.mobilenetv2 import get_fine_tuning_parameters model = mobilenetv2.get_model(num_classes=opt.n_classes, sample_size=opt.sample_size, width_mult=opt.width_mult) elif opt.model == 'resnext': assert opt.model_depth in [50, 101, 152] from models.resnext import get_fine_tuning_parameters if opt.model_depth == 50: model = resnext.resnet50(num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, cardinality=opt.resnext_cardinality, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model_depth == 101: model = resnext.resnet101(num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, cardinality=opt.resnext_cardinality, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model_depth == 152: model = resnext.resnet152(num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, cardinality=opt.resnext_cardinality, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model == 'resnetl': assert opt.model_depth in [10] from models.resnetl import get_fine_tuning_parameters if opt.model_depth == 10: model = resnetl.resnetl10(num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model == 'resnet': assert opt.model_depth in [10, 18, 34, 50, 101, 152, 200] from models.resnet import get_fine_tuning_parameters if opt.model_depth == 10: model = resnet.resnet10(num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model_depth == 18: model = resnet.resnet18(num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model_depth == 34: model = resnet.resnet34(num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model_depth == 50: model = resnet.resnet50(num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model_depth == 101: model = resnet.resnet101(num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model_depth == 152: model = 
resnet.resnet152(num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model_depth == 200: model = resnet.resnet200(num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, sample_size=opt.sample_size, sample_duration=opt.sample_duration) if not opt.no_cuda: model = model.cuda() model = nn.DataParallel(model, device_ids=None) pytorch_total_params = sum(p.numel() for p in model.parameters() if p.requires_grad) print("Total number of trainable parameters: ", pytorch_total_params) if opt.pretrain_path: print('loading pretrained model {}'.format(opt.pretrain_path)) pretrain = torch.load(opt.pretrain_path, map_location=torch.device('cpu')) # print(opt.arch) # print(pretrain['arch']) # assert opt.arch == pretrain['arch'] model.load_state_dict(pretrain['state_dict']) if opt.model in [ 'mobilenet', 'mobilenetv2', 'shufflenet', 'shufflenetv2' ]: model.module.classifier = nn.Sequential( nn.Dropout(0.5), nn.Linear(model.module.classifier[1].in_features, opt.n_finetune_classes)) model.module.classifier = model.module.classifier.cuda() elif opt.model == 'squeezenet': model.module.classifier = nn.Sequential( nn.Dropout(p=0.5), nn.Conv3d(model.module.classifier[1].in_channels, opt.n_finetune_classes, kernel_size=1), nn.ReLU(inplace=True), nn.AvgPool3d((1, 4, 4), stride=1)) model.module.classifier = model.module.classifier.cuda() else: model.module.fc = nn.Linear(model.module.fc.in_features, opt.n_finetune_classes) model.module.fc = model.module.fc.cuda() # model = _modify_first_conv_layer(model) # model = model.cuda() parameters = get_fine_tuning_parameters(model, opt.ft_portion) return model, parameters else: if opt.pretrain_path: print('loading pretrained model {}'.format(opt.pretrain_path)) pretrain = torch.load(opt.pretrain_path) assert opt.arch == pretrain['arch'] model.load_state_dict(pretrain['state_dict']) if opt.model in [ 'mobilenet', 'mobilenetv2', 'shufflenet', 'shufflenetv2' ]: model.module.classifier = nn.Sequential( nn.Dropout(0.9), nn.Linear(model.module.classifier[1].in_features, opt.n_finetune_classes)) elif opt.model == 'squeezenet': model.module.classifier = nn.Sequential( nn.Dropout(p=0.5), nn.Conv3d(model.module.classifier[1].in_channels, opt.n_finetune_classes, kernel_size=1), nn.ReLU(inplace=True), nn.AvgPool3d((1, 4, 4), stride=1)) else: model.module.fc = nn.Linear(model.module.fc.in_features, opt.n_finetune_classes) parameters = get_fine_tuning_parameters(model, opt.ft_begin_index) return model, parameters return model, model.parameters()
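# The snippet above reports the number of trainable parameters right after building the
# model. The same expression, wrapped as a helper for reuse (helper name is ours):
def count_trainable_parameters(model):
    return sum(p.numel() for p in model.parameters() if p.requires_grad)

# print("Total number of trainable parameters:", count_trainable_parameters(model))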
def generate_model(opt): assert opt.mode in ['score', 'feature'] if opt.mode == 'score': last_fc = True elif opt.mode == 'feature': last_fc = False assert opt.model_name in [ 'resnet', 'preresnet', 'wideresnet', 'resnext', 'densenet' ] if opt.model_name == 'resnet': assert opt.model_depth in [10, 18, 34, 50, 101, 152, 200] if opt.model_depth == 10: model = resnet.resnet10(num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, sample_size=opt.sample_size, sample_duration=opt.sample_duration, last_fc=last_fc) elif opt.model_depth == 18: model = resnet.resnet18(num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, sample_size=opt.sample_size, sample_duration=opt.sample_duration, last_fc=last_fc) elif opt.model_depth == 34: model = resnet.resnet34(num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, sample_size=opt.sample_size, sample_duration=opt.sample_duration, last_fc=last_fc) elif opt.model_depth == 50: model = resnet.resnet50(num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, sample_size=opt.sample_size, sample_duration=opt.sample_duration, last_fc=last_fc) elif opt.model_depth == 101: model = resnet.resnet101(num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, sample_size=opt.sample_size, sample_duration=opt.sample_duration, last_fc=last_fc) elif opt.model_depth == 152: model = resnet.resnet152(num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, sample_size=opt.sample_size, sample_duration=opt.sample_duration, last_fc=last_fc) elif opt.model_depth == 200: model = resnet.resnet200(num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, sample_size=opt.sample_size, sample_duration=opt.sample_duration, last_fc=last_fc) elif opt.model_name == 'wideresnet': assert opt.model_depth in [50] if opt.model_depth == 50: model = wide_resnet.resnet50(num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, k=opt.wide_resnet_k, sample_size=opt.sample_size, sample_duration=opt.sample_duration, last_fc=last_fc) elif opt.model_name == 'resnext': assert opt.model_depth in [50, 101, 152] if opt.model_depth == 50: model = resnext.resnet50(num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, cardinality=opt.resnext_cardinality, sample_size=opt.sample_size, sample_duration=opt.sample_duration, last_fc=last_fc) elif opt.model_depth == 101: model = resnext.resnet101(num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, cardinality=opt.resnext_cardinality, sample_size=opt.sample_size, sample_duration=opt.sample_duration, last_fc=last_fc) elif opt.model_depth == 152: model = resnext.resnet152(num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, cardinality=opt.resnext_cardinality, sample_size=opt.sample_size, sample_duration=opt.sample_duration, last_fc=last_fc) elif opt.model_name == 'preresnet': assert opt.model_depth in [18, 34, 50, 101, 152, 200] if opt.model_depth == 18: model = pre_act_resnet.resnet18( num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, sample_size=opt.sample_size, sample_duration=opt.sample_duration, last_fc=last_fc) elif opt.model_depth == 34: model = pre_act_resnet.resnet34( num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, sample_size=opt.sample_size, sample_duration=opt.sample_duration, last_fc=last_fc) elif opt.model_depth == 50: model = pre_act_resnet.resnet50( num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, sample_size=opt.sample_size, sample_duration=opt.sample_duration, last_fc=last_fc) elif opt.model_depth == 101: model = pre_act_resnet.resnet101( 
num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, sample_size=opt.sample_size, sample_duration=opt.sample_duration, last_fc=last_fc) elif opt.model_depth == 152: model = pre_act_resnet.resnet152( num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, sample_size=opt.sample_size, sample_duration=opt.sample_duration, last_fc=last_fc) elif opt.model_depth == 200: model = pre_act_resnet.resnet200( num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, sample_size=opt.sample_size, sample_duration=opt.sample_duration, last_fc=last_fc) elif opt.model_name == 'densenet': assert opt.model_depth in [121, 169, 201, 264] if opt.model_depth == 121: model = densenet.densenet121(num_classes=opt.n_classes, sample_size=opt.sample_size, sample_duration=opt.sample_duration, last_fc=last_fc) elif opt.model_depth == 169: model = densenet.densenet169(num_classes=opt.n_classes, sample_size=opt.sample_size, sample_duration=opt.sample_duration, last_fc=last_fc) elif opt.model_depth == 201: model = densenet.densenet201(num_classes=opt.n_classes, sample_size=opt.sample_size, sample_duration=opt.sample_duration, last_fc=last_fc) elif opt.model_depth == 264: model = densenet.densenet264(num_classes=opt.n_classes, sample_size=opt.sample_size, sample_duration=opt.sample_duration, last_fc=last_fc) if not opt.no_cuda: model = model.cuda() model = nn.DataParallel(model, device_ids=None) return model
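# Illustrative sketch (an assumption, not from the source): the only difference
# opt.mode makes above is whether the final fc layer is applied, so the same dummy
# clip can be used to inspect both output shapes. The 2048-d feature size mentioned
# below is the usual ResNeXt-101 value and is not asserted by the original code.
import torch


def check_output_shape(model, opt):
    model.eval()
    # dummy clip: (batch, channels, frames, height, width)
    clip = torch.randn(1, 3, opt.sample_duration, opt.sample_size, opt.sample_size)
    if not opt.no_cuda:
        clip = clip.cuda()
    with torch.no_grad():
        out = model(clip)
    # mode == 'score'   -> (1, n_classes) class scores
    # mode == 'feature' -> (1, feature_dim) pooled features (e.g. 2048 for ResNeXt-101)
    return out.shape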
def generate_model(opt): assert opt.model in [ 'resnet', 'preresnet', 'wideresnet', 'resnext', 'densenet', 'i3d', 'i3dv2' ] if opt.model == 'resnet': assert opt.model_depth in [10, 18, 34, 50, 101, 152, 200] from models.resnet import get_fine_tuning_parameters if opt.model_depth == 10: model = resnet.resnet10(num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model_depth == 18: model = resnet.resnet18(num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model_depth == 34: model = resnet.resnet34(num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model_depth == 50: model = resnet.resnet50(num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model_depth == 101: model = resnet.resnet101(num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model_depth == 152: model = resnet.resnet152(num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model_depth == 200: model = resnet.resnet200(num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model == 'wideresnet': assert opt.model_depth in [50] from models.wide_resnet import get_fine_tuning_parameters if opt.model_depth == 50: model = wide_resnet.resnet50(num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, k=opt.wide_resnet_k, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model == 'resnext': assert opt.model_depth in [50, 101, 152] from models.resnext import get_fine_tuning_parameters if opt.model_depth == 50: model = resnext.resnet50(num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, cardinality=opt.resnext_cardinality, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model_depth == 101: model = resnext.resnet101(num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, cardinality=opt.resnext_cardinality, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model_depth == 152: model = resnext.resnet152(num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, cardinality=opt.resnext_cardinality, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model == 'preresnet': assert opt.model_depth in [18, 34, 50, 101, 152, 200] from models.pre_act_resnet import get_fine_tuning_parameters if opt.model_depth == 18: model = pre_act_resnet.resnet18( num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model_depth == 34: model = pre_act_resnet.resnet34( num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model_depth == 50: model = pre_act_resnet.resnet50( num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model_depth == 101: model = pre_act_resnet.resnet101( num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model_depth 
== 152: model = pre_act_resnet.resnet152( num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model_depth == 200: model = pre_act_resnet.resnet200( num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model == 'densenet': assert opt.model_depth in [121, 169, 201, 264] from models.densenet import get_fine_tuning_parameters if opt.model_depth == 121: model = densenet.densenet121(num_classes=opt.n_classes, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model_depth == 169: model = densenet.densenet169(num_classes=opt.n_classes, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model_depth == 201: model = densenet.densenet201(num_classes=opt.n_classes, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model_depth == 264: model = densenet.densenet264(num_classes=opt.n_classes, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model == "i3d": from models.i3dpt import get_fine_tuning_parameters model = i3dpt.I3D(num_classes=opt.n_classes, dropout_prob=0.5) elif opt.model == "i3dv2": from models.I3D_Pytorch import get_fine_tuning_parameters model = I3D_Pytorch.I3D(num_classes=opt.n_classes, dropout_keep_prob=0.5) if not opt.no_cuda: model = model.cuda() model = nn.DataParallel(model, device_ids=None) if opt.pretrain_path: print('loading pretrained model {}'.format(opt.pretrain_path)) pretrain = torch.load(opt.pretrain_path) if opt.model != "i3d" and opt.model != "i3dv2": assert opt.arch == pretrain['arch'] model.load_state_dict(pretrain['state_dict']) else: pretrain = {"module." + k: v for k, v in pretrain.items()} model_dict = model.state_dict() model_dict.update(pretrain) model.load_state_dict(model_dict) if opt.model == 'densenet': model.module.classifier = nn.Linear( model.module.classifier.in_features, opt.n_finetune_classes) model.module.classifier = model.module.classifier.cuda() else: model.module.fc = nn.Linear(model.module.fc.in_features, opt.n_finetune_classes) model.module.fc = model.module.fc.cuda() parameters = get_fine_tuning_parameters(model, opt.ft_begin_index) return model, parameters else: if opt.pretrain_path: print('loading pretrained model {}'.format(opt.pretrain_path)) pretrain = torch.load(opt.pretrain_path) assert opt.arch == pretrain['arch'] model.load_state_dict(pretrain['state_dict']) if opt.model == 'densenet': model.classifier = nn.Linear(model.classifier.in_features, opt.n_finetune_classes) else: model.fc = nn.Linear(model.fc.in_features, opt.n_finetune_classes) parameters = get_fine_tuning_parameters(model, opt.ft_begin_index) return model, parameters return model, model.parameters()
def main():
    # 994513477.mp4  995153247.mp4  996259932.mp4
    start_time = time.time()
    video_path = '/home/zengh/Dataset/AIChallenger/group5/995153247.mp4'
    if os.path.exists(video_path):
        print("exists!")
    cap = cv2.VideoCapture(video_path)  # ~15 ms
    duration = (time.time() - start_time) * 1000
    print('1 time %.3f ms' % duration)

    start_time = time.time()
    print(id(cv2.CAP_PROP_POS_FRAMES))
    # cap.set(cv2.CAP_PROP_POS_FRAMES, 50)  # ~40 ms
    # print("id", id(cv2.CAP_PROP_POS_FRAMES))
    duration = (time.time() - start_time) * 1000
    print('2 time %.3f ms' % duration)

    start_time = time.time()
    ret, frame = cap.read()  # ~1 ms
    duration = (time.time() - start_time) * 1000
    # print("ret", ret)
    print('3 time %.3f ms' % duration)

    # count = 1
    # frames = []
    # while(1):
    #     ret, frame = cap.read()
    #     if frame is None:
    #         break
    #     if count % 5 == 0:
    #         frames.append(frame)
    #     count = count + 1

    # v = pims.Video('/home/zengh/Dataset/AIChallenger/group5/982006190.mp4')
    # duration = (time.time() - start_time) * 1000
    # print('cv video time %.3f ms' % duration)

    opt = parse_opts()

    start_time = time.time()
    model = resnext.resnet101(num_classes=opt.n_finetune_classes,
                              shortcut_type=opt.resnet_shortcut,
                              cardinality=opt.resnext_cardinality,
                              sample_size=opt.sample_size,
                              sample_duration=opt.sample_duration)
    model = model.cuda()
    model = nn.DataParallel(model, device_ids=None)
    model.load_state_dict(
        torch.load('./trained_models/best.pth10.tar')['state_dict'])
    duration = (time.time() - start_time) * 1000
    print('restore time %.3f ms' % duration)
    # model = nn.DataParallel(model)
    model.eval()

    rgb_mean = [0.485, 0.456, 0.406]
    rgb_std = [0.229, 0.224, 0.225]
    opt.scales = [1]
    transform_val = Compose([
        MultiScaleCornerCrop(opt.scales, opt.sample_size, crop_positions=['c']),
        ToTensor(),
        Normalize(rgb_mean, rgb_std),
    ])

    start_time = time.time()
    clip = video_loader(
        root_path='/home/zengh/Dataset/AIChallenger/train/group5/567700300',
        frame_indices=range(3, 19),
        transform=transform_val)
    clip = clip.unsqueeze(0)
    print("clip", clip)
    duration = (time.time() - start_time) * 1000
    print('pic time %.3f ms' % duration)
    # print("clip", clip.shape)

    start_time = time.time()
    indexes = test(clip, model)
    duration = (time.time() - start_time) * 1000
    print('pre time %.3f ms' % duration)
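# The start_time/duration bookkeeping above repeats for every stage; a small
# context manager keeps the same millisecond reporting with less noise. This is
# an editorial sketch, not part of the original script.
import time
from contextlib import contextmanager


@contextmanager
def timed(label):
    start = time.time()
    yield
    print('%s time %.3f ms' % (label, (time.time() - start) * 1000))


# usage: wraps one stage exactly like the manual start_time/duration pairs, e.g.
# with timed('restore'):
#     model.load_state_dict(torch.load('./trained_models/best.pth10.tar')['state_dict'])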
def generate_model(opt): assert opt.model in [ 'resnet', 'preresnet', 'wideresnet', 'resnext', 'densenet' ] if opt.model == 'resnet': assert opt.model_depth in [10, 18, 34, 50, 101, 152, 200] from models.resnet import get_fine_tuning_parameters if opt.model_depth == 10: model = resnet.resnet10(num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model_depth == 18: model = resnet.resnet18(num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model_depth == 34: model = resnet.resnet34(num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model_depth == 50: model = resnet.resnet50(num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, sample_size=opt.sample_size, sample_duration=opt.sample_duration, model_type=opt.model_type) elif opt.model_depth == 101: model = resnet.resnet101(num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, sample_size=opt.sample_size, sample_duration=opt.sample_duration, model_type=opt.model_type) elif opt.model_depth == 152: model = resnet.resnet152(num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, sample_size=opt.sample_size, sample_duration=opt.sample_duration, model_type=opt.model_type) elif opt.model_depth == 200: model = resnet.resnet200(num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, sample_size=opt.sample_size, sample_duration=opt.sample_duration, model_type=opt.model_type) elif opt.model == 'wideresnet': assert opt.model_depth in [50] from models.wide_resnet import get_fine_tuning_parameters if opt.model_depth == 50: model = wide_resnet.resnet50(num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, k=opt.wide_resnet_k, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model == 'resnext': assert opt.model_depth in [50, 101, 152] from models.resnext import get_fine_tuning_parameters if opt.model_depth == 50: model = resnext.resnet50(num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, cardinality=opt.resnext_cardinality, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model_depth == 101: model = resnext.resnet101(num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, cardinality=opt.resnext_cardinality, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model_depth == 152: model = resnext.resnet152(num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, cardinality=opt.resnext_cardinality, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model == 'preresnet': assert opt.model_depth in [18, 34, 50, 101, 152, 200] from models.pre_act_resnet import get_fine_tuning_parameters if opt.model_depth == 18: model = pre_act_resnet.resnet18( num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model_depth == 34: model = pre_act_resnet.resnet34( num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model_depth == 50: model = pre_act_resnet.resnet50( num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model_depth == 101: model = pre_act_resnet.resnet101( num_classes=opt.n_classes, 
shortcut_type=opt.resnet_shortcut, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model_depth == 152: model = pre_act_resnet.resnet152( num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model_depth == 200: model = pre_act_resnet.resnet200( num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model == 'densenet': assert opt.model_depth in [121, 169, 201, 264] from models.densenet import get_fine_tuning_parameters if opt.model_depth == 121: model = densenet.densenet121(num_classes=opt.n_classes, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model_depth == 169: model = densenet.densenet169(num_classes=opt.n_classes, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model_depth == 201: model = densenet.densenet201(num_classes=opt.n_classes, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model_depth == 264: model = densenet.densenet264(num_classes=opt.n_classes, sample_size=opt.sample_size, sample_duration=opt.sample_duration) if not opt.no_cuda: import os # os.environ['CUDA_VISIBLE_DEVICES'] = f'{opt.cuda_id}' model = model.cuda(device=opt.cuda_id) model = nn.DataParallel(model, device_ids=[0]) # CUDA change if opt.pretrain_path: print('loading pretrained model {}'.format(opt.pretrain_path)) pretrain = torch.load(opt.pretrain_path) print(pretrain['arch']) arch = f'{opt.model}-{opt.model_depth}' # arch = opt.model + '-' + opt.model_depth print(arch) assert arch == pretrain['arch'] model.load_state_dict(pretrain['state_dict']) if opt.model == 'densenet': model.module.classifier = nn.Linear( model.module.classifier.in_features, opt.n_finetune_classes) model.module.classifier = model.module.classifier.cuda( device=opt.cuda_id) # elif opt.use_quadriplet: # model = EmbeddingModel(model, opt.n_finetune_classes, not opt.no_cuda, opt.cuda_id) else: model.module.fc = nn.Sequential( nn.Dropout(0.4), nn.Linear(model.module.fc.in_features, 512), nn.ReLU6(), nn.Dropout(0.4), nn.Linear(512, 128), nn.ReLU6(), nn.Linear(128, opt.n_finetune_classes)).cuda(device=opt.cuda_id) # model.module.fc = nn.Linear(model.module.fc.in_features, # opt.n_finetune_classes) # model.module.fc = model.module.fc.cuda(device=opt.cuda_id) parameters = get_fine_tuning_parameters(model, opt.ft_begin_index) print(len(list(parameters)), 'params to fine tune') return model, parameters else: if opt.pretrain_path: print('loading pretrained model {}'.format(opt.pretrain_path)) pretrain = torch.load(opt.pretrain_path) assert opt.arch == pretrain['arch'] model.load_state_dict(pretrain['state_dict']) if opt.model == 'densenet': model.classifier = nn.Linear(model.classifier.in_features, opt.n_finetune_classes) else: model.fc = nn.Linear(model.fc.in_features, opt.n_finetune_classes) parameters = get_fine_tuning_parameters(model, opt.ft_begin_index) return model, parameters return model, model.parameters()
def generate_model(opt): assert opt.model in [ 'resnet', 'preresnet', 'wideresnet', 'resnext', 'densenet', 'mobilenet', 'mobilenetv2' ] if opt.model == 'resnet': assert opt.model_depth in [10, 18, 34, 50, 101, 152, 200] from models.resnet import get_fine_tuning_parameters if opt.model_depth == 10: model = resnet.resnet10(num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model_depth == 18: model = resnet.resnet18(num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model_depth == 34: model = resnet.resnet34(num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model_depth == 50: model = resnet.resnet50(num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model_depth == 101: model = resnet.resnet101(num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model_depth == 152: model = resnet.resnet152(num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model_depth == 200: model = resnet.resnet200(num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model == 'wideresnet': assert opt.model_depth in [50] from models.wide_resnet import get_fine_tuning_parameters if opt.model_depth == 50: model = wide_resnet.resnet50(num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, k=opt.wide_resnet_k, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model == 'resnext': assert opt.model_depth in [50, 101, 152] from models.resnext import get_fine_tuning_parameters if opt.model_depth == 50: model = resnext.resnet50(num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, cardinality=opt.resnext_cardinality, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model_depth == 101: model = resnext.resnet101(num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, cardinality=opt.resnext_cardinality, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model_depth == 152: model = resnext.resnet152(num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, cardinality=opt.resnext_cardinality, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model == 'preresnet': assert opt.model_depth in [18, 34, 50, 101, 152, 200] from models.pre_act_resnet import get_fine_tuning_parameters if opt.model_depth == 18: model = pre_act_resnet.resnet18( num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model_depth == 34: model = pre_act_resnet.resnet34( num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model_depth == 50: model = pre_act_resnet.resnet50( num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model_depth == 101: model = pre_act_resnet.resnet101( num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif 
opt.model_depth == 152:
            model = pre_act_resnet.resnet152(
                num_classes=opt.n_classes,
                shortcut_type=opt.resnet_shortcut,
                sample_size=opt.sample_size,
                sample_duration=opt.sample_duration)
        elif opt.model_depth == 200:
            model = pre_act_resnet.resnet200(
                num_classes=opt.n_classes,
                shortcut_type=opt.resnet_shortcut,
                sample_size=opt.sample_size,
                sample_duration=opt.sample_duration)
    elif opt.model == 'densenet':
        assert opt.model_depth in [121, 169, 201, 264]
        from models.densenet import get_fine_tuning_parameters
        if opt.model_depth == 121:
            model = densenet.densenet121(num_classes=opt.n_classes,
                                         sample_size=opt.sample_size,
                                         sample_duration=opt.sample_duration)
        elif opt.model_depth == 169:
            model = densenet.densenet169(num_classes=opt.n_classes,
                                         sample_size=opt.sample_size,
                                         sample_duration=opt.sample_duration)
        elif opt.model_depth == 201:
            model = densenet.densenet201(num_classes=opt.n_classes,
                                         sample_size=opt.sample_size,
                                         sample_duration=opt.sample_duration)
        elif opt.model_depth == 264:
            model = densenet.densenet264(num_classes=opt.n_classes,
                                         sample_size=opt.sample_size,
                                         sample_duration=opt.sample_duration)
    elif opt.model == 'mobilenet':
        from models.mobilenet import get_fine_tuning_parameters
        model = mobilenet.get_model(num_classes=opt.n_classes,
                                    sample_size=opt.sample_size,
                                    width_mult=opt.width_mult)
    elif opt.model == 'mobilenetv2':
        from models.mobilenetv2 import get_fine_tuning_parameters
        model = mobilenetv2.get_model(num_classes=opt.n_classes,
                                      sample_size=opt.sample_size,
                                      width_mult=opt.width_mult)

    if not opt.no_cuda:
        if not opt.no_cuda_predict:
            model = model.cuda()
        model = nn.DataParallel(model, device_ids=None)

        if opt.pretrain_path:
            print('loading pretrained model {}'.format(opt.pretrain_path))
            pretrain = torch.load(opt.pretrain_path)
            print("Pretrain arch", pretrain['arch'])
            assert opt.arch == pretrain['arch']
            model.load_state_dict(pretrain['state_dict'])

            ft_begin_index = opt.ft_begin_index
            if opt.model in [
                    'mobilenet', 'mobilenetv2', 'shufflenet', 'shufflenetv2'
            ]:
                model.module.classifier = nn.Sequential(
                    nn.Dropout(0.9),
                    nn.Linear(model.module.classifier[1].in_features,
                              opt.n_finetune_classes))
                model.module.classifier = model.module.classifier.cuda()
                ft_begin_index = 'complete' if ft_begin_index == 0 else 'last_layer'
            elif opt.model == 'densenet':
                model.module.classifier = nn.Linear(
                    model.module.classifier.in_features,
                    opt.n_finetune_classes)
                model.module.classifier = model.module.classifier.cuda()
            else:
                model.module.fc = nn.Linear(model.module.fc.in_features,
                                            opt.n_finetune_classes)
                model.module.fc = model.module.fc.cuda()

            print("Finetuning at:", ft_begin_index)
            parameters = get_fine_tuning_parameters(model, ft_begin_index)
            return model, parameters
    else:
        if opt.pretrain_path:
            print('loading pretrained model {}'.format(opt.pretrain_path))
            pretrain = torch.load(opt.pretrain_path)
            assert opt.arch == pretrain['arch']
            model.load_state_dict(pretrain['state_dict'])

            ft_begin_index = opt.ft_begin_index
            if opt.model in [
                    'mobilenet', 'mobilenetv2', 'shufflenet', 'shufflenetv2'
            ]:
                # CPU path: the model is not wrapped in DataParallel here, so the
                # new head is attached directly and is not moved to the GPU.
                model.classifier = nn.Sequential(
                    nn.Dropout(0.9),
                    nn.Linear(model.classifier[1].in_features,
                              opt.n_finetune_classes))
                ft_begin_index = 'complete' if ft_begin_index == 0 else 'last_layer'
            elif opt.model == 'densenet':
                model.classifier = nn.Linear(model.classifier.in_features,
                                             opt.n_finetune_classes)
            else:
                model.fc = nn.Linear(model.fc.in_features,
                                     opt.n_finetune_classes)

            print("Finetuning at:", ft_begin_index)
            parameters = get_fine_tuning_parameters(model, ft_begin_index)
            return model, parameters

    return model, model.parameters()
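# Sketch of the parameter selection implied by the 'complete' / 'last_layer'
# strings passed to get_fine_tuning_parameters above. The real helpers live in
# models/mobilenet.py and friends and are not shown here, so this is only an
# assumed minimal version with a hypothetical name.
import torch.nn as nn


def select_fine_tuning_parameters(model, ft_portion):
    if ft_portion == 'complete':
        # train everything
        return model.parameters()
    if ft_portion == 'last_layer':
        # train only the freshly replaced classifier head
        head = model.module.classifier if hasattr(model, 'module') else model.classifier
        return head.parameters()
    raise ValueError('unsupported ft_portion: {}'.format(ft_portion))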
def generate_model(opt): assert opt.model in [ 'resnet', 'resnext' ] if opt.model == 'resnet': assert opt.model_depth in [10, 18, 34, 50, 101, 152, 200] from models.resnet import get_fine_tuning_parameters if opt.model_depth == 10: model = resnet.resnet10( num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, sample_size=opt.sample_size, sample_duration=opt.sample_duration, isSource = opt.isSource, transfer_module = opt.transfer_module, sourceKind = opt.sourceKind, layer_num = opt.layer_num, multi_source = opt.multi_source) elif opt.model_depth == 18: model = resnet.resnet18( num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, sample_size=opt.sample_size, sample_duration=opt.sample_duration, isSource = opt.isSource, transfer_module = opt.transfer_module, sourceKind = opt.sourceKind, layer_num = opt.layer_num, multi_source = opt.multi_source) elif opt.model_depth == 34: model = resnet.resnet34( num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, sample_size=opt.sample_size, sample_duration=opt.sample_duration, isSource = opt.isSource, transfer_module = opt.transfer_module, sourceKind = opt.sourceKind, layer_num = opt.layer_num, multi_source = opt.multi_source) elif opt.model_depth == 50: model = resnet.resnet50( num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, sample_size=opt.sample_size, sample_duration=opt.sample_duration, isSource = opt.isSource, transfer_module = opt.transfer_module, sourceKind = opt.sourceKind, layer_num = opt.layer_num, multi_source = opt.multi_source) elif opt.model_depth == 101: model = resnet.resnet101( num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, sample_size=opt.sample_size, sample_duration=opt.sample_duration, isSource = opt.isSource, transfer_module = opt.transfer_module, sourceKind = opt.sourceKind, layer_num = opt.layer_num, multi_source = opt.multi_source) elif opt.model_depth == 152: model = resnet.resnet152( num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, sample_size=opt.sample_size, sample_duration=opt.sample_duration, isSource = opt.isSource, transfer_module = opt.transfer_module, sourceKind = opt.sourceKind, layer_num = opt.layer_num, multi_source = opt.multi_source) elif opt.model_depth == 200: model = resnet.resnet200( num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, sample_size=opt.sample_size, sample_duration=opt.sample_duration, isSource = opt.isSource, transfer_module = opt.transfer_module, sourceKind = opt.sourceKind, layer_num = opt.layer_num, multi_source = opt.multi_source) elif opt.model == 'resnext': assert opt.model_depth in [50, 101, 152] from models.resnext import get_fine_tuning_parameters if opt.model_depth == 50: model = resnext.resnet50( num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, cardinality=opt.resnext_cardinality, sample_size=opt.sample_size, sample_duration=opt.sample_duration, isSource = opt.isSource, transfer_module = opt.transfer_module, sourceKind = opt.sourceKind, layer_num = opt.layer_num, multi_source = opt.multi_source) elif opt.model_depth == 101: model = resnext.resnet101( num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, cardinality=opt.resnext_cardinality, sample_size=opt.sample_size, sample_duration=opt.sample_duration, isSource = opt.isSource, transfer_module = opt.transfer_module, sourceKind = opt.sourceKind, layer_num = opt.layer_num, multi_source = opt.multi_source) elif opt.model_depth == 152: model = resnext.resnet152( num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, 
cardinality=opt.resnext_cardinality,
                sample_size=opt.sample_size,
                sample_duration=opt.sample_duration,
                isSource=opt.isSource,
                transfer_module=opt.transfer_module,
                sourceKind=opt.sourceKind,
                layer_num=opt.layer_num,
                multi_source=opt.multi_source)

    print(opt.no_cuda)
    print(type(opt.no_cuda))

    if not opt.no_cuda:
        if opt.pretrain_path:
            print('loading pretrained model {}'.format(opt.pretrain_path))
            pretrain = torch.load(opt.pretrain_path)
            print('loading pretrained model arch', pretrain['arch'], opt.arch)
            assert opt.arch == pretrain['arch']
            pretrained_dict = pretrain['state_dict']
            model_dict = model.state_dict()
            # pretrained_dict = {k: v for k, v in pretrained_dict.items() if k in model_dict}
            # strip the 'module.' prefix left over from DataParallel checkpoints
            pretrained_dict = {k.replace('module.', ''): v
                               for k, v in pretrained_dict.items()}
            model_dict.update(pretrained_dict)
            model.load_state_dict(model_dict)

            model = model.cuda()
            model = nn.DataParallel(model, device_ids=None)

            if not opt.inference:
                model.module.fc = nn.Linear(model.module.fc.in_features,
                                            opt.n_finetune_classes)
                model.module.fc = model.module.fc.cuda()

            parameters = get_fine_tuning_parameters(model, opt.ft_begin_index)
            print(model)
            return model, parameters
        elif opt.inference:
            model = model.cuda()
            model = nn.DataParallel(model, device_ids=None)
            return model, model.parameters()
    else:
        if opt.pretrain_path:
            print('loading pretrained model {}'.format(opt.pretrain_path))
            pretrain = torch.load(opt.pretrain_path)
            # print the checkpoint arch only after the checkpoint has been loaded
            print('loading pretrained model arch', pretrain['arch'])
            assert opt.arch == pretrain['arch']
            model.load_state_dict(pretrain['state_dict'])
            parameters = get_fine_tuning_parameters(model, opt.ft_begin_index)
            model = model.cuda()
            model = nn.DataParallel(model, device_ids=None)
            return model, parameters

    return model, model.parameters()
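# The checkpoint handling above strips the 'module.' prefix that nn.DataParallel
# adds when a state_dict is saved from a wrapped model. A standalone illustration
# of the same idea (assumed helper name, not part of the repo):
import torch


def strip_dataparallel_prefix(state_dict):
    # 'module.layer1.0.conv1.weight' -> 'layer1.0.conv1.weight'
    return {k.replace('module.', '', 1): v for k, v in state_dict.items()}


# typical use with a checkpoint saved from a DataParallel-wrapped model
# checkpoint = torch.load(opt.pretrain_path, map_location='cpu')
# model.load_state_dict(strip_dataparallel_prefix(checkpoint['state_dict']))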
def generate_model(opt): assert opt.model in [ 'resnet', 'preresnet', 'wideresnet', 'resnext', 'densenet' ] if opt.model == 'resnet': assert opt.model_depth in [10, 18, 34, 50, 101, 152, 200] from models.resnet import get_fine_tuning_parameters if opt.model_depth == 10: model = resnet.resnet10( num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model_depth == 18: model = resnet.resnet18( num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model_depth == 34: model = resnet.resnet34( num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model_depth == 50: model = resnet.resnet50( num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model_depth == 101: model = resnet.resnet101( num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model_depth == 152: model = resnet.resnet152( num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model_depth == 200: model = resnet.resnet200( num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model == 'wideresnet': assert opt.model_depth in [50] from models.wide_resnet import get_fine_tuning_parameters if opt.model_depth == 50: model = wide_resnet.resnet50( num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, k=opt.wide_resnet_k, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model == 'resnext': assert opt.model_depth in [50, 101, 152] from models.resnext import get_fine_tuning_parameters if opt.model_depth == 50: model = resnext.resnet50( num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, cardinality=opt.resnext_cardinality, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model_depth == 101: model = resnext.resnet101( num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, cardinality=opt.resnext_cardinality, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model_depth == 152: model = resnext.resnet152( num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, cardinality=opt.resnext_cardinality, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model == 'preresnet': assert opt.model_depth in [18, 34, 50, 101, 152, 200] from models.pre_act_resnet import get_fine_tuning_parameters if opt.model_depth == 18: model = pre_act_resnet.resnet18( num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model_depth == 34: model = pre_act_resnet.resnet34( num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model_depth == 50: model = pre_act_resnet.resnet50( num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model_depth == 101: model = pre_act_resnet.resnet101( num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model_depth == 
152: model = pre_act_resnet.resnet152( num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model_depth == 200: model = pre_act_resnet.resnet200( num_classes=opt.n_classes, shortcut_type=opt.resnet_shortcut, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model == 'densenet': assert opt.model_depth in [121, 169, 201, 264] from models.densenet import get_fine_tuning_parameters if opt.model_depth == 121: model = densenet.densenet121( num_classes=opt.n_classes, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model_depth == 169: model = densenet.densenet169( num_classes=opt.n_classes, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model_depth == 201: model = densenet.densenet201( num_classes=opt.n_classes, sample_size=opt.sample_size, sample_duration=opt.sample_duration) elif opt.model_depth == 264: model = densenet.densenet264( num_classes=opt.n_classes, sample_size=opt.sample_size, sample_duration=opt.sample_duration) if not opt.no_cuda: model = model.cuda() model = nn.DataParallel(model, device_ids=None) if opt.pretrain_path: print('loading pretrained model {}'.format(opt.pretrain_path)) pretrain = torch.load(opt.pretrain_path) assert opt.arch == pretrain['arch'] model.load_state_dict(pretrain['state_dict']) if opt.model == 'densenet': model.module.classifier = nn.Linear( model.module.classifier.in_features, opt.n_finetune_classes) model.module.classifier = model.module.classifier.cuda() else: model.module.fc = nn.Linear(model.module.fc.in_features, opt.n_finetune_classes) model.module.fc = model.module.fc.cuda() parameters = get_fine_tuning_parameters(model, opt.ft_begin_index) return model, parameters else: if opt.pretrain_path: print('loading pretrained model {}'.format(opt.pretrain_path)) pretrain = torch.load(opt.pretrain_path) assert opt.arch == pretrain['arch'] model.load_state_dict(pretrain['state_dict']) if opt.model == 'densenet': model.classifier = nn.Linear( model.classifier.in_features, opt.n_finetune_classes) else: model.fc = nn.Linear(model.fc.in_features, opt.n_finetune_classes) parameters = get_fine_tuning_parameters(model, opt.ft_begin_index) return model, parameters return model, model.parameters()
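# The assert above compares opt.arch against the checkpoint's 'arch' field. In the
# option parsing used by these scripts, arch is derived from the model name and
# depth (see the f'{opt.model}-{opt.model_depth}' construction earlier); a minimal
# sketch of that convention, shown only to make the assert self-explanatory:
def build_arch_string(model_name, model_depth):
    # e.g. ('resnext', 101) -> 'resnext-101', matching pretrain['arch']
    return '{}-{}'.format(model_name, model_depth)


# opt.arch = build_arch_string(opt.model, opt.model_depth)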