        workspace.FeedBlob(key, wts[key])

workspace.FeedBlob('data', data)
workspace.RunNet(c2_net.net.Proto().name)
c2_blobs = {key: workspace.FetchBlob(key) for key in workspace.Blobs()}

#-----------------------------------------------------------------------------------------------#

torch.backends.cudnn.enabled = False
from models import resnet

data = torch.from_numpy(data).cuda()

# load pretrained weights and the matching Caffe2 <-> PyTorch key map
if args.model == 'r50':
    pth_net = resnet.i3_res50(num_classes=400)
    key_map = torch.load('pretrained/i3d_r50_kinetics.pth.keymap')
elif args.model == 'r50_nl':
    pth_net = resnet.i3_res50_nl(num_classes=400)
    key_map = torch.load('pretrained/i3d_r50_nl_kinetics.pth.keymap')

# strip parameter suffixes so the map goes from PyTorch module names to Caffe2 layer names
key_map = {'.'.join(k.split('.')[:-1]): '_'.join(v.split('_')[:-1]) for k, v in key_map.items()}

pth_net.cuda().eval()

# record every module's forward output so it can be compared against the Caffe2 blobs
def hook(module, input, output):
    setattr(module, "_value_hook", output)

for name, module in pth_net.named_modules():
    module.register_forward_hook(hook)

pth_net({'frames': data})
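# --- Sanity-check sketch (not part of the original script): compare each mapped PyTorch
# --- activation against its Caffe2 counterpart. It assumes the remapped key_map goes from
# --- PyTorch module names to Caffe2 activation blob names present in c2_blobs; the printout
# --- is purely diagnostic.
import numpy as np

pth_blobs = {name: module._value_hook.detach().cpu().numpy()
             for name, module in pth_net.named_modules()
             if hasattr(module, '_value_hook')}

for pth_name, c2_name in key_map.items():
    if pth_name in pth_blobs and c2_name in c2_blobs:
        if pth_blobs[pth_name].shape == c2_blobs[c2_name].shape:
            diff = np.abs(pth_blobs[pth_name] - c2_blobs[c2_name]).max()
            print('%s <-> %s | max abs diff: %.6f' % (pth_name, c2_name, diff))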
                        ['%s: %.3f' % (k, v.value()[0]) for k, v in loss_meters.items()])
    print(log_str)

#----------------------------------------------------------------------------------------------------------------------------------------#

from data import kinetics
from models import resnet

if args.mode == 'video':
    testset = kinetics.KineticsMultiCrop(root='data/kinetics/', split='val', clip_len=32)
elif args.mode == 'clip':
    testset = kinetics.Kinetics(root='data/kinetics/', split='val', clip_len=32)
testloader = torch.utils.data.DataLoader(testset, batch_size=args.batch_size, shuffle=False, num_workers=8)

net = resnet.i3_res50(num_classes=len(testset.labels))
net.cuda()

if args.parallel:
    net = nn.DataParallel(net)

with torch.no_grad():
    test()
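# --- Hedged sketch of a clip-mode `test()` (the real function is defined elsewhere in this repo).
# --- It assumes each batch is a dict with 'frames' and integer 'label' entries and that the
# --- network returns class scores of shape (B, num_classes), possibly under a 'pred' key;
# --- all of those are assumptions made for illustration.
def test_sketch():
    net.eval()
    correct, total = 0, 0
    for batch in testloader:
        frames, labels = batch['frames'].cuda(), batch['label'].cuda()
        out = net({'frames': frames})
        scores = out['pred'] if isinstance(out, dict) else out
        correct += (scores.argmax(dim=1) == labels).sum().item()
        total += labels.numel()
    print('clip accuracy: %.3f' % (correct / float(total)))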
def get_model(
    num_classes,
    model_name='TSN',
    freeze_layers=True,
    model_path=None,
):
    class_counts = (125, 352)
    segment_count = 8
    base_model = 'resnet50'

    # build the requested backbone
    if model_name == 'BLV':
        backbone_setting = {
            'dropout': 0.5,
            'pretrained': True,
            'alpha': 2,
            'depth': 101,
            'beta': 4,
            'input_channels': 3,
            'num_classes': 174,
            'dataset': 'st2stv2',
            'groups': 64,
            'imagenet_blnet_pretrained': False,
            'blending_frames': 3,
        }
        net = bLVNet_TAM(backbone_setting)
    elif model_name == 'slowfast':
        args = parse_args()
        cfg = load_config(args)
        cfg.NUM_GPUS = 1
        cfg.TRAIN.CHECKPOINT_FILE_PATH = "SLOWFAST_4x16_R50.pkl"
        net = build_model(cfg)
    elif model_name == 'TSM':
        net = get_tsm(num_classes)
        # net = torch.hub.load(repo, 'TSM', class_counts, segment_count, 'RGB',
        #                      base_model=base_model,
        #                      pretrained='epic-kitchens')
    elif model_name == 'I3D':
        net = resnet.i3_res50(400)
    else:
        net = torch.hub.load(repo, model_name, class_counts, segment_count, 'RGB',
                             base_model=base_model,
                             pretrained='epic-kitchens',
                             force_reload=True)

    if freeze_layers:
        for param in net.parameters():
            param.requires_grad = False

    # replace the classification head so it matches the target number of classes
    if model_name == 'TSN':  # or model_name == 'TSM':
        net.fc_verb = torch.nn.Linear(2048, num_classes)
    elif model_name == 'TRN':
        net.consensus.classifiers[0] = torch.nn.Linear(512, num_classes)
    elif model_name == 'BLV':
        net.new_fc = torch.nn.Linear(2048, num_classes)
    elif model_name == 'slowfast':
        net.head.projection = torch.nn.Linear(2304, num_classes)
        net.head.act = None
    elif model_name == 'I3D':
        net.fc = torch.nn.Linear(2048, num_classes)
    elif model_name == 'TSM':
        net.new_fc = torch.nn.Linear(2048, num_classes)

    if model_path is not None:
        load_model(net, model_path)

    # keep the last residual stage trainable even when the backbone is frozen
    if model_name == 'BLV':
        for param in net.baseline_model.layer4.parameters():
            param.requires_grad = True
    elif model_name in ['TRN', 'TSN', 'TSM']:
        for param in net.base_model.layer4.parameters():
            param.requires_grad = True
    elif model_name == 'I3D':
        for param in net.layer4.parameters():
            param.requires_grad = True

    use_gpu = torch.cuda.is_available()
    if use_gpu:
        net = net.cuda()

    return net
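# Usage sketch for get_model() (not in the original file); the class count below is arbitrary.
net = get_model(num_classes=10, model_name='I3D', freeze_layers=True, model_path=None)
# inspect which parameters remain trainable after the freezing logic above
trainable = [name for name, p in net.named_parameters() if p.requires_grad]
print('%d trainable parameter tensors' % len(trainable))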
                  encoding='latin')['blobs']

for key in wts:
    if type(wts[key]) == np.ndarray:
        workspace.FeedBlob(key, wts[key])

workspace.FeedBlob('data', data)
workspace.RunNet(c2_net.net.Proto().name)
c2_blobs = {key: workspace.FetchBlob(key) for key in workspace.Blobs()}

#-----------------------------------------------------------------------------------------------#

torch.backends.cudnn.enabled = False
from models import resnet

data = torch.from_numpy(data).cuda()
pth_net = resnet.i3_res50(num_classes=400).cuda().eval()

# record every module's forward output so it can be compared against the Caffe2 blobs
def hook(module, input, output):
    setattr(module, "_value_hook", output)

for name, module in pth_net.named_modules():
    module.register_forward_hook(hook)

pth_net({'frames': data})

pth_blobs = {}
for name, module in pth_net.named_modules():
    try:
        if len(name) > 0:
            # minimal completion of the truncated collection loop: store each hooked activation by name
            pth_blobs[name] = module._value_hook.detach().cpu().numpy()
    except Exception:
        pass
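# --- Weight-copy sketch (not in the original script): once the activations line up, the Caffe2
# --- arrays in `wts` can be written into the PyTorch state dict. `c2_to_pth`, a map from Caffe2
# --- parameter blob names (e.g. 'conv1_w') to PyTorch parameter names (e.g. 'conv1.weight'),
# --- and the output filename are assumptions made for illustration.
c2_to_pth = {}  # fill with Caffe2 -> PyTorch parameter-name pairs, e.g. {'conv1_w': 'conv1.weight'}

state_dict = pth_net.state_dict()
for c2_name, pth_name in c2_to_pth.items():
    tensor = torch.from_numpy(wts[c2_name])
    assert tensor.shape == state_dict[pth_name].shape, 'shape mismatch at %s' % pth_name
    state_dict[pth_name] = tensor
pth_net.load_state_dict(state_dict)
torch.save(pth_net.state_dict(), 'converted_i3d_r50.pth')  # illustrative output path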
#----------------------------------------------------------------------------------------------------------------------------------------#

from tensorboardX import SummaryWriter
writer = SummaryWriter('%s/tb.log' % args.cv_dir)

from data import kinetics
from models import resnet

trainset = kinetics.Kinetics(root='data/kinetics/', split='train', clip_len=32)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=args.batch_size, shuffle=True,
                                          num_workers=args.workers, pin_memory=True)

net = resnet.i3_res50(len(trainset.labels))
net.cuda()

optim_params = list(filter(lambda p: p.requires_grad, net.parameters()))
print('Optimizing %d parameters' % len(optim_params))
optimizer = optim.SGD(optim_params, lr=args.lr, weight_decay=args.weight_decay)

start_iter = 0
if args.load:
    checkpoint = torch.load(args.load, map_location='cpu')
    start_iter = checkpoint['iter']
    net.load_state_dict(checkpoint['net'])
    optimizer.load_state_dict(checkpoint['optimizer'])
    print('Loaded checkpoint from %s' % os.path.basename(args.load))

if args.parallel: