def run(cls, model, criterion, epoch, args):
    task = cls(model, epoch, args)
    loader, = get_dataset(args, splits=('val',), dataset=args.dataset)
    # expose intermediate features to the stabilization task
    model = FeatureExtractorWrapper(model, args)
    # model = set_distributed_backend(model, args)
    model.eval()
    return task.stabilize_all(loader, model, epoch, args)
def main():
    global args, best_top1
    args = parse()
    if not args.no_logger:
        tee.Tee(args.cache + '/log.txt')
    print(vars(args))
    seed(args.manual_seed)
    model, criterion, optimizer = create_model(args)
    best_top1 = 0  # assumed module-level default; overwritten on resume
    if args.resume:
        best_top1 = checkpoints.load(args, model, optimizer)
    print(model)
    trainer = train.Trainer()
    loaders = get_dataset(args)
    train_loader = loaders[0]
    if args.evaluate:
        scores = validate(trainer, loaders, model, criterion, args)
        checkpoints.score_file(scores, "{}/model_000.txt".format(args.cache))
        return
    for epoch in range(args.start_epoch, args.epochs):
        if args.distributed:
            trainer.train_sampler.set_epoch(epoch)
        scores = {}
        scores.update(trainer.train(train_loader, model, criterion, optimizer, epoch, args))
        scores.update(validate(trainer, loaders, model, criterion, args, epoch))
        is_best = scores[args.metric] > best_top1
        best_top1 = max(scores[args.metric], best_top1)
        checkpoints.save(epoch, args, model, optimizer, is_best, scores, args.metric)
    if not args.nopdb:
        pdb.set_trace()  # drop into the debugger after training unless --nopdb
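A minimal sketch of the validate helper that this main() assumes; the real helper lives elsewhere in the repo, and the loader layout, score keys, and the video-scoring condition below are assumptions:

def validate(trainer, loaders, model, criterion, args, epoch=-1):
    # hypothetical layout: (train, per-clip val, per-video val) loaders
    _, val_loader, valvideo_loader = loaders
    scores = {}
    top1, top5 = trainer.validate(val_loader, model, criterion, epoch, args)
    scores.update({'top1val': top1, 'top5val': top5})
    if epoch == args.epochs - 1:  # hypothetical: score whole videos on the final epoch only
        scores['videoap'] = trainer.validate_video(valvideo_loader, model, criterion, epoch, args)
    return scores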
def run(cls, model, criterion, epoch, args):
    model = ActorObserverClassifierWrapper(model, args)
    model = set_distributed_backend(model, args)
    criterion = DefaultCriterion(args)
    task = cls(model, epoch, args)
    loader, = get_dataset(args, splits=('val_video',),
                          dataset=args.actor_observer_classification_task_dataset)
    return task.validate_video(loader, model, criterion, epoch, args)
def simpletest1():
    """Smoke test: check that the model can learn a simple sequence."""
    opt = parse()
    opts(opt)
    epochs = 40
    train_loader, val_loader, valvideo_loader = get_dataset(opt)
    trainer = train.Trainer()
    basemodel = nn.Linear(100, 5)
    model = AsyncTFBase(basemodel, 100, opt).cuda()
    criterion = AsyncTFCriterion(opt).cuda()
    optimizer = torch.optim.SGD(model.parameters(), opt.lr,
                                momentum=opt.momentum,
                                weight_decay=opt.weight_decay)
    epoch = -1
    for i in range(epochs):
        top1, _ = trainer.train(train_loader, model, criterion, optimizer, i, opt)
        # next(...)/.item() replace the Python 2 .next() and deprecated .data[0] idioms
        print('cls weights: {}, aa weights: {}'.format(
            next(model.mA.parameters()).norm().item(),
            next(model.mAAa.parameters()).norm().item()))
    top1, _ = trainer.validate(train_loader, model, criterion, epochs, opt)
    for i in range(5):
        top1val, _ = trainer.validate(val_loader, model, criterion, epochs + i, opt)
        print('top1val: {}'.format(top1val))
    ap = trainer.validate_video(valvideo_loader, model, criterion, epoch, opt)
    return top1, top1val, ap
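Example invocation of the smoke test above (assumes a CUDA device and the repo's default options):

if __name__ == '__main__':
    top1, top1val, ap = simpletest1()
    print('train top1: {}, val top1: {}, video mAP: {}'.format(top1, top1val, ap))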
def main():
    best_score = 0
    args = parse()
    if not args.no_logger:
        tee.Tee(args.cache + '/log.txt')
    print(vars(args))
    print('experiment folder: {}'.format(experiment_folder()))
    print('git hash: {}'.format(get_script_dir_commit_hash()))
    seed(args.manual_seed)
    cudnn.benchmark = not args.disable_cudnn_benchmark
    cudnn.enabled = not args.disable_cudnn
    metrics = get_metrics(args.metrics)
    tasks = get_tasks(args.tasks)
    model, criterion = get_model(args)
    if args.optimizer == 'sgd':
        optimizer = torch.optim.SGD(model.parameters(), args.lr,
                                    momentum=args.momentum,
                                    weight_decay=args.weight_decay)
    elif args.optimizer == 'adam':
        optimizer = torch.optim.Adam(model.parameters(), args.lr,
                                     weight_decay=args.weight_decay)
    else:
        raise ValueError('invalid optimizer: {}'.format(args.optimizer))
    if args.resume:
        best_score = checkpoints.load(args, model, optimizer)
    print(model)
    trainer = train.Trainer()
    train_loader, val_loader = get_dataset(args)
    if args.evaluate:
        scores = validate(trainer, val_loader, model, criterion, args, metrics, tasks, -1)
        print(scores)
        score_file(scores, "{}/model_999.txt".format(args.cache))
        return
    if args.warmups > 0:
        for i in range(args.warmups):
            print('warmup {}'.format(i))
            trainer.validate(train_loader, model, criterion, -1, metrics, args)
    for epoch in range(args.start_epoch, args.epochs):
        if args.distributed:
            trainer.train_sampler.set_epoch(epoch)
        scores = {}
        scores.update(trainer.train(train_loader, model, criterion, optimizer,
                                    epoch, metrics, args))
        scores.update(validate(trainer, val_loader, model, criterion, args,
                               metrics, tasks, epoch))
        is_best = scores[args.metric] > best_score
        best_score = max(scores[args.metric], best_score)
        checkpoints.save(epoch, args, model, optimizer, is_best, scores, args.metric)
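A sketch of how the validate helper used by this main() might fan out over the configured tasks; every task here exposes the same run(cls, model, criterion, epoch, args) classmethod, so dispatch reduces to a loop. The aggregation details are assumptions:

def validate(trainer, val_loader, model, criterion, args, metrics, tasks, epoch):
    scores = {}
    scores.update(trainer.validate(val_loader, model, criterion, epoch, metrics, args))
    for task in tasks:
        # each task's run() is assumed to return a dict of scores keyed by metric name
        scores.update(task.run(model, criterion, epoch, args))
    return scores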
def run(cls, model, criterion, epoch, args):
    task = cls(model, epoch, args)
    train_loader, val_loader = get_dataset(args, splits=('train', 'val'),
                                           dataset=args.dataset)
    model.eval()
    task.visualize_all(train_loader, model, epoch, args, 'train')
    task.visualize_all(val_loader, model, epoch, args, 'val')
    return {'visualization_task': args.cache}
def run(cls, model, criterion, epoch, args):
    model = ActorObserverClassifierWrapper(model, args)
    model = set_distributed_backend(model, args)
    criterion = DefaultCriterion(args)
    task = cls(model, epoch, args)
    newargs = copy.deepcopy(args)
    if ';' in args.train_file:
        # semicolon-separated flags carry a second dataset; use its paths here
        vars(newargs).update({
            'train_file': args.train_file.split(';')[1],
            'val_file': args.val_file.split(';')[1],
            'data': args.data.split(';')[1],
        })
    if '3d' in args.arch:
        loader, = get_dataset(newargs, splits=('val_video',), dataset='charades_video')
    else:
        loader, = get_dataset(newargs, splits=('val_video',), dataset='charades')
    return task.validate_video(loader, model, criterion, epoch, args)
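The semicolon convention above lets a single flag carry paths for two datasets. A hypothetical invocation (flag names and paths are illustrative, and the values must be quoted so the shell does not treat ';' as a command separator):

python main.py \
    --train-file 'first_train.csv;second_train.csv' \
    --val-file 'first_val.csv;second_val.csv' \
    --data '/path/to/first;/path/to/second'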
def run(cls, model, criterion, epoch, args):
    task = cls(model, epoch, args)
    loader, = get_dataset(args, splits=('val',), dataset=args.dataset)
    # unwrap nn.DataParallel / DistributedDataParallel to reach the raw model
    model = model.module
    model.eval()
    return task.stabilize_all(loader, model, epoch, args)
def run(cls, model, criterion, epoch, args):
    task = cls(model, epoch, args)
    loader, = get_dataset(args, splits=('val_video',))
    return task.validate_video(loader, model, criterion, epoch, args)
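All of the run() entry points above follow one template: construct the task, fetch the split it needs, optionally rewrap the model, and hand off to a task method. A minimal base class capturing that contract might look like this (a sketch under those assumptions, not the repo's actual task class):

class Task(object):
    def __init__(self, model, epoch, args):
        self.model = model
        self.epoch = epoch
        self.args = args

    @classmethod
    def run(cls, model, criterion, epoch, args):
        task = cls(model, epoch, args)
        loader, = get_dataset(args, splits=('val',), dataset=args.dataset)
        model.eval()
        # subclasses implement the actual pass and return a dict of scores
        return task.validate(loader, model, criterion, epoch, args)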