def main():
    global opt, best_prec1
    opt = parser.parse_args()
    opt.logdir = opt.logdir + '/' + opt.name
    logger = None  # Logger(opt.logdir)
    best_prec1 = 0
    print(opt)

    # Initialize the model, criterion and the optimizer
    model = init.load_model(opt)
    model, criterion, optimizer = init.setup(model, opt)

    # Display the model structure
    print(model)

    # Setup trainer and validator
    trainer = train.Trainer(model, criterion, optimizer, opt, logger)
    validator = train.Validator(model, criterion, opt, logger)

    # Load model from a checkpoint if mentioned in opts
    if opt.resume:
        if os.path.isfile(opt.resume):
            model, optimizer, opt, best_prec1 = init.resumer(opt, model, optimizer)
        else:
            print("=> no checkpoint found at '{}'".format(opt.resume))

    cudnn.benchmark = True

    # Setup the train and validation data loaders
    dataloader = init_data.load_data(opt)
    train_loader = dataloader.train_loader
    val_loader = dataloader.val_loader

    for epoch in range(opt.start_epoch, opt.epochs):
        utils.adjust_learning_rate(opt, optimizer, epoch)
        print("Starting epoch number:", epoch + 1,
              "Learning rate:", optimizer.param_groups[0]["lr"])

        if not opt.testOnly:
            # Train the network over the training data
            trainer.train(train_loader, epoch, opt)

        # if opt.tensorboard:
        #     logger.scalar_summary('learning_rate', opt.lr, epoch)

        # Measure the validation accuracy
        acc = validator.validate(val_loader, epoch, opt)
        best_prec1 = max(acc, best_prec1)
        if best_prec1 == acc:
            # Save the new model if the accuracy beats the previous best
            init.save_checkpoint(opt, model, optimizer, best_prec1, epoch)

        print('Best accuracy: [{0:.3f}]\t'.format(best_prec1))
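# Every main() in this file reads a module-level `parser` that is not shown.
# The sketch below reconstructs only the flags this first main() actually
# uses; the names are inferred from usage and the defaults are illustrative
# assumptions, not the repository's actual definitions.
import argparse

parser = argparse.ArgumentParser(description='Training script')
parser.add_argument('--logdir', default='logs', help='root directory for logs')
parser.add_argument('--name', default='run0', help='experiment name, appended to logdir')
parser.add_argument('--resume', default='', help='path to a checkpoint to resume from')
parser.add_argument('--start_epoch', type=int, default=0, help='first epoch index')
parser.add_argument('--epochs', type=int, default=90, help='total number of epochs')
parser.add_argument('--testOnly', action='store_true', help='skip training, only validate')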
def main():
    global opt, best_prec1
    opt = parser.parse_args()
    opt.logdir = opt.logdir + '/' + opt.name
    logger = None  # Logger(opt.logdir)
    opt.lr = opt.maxlr
    print(opt)

    best_prec1 = 0
    cudnn.benchmark = True

    model = init_model.load_model(opt)
    if opt.model_def.startswith('alexnet') or opt.model_def.startswith('vgg'):
        # AlexNet/VGG parallelize well only over their convolutional features
        model.features = torch.nn.DataParallel(model.features)
        model.cuda()
    elif opt.ngpus > 1:
        model = torch.nn.DataParallel(model).cuda()
    print(model)

    model, criterion, optimizer = init_model.setup(model, opt)

    trainer = train.Trainer(model, criterion, optimizer, opt, logger)
    validator = train.Validator(model, criterion, opt, logger)

    if opt.resume:
        if os.path.isfile(opt.resume):
            model, optimizer, opt, best_prec1 = init_model.resumer(opt, model, optimizer)
        else:
            print("=> no checkpoint found at '{}'".format(opt.resume))

    dataloader = init_data.load_data(opt)
    train_loader = dataloader.train_loader
    # print(utils.get_mean_and_std(train_loader))
    val_loader = dataloader.val_loader

    for epoch in range(opt.start_epoch, opt.epochs):
        utils.adjust_learning_rate(opt, optimizer, epoch)
        print("Starting epoch number:", epoch + 1, "Learning rate:", opt.lr)

        if not opt.testOnly:
            trainer.train(train_loader, epoch, opt)
            if opt.tensorboard and logger is not None:
                logger.scalar_summary('learning_rate', opt.lr, epoch)

        prec1 = validator.validate(val_loader, epoch, opt)
        best_prec1 = max(prec1, best_prec1)
        init_model.save_checkpoint(opt, model, optimizer, best_prec1, epoch)
        print('Best Prec@1: [{0:.3f}]\t'.format(best_prec1))
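# All of these training loops delegate the schedule to utils.adjust_learning_rate,
# which is not shown. One plausible implementation, assuming a step decay from
# opt.maxlr with a hypothetical opt.decayinterval attribute; the real schedule
# may differ. Updating opt.lr in place would also explain why the main() above
# prints opt.lr right after calling it.
def adjust_learning_rate_sketch(opt, optimizer, epoch):
    """Decay the learning rate 10x every opt.decayinterval epochs (assumed)."""
    opt.lr = opt.maxlr * (0.1 ** (epoch // opt.decayinterval))
    for param_group in optimizer.param_groups:
        param_group['lr'] = opt.lr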
def main():
    global opt, best_err1
    opt = parser.parse_args()
    best_err1 = 1000000
    print(opt)

    model = init.load_model(opt)
    model, criterion, optimizer = init.setup(model, opt)
    print(model)

    writer = None  # placeholder for a tensorboard SummaryWriter, if logging is enabled
    trainer = train.Trainer(model, criterion, optimizer, opt, writer)
    validator = train.Validator(model, criterion, opt, writer)

    # Seed everything for reproducible runs
    random.seed(opt.seed)
    torch.manual_seed(opt.seed)
    cudnn.deterministic = True

    if opt.resume:
        if os.path.isfile(opt.resume):
            model, optimizer, opt, best_err1 = init.resumer(opt, model, optimizer)
        else:
            print("=> no checkpoint found at '{}'".format(opt.resume))

    # Note: benchmark mode trades away the determinism requested above for speed
    cudnn.benchmark = True

    dataloader = ld.GazeFollow(opt)
    train_loader = dataloader.train_loader
    val_loader = dataloader.val_loader

    for epoch in range(opt.start_epoch, opt.epochs):
        utils.adjust_learning_rate(opt, optimizer, epoch)
        print("Starting epoch number:", epoch + 1,
              "Learning rate:", optimizer.param_groups[0]["lr"])

        if not opt.testOnly:
            trainer.train(train_loader, epoch, opt)

        err = validator.validate(val_loader, epoch, opt)
        best_err1 = min(err, best_err1)
        if epoch % 10 == 0:
            init.save_checkpoint(opt, model, optimizer, best_err1, epoch)
        print('Best error: [{0:.3f}]\t'.format(best_err1))
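# init.resumer restores a checkpoint produced by init.save_checkpoint, but its
# implementation is not shown. A minimal sketch, assuming the checkpoint is a
# dict with 'state_dict', 'optimizer', 'epoch' and 'best_err1' keys; that
# layout is a hypothesis, not confirmed by the source.
import torch

def resumer_sketch(opt, model, optimizer):
    checkpoint = torch.load(opt.resume)
    model.load_state_dict(checkpoint['state_dict'])
    optimizer.load_state_dict(checkpoint['optimizer'])
    opt.start_epoch = checkpoint['epoch']
    best_err1 = checkpoint['best_err1']
    print("=> loaded checkpoint '{}' (epoch {})".format(opt.resume, checkpoint['epoch']))
    return model, optimizer, opt, best_err1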
def main():
    global opt, best_err1
    opt = parser.parse_args()
    best_err1 = 1000000
    print(opt)

    model = tracknet.Net(opt)
    if opt.cuda:
        model = model.cuda()
    model, criterion, optimizer = init.setup(model, opt)
    print(model)

    trainer = train.Trainer(model, criterion, optimizer, opt)
    # validator = train.Validator(model, criterion, opt)

    if opt.resume:
        if os.path.isfile(opt.resume):
            model, optimizer, opt, best_err1 = init.resumer(opt, model, optimizer)
        else:
            print("=> no checkpoint found at '{}'".format(opt.resume))

    cudnn.benchmark = True

    dataloader = ld.SynthLoader(opt)
    train_loader = dataloader.train_loader

    for epoch in range(opt.start_epoch, opt.epochs):
        utils.adjust_learning_rate(opt, optimizer, epoch)
        print("Starting epoch number:", epoch + 1,
              "Learning rate:", optimizer.param_groups[0]["lr"])

        trainer.train(train_loader, epoch, opt)
        # err = validator.validate(val_loader, epoch, opt)
        # best_err1 = min(err, best_err1)
        # print('Best error: [{0:.3f}]\t'.format(best_err1))

        # Checkpoint every third epoch after the first
        if epoch % 3 == 0 and epoch > 0 and opt.tosave:
            init.save_checkpoint(opt, model, optimizer, best_err1, epoch)
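# init.save_checkpoint is the counterpart of the resumer sketched above; its
# implementation is also not shown. A hedged sketch, assuming the same
# hypothetical dict layout and an opt.logdir directory to write into:
import os
import torch

def save_checkpoint_sketch(opt, model, optimizer, best_err1, epoch):
    state = {
        'epoch': epoch + 1,          # next epoch to start from on resume
        'state_dict': model.state_dict(),
        'optimizer': optimizer.state_dict(),
        'best_err1': best_err1,
    }
    torch.save(state, os.path.join(opt.logdir, 'checkpoint.pth.tar'))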
def main():
    global opt, best_prec1
    opt = parser.parse_args()
    print(opt)

    # Data loading
    train_transform = custom_transforms.Compose([
        custom_transforms.RandomHorizontalFlip(),
        custom_transforms.RandomScaleCrop(),
        custom_transforms.ArrayToTensor(),
        custom_transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
    ])
    valid_transform = custom_transforms.Compose([
        custom_transforms.ArrayToTensor(),
        custom_transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
    ])

    print('Loading scenes in', opt.data_dir)
    train_set = SequenceFolder(opt.data_dir, transform=train_transform,
                               seed=opt.seed, train=True,
                               sequence_length=opt.sequence_length)
    val_set = ValidationSet(opt.data_dir, transform=valid_transform)
    print(len(train_set), 'training samples found')
    print(len(val_set), 'validation samples found')

    train_loader = torch.utils.data.DataLoader(train_set, batch_size=opt.batch_size,
                                               shuffle=True, num_workers=opt.workers,
                                               pin_memory=True)
    # val_loader = torch.utils.data.DataLoader(val_set, batch_size=opt.batch_size,
    #                                          shuffle=False, num_workers=opt.workers,
    #                                          pin_memory=True)

    if opt.epoch_size == 0:
        opt.epoch_size = len(train_loader)
    # Done loading

    disp_model = dispnet.DispNet().cuda()
    pose_model = posenet.PoseNet().cuda()
    disp_model, pose_model, optimizer = init.setup(disp_model, pose_model, opt)
    print(disp_model, pose_model)

    trainer = train.Trainer(disp_model, pose_model, optimizer, opt)

    if opt.resume:
        if os.path.isfile(opt.resume):
            # disp_model, pose_model, optimizer, opt, best_prec1 = init.resumer(opt, disp_model, pose_model, optimizer)
            disp_model, pose_model, optimizer, opt = init.resumer(opt, disp_model, pose_model, optimizer)
        else:
            print("=> no checkpoint found at '{}'".format(opt.resume))

    cudnn.benchmark = True

    for epoch in range(opt.start_epoch, opt.epochs):
        utils.adjust_learning_rate(opt, optimizer, epoch)
        print("Starting epoch number:", epoch + 1,
              "Learning rate:", optimizer.param_groups[0]["lr"])

        if not opt.testOnly:
            trainer.train(train_loader, epoch, opt)
        # init.save_checkpoint(opt, disp_model, pose_model, optimizer, best_prec1, epoch)
        init.save_checkpoint(opt, disp_model, pose_model, optimizer, epoch)
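# custom_transforms here differs from torchvision.transforms: SfM-style samples
# are lists of frames rather than single images, so each transform has to act on
# the whole list at once (RandomScaleCrop, for instance, must crop every frame
# identically). A minimal sketch of the assumed Compose; the real classes likely
# also thread camera intrinsics through each call.
class ComposeSketch(object):
    def __init__(self, transforms):
        self.transforms = transforms

    def __call__(self, images):
        # Apply each transform to the full list of frames in order
        for transform in self.transforms:
            images = transform(images)
        return images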
def main():
    global opt, best_studentprec1
    cudnn.benchmark = True
    opt = parser.parse_args()
    opt.logdir = opt.logdir + '/' + opt.name
    logger = Logger(opt.logdir)
    print(opt)
    best_studentprec1 = 0.0

    print('Loading models...')
    teacher = init.load_model(opt, 'teacher')
    student = init.load_model(opt, 'student')
    discriminator = init.load_model(opt, 'discriminator')
    teacher = init.setup(teacher, opt, 'teacher')
    student = init.setup(student, opt, 'student')
    discriminator = init.setup(discriminator, opt, 'discriminator')
    # TODO: write the code to classify it in the 11th class
    print(teacher)
    print(student)
    print(discriminator)

    advCriterion = nn.BCELoss().cuda()
    similarityCriterion = nn.L1Loss().cuda()
    derivativeCriterion = nn.SmoothL1Loss().cuda()
    discclassifyCriterion = nn.CrossEntropyLoss(reduction='mean').cuda()  # size_average is deprecated

    studOptim = getOptim(opt, student, 'student')
    discrecOptim = getOptim(opt, discriminator, 'discriminator')

    trainer = train.Trainer(student, teacher, discriminator,
                            discclassifyCriterion, advCriterion,
                            similarityCriterion, derivativeCriterion,
                            studOptim, discrecOptim, opt, logger)
    validator = train.Validator(student, teacher, discriminator, opt, logger)

    # TODO: resuming does not work as of now; `model` and `optimizer` are not
    # defined in this function, and init.resumer would need to restore the
    # student, the discriminator and both optimizers.
    if opt.resume:
        if os.path.isfile(opt.resume):
            model, optimizer, opt, best_prec1 = init.resumer(opt, model, optimizer)
        else:
            print("=> no checkpoint found at '{}'".format(opt.resume))

    dataloader = init_data.load_data(opt)
    train_loader = dataloader.train_loader
    val_loader = dataloader.val_loader

    for epoch in range(opt.start_epoch, opt.epochs):
        utils.adjust_learning_rate(opt, studOptim, epoch)
        utils.adjust_learning_rate(opt, discrecOptim, epoch)
        print("Starting epoch number:", epoch + 1,
              "Learning rate:", studOptim.param_groups[0]["lr"])

        if not opt.testOnly:
            trainer.train(train_loader, epoch, opt)
            if opt.tensorboard:
                logger.scalar_summary('learning_rate', opt.lr, epoch)

        student_prec1 = validator.validate(val_loader, epoch, opt)
        best_studentprec1 = max(student_prec1, best_studentprec1)
        init.save_checkpoint(opt, teacher, student, discriminator,
                             studOptim, discrecOptim, student_prec1, epoch)
        print('Best accuracy: [{0:.3f}]\t'.format(best_studentprec1))
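# trainer.train for this adversarial-distillation setup is not shown. The
# sketch below illustrates one assumed student update: match the teacher's
# output (similarityCriterion) while producing features the discriminator
# scores as "teacher" (advCriterion). The 'real'/'fake' labeling, the
# sigmoid-output discriminator, and the loss weighting are assumptions,
# not the repository's confirmed logic.
import torch

def student_step_sketch(student, teacher, discriminator,
                        advCriterion, similarityCriterion,
                        studOptim, images):
    student_feats = student(images)
    with torch.no_grad():
        teacher_feats = teacher(images)  # teacher is frozen during distillation

    # BCELoss expects probabilities, so the discriminator is assumed to end
    # in a sigmoid; label student features as 1 ("teacher-like") to fool it.
    disc_out = discriminator(student_feats)
    real_labels = torch.ones_like(disc_out)

    loss = advCriterion(disc_out, real_labels) \
        + similarityCriterion(student_feats, teacher_feats)

    studOptim.zero_grad()
    loss.backward()
    studOptim.step()
    return loss.item()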