Example #1
def main():
    global opt, best_prec1

    opt = parser.parse_args()
    opt.logdir = opt.logdir + '/' + opt.name
    logger = None  # placeholder; plug in a real Logger(opt.logdir) when logging is needed

    best_prec1 = 0
    print(opt)

    # Initialize the model, criterion and the optimizer
    model = init.load_model(opt)
    model, criterion, optimizer = init.setup(model, opt)
    # Display the model structure
    print(model)

    # Setup trainer and validation
    trainer = train.Trainer(model, criterion, optimizer, opt, logger)
    validator = train.Validator(model, criterion, opt, logger)

    # Load model from a checkpoint if mentioned in opts
    if opt.resume:
        if os.path.isfile(opt.resume):
            model, optimizer, opt, best_prec1 = init.resumer(
                opt, model, optimizer)
        else:
            print("=> no checkpoint found at '{}'".format(opt.resume))

    cudnn.benchmark = True

    # Setup the train and validation data loaders
    dataloader = init_data.load_data(opt)
    train_loader = dataloader.train_loader
    val_loader = dataloader.val_loader

    for epoch in range(opt.start_epoch, opt.epochs):
        utils.adjust_learning_rate(opt, optimizer, epoch)
        print("Starting epoch number:", epoch + 1, "Learning rate:",
              optimizer.param_groups[0]["lr"])

        if not opt.testOnly:
            # Train the network over the training data
            trainer.train(train_loader, epoch, opt)

        # if opt.tensorboard:
        #     logger.scalar_summary('learning_rate', opt.lr, epoch)

        # Measure the validation accuracy
        acc = validator.validate(val_loader, epoch, opt)
        if acc >= best_prec1:
            best_prec1 = acc
            # Save the model whenever it matches or beats the previous best accuracy
            init.save_checkpoint(opt, model, optimizer, best_prec1, epoch)

        print('Best accuracy: [{0:.3f}]\t'.format(best_prec1))
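A note on the shared helpers: utils.adjust_learning_rate is called in every example below but never defined in these snippets. A minimal step-decay sketch that fits the call signature; the base-LR attribute (opt.maxlr), the 30-epoch interval and the 10x factor are assumptions, not values from the original repositories:

def adjust_learning_rate(opt, optimizer, epoch):
    # Decay the learning rate 10x every 30 epochs (assumed schedule).
    lr = opt.maxlr * (0.1 ** (epoch // 30))
    opt.lr = lr
    for param_group in optimizer.param_groups:
        param_group['lr'] = lr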
Example #2
def main():
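    # Note: `logger`, passed to train() and test() below, is assumed to be
    # created at module level elsewhere in this script.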

    # Dataloaders for GOO
    batch_size = args.batch_size

    print('==> Loading Train Dataset')
    train_set = GooDataset(args.train_dir, args.train_annotation, 'train', use_gazemask=args.gazemask)
    train_data_loader = DataLoader(train_set, batch_size=batch_size,
                                   shuffle=True, num_workers=16)
    
    if args.test_dir is not None:
        print('==> Loading Test Dataset')
        test_set = GooDataset(args.test_dir, args.test_annotation, 'test')
        test_data_loader = DataLoader(test_set, batch_size=batch_size//2,
                                    shuffle=False, num_workers=8)

    # Loads model
    net = GazeNet()
    net.cuda()

    # Hyperparameters
    start_epoch = 0
    max_epoch = 25
    learning_rate = args.init_lr

    # Initializes Optimizer
    gaze_opt = GazeOptimizer(net, learning_rate)
    optimizer = gaze_opt.getOptimizer(start_epoch)

    # Resuming Training
    resume_training = args.resume_training
    if resume_training:
        net, optimizer, start_epoch = resume_checkpoint(net, optimizer, args.resume_path)
        if args.test_dir is not None:
            test(net, test_data_loader, logger)

    for epoch in range(start_epoch, max_epoch):
        
        # Update optimizer
        optimizer = gaze_opt.getOptimizer(epoch)

        # Train model
        train(net, train_data_loader, optimizer, epoch, logger)

        # Save model and optimizer over the final epochs (epoch > max_epoch - 5)
        if epoch > max_epoch - 5:
            save_path = args.save_model_dir
            save_checkpoint(net, optimizer, epoch + 1, save_path)
        
        # Evaluate model
        if args.test_dir is not None:
            test(net, test_data_loader, logger)
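GazeOptimizer is external to this snippet. A minimal sketch with the same getOptimizer(epoch) interface; the Adam optimizer and the single LR drop at epoch 15 are assumptions:

import torch.optim as optim

class GazeOptimizer:
    def __init__(self, net, initial_lr):
        # One optimizer instance whose learning rate is adjusted per epoch.
        self.initial_lr = initial_lr
        self.optimizer = optim.Adam(net.parameters(), lr=initial_lr)

    def getOptimizer(self, epoch):
        # Drop the learning rate 10x from epoch 15 onward (assumed schedule).
        lr = self.initial_lr if epoch < 15 else self.initial_lr * 0.1
        for group in self.optimizer.param_groups:
            group['lr'] = lr
        return self.optimizer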
Example #3
def main():
    global opt, best_prec1

    opt = parser.parse_args()
    opt.logdir = opt.logdir + '/' + opt.name
    logger = None  # Logger(opt.logdir)
    opt.lr = opt.maxlr

    print(opt)
    best_prec1 = 0
    cudnn.benchmark = True
    model = init_model.load_model(opt)
    if opt.model_def.startswith('alexnet') or opt.model_def.startswith('vgg'):
        model.features = torch.nn.DataParallel(model.features)
        model.cuda()
    elif opt.ngpus > 1:
        model = torch.nn.DataParallel(model).cuda()
    print(model)
    model, criterion, optimizer = init_model.setup(model, opt)

    trainer = train.Trainer(model, criterion, optimizer, opt, logger)
    validator = train.Validator(model, criterion, opt, logger)

    if opt.resume:
        if os.path.isfile(opt.resume):
            model, optimizer, opt, best_prec1 = init_model.resumer(opt, model, optimizer)
        else:
            print("=> no checkpoint found at '{}'".format(opt.resume))

    dataloader = init_data.load_data(opt)
    train_loader = dataloader.train_loader
    #print(utils.get_mean_and_std(train_loader))
    val_loader = dataloader.val_loader

    for epoch in range(opt.start_epoch, opt.epochs):
        utils.adjust_learning_rate(opt, optimizer, epoch)
        print("Starting epoch number:",epoch,"Learning rate:", opt.lr)

        if not opt.testOnly:
            trainer.train(train_loader, epoch, opt)
        if opt.tensorboard and logger is not None:
            logger.scalar_summary('learning_rate', opt.lr, epoch)

        prec1 = validator.validate(val_loader, epoch, opt)
        best_prec1 = max(prec1, best_prec1)
        init_model.save_checkpoint(opt, model, optimizer, best_prec1, epoch)

        print('Best Prec@1: [{0:.3f}]\t'.format(best_prec1))
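The Logger passed to Trainer and Validator (left as None in this example) is not defined in these snippets. A minimal TensorBoard-backed sketch exposing the scalar_summary interface used above:

from torch.utils.tensorboard import SummaryWriter

class Logger:
    def __init__(self, logdir):
        self.writer = SummaryWriter(logdir)

    def scalar_summary(self, tag, value, step):
        # Record one scalar so TensorBoard can plot it against `step`.
        self.writer.add_scalar(tag, value, step)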
Example #4
def main():
    global opt, best_err1
    opt = parser.parse_args()
    best_err1 = 1000000
    print(opt)

    model = init.load_model(opt)
    model, criterion, optimizer = init.setup(model, opt)
    print(model)

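    # Note: `writer`, passed to Trainer and Validator below, is assumed to be
    # a module-level TensorBoard SummaryWriter created elsewhere.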
    trainer = train.Trainer(model, criterion, optimizer, opt, writer)
    validator = train.Validator(model, criterion, opt, writer)

    random.seed(opt.seed)
    torch.manual_seed(opt.seed)
    cudnn.deterministic = True

    if opt.resume:
        if os.path.isfile(opt.resume):
            model, optimizer, opt, best_err1 = init.resumer(
                opt, model, optimizer)
        else:
            print("=> no checkpoint found at '{}'".format(opt.resume))

    cudnn.benchmark = True

    dataloader = ld.GazeFollow(opt)

    train_loader = dataloader.train_loader
    val_loader = dataloader.val_loader

    for epoch in range(opt.start_epoch, opt.epochs):
        utils.adjust_learning_rate(opt, optimizer, epoch)
        print("Starting epoch number:", epoch + 1, "Learning rate:",
              optimizer.param_groups[0]["lr"])

        if opt.testOnly == False:
            trainer.train(train_loader, epoch, opt)

        err = validator.validate(val_loader, epoch, opt)
        best_err1 = min(err, best_err1)

        if epoch % 10 == 0:
            init.save_checkpoint(opt, model, optimizer, best_err1, epoch)

        print('Best error: [{0:.3f}]\t'.format(best_err1))
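init.resumer is used by several of these examples to restore state from opt.resume. A minimal sketch of what it needs to do given how it is called; the checkpoint keys are assumptions:

import torch

def resumer(opt, model, optimizer):
    # Restore model/optimizer state and the epoch to continue from.
    checkpoint = torch.load(opt.resume)
    model.load_state_dict(checkpoint['state_dict'])
    optimizer.load_state_dict(checkpoint['optimizer'])
    opt.start_epoch = checkpoint.get('epoch', 0) + 1
    best = checkpoint.get('best_err1', 0)
    return model, optimizer, opt, best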
Example #5
def main():
    global opt, best_err1
    opt = parser.parse_args()
    best_err1 = 1000000
    print(opt)
    model = tracknet.Net(opt)
    if opt.cuda:
        model = model.cuda()

    model, criterion, optimizer = init.setup(model, opt)
    print(model)

    trainer = train.Trainer(model, criterion, optimizer, opt)
    # validator = train.Validator(model, criterion, opt)
    if opt.resume:
        if os.path.isfile(opt.resume):
            model, optimizer, opt, best_err1 = init.resumer(
                opt, model, optimizer)
        else:
            print("=> no checkpoint found at '{}'".format(opt.resume))

    cudnn.benchmark = True
    dataloader = ld.SynthLoader(opt)
    train_loader = dataloader.train_loader

    for epoch in range(opt.start_epoch, opt.epochs):
        utils.adjust_learning_rate(opt, optimizer, epoch)
        print("Starting epoch number:", epoch + 1, "Learning rate:",
              optimizer.param_groups[0]["lr"])
        trainer.train(train_loader, epoch, opt)

        # err = validator.validate(val_loader, epoch, opt)
        # best_err1 = min(err, best_err1)
        # print('Best error: [{0:.3f}]\t'.format(best_err1))
        if epoch % 3 == 0 and epoch > 0 and opt.tosave:
            init.save_checkpoint(opt, model, optimizer, best_err1, epoch)
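init.save_checkpoint pairs with the resumer sketched above. A minimal version matching the (opt, model, optimizer, best_err1, epoch) call; the file name, the keys, and the assumption that opt.logdir exists are all hypothetical:

import os
import torch

def save_checkpoint(opt, model, optimizer, best_err1, epoch):
    # Bundle everything resumer() needs into a single file.
    state = {
        'epoch': epoch,
        'state_dict': model.state_dict(),
        'optimizer': optimizer.state_dict(),
        'best_err1': best_err1,
    }
    os.makedirs(opt.logdir, exist_ok=True)
    torch.save(state, os.path.join(opt.logdir, 'checkpoint.pth.tar'))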
Example #6
def main():
    # transform = _get_transform(args.input_resolution)

    # Prepare data
    print("Loading Data")

    batch_size = args.batch_size
    train_set = GazeDataset(args.train_dir, args.train_annotation, 'train')
    train_data_loader = DataLoader(dataset=train_set,
                                   batch_size=batch_size,
                                   shuffle=False,
                                   num_workers=8)

    if args.test_dir is not None:
        print('==> Loading Test Dataset')
        test_set = GazeDataset(args.test_dir, args.test_annotation, 'test')
        test_data_loader = DataLoader(test_set,
                                      batch_size=1,
                                      shuffle=False,
                                      num_workers=0)

    # Loads model
    print("Constructing model")
    net = ModelSpatial()
    net.cuda()
    # net.cuda().to(device)

    # Hyperparameters
    start_epoch = 0
    # max_epoch = 25
    max_epoch = 45

    learning_rate = args.init_lr

    # Load initial weights from a pretrained checkpoint, if given
    if args.init_weights:
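        # Partial loading: merge the pretrained weights into the current state
        # dict, so layers missing from the checkpoint keep their fresh
        # initialization, then load the merged dict back into the network.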
        model_dict = net.state_dict()
        pretrained_dict = torch.load(args.init_weights)
        pretrained_dict = pretrained_dict['model']
        model_dict.update(pretrained_dict)
        net.load_state_dict(model_dict)

    # Initializes Optimizer
    gaze_opt = GazeOptimizer(net, learning_rate)
    optimizer = gaze_opt.getOptimizer(start_epoch)

    # Resuming Training
    resume_training = args.resume_training
    print(resume_training)
    if resume_training:
        net, optimizer, start_epoch = resume_checkpoint(
            net, optimizer, args.resume_path)
        if args.test_dir is not None:
            test(net, test_data_loader, logger)

    for epoch in range(start_epoch, max_epoch):

        # Update optimizer
        optimizer = gaze_opt.getOptimizer(epoch)

        # Train model
        train(net, train_data_loader, optimizer, epoch, logger)

        # Save model and optimizer every 4 epochs
        if epoch % 4 == 0:
            save_path = './saved_models_gazefollow/temp_chong/'
            save_checkpoint(net, optimizer, epoch + 1, save_path)
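The save_checkpoint/resume_checkpoint helpers called in Examples #2 and #6 are not shown. A minimal sketch matching their call signatures; the checkpoint keys and file naming are assumptions:

import os
import torch

def save_checkpoint(net, optimizer, epoch, save_path):
    os.makedirs(save_path, exist_ok=True)
    state = {
        'epoch': epoch,
        'state_dict': net.state_dict(),
        'optimizer': optimizer.state_dict(),
    }
    torch.save(state, os.path.join(save_path, 'model_epoch%d.pth.tar' % epoch))

def resume_checkpoint(net, optimizer, resume_path):
    # Restore state and return the epoch to continue training from.
    checkpoint = torch.load(resume_path)
    net.load_state_dict(checkpoint['state_dict'])
    optimizer.load_state_dict(checkpoint['optimizer'])
    return net, optimizer, checkpoint['epoch']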
Example #7
def main():
    global opt, best_prec1
    opt = parser.parse_args()
    print(opt)

    # Data loading
    train_transform = custom_transforms.Compose([
        custom_transforms.RandomHorizontalFlip(),
        custom_transforms.RandomScaleCrop(),
        custom_transforms.ArrayToTensor(),
        custom_transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
    ])
    valid_transform = custom_transforms.Compose([
        custom_transforms.ArrayToTensor(),
        custom_transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
    ])
    print('Loading scenes in', opt.data_dir)
    train_set = SequenceFolder(opt.data_dir,
                               transform=train_transform,
                               seed=opt.seed,
                               train=True,
                               sequence_length=opt.sequence_length)

    val_set = ValidationSet(opt.data_dir, transform=valid_transform)

    print(len(train_set), 'train samples found')
    print(len(val_set), 'validation samples found')

    train_loader = torch.utils.data.DataLoader(train_set,
                                               batch_size=opt.batch_size,
                                               shuffle=True,
                                               num_workers=opt.workers,
                                               pin_memory=True)
    # val_loader = torch.utils.data.DataLoader(val_set, batch_size=opt.batch_size,
    #                                             shuffle=False, num_workers=opt.workers,
    #                                             pin_memory=True)
    if opt.epoch_size == 0:
        opt.epoch_size = len(train_loader)
    # Done loading

    disp_model = dispnet.DispNet().cuda()
    pose_model = posenet.PoseNet().cuda()
    disp_model, pose_model, optimizer = init.setup(disp_model, pose_model, opt)
    print(disp_model, pose_model)
    trainer = train.Trainer(disp_model, pose_model, optimizer, opt)
    if opt.resume:
        if os.path.isfile(opt.resume):
            # disp_model, pose_model, optimizer, opt, best_prec1 = init.resumer(opt, disp_model, pose_model, optimizer)
            disp_model, pose_model, optimizer, opt = init.resumer(
                opt, disp_model, pose_model, optimizer)
        else:
            print("=> no checkpoint found at '{}'".format(opt.resume))

    cudnn.benchmark = True
    for epoch in range(opt.start_epoch, opt.epochs):
        utils.adjust_learning_rate(opt, optimizer, epoch)
        print("Starting epoch number:", epoch + 1, "Learning rate:",
              optimizer.param_groups[0]["lr"])
        if not opt.testOnly:
            trainer.train(train_loader, epoch, opt)
        # init.save_checkpoint(opt, disp_model, pose_model, optimizer, best_prec1, epoch)
        init.save_checkpoint(opt, disp_model, pose_model, optimizer, epoch)
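In this example init.setup builds a single optimizer over both networks. A minimal sketch consistent with the call above, assuming opt.lr holds the base learning rate and Adam is the optimizer (both assumptions):

import itertools
import torch.optim as optim

def setup(disp_model, pose_model, opt):
    # Optimize the depth and pose networks jointly with one Adam instance.
    params = itertools.chain(disp_model.parameters(), pose_model.parameters())
    optimizer = optim.Adam(params, lr=opt.lr, betas=(0.9, 0.999))
    return disp_model, pose_model, optimizer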
Example #8
def main():
    global opt, best_studentprec1
    cudnn.benchmark = True

    opt = parser.parse_args()
    opt.logdir = opt.logdir + '/' + opt.name
    logger = Logger(opt.logdir)

    print(opt)
    best_studentprec1 = 0.0

    print('Loading models...')
    teacher = init.load_model(opt, 'teacher')
    student = init.load_model(opt, 'student')
    discriminator = init.load_model(opt, 'discriminator')
    teacher = init.setup(teacher, opt, 'teacher')
    student = init.setup(student, opt, 'student')
    discriminator = init.setup(discriminator, opt, 'discriminator')

    # TODO: write the code to classify samples into the 11th class
    print(teacher)
    print(student)
    print(discriminator)

    advCriterion = nn.BCELoss().cuda()
    similarityCriterion = nn.L1Loss().cuda()
    derivativeCriterion = nn.SmoothL1Loss().cuda()
    discclassifyCriterion = nn.CrossEntropyLoss(reduction='mean').cuda()  # size_average is deprecated; 'mean' is equivalent

    studOptim = getOptim(opt, student, 'student')
    discrecOptim = getOptim(opt, discriminator, 'discriminator')

    trainer = train.Trainer(student, teacher, discriminator,
                            discclassifyCriterion, advCriterion,
                            similarityCriterion, derivativeCriterion,
                            studOptim, discrecOptim, opt, logger)
    validator = train.Validator(student, teacher, discriminator, opt, logger)

    # TODO: resuming does not work yet; `model` and `optimizer` are undefined in this scope
    if opt.resume:
        if os.path.isfile(opt.resume):
            model, optimizer, opt, best_prec1 = init.resumer(
                opt, model, optimizer)
        else:
            print("=> no checkpoint found at '{}'".format(opt.resume))

    dataloader = init_data.load_data(opt)
    train_loader = dataloader.train_loader
    val_loader = dataloader.val_loader

    for epoch in range(opt.start_epoch, opt.epochs):
        utils.adjust_learning_rate(opt, studOptim, epoch)
        utils.adjust_learning_rate(opt, discrecOptim, epoch)
        print("Starting epoch number:", epoch + 1, "Learning rate:",
              studOptim.param_groups[0]["lr"])

        if not opt.testOnly:
            trainer.train(train_loader, epoch, opt)
        if opt.tensorboard:
            logger.scalar_summary('learning_rate', opt.lr, epoch)

        student_prec1 = validator.validate(val_loader, epoch, opt)
        best_studentprec1 = max(student_prec1, best_studentprec1)
        init.save_checkpoint(opt, teacher, student, discriminator, studOptim,
                             discrecOptim, student_prec1, epoch)

        print('Best accuracy: [{0:.3f}]\t'.format(best_studentprec1))
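The Trainer here wires a student, a teacher and a discriminator together, but the training loop itself is not shown. Purely as an illustration of how some of these criteria could combine in one student update (the derivative loss and the discriminator's own update are omitted, and all wiring and weights are assumptions, not the repository's actual method):

import torch

def student_step(student, teacher, discriminator, images, labels,
                 advCriterion, similarityCriterion, discclassifyCriterion,
                 studOptim):
    with torch.no_grad():
        teacher_out = teacher(images)  # fixed soft targets from the teacher
    student_out = student(images)

    # (a) supervised classification loss on the ground-truth labels
    cls_loss = discclassifyCriterion(student_out, labels)
    # (b) match the teacher's outputs (L1 similarity)
    sim_loss = similarityCriterion(student_out, teacher_out)
    # (c) fool the discriminator into scoring student outputs as teacher-like
    score = discriminator(student_out)
    adv_loss = advCriterion(score, torch.ones_like(score))

    loss = cls_loss + sim_loss + adv_loss  # equal weighting, an assumption
    studOptim.zero_grad()
    loss.backward()
    studOptim.step()
    return loss.item()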