Example #1
def main():
    global opt, best_prec1

    opt = parser.parse_args()
    opt.logdir = opt.logdir + '/' + opt.name
    logger = None  # placeholder; a Logger(opt.logdir) instance could be attached here

    best_prec1 = 0
    print(opt)

    # Initialize the model, criterion and the optimizer
    model = init.load_model(opt)
    model, criterion, optimizer = init.setup(model, opt)
    # Display the model structure
    print(model)

    # Setup trainer and validation
    trainer = train.Trainer(model, criterion, optimizer, opt, logger)
    validator = train.Validator(model, criterion, opt, logger)

    # Load model from a checkpoint if mentioned in opts
    if opt.resume:
        if os.path.isfile(opt.resume):
            model, optimizer, opt, best_prec1 = init.resumer(
                opt, model, optimizer)
        else:
            print("=> no checkpoint found at '{}'".format(opt.resume))

    cudnn.benchmark = True

    # Setup the train and validation data loaders
    dataloader = init_data.load_data(opt)
    train_loader = dataloader.train_loader
    val_loader = dataloader.val_loader

    for epoch in range(opt.start_epoch, opt.epochs):
        utils.adjust_learning_rate(opt, optimizer, epoch)
        print("Starting epoch number:", epoch + 1, "Learning rate:",
              optimizer.param_groups[0]["lr"])

        if not opt.testOnly:
            # Train the network over the training data
            trainer.train(train_loader, epoch, opt)

        # if opt.tensorboard:
        #     logger.scalar_summary('learning_rate', opt.lr, epoch)

        # Measure the validation accuracy
        acc = validator.validate(val_loader, epoch, opt)
        best_prec1 = max(acc, best_prec1)
        if best_prec1 == acc:
            # Save the new model if the accuracy is better than the previous saved model
            init.save_checkpoint(opt, model, optimizer, best_prec1, epoch)

        print('Best accuracy: [{0:.3f}]\t'.format(best_prec1))
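The loop above calls a project-local utils.adjust_learning_rate helper that is not shown in the example. A minimal sketch of a step-decay schedule it might implement (the base rate opt.lr, the decay factor, and the decay interval are assumptions, not taken from the repository):

def adjust_learning_rate(opt, optimizer, epoch, decay=0.1, decay_every=30):
    # Hypothetical step decay: scale the base learning rate every `decay_every` epochs.
    lr = opt.lr * (decay ** (epoch // decay_every))
    for param_group in optimizer.param_groups:
        param_group['lr'] = lr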
Example #2
def main():
    global opt, best_prec1

    opt = parser.parse_args()
    opt.logdir = opt.logdir + '/' + opt.name
    logger = None  # Logger(opt.logdir)
    opt.lr = opt.maxlr

    print(opt)
    best_prec1 = 0
    cudnn.benchmark = True
    model = init_model.load_model(opt)
    if opt.model_def.startswith('alexnet') or opt.model_def.startswith('vgg'):
        model.features = torch.nn.DataParallel(model.features)
        model.cuda()
    elif opt.ngpus > 1:
        model = torch.nn.DataParallel(model).cuda()
    print(model)
    model, criterion, optimizer = init_model.setup(model, opt)

    trainer = train.Trainer(model, criterion, optimizer, opt, logger)
    validator = train.Validator(model, criterion, opt, logger)

    if opt.resume:
        if os.path.isfile(opt.resume):
            model, optimizer, opt, best_prec1 = init_model.resumer(
                opt, model, optimizer)
        else:
            print("=> no checkpoint found at '{}'".format(opt.resume))

    dataloader = init_data.load_data(opt)
    train_loader = dataloader.train_loader
    #print(utils.get_mean_and_std(train_loader))
    val_loader = dataloader.val_loader

    for epoch in range(opt.start_epoch, opt.epochs):
        utils.adjust_learning_rate(opt, optimizer, epoch)
        print("Starting epoch number:",epoch,"Learning rate:", opt.lr)

        if not opt.testOnly:
            trainer.train(train_loader, epoch, opt)
        if opt.tensorboard and logger is not None:
            # logger is set to None above, so guard before logging
            logger.scalar_summary('learning_rate', opt.lr, epoch)

        prec1 = validator.validate(val_loader, epoch, opt)
        best_prec1 = max(prec1, best_prec1)
        init_model.save_checkpoint(opt, model, optimizer, best_prec1, epoch)

        print('Best Prec@1: [{0:.3f}]\t'.format(best_prec1))
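The helpers init_model.save_checkpoint and init_model.resumer used above are project-local. A minimal sketch of what such a save/resume pair could look like, assuming a single-file checkpoint with these dictionary keys (file name and keys are illustrative, not confirmed by the example):

import os
import torch

def save_checkpoint(opt, model, optimizer, best_prec1, epoch):
    # Persist everything needed to resume training later.
    state = {
        'epoch': epoch + 1,
        'state_dict': model.state_dict(),
        'optimizer': optimizer.state_dict(),
        'best_prec1': best_prec1,
    }
    torch.save(state, os.path.join(opt.logdir, 'checkpoint.pth.tar'))

def resumer(opt, model, optimizer):
    # Restore the model/optimizer state and the bookkeeping values saved above.
    checkpoint = torch.load(opt.resume)
    model.load_state_dict(checkpoint['state_dict'])
    optimizer.load_state_dict(checkpoint['optimizer'])
    opt.start_epoch = checkpoint['epoch']
    return model, optimizer, opt, checkpoint['best_prec1']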
Example #3
def main():
    global opt, best_err1
    opt = parser.parse_args()
    best_err1 = 1000000
    print(opt)

    model = init.load_model(opt)
    model, criterion, optimizer = init.setup(model, opt)
    print(model)

    # `writer` is expected to be a module-level summary writer defined elsewhere
    trainer = train.Trainer(model, criterion, optimizer, opt, writer)
    validator = train.Validator(model, criterion, opt, writer)

    random.seed(opt.seed)
    torch.manual_seed(opt.seed)
    cudnn.deterministic = True

    if opt.resume:
        if os.path.isfile(opt.resume):
            model, optimizer, opt, best_err1 = init.resumer(
                opt, model, optimizer)
        else:
            print("=> no checkpoint found at '{}'".format(opt.resume))

    cudnn.benchmark = True

    dataloader = ld.GazeFollow(opt)

    train_loader = dataloader.train_loader
    val_loader = dataloader.val_loader

    for epoch in range(opt.start_epoch, opt.epochs):
        utils.adjust_learning_rate(opt, optimizer, epoch)
        print("Starting epoch number:", epoch + 1, "Learning rate:",
              optimizer.param_groups[0]["lr"])

        if not opt.testOnly:
            trainer.train(train_loader, epoch, opt)

        err = validator.validate(val_loader, epoch, opt)
        best_err1 = min(err, best_err1)

        if epoch % 10 == 0:
            init.save_checkpoint(opt, model, optimizer, best_err1, epoch)

        print('Best error: [{0:.3f}]\t'.format(best_err1))
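The `writer` passed to Trainer and Validator in this example is not defined in the snippet; it is presumably a module-level summary writer created before main() runs. A minimal sketch using TensorBoard's SummaryWriter (the log directory is an assumption):

from torch.utils.tensorboard import SummaryWriter

# Module-level writer shared by the trainer and validator.
writer = SummaryWriter(log_dir='runs/gazefollow')

# Inside a training or validation step, scalars can then be logged, e.g.:
# writer.add_scalar('val/error', err, epoch)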
Example #4
def main():
    global opt, best_studentprec1
    cudnn.benchmark = True

    opt = parser.parse_args()
    opt.logdir = opt.logdir + '/' + opt.name
    logger = Logger(opt.logdir)

    print(opt)
    best_studentprec1 = 0.0

    print('Loading models...')
    teacher = init.load_model(opt, 'teacher')
    student = init.load_model(opt, 'student')
    discriminator = init.load_model(opt, 'discriminator')
    teacher = init.setup(teacher, opt, 'teacher')
    student = init.setup(student, opt, 'student')
    discriminator = init.setup(discriminator, opt, 'discriminator')

    # TODO: write the code to classify it in the 11th class
    print(teacher)
    print(student)
    print(discriminator)

    advCriterion = nn.BCELoss().cuda()
    similarityCriterion = nn.L1Loss().cuda()
    derivativeCriterion = nn.SmoothL1Loss().cuda()
    # size_average is deprecated in recent PyTorch; reduction='mean' is the equivalent
    discclassifyCriterion = nn.CrossEntropyLoss(reduction='mean').cuda()

    studOptim = getOptim(opt, student, 'student')
    discrecOptim = getOptim(opt, discriminator, 'discriminator')

    trainer = train.Trainer(student, teacher, discriminator,
                            discclassifyCriterion, advCriterion,
                            similarityCriterion, derivativeCriterion,
                            studOptim, discrecOptim, opt, logger)
    validator = train.Validator(student, teacher, discriminator, opt, logger)

    # TODO: resuming does not work yet; `model` and `optimizer` are not defined
    # in this function, so the teacher/student/discriminator states would need
    # to be restored individually.
    if opt.resume:
        if os.path.isfile(opt.resume):
            model, optimizer, opt, best_prec1 = init.resumer(
                opt, model, optimizer)
        else:
            print("=> no checkpoint found at '{}'".format(opt.resume))

    dataloader = init_data.load_data(opt)
    train_loader = dataloader.train_loader
    val_loader = dataloader.val_loader

    for epoch in range(opt.start_epoch, opt.epochs):
        utils.adjust_learning_rate(opt, studOptim, epoch)
        utils.adjust_learning_rate(opt, discrecOptim, epoch)
        print("Starting epoch number:", epoch + 1, "Learning rate:",
              studOptim.param_groups[0]["lr"])

        if not opt.testOnly:
            trainer.train(train_loader, epoch, opt)
        if opt.tensorboard:
            logger.scalar_summary('learning_rate', opt.lr, epoch)

        student_prec1 = validator.validate(val_loader, epoch, opt)
        best_studentprec1 = max(student_prec1, best_studentprec1)
        init.save_checkpoint(opt, teacher, student, discriminator, studOptim,
                             discrecOptim, student_prec1, epoch)

        print('Best accuracy: [{0:.3f}]\t'.format(best_studentprec1))
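The getOptim helper that builds separate optimizers for the student and discriminator is not shown. A plausible sketch, under the assumption that the options carry optimType, lr, momentum, and weightDecay fields (all hypothetical names):

import torch.optim as optim

def getOptim(opt, model, name):
    # Hypothetical helper: build an optimizer for the named sub-network.
    if getattr(opt, 'optimType', 'sgd') == 'adam':
        return optim.Adam(model.parameters(), lr=opt.lr,
                          weight_decay=opt.weightDecay)
    return optim.SGD(model.parameters(), lr=opt.lr, momentum=opt.momentum,
                     weight_decay=opt.weightDecay)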