Example 1
def main():
    """Fine-tune the backbone and classifier jointly, reporting the best
    validation accuracy seen over all epochs."""
    opt = parse_option()
    best_acc = 0

    # data
    train_loader, val_loader = set_loader(opt)

    # model and loss
    model, classifier, criterion = set_model(opt)

    # optimizer over both the classifier and the backbone
    # (a classifier-only variant would pass [classifier] alone)
    optimizer = set_optimizer(opt, [classifier, model])

    # training routine
    for epoch in range(1, opt.epochs + 1):
        adjust_learning_rate(opt, optimizer, epoch)

        # one training pass
        epoch_start = time.time()
        loss, acc = train(train_loader, model, classifier, criterion,
                          optimizer, epoch, opt)
        elapsed = time.time() - epoch_start
        print('Train epoch {}, total time {:.2f}, accuracy:{:.2f}'.format(
            epoch, elapsed, acc))

        # one validation pass; track the best accuracy
        loss, val_acc = validate(val_loader, model, classifier, criterion, opt)
        best_acc = max(best_acc, val_acc)

    print('best accuracy: {:.2f}'.format(best_acc))
Example 2
def main():
    """Supervised training loop with tensorboard logging and checkpointing.

    Trains for ``opt.epochs`` epochs, logs train/val loss and accuracy plus
    the learning rate each epoch, saves a checkpoint every ``opt.save_freq``
    epochs and a final ``last.pth``, and prints the best validation accuracy.
    """
    best_acc = 0
    opt = parse_option()

    # build data loader
    train_loader, val_loader = set_loader(opt)

    # build model and criterion
    model, criterion = set_model(opt)

    # build optimizer
    optimizer = set_optimizer(opt, model)

    # tensorboard
    writer = SummaryWriter(log_dir=opt.tb_folder, flush_secs=2)

    # FIX: the writer was never closed, so pending events could be lost on
    # exit; try/finally guarantees close() (which flushes) runs even if
    # training raises.
    try:
        # training routine
        for epoch in range(1, opt.epochs + 1):
            adjust_learning_rate(opt, optimizer, epoch)

            # train for one epoch
            time1 = time.time()
            loss, train_acc = train(train_loader, model, criterion, optimizer,
                                    epoch, opt)
            time2 = time.time()
            print('epoch {}, total time {:.2f}'.format(epoch, time2 - time1))

            # tensorboard logger
            writer.add_scalar('train_loss', loss, global_step=epoch)
            writer.add_scalar('train_acc', train_acc, global_step=epoch)
            writer.add_scalar('learning_rate',
                              optimizer.param_groups[0]['lr'],
                              global_step=epoch)

            # evaluation
            loss, val_acc = validate(val_loader, model, criterion, opt)
            writer.add_scalar('val_loss', loss, global_step=epoch)
            writer.add_scalar('val_acc', val_acc, global_step=epoch)

            if val_acc > best_acc:
                best_acc = val_acc

            if epoch % opt.save_freq == 0:
                save_file = os.path.join(
                    opt.save_folder,
                    'ckpt_epoch_{epoch}.pth'.format(epoch=epoch))
                save_model(model, optimizer, opt, epoch, save_file)

        # save the last model
        save_file = os.path.join(opt.save_folder, 'last.pth')
        save_model(model, optimizer, opt, opt.epochs, save_file)

        print('best accuracy: {:.2f}'.format(best_acc))
    finally:
        writer.close()
def main():
    """Linear-evaluation loop tracking both top-1 and top-5 validation
    accuracy; metrics go to a tensorboard logger and the Python logger."""
    opt = parse_option()
    best_acc, best_acc5 = 0, 0

    # data / model / optimizer (only the classifier is optimized here)
    train_loader, val_loader = set_loader(opt)
    model, classifier, criterion = set_model(opt)
    optimizer = set_optimizer(opt, classifier)

    logger = tb_logger.Logger(logdir=opt.tb_folder, flush_secs=2)

    # training routine
    for epoch in range(1, opt.epochs + 1):
        adjust_learning_rate(opt, optimizer, epoch)

        # one training pass
        tic = time.time()
        loss, acc, acc5 = train(train_loader, model, classifier, criterion,
                                optimizer, epoch, opt)
        logging.info(
            'Train epoch {}, total time {:.2f}, accuracy:{:.2f}'.format(
                epoch, time.time() - tic, acc))

        logger.log_value('classifier/train_loss', loss, epoch)
        logger.log_value('classifier/train_acc1', acc, epoch)
        logger.log_value('classifier/train_acc5', acc5, epoch)

        # one validation pass
        loss, val_acc, val_acc5 = validate(val_loader, model, classifier,
                                           criterion, opt)
        logger.log_value('classifier/val_loss', loss, epoch)
        logger.log_value('classifier/val_acc1', val_acc, epoch)
        logger.log_value('classifier/val_acc5', val_acc5, epoch)

        # keep the top-5 accuracy that accompanies the best top-1 epoch
        if val_acc > best_acc:
            best_acc, best_acc5 = val_acc, val_acc5

    logging.info('best accuracy: {:.2f}, accuracy5: {:.2f}'.format(
        best_acc, best_acc5))
Example 4
def main():
    """Linear-evaluation loop: train the classifier, validate, log metrics
    to tensorboard, and checkpoint every ``opt.save_freq`` epochs."""
    best_acc = 0
    opt = parse_option()

    # build data loader
    train_loader, val_loader = set_loader(opt)

    # build model and criterion
    model, classifier, criterion = set_model(opt)

    # build optimizer (optimizes the classifier parameters only)
    optimizer = set_optimizer(opt, classifier)

    # tensorboard
    logger = tb_logger.Logger(logdir=opt.tb_folder, flush_secs=2)

    # training routine
    for epoch in range(1, opt.epochs + 1):
        adjust_learning_rate(opt, optimizer, epoch)

        # train for one epoch
        time1 = time.time()
        loss, acc = train(train_loader, model, classifier, criterion,
                          optimizer, epoch, opt)
        time2 = time.time()
        print('Train epoch {}, total time {:.2f}, accuracy:{:.2f}'.format(
            epoch, time2 - time1, acc))

        # tensorboard logger -- FIX: log the training loss *before* it is
        # overwritten by validate(); previously 'loss' silently recorded the
        # validation loss instead of the train loss logged by the sibling
        # scripts under the same key.
        logger.log_value('loss', loss, epoch)
        logger.log_value('learning_rate', optimizer.param_groups[0]['lr'],
                         epoch)

        # eval for one epoch; validation metrics now logged explicitly
        loss, val_acc = validate(val_loader, model, classifier, criterion, opt)
        logger.log_value('val_loss', loss, epoch)
        logger.log_value('val_acc', val_acc, epoch)
        if val_acc > best_acc:
            best_acc = val_acc

        if epoch % opt.save_freq == 0:
            save_file = os.path.join(
                opt.save_folder, 'ckpt_epoch_{epoch}.pth'.format(epoch=epoch))
            save_model(model, optimizer, opt, epoch, save_file, classifier)

    print('best accuracy: {:.2f}'.format(best_acc))
Example 5
def main():
    """Train a classifier, keep a snapshot of the best-performing one, then
    run an adversarial-robustness sweep over ``opt.epsilons`` with it."""
    import copy  # local import: needed only for the best-classifier snapshot

    best_acc = 0
    opt = parse_option()

    # build data loader
    train_loader, val_loader = set_loader(opt)

    # build model and criterion
    model, classifier, criterion = set_model(opt)
    # BUG FIX: the original stored a plain reference (best_classifier =
    # classifier), but the optimizer updates `classifier` in place, so the
    # "best" classifier silently tracked the *latest* weights.  A deep copy
    # freezes the weights at the moment the best accuracy was observed.
    best_classifier = copy.deepcopy(classifier)

    # build optimizer
    optimizer = set_optimizer(opt, classifier)

    if opt.eval:
        loss, val_acc = validate(val_loader, model, classifier, criterion, opt)
    else:
        # training routine
        for epoch in range(1, opt.epochs + 1):
            adjust_learning_rate(opt, optimizer, epoch)

            # train for one epoch
            time1 = time.time()
            loss, acc = train(train_loader, model, classifier, criterion,
                              optimizer, epoch, opt)
            time2 = time.time()
            print('Train epoch {}, total time {:.2f}, accuracy:{:.2f}'.format(
                epoch, time2 - time1, acc))

            # eval for one epoch
            loss, val_acc = validate(val_loader, model, classifier, criterion,
                                     opt)
            if val_acc > best_acc:
                best_acc = val_acc
                best_classifier = copy.deepcopy(classifier)

        print('best accuracy: {:.2f}'.format(best_acc))

    # adversarial evaluation with the snapshot of the best classifier
    for epsilon in opt.epsilons:
        loss, acc, adv_acc = adveval(val_loader, model, best_classifier,
                                     criterion, opt, epsilon)
        print('adv accuracy at epsilon {:.2f}: {:.2f}'.format(
            epsilon, adv_acc))
Example 6
def main():
    """Pre-training loop: train, log scalars to tensorboard, checkpoint
    periodically, and always save a final ``last.pth``."""
    opt = parse_option()

    # data / model / optimizer
    train_loader = set_loader(opt)
    model, criterion = set_model(opt)
    optimizer = set_optimizer(opt, model)

    # tensorboard writer
    logger = tb_logger.Logger(logdir=opt.tb_folder, flush_secs=2)

    for epoch in range(1, opt.epochs + 1):
        adjust_learning_rate(opt, optimizer, epoch)

        # one training pass
        tic = time.time()
        loss = train(train_loader, model, criterion, optimizer, epoch, opt)
        print('epoch {}, total time {:.2f}'.format(epoch, time.time() - tic))

        # scalar logging
        logger.log_value('loss', loss, epoch)
        logger.log_value('learning_rate', optimizer.param_groups[0]['lr'],
                         epoch)

        # periodic checkpoint
        if epoch % opt.save_freq == 0:
            ckpt_path = os.path.join(
                opt.save_folder, 'ckpt_epoch_{epoch}.pth'.format(epoch=epoch))
            save_model(model, optimizer, opt, epoch, ckpt_path)

    # final checkpoint
    save_model(model, optimizer, opt, opt.epochs,
               os.path.join(opt.save_folder, 'last.pth'))
Example 7
def train(args):
    """Train a CheXpert model, validating every ``args.iters_per_eval``
    training iterations (including once before the first step).

    Builds dataloaders and the model from ``args``, runs ``args.num_epoch``
    epochs of minibatch training with BCE-with-logits loss, and logs
    metrics, images, and checkpoints through ``Logger``.
    """

    # get transformations (note: "trainsforms" is a pre-existing typo kept
    # for byte-compatibility)
    train_trainsforms = util.transformation_composer(args, "train", "CheXpert")
    valid_trainsforms = util.transformation_composer(args, "valid", "CheXpert")

    # get dataloader
    train_loader = get_dataloader(args, train_trainsforms, "train", "CheXpert")
    valid_loader = get_dataloader(args, valid_trainsforms, "valid", "CheXpert")

    # get model and put on device; 14 output classes (the CheXpert labels)
    model = CheXpertModel(
        model_name=args.model_name, 
        num_classes=14
    )
    # DataParallel only on CUDA; wrap before the .to() so replicas share it
    if args.device == "cuda":
        model = torch.nn.DataParallel(model, args.gpu_ids)
    model = model.to(args.device)

    # optimizer 
    optimizer = util.set_optimizer(opt=args, model=model)

    # loss function: mean-reduced BCE over the 14 independent label logits
    loss_fn = torch.nn.BCEWithLogitsLoss(reduction="mean")

    # define logger
    logger = Logger(
        log_dir=args.log_dir, 
        metrics_name=args.eval_metrics, 
        args=args
    )

    # iterate over epoch; global_step counts training minibatches across
    # all epochs and drives both validation cadence and logging
    global_step = 0
    model.train()
    for epoch in range(args.num_epoch):

        # training loop
        for inputs, targets in tqdm.tqdm(train_loader, desc=f"[epoch {epoch}]"):

            # validate every N training iterations; because global_step
            # starts at 0, this also runs a baseline eval before any training
            if global_step % args.iters_per_eval == 0:
                model.eval()
                probs, gt = [], []
                with torch.no_grad():

                    # validation loop: collect per-batch sigmoid
                    # probabilities and ground truth on the CPU
                    for val_inputs, val_targets in valid_loader:

                        batch_logits = model(val_inputs.to(args.device))
                        batch_probs = torch.sigmoid(batch_logits)

                        probs.append(batch_probs.cpu())
                        gt.append(val_targets.cpu())

                # evaluate results 
                metrics = util.evaluate(probs, gt, args.threshold)
                avg_metric, metric_dict = util.aggregate_metrics(metrics)

                # log per-metric values, their aggregate, and a sample of
                # the current *training* batch images
                logger.log_iteration(metric_dict, global_step, "val")
                logger.log_dict(avg_metric, global_step, "val")
                logger.log_image(inputs, global_step)

                # save checkpoint
                logger.save_checkpoint(model, metrics, global_step)

                # NOTE(review): presumably save_checkpoint moves the model
                # off-device (e.g. to CPU for serialization); otherwise this
                # .to() is a no-op — confirm against Logger.save_checkpoint
                model.to(args.device)

            # restore training mode (validation above switched to eval)
            model.train()
            with torch.set_grad_enabled(True):

                # Run the minibatch through the model.
                logits = model(inputs.to(args.device))

                # Compute the minibatch loss.
                loss = loss_fn(logits, targets.to(args.device))

                logger.log_dict({"train/loss": loss}, global_step, "train")

                # Perform a backward pass.
                optimizer.zero_grad()
                loss.backward()
                optimizer.step()

            global_step += 1 
Example 8
def main():
    """Either run clean + adversarial evaluation only (``opt.eval``), or
    train with per-epoch validation, tensorboard logging, and checkpoints."""
    opt = parse_option()
    best_acc = 0

    # data / model / optimizer
    train_loader, val_loader = set_loader(opt)
    model, criterion = set_model(opt)
    optimizer = set_optimizer(opt, model)

    # tensorboard
    logger = tb_logger.Logger(logdir=opt.tb_folder, flush_secs=2)

    if opt.eval:
        # evaluation-only mode: clean accuracy, then an epsilon sweep
        loss, acc = validate(val_loader, model, criterion, opt)
        print('clean accuracy: {:.2f}'.format(acc))
        for epsilon in opt.epsilons:
            loss, acc, adv_acc = adveval(val_loader, model, criterion, opt,
                                         epsilon)
            print('adv accuracy at epsilon {:.2f}: {:.2f}'.format(
                epsilon, adv_acc))
        return

    # training routine
    for epoch in range(1, opt.epochs + 1):
        adjust_learning_rate(opt, optimizer, epoch)

        # one training pass
        tic = time.time()
        loss, train_acc = train(train_loader, model, criterion, optimizer,
                                epoch, opt)
        print('epoch {}, total time {:.2f}'.format(epoch, time.time() - tic))

        # tensorboard logger
        logger.log_value('train_loss', loss, epoch)
        logger.log_value('train_acc', train_acc, epoch)
        logger.log_value('learning_rate', optimizer.param_groups[0]['lr'],
                         epoch)

        # one validation pass
        loss, val_acc = validate(val_loader, model, criterion, opt)
        logger.log_value('val_loss', loss, epoch)
        logger.log_value('val_acc', val_acc, epoch)

        best_acc = max(best_acc, val_acc)

        # periodic checkpoint
        if epoch % opt.save_freq == 0:
            ckpt_path = os.path.join(
                opt.save_folder,
                'ckpt_epoch_{epoch}.pth'.format(epoch=epoch))
            save_model(model, optimizer, opt, epoch, ckpt_path)

    # save the last model
    save_model(model, optimizer, opt, opt.epochs,
               os.path.join(opt.save_folder, 'last.pth'))

    print('best accuracy: {:.2f}'.format(best_acc))