Example #1
0
def validate(epoch, val_loader, criterion, model, args):
    """Run one validation pass over ``val_loader`` and return the mean loss.

    Each batch carries multiple crops per sample; the crops are folded into
    the batch dimension for a single forward pass and the predictions are
    averaged back per sample before computing loss and mAP.

    Args:
        epoch: Current epoch number, used for logging and for the
            final-epoch GPU cache cleanup.
        val_loader: Iterable of ``(data, target)`` where ``data`` has shape
            ``(bs, n_crops, c, h, w)``.
        criterion: Loss callable applied to ``(output, target)``.
        model: Network to evaluate; switched to eval mode here.
        args: Namespace providing ``cuda``, ``log_interval`` and
            ``num_epochs``.

    Returns:
        Average validation loss (``loss_meter.avg``).
    """
    batch_time = AverageMeter(name='batch_time')
    loss_meter = AverageMeter(name='losses')
    mapmeter = mAPMeter()
    num_samples = len(val_loader)

    model.eval()
    end = time.time()
    # Fix: validation needs no autograd graph — no_grad saves memory/time.
    with torch.no_grad():
        for batch_idx, (data, target) in enumerate(val_loader):
            bs, n_crops, c, h, w = data.size()
            # Fold the crop dimension into the batch for one forward pass.
            data = data.view(-1, c, h, w)

            if args.cuda:
                data = data.cuda(non_blocking=True)
                target = target.cuda(non_blocking=True)

            output = model(data)
            # Average predictions across the crops of each sample.
            output = output.view(bs, n_crops, -1).mean(1)
            loss = criterion(output, target)

            loss_meter.update(loss.item(), data.size(0))
            mapmeter.update(output, target)
            # Fix: batch_time was created and logged but never updated,
            # so reported timings were always zero.
            batch_time.update(time.time() - end)
            end = time.time()

            if batch_idx % args.log_interval == 0 and batch_idx > 0:
                log_progress('Validation', epoch, args.num_epochs, batch_idx,
                             num_samples, batch_time, loss_meter, mapmeter)

    if epoch == args.num_epochs:
        # Fix: guard against an empty loader, where data/target were
        # never bound and the bare ``del`` raised NameError.
        if num_samples > 0:
            del data, target
        torch.cuda.empty_cache()

    return loss_meter.avg
Example #2
0
def main():
    """Entry point: build a TransferNet over a DenseNet121 encoder,
    optionally resume from a checkpoint, and run a single validation pass.
    """
    # Let cuDNN benchmark and pick the fastest kernels for fixed-size inputs.
    torch.backends.cudnn.benchmark = True

    args = parse_args()
    args.cuda = not args.no_cuda and torch.cuda.is_available()

    # Seed CPU and GPU RNGs for reproducibility.
    torch.manual_seed(args.seed)
    if args.cuda:
        torch.cuda.manual_seed(args.seed)

    # Data loading
    loaders = XRayLoaders(data_dir=args.data, batch_size=args.batch_size)
    val_loader = loaders.val_loader(imagetxt=args.valtxt)

    encoder = DenseNet121()
    model = TransferNet(encoder)

    if args.resume:
        if os.path.isfile(args.savefile):
            print("=> loading checkpoint '{}'".format(args.savefile))
            checkpoint = torch.load(args.savefile)
            # must make sure the save states align.
            save_state = get_save_state(checkpoint)
            load_save_state(model, save_state)
            print("=> loaded checkpoint '{}'".format(args.savefile))
        else:
            print("=> no checkpoint found at '{}'".format(args.savefile))

    if args.parallel:
        model = nn.DataParallel(model)
        model = model.cuda()

    if args.cuda and not args.parallel:
        model.cuda()

    # NOTE(review): optimizer is never used in this validation-only script;
    # kept for parity with the training entry points — confirm intent.
    optimizer = optim.Adadelta(model.parameters(), lr=args.lr)

    # Fix: ``size_average`` was deprecated and later removed from PyTorch;
    # ``reduction='mean'`` is the exact modern equivalent.
    criterion = nn.BCEWithLogitsLoss(reduction='mean')
    if args.cuda:
        criterion.cuda()

    val_meters = {
        'val_loss': AverageMeter(name='valloss'),
        'val_time': AverageMeter(name='valtime'),
        'val_mavep': mAPMeter(),
        'val_accuracy': AverageMeter(name='valaccuracy')
    }

    # NOTE(review): this call passes ``val_meters`` and omits ``epoch``,
    # which does not match the validate(epoch, val_loader, criterion,
    # model, args) signature visible elsewhere in this file — verify
    # against the validate() definition actually imported here.
    val_loss, val_map = validate(val_loader, criterion, model, val_meters,
                                 args)
Example #3
0
def main():
    """Entry point: train and validate a half-precision LungXnet model for
    ``args.num_epochs`` epochs, reporting total and average epoch runtime.
    """
    # Let cuDNN benchmark and pick the fastest kernels for fixed-size inputs.
    torch.backends.cudnn.benchmark = True

    args = parse_args()
    args.cuda = not args.no_cuda and torch.cuda.is_available()

    # Seed CPU and GPU RNGs for reproducibility.
    torch.manual_seed(args.seed)
    if args.cuda:
        torch.cuda.manual_seed(args.seed)

    # Data loading
    loaders = XRayLoaders(data_dir=args.data, batch_size=args.batch_size)
    train_loader = loaders.train_loader(imagetxt=args.traintxt)
    val_loader = loaders.val_loader(imagetxt=args.valtxt)

    model = LungXnet()
    if args.parallel:
        # NOTE(review): DataParallel wrapping is commented out, so
        # ``parallel`` currently only moves the model to GPU in fp16 —
        # confirm whether multi-GPU was intended.
        #        model = nn.DataParallel(model)
        model = model.cuda().half()

    if args.cuda and not args.parallel:
        # NOTE(review): model runs in fp16 but criterion/inputs are not
        # explicitly converted here — confirm the loaders/criterion
        # handle half precision.
        model.cuda().half()

    optimizer = optim.Adadelta(model.parameters(), lr=args.lr)

    # Fix: ``size_average`` was deprecated and later removed from PyTorch;
    # ``reduction='mean'`` is the exact modern equivalent.
    criterion = nn.BCEWithLogitsLoss(reduction='mean')
    if args.cuda:
        criterion.cuda()

    train_meters = {
        'train_loss': AverageMeter(name='trainloss'),
        'train_time': AverageMeter(name='traintime'),
        'train_mavep': mAPMeter()
    }

    val_meters = {
        'val_loss': AverageMeter(name='valloss'),
        'val_time': AverageMeter(name='valtime'),
        'val_mavep': mAPMeter()
    }

    epoch_time = AverageMeter(name='epoch_time')
    end = time.time()
    print(f'Number of epochs: {args.num_epochs}')
    for epoch in range(1, args.num_epochs + 1):
        print(f'epoch: {epoch}')
        train_loss, train_map = train(train_loader,
                                      optimizer,
                                      criterion,
                                      model,
                                      train_meters,
                                      args,
                                      epoch=epoch)
        val_loss, val_map = validate(val_loader,
                                     criterion,
                                     model,
                                     val_meters,
                                     args,
                                     epoch=epoch)
        # Track wall-clock time per epoch.
        epoch_time.update(time.time() - end)
        end = time.time()

    print(
        f"\nJob's done! Total runtime: {epoch_time.sum}, Average runtime: {epoch_time.avg}"
    )
Example #4
0
def main():
    """Entry point: fine-tune a TransferNet over a (possibly resumed)
    DenseNet121 encoder, checkpointing every 10 epochs and saving the
    loss/accuracy meter logs at the end.
    """
    # Let cuDNN benchmark and pick the fastest kernels for fixed-size inputs.
    torch.backends.cudnn.benchmark = True

    args = parse_args()
    args.cuda = not args.no_cuda and torch.cuda.is_available()

    # Seed CPU and GPU RNGs for reproducibility.
    torch.manual_seed(args.seed)
    if args.cuda:
        torch.cuda.manual_seed(args.seed)

    # Data loading
    loaders = XRayLoaders(data_dir=args.data, batch_size=args.batch_size)
    train_loader = loaders.train_loader(imagetxt=args.traintxt)
    val_loader = loaders.val_loader(imagetxt=args.valtxt)

    encoder = DenseNet121()

    if args.resume:
        if os.path.isfile(args.savefile):
            print("=> loading checkpoint '{}'".format(args.savefile))
            checkpoint = torch.load(args.savefile)
            # Resume restores only the encoder weights; the TransferNet
            # head below starts fresh.
            encoder.load_state_dict(checkpoint['state_dict'])
            print("=> loaded checkpoint '{}'".format(args.savefile))
        else:
            print("=> no checkpoint found at '{}'".format(args.savefile))

    model = TransferNet(encoder)
    if args.parallel:
        model = nn.DataParallel(model)
        model = model.cuda()

    if args.cuda and not args.parallel:
        model.cuda()

    optimizer = optim.Adadelta(model.parameters(), lr=args.lr)

    # Fix: ``size_average`` was deprecated and later removed from PyTorch;
    # ``reduction='mean'`` is the exact modern equivalent.
    criterion = nn.BCEWithLogitsLoss(reduction='mean')
    if args.cuda:
        criterion.cuda()

    train_meters = {
        'train_loss': AverageMeter(name='trainloss'),
        'train_time': AverageMeter(name='traintime'),
        'train_mavep': mAPMeter()
    }

    val_meters = {
        'val_loss': AverageMeter(name='valloss'),
        'val_time': AverageMeter(name='valtime'),
        'val_mavep': mAPMeter(),
        'val_accuracy': AverageMeter(name='valaccuracy')
    }

    epoch_time = AverageMeter(name='epoch_time')
    end = time.time()
    print(f'Number of epochs: {args.num_epochs}')
    for epoch in range(1, args.num_epochs + 1):
        print(f'epoch: {epoch}')
        train_loss, train_map = train(train_loader,
                                      optimizer,
                                      criterion,
                                      model,
                                      train_meters,
                                      args,
                                      epoch=epoch)

        # Periodic checkpoint (every 10th epoch), taken after training and
        # before validation of that epoch.
        if epoch % 10 == 0:
            save_checkpoint({
                'epoch': epoch,
                'state_dict': model.state_dict(),
                'optimizer': optimizer.state_dict(),
            })

        val_loss, val_map = validate(val_loader,
                                     criterion,
                                     model,
                                     val_meters,
                                     args,
                                     epoch=epoch)
        # Track wall-clock time per epoch.
        epoch_time.update(time.time() - end)
        end = time.time()

    print(
        f"\nJob's done! Total runtime: {epoch_time.sum}, Average runtime: {epoch_time.avg}"
    )
    # NOTE(review): hard-coded user-specific output paths — consider
    # making these CLI arguments.
    train_meters['train_loss'].save('/home/ygx/lungs/lungs')
    val_meters['val_loss'].save('/home/ygx/lungs/lungs')
    val_meters['val_accuracy'].save('/home/ygx/lungs/lungs/acculogs')