import os
import time

import torch
import torch.nn as nn
import torch.optim as optim

# Shared imports for the main() variants below. Project-local helpers
# (parse_args, XRayLoaders, MiniCNN, DenseNet121, TransferNet, LungXnet,
# AutoEncoder, LinearDecoder, AverageMeter, mAPMeter, train, validate)
# are assumed importable from the repo.


def main():
    args = parse_args()

    loaders = XRayLoaders(data_dir=args.data, batch_size=args.batch_size)
    trainloader = loaders.train_loader(imagetxt=args.traintxt)

    hyperparameters = HyperParameters()
    features = MiniFeatures(hyperparameters)
    model = MiniCNN(features)
    print(f'Model: {model}')

    for batch_idx, (data, target) in enumerate(trainloader):
        # The original loop body is missing; report the first batch's shapes
        # as a minimal smoke test (an assumption, not the author's code).
        print(f'batch {batch_idx}: data {data.shape}, target {target.shape}')
        break
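# Each variant below is presumably run as a script via the standard
# entry-point guard; the flattened source omits it, so it is shown once here
# as an assumption that applies to every file.
if __name__ == '__main__':
    main()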
def main():
    torch.backends.cudnn.benchmark = True
    args = parse_args()
    args.cuda = not args.no_cuda and torch.cuda.is_available()
    torch.manual_seed(args.seed)
    if args.cuda:
        torch.cuda.manual_seed(args.seed)

    # Data loading
    loaders = XRayLoaders(data_dir=args.data, batch_size=args.batch_size)
    val_loader = loaders.val_loader(imagetxt=args.valtxt)

    encoder = DenseNet121()
    model = TransferNet(encoder)

    if args.resume:
        if os.path.isfile(args.savefile):
            print("=> loading checkpoint '{}'".format(args.savefile))
            checkpoint = torch.load(args.savefile)
            # Must make sure the save states align.
            save_state = get_save_state(checkpoint)
            load_save_state(model, save_state)
            print("=> loaded checkpoint '{}'".format(args.savefile))
        else:
            print("=> no checkpoint found at '{}'".format(args.savefile))

    if args.parallel:
        model = nn.DataParallel(model)
        model = model.cuda()
    if args.cuda and not args.parallel:
        model.cuda()

    optimizer = optim.Adadelta(model.parameters(), lr=args.lr)
    # size_average=True is deprecated; reduction='mean' is the equivalent.
    criterion = nn.BCEWithLogitsLoss(reduction='mean')
    if args.cuda:
        criterion.cuda()

    val_meters = {
        'val_loss': AverageMeter(name='valloss'),
        'val_time': AverageMeter(name='valtime'),
        'val_mavep': mAPMeter(),
        'val_accuracy': AverageMeter(name='valaccuracy'),
    }

    val_loss, val_map = validate(val_loader, criterion, model, val_meters, args)
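# get_save_state and load_save_state are not defined in this file. A minimal
# sketch, assuming the intent behind "make sure the save states align" is to
# strip the 'module.' prefix that nn.DataParallel prepends to keys and to copy
# only the parameters whose names and shapes match the current model:
def get_save_state(checkpoint):
    # Accept either a raw state dict or a dict wrapping one under 'state_dict'.
    state = checkpoint.get('state_dict', checkpoint)
    return {k[len('module.'):] if k.startswith('module.') else k: v
            for k, v in state.items()}


def load_save_state(model, save_state):
    # Copy only the entries that align with the model, so a partially
    # matching checkpoint still loads cleanly.
    model_state = model.state_dict()
    matched = {k: v for k, v in save_state.items()
               if k in model_state and v.size() == model_state[k].size()}
    model_state.update(matched)
    model.load_state_dict(model_state)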
def main():
    torch.backends.cudnn.benchmark = True
    args = parse_args()
    args.cuda = not args.no_cuda and torch.cuda.is_available()
    torch.manual_seed(args.seed)
    if args.cuda:
        torch.cuda.manual_seed(args.seed)

    # Data loading
    loaders = XRayLoaders(data_dir=args.data, batch_size=args.batch_size)
    train_loader = loaders.train_loader(imagetxt=args.traintxt)
    val_loader = loaders.val_loader(imagetxt=args.valtxt)

    encoder = DenseNet121()
    decoder = LinearDecoder(200, 300)
    model = AutoEncoder(encoder, decoder)
    if args.cuda and not args.parallel:
        model.cuda()

    optimizer = optim.Adadelta(model.parameters(), lr=args.lr)
    criterion = nn.MSELoss()
    if args.cuda:
        criterion.cuda()

    meters = {
        'loss': AverageMeter(name='trainloss'),
        'train_time': AverageMeter(name='traintime'),
    }
    epoch_time = AverageMeter(name='epoch_time')
    end = time.time()

    print(f'Number of epochs: {args.num_epochs}')
    for epoch in range(1, args.num_epochs + 1):
        train(train_loader, optimizer, criterion, model, meters, epoch, args)
        epoch_time.update(time.time() - end)
        end = time.time()

    print(
        f"\nJob's done! Total runtime: {epoch_time.sum}, Average runtime: {epoch_time.avg}"
    )
    meters['loss'].save('/home/ygx/lungs/lungs/koda')
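# Every variant leans on AverageMeter's update/sum/avg/save interface. A
# minimal sketch of that interface, assuming save() writes the recorded
# values to a file named after the meter inside the given directory; the
# project's real class may differ.
class AverageMeter:
    def __init__(self, name):
        self.name = name
        self.reset()

    def reset(self):
        self.val, self.sum, self.count, self.avg = 0.0, 0.0, 0, 0.0
        self.history = []

    def update(self, val, n=1):
        # Track the latest value and a running sum/average weighted by n.
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count
        self.history.append(val)

    def save(self, directory):
        # Persist the recorded values so runs can be compared later.
        path = os.path.join(directory, f'{self.name}.txt')
        with open(path, 'w') as f:
            f.writelines(f'{v}\n' for v in self.history)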
def main():
    torch.backends.cudnn.benchmark = True
    args = parse_args()
    args.cuda = not args.no_cuda and torch.cuda.is_available()
    torch.manual_seed(args.seed)
    if args.cuda:
        torch.cuda.manual_seed(args.seed)

    # Data loading
    loaders = XRayLoaders(data_dir=args.data, batch_size=args.batch_size)
    train_loader = loaders.train_loader(imagetxt=args.traintxt)
    val_loader = loaders.val_loader(imagetxt=args.valtxt)

    model = LungXnet()
    if args.parallel:
        # model = nn.DataParallel(model)
        model = model.cuda().half()
    if args.cuda and not args.parallel:
        model.cuda().half()

    optimizer = optim.Adadelta(model.parameters(), lr=args.lr)
    # size_average=True is deprecated; reduction='mean' is the equivalent.
    criterion = nn.BCEWithLogitsLoss(reduction='mean')
    if args.cuda:
        criterion.cuda()

    train_meters = {
        'train_loss': AverageMeter(name='trainloss'),
        'train_time': AverageMeter(name='traintime'),
        'train_mavep': mAPMeter(),
    }
    val_meters = {
        'val_loss': AverageMeter(name='valloss'),
        'val_time': AverageMeter(name='valtime'),
        'val_mavep': mAPMeter(),
    }
    epoch_time = AverageMeter(name='epoch_time')
    end = time.time()

    print(f'Number of epochs: {args.num_epochs}')
    for epoch in range(1, args.num_epochs + 1):
        print(f'epoch: {epoch}')
        train_loss, train_map = train(train_loader, optimizer, criterion,
                                      model, train_meters, args, epoch=epoch)
        val_loss, val_map = validate(val_loader, criterion, model,
                                     val_meters, args, epoch=epoch)
        epoch_time.update(time.time() - end)
        end = time.time()

    print(
        f"\nJob's done! Total runtime: {epoch_time.sum}, Average runtime: {epoch_time.avg}"
    )
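# This variant trains in half precision (model.cuda().half()) but train()
# itself is not shown here. A common pattern, sketched below purely as an
# assumption about the inner step, is to cast inputs to half so they match
# the fp16 weights while evaluating BCEWithLogitsLoss in float32 for
# numerical stability.
def fp16_step(model, criterion, data, target):
    data = data.cuda().half()        # inputs must match the model's fp16 weights
    target = target.cuda().float()   # keep targets in fp32
    output = model(data)
    return criterion(output.float(), target)  # loss in fp32 for stability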
def main():
    torch.backends.cudnn.benchmark = True
    args = parse_args()
    args.cuda = not args.no_cuda and torch.cuda.is_available()
    torch.manual_seed(args.seed)
    if args.cuda:
        torch.cuda.manual_seed(args.seed)

    # Data loading
    loaders = XRayLoaders(data_dir=args.data, batch_size=args.batch_size)
    train_loader = loaders.train_loader(imagetxt=args.traintxt)
    val_loader = loaders.val_loader(imagetxt=args.valtxt)

    encoder = DenseNet121()
    if args.resume:
        if os.path.isfile(args.savefile):
            print("=> loading checkpoint '{}'".format(args.savefile))
            checkpoint = torch.load(args.savefile)
            encoder.load_state_dict(checkpoint['state_dict'])
            print("=> loaded checkpoint '{}'".format(args.savefile))
        else:
            print("=> no checkpoint found at '{}'".format(args.savefile))

    model = TransferNet(encoder)
    if args.parallel:
        model = nn.DataParallel(model)
        model = model.cuda()
    if args.cuda and not args.parallel:
        model.cuda()

    optimizer = optim.Adadelta(model.parameters(), lr=args.lr)
    # size_average=True is deprecated; reduction='mean' is the equivalent.
    criterion = nn.BCEWithLogitsLoss(reduction='mean')
    if args.cuda:
        criterion.cuda()

    train_meters = {
        'train_loss': AverageMeter(name='trainloss'),
        'train_time': AverageMeter(name='traintime'),
        'train_mavep': mAPMeter(),
    }
    val_meters = {
        'val_loss': AverageMeter(name='valloss'),
        'val_time': AverageMeter(name='valtime'),
        'val_mavep': mAPMeter(),
        'val_accuracy': AverageMeter(name='valaccuracy'),
    }
    epoch_time = AverageMeter(name='epoch_time')
    end = time.time()

    print(f'Number of epochs: {args.num_epochs}')
    for epoch in range(1, args.num_epochs + 1):
        print(f'epoch: {epoch}')
        train_loss, train_map = train(train_loader, optimizer, criterion,
                                      model, train_meters, args, epoch=epoch)
        if epoch % 10 == 0:
            save_checkpoint({
                'epoch': epoch,
                'state_dict': model.state_dict(),
                'optimizer': optimizer.state_dict(),
            })
        val_loss, val_map = validate(val_loader, criterion, model,
                                     val_meters, args, epoch=epoch)
        epoch_time.update(time.time() - end)
        end = time.time()

    print(
        f"\nJob's done! Total runtime: {epoch_time.sum}, Average runtime: {epoch_time.avg}"
    )
    train_meters['train_loss'].save('/home/ygx/lungs/lungs')
    val_meters['val_loss'].save('/home/ygx/lungs/lungs')
    val_meters['val_accuracy'].save('/home/ygx/lungs/lungs/acculogs')
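# save_checkpoint is called every tenth epoch above but is not defined in this
# file. A minimal sketch, assuming a fixed default filename (the real helper
# may take a path argument or rotate files):
def save_checkpoint(state, filename='checkpoint.pth.tar'):
    # Persist model/optimizer state so the --resume path above can reload it.
    torch.save(state, filename)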