def main():
    global args, logger
    args = parser.parse_args()
    set_prefix(args.prefix, __file__)
    model = model_builder()
    optimizer = optim.Adam(model.parameters(), lr=args.lr)
    # accelerate training
    cudnn.benchmark = True
    train_loader, val_loader = load_dataset()
    # class_names = ['LESION', 'NORMAL']
    class_names = train_loader.dataset.class_names
    print(class_names)
    # decay the learning rate every `step_size` epochs
    exp_lr_scheduler = lr_scheduler.StepLR(optimizer, step_size=args.step_size, gamma=args.gamma)
    since = time.time()
    print('-' * 10)
    for epoch in range(args.epochs):
        # adjust weights once the unet can nearly be regarded as an identity mapping
        train(train_loader, model, optimizer, epoch)
        exp_lr_scheduler.step()  # step the scheduler after the epoch's optimizer updates
    time_elapsed = time.time() - since
    print('Training complete in {:.0f}m {:.0f}s'.format(
        time_elapsed // 60, time_elapsed % 60))
    with torch.no_grad():
        validate(model, train_loader, val_loader)
    # save_typical_result(model)
    torch.save(model.state_dict(), add_prefix(args.prefix, 'locator.pkl'))
    write(vars(args), add_prefix(args.prefix, 'paras.txt'))
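# The prefix helpers used above (`set_prefix`, `add_prefix`, `write`) are project-local
# utilities whose definitions are not shown here. A minimal sketch of plausible
# implementations, assuming `prefix` names a per-run output directory; the real
# helpers may differ:
import json
import os
import shutil


def set_prefix(prefix, script_path):
    # create the run's output directory and archive the running script into it
    os.makedirs(prefix, exist_ok=True)
    shutil.copy(script_path, prefix)


def add_prefix(prefix, path):
    # join an output filename onto the run's output directory
    return os.path.join(prefix, path)


def write(params, path):
    # dump the argparse namespace (as a dict) to json for reproducibility
    with open(path, 'w') as f:
        json.dump(params, f, indent=4)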
def main():
    global args, min_loss, best_acc
    args = parser.parse_args()
    device_counts = torch.cuda.device_count()
    print('there are %d gpus in use' % device_counts)
    # save the source script
    set_prefix(args.prefix, __file__)
    model = model_selector(args.model_type)
    print(model)
    if args.cuda:
        model = DataParallel(model).cuda()
    else:
        raise RuntimeError('there is no gpu')
    optimizer = optim.Adam(model.parameters(), lr=args.lr)
    # accelerate training
    cudnn.benchmark = True
    train_loader, val_loader = load_dataset()
    # class_names = ['LESION', 'NORMAL']
    class_names = train_loader.dataset.class_names
    print(class_names)
    criterion = nn.BCELoss().cuda()
    # decay the learning rate every `step_size` epochs
    exp_lr_scheduler = lr_scheduler.StepLR(optimizer, step_size=args.step_size, gamma=args.gamma)
    since = time.time()
    print('-' * 10)
    for epoch in range(args.epochs):
        train(train_loader, model, optimizer, criterion, epoch)
        exp_lr_scheduler.step()  # step the scheduler after the epoch's optimizer updates
        cur_loss, cur_acc = validate(model, val_loader, criterion)
        is_best = cur_loss < min_loss
        # update the running minimum so later epochs compare against it correctly
        min_loss = min(cur_loss, min_loss)
        if is_best:
            best_acc = cur_acc
        save_checkpoint(
            {
                'epoch': epoch + 1,
                'arch': args.model_type,
                'state_dict': model.state_dict(),
                'min_loss': min_loss,
                'acc': best_acc,
                'optimizer': optimizer.state_dict(),
            }, is_best)
    time_elapsed = time.time() - since
    print('Training complete in {:.0f}m {:.0f}s'.format(
        time_elapsed // 60, time_elapsed % 60))
    check_point = torch.load(add_prefix(args.prefix, args.best_model_path))
    print('min_loss=%.4f, best_acc=%.4f' % (check_point['min_loss'], check_point['acc']))
    write(vars(args), add_prefix(args.prefix, 'paras.txt'))
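# `save_checkpoint` is not defined in this file. A minimal sketch of the common
# PyTorch checkpointing pattern it appears to follow; the 'checkpoint.pth.tar'
# filename is an assumption, and `args.best_model_path` is the flag used above:
import shutil

import torch


def save_checkpoint(state, is_best, filename='checkpoint.pth.tar'):
    # always persist the latest state; additionally copy it aside when it is
    # the best seen so far
    path = add_prefix(args.prefix, filename)
    torch.save(state, path)
    if is_best:
        shutil.copyfile(path, add_prefix(args.prefix, args.best_model_path))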
def main():
    global args, best_acc
    args = parser.parse_args()
    # save the source script
    set_prefix(args.prefix, __file__)
    model = models.densenet121(pretrained=False, num_classes=2)
    if args.cuda:
        model = DataParallel(model).cuda()
    else:
        warnings.warn('there is no gpu')
    optimizer = optim.Adam(model.parameters(), lr=args.lr)
    # accelerate training
    cudnn.benchmark = True
    train_loader, val_loader = load_dataset()
    # class_names = ['LESION', 'NORMAL']
    class_names = train_loader.dataset.classes
    print(class_names)
    if args.is_focal_loss:
        print('try focal loss!!')
        criterion = FocalLoss().cuda()
    else:
        criterion = nn.CrossEntropyLoss().cuda()
    # decay the learning rate every `step_size` epochs
    exp_lr_scheduler = lr_scheduler.StepLR(optimizer, step_size=args.step_size, gamma=args.gamma)
    since = time.time()
    print('-' * 10)
    for epoch in range(args.epochs):
        train(train_loader, model, optimizer, criterion, epoch)
        exp_lr_scheduler.step()  # step the scheduler after the epoch's optimizer updates
        cur_accuracy = validate(model, val_loader, criterion)
        is_best = cur_accuracy > best_acc
        best_acc = max(cur_accuracy, best_acc)
        save_checkpoint(
            {
                'epoch': epoch + 1,
                'arch': 'densenet121',  # the model actually built above (was mislabeled 'resnet18')
                'state_dict': model.state_dict(),
                'best_accuracy': best_acc,
                'optimizer': optimizer.state_dict(),
            }, is_best)
    time_elapsed = time.time() - since
    print('Training complete in {:.0f}m {:.0f}s'.format(
        time_elapsed // 60, time_elapsed % 60))
    # compute validation metrics such as the confusion matrix
    compute_validate_meter(model, add_prefix(args.prefix, args.best_model_path), val_loader)
    # save the run's parameter settings to json
    write(vars(args), add_prefix(args.prefix, 'paras.txt'))
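# `FocalLoss` comes from elsewhere in the project. A minimal sketch of the standard
# multi-class focal loss (Lin et al., 2017) that would be drop-in compatible with
# `nn.CrossEntropyLoss` here; the `gamma` and `alpha` defaults are assumptions:
import torch
import torch.nn as nn
import torch.nn.functional as F


class FocalLoss(nn.Module):
    def __init__(self, gamma=2.0, alpha=1.0):
        super(FocalLoss, self).__init__()
        self.gamma = gamma
        self.alpha = alpha

    def forward(self, logits, targets):
        # per-sample cross entropy, kept unreduced
        ce = F.cross_entropy(logits, targets, reduction='none')
        # p_t is the predicted probability of the true class
        pt = torch.exp(-ce)
        # down-weight easy examples by (1 - p_t)^gamma, then average over the batch
        return (self.alpha * (1 - pt) ** self.gamma * ce).mean()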
def main():
    global args, logger
    args = parser.parse_args()
    # logger = Logger(add_prefix(args.prefix, 'logs'))
    set_prefix(args.prefix, __file__)
    model = UNet(3, depth=5, in_channels=3)
    print(model)
    print('load unet with depth=5')
    if args.cuda:
        model = DataParallel(model).cuda()
    else:
        raise RuntimeError('there is no gpu')
    # per-element L1 loss: `reduction='none'` replaces the deprecated `reduce=False`
    criterion = nn.L1Loss(reduction='none').cuda()
    print('use l1_loss')
    optimizer = optim.Adam(model.parameters(), lr=args.lr)
    # accelerate training
    cudnn.benchmark = True
    data_loader = get_dataloader()
    # class_names = ['LESION', 'NORMAL']
    # class_names = data_loader.dataset.class_names
    # print(class_names)
    since = time.time()
    print('-' * 10)
    for epoch in range(1, args.epochs + 1):
        train(data_loader, model, optimizer, criterion, epoch)
        if epoch % 40 == 0:
            validate(model, epoch, data_loader)
    time_elapsed = time.time() - since
    print('Training complete in {:.0f}m {:.0f}s'.format(
        time_elapsed // 60, time_elapsed % 60))
    validate(model, args.epochs, data_loader)
    # save model parameters
    torch.save(model.state_dict(), add_prefix(args.prefix, 'identical_mapping.pkl'))
    # save the run's parameter settings to json
    write(vars(args), add_prefix(args.prefix, 'paras.txt'))
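# With `reduction='none'`, the criterion returns a per-pixel loss map instead of a
# scalar, so the training step can reweight individual pixels before reducing.
# A hypothetical sketch of that pattern; `weighted_l1_step` and its weighting
# scheme are illustrative assumptions, not the project's actual `train` code:
def weighted_l1_step(model, criterion, optimizer, inputs, targets, weights):
    outputs = model(inputs)
    # element-wise |outputs - targets|, same shape as the input batch
    loss_map = criterion(outputs, targets)
    # scale each pixel's loss by its weight, then reduce to a scalar for backprop
    loss = (loss_map * weights).mean()
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
    return loss.item()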