    device=device)

# Create trainer
trainer = Trainer(
    model=model,
    criterion=criterion,
    optimizer=optimizer,
    device=device,
    train_dataset=train_dataset,
    valid_dataset=valid_dataset,
    batch_size=1,
    num_workers=2,
    save_root=save_root,
    exp_name=args.exp_name,
    example_input=example_input,
    enable_save_trace=enable_save_trace,
    schedulers={'lr': lr_sched},
    valid_metrics=valid_metrics,
    preview_batch=preview_batch,
    preview_interval=5,
    inference_kwargs=inference_kwargs,
    hparams=hparams,
    # enable_videos=True,  # Uncomment to enable videos in tensorboard
    out_channels=out_channels,
    ipython_shell=args.ipython,
    # extra_save_steps=range(0, max_steps, 10_000),
    # mixed_precision=True,  # Enable to use Apex for mixed precision training
)

if args.deterministic:
    assert trainer.num_workers <= 1, 'num_workers > 1 introduces indeterministic behavior'
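# Note: the assertion above only guards the DataLoader worker count. Full
# determinism typically also requires explicit RNG seeding and disabling cuDNN
# autotuning. A minimal sketch of such a setup (not part of the original
# script; the function name is hypothetical):
import random

import numpy as np
import torch


def set_deterministic(seed: int = 0) -> None:
    """Seed the Python, NumPy and PyTorch RNGs and force deterministic cuDNN."""
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)  # Also seeds the CUDA RNGs on current PyTorch versions
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False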
criterion = CombinedLoss([crossentropy, dice], weight=[0.5, 0.5], device=device)

# Create trainer
trainer = Trainer(
    model=model,
    criterion=criterion,
    optimizer=optimizer,
    device=device,
    train_dataset=train_dataset,
    valid_dataset=valid_dataset,
    batchsize=1,
    num_workers=1,
    save_root=save_root,
    exp_name=args.exp_name,
    example_input=example_input,
    enable_save_trace=enable_save_trace,
    schedulers={'lr': lr_sched},
    valid_metrics=valid_metrics,
    preview_batch=preview_batch,
    preview_interval=5,
    # enable_videos=True,  # Uncomment to enable videos in tensorboard
    offset=train_dataset.offset,
    apply_softmax_for_prediction=True,
    num_classes=train_dataset.num_classes,
    # TODO: Tune these:
    preview_tile_shape=(32, 64, 64),
    preview_overlap_shape=(32, 64, 64),
    # mixed_precision=True,  # Enable to use Apex for mixed precision training
)

# Archiving training script, src folder, env info
Backup(script_path=__file__, save_path=trainer.save_path).archive_backup()
]:
    valid_metrics[f'val_{evaluator.name}_mean'] = evaluator()  # Mean metrics
    for c in range(out_channels):
        valid_metrics[f'val_{evaluator.name}_c{c}'] = evaluator(c)

criterion = nn.CrossEntropyLoss().to(device)

# Create trainer
trainer = Trainer(
    model=model,
    criterion=criterion,
    optimizer=optimizer,
    device=device,
    train_dataset=train_dataset,
    valid_dataset=valid_dataset,
    batch_size=batch_size,
    num_workers=1,
    save_root=save_root,
    exp_name=args.exp_name,
    save_jit='script',
    schedulers={"lr": lr_sched},
    valid_metrics=valid_metrics,
    out_channels=out_channels,
)

# Archiving training script, src folder, env info
bk = Backup(script_path=__file__, save_path=trainer.save_path).archive_backup()

# Start training
trainer.run(max_steps)
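# For reference, a hedged sketch of how a valid_metrics dict like the one above
# can be assembled. The loop header was cut off in this excerpt; the evaluator
# list and the `metrics` import path below are illustrative assumptions (classes
# that are callable with an optional class index and expose a `.name`), not the
# original code.
from elektronn3.training import metrics

valid_metrics = {}
for evaluator in [metrics.Accuracy, metrics.Precision, metrics.Recall, metrics.DSC, metrics.IoU]:
    valid_metrics[f'val_{evaluator.name}_mean'] = evaluator()  # Mean over all classes
    for c in range(out_channels):
        valid_metrics[f'val_{evaluator.name}_c{c}'] = evaluator(c)  # Per-class value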
valid_metrics = {}

criterion = HybridDiceLoss()

# Create trainer
trainer = Trainer(
    model=model,
    criterion=criterion,
    optimizer=optimizer,
    device=device,
    train_dataset=train_dataset,
    valid_dataset=valid_dataset,
    batchsize=4,
    num_workers=8,
    save_root=save_root,
    exp_name=args.exp_name,
    example_input=example_input,
    enable_save_trace=enable_save_trace,
    schedulers=schedulers,
    valid_metrics=valid_metrics,
    enable_videos=False,  # Set to True to enable videos in tensorboard
    offset=train_dataset.offset,
    apply_softmax_for_prediction=True,
    num_classes=train_dataset.num_classes,
    ipython_shell=False,
)

# Archiving training script, src folder, env info
Backup(script_path=__file__, save_path=trainer.save_path).archive_backup()

# Start training
# Create trainer
trainer = Trainer(
    model=model,
    criterion=criterion,
    optimizer=optimizer,
    device=device,
    train_dataset=train_dataset,
    valid_dataset=valid_dataset,
    batchsize=1,
    num_workers=1,
    save_root=save_root,
    exp_name=args.exp_name,
    example_input=example_input,
    enable_save_trace=enable_save_trace,
    schedulers=schedulers,  # {"lr": optim.lr_scheduler.StepLR(optimizer, 1000, 0.995)},
    valid_metrics=valid_metrics,
    # preview_batch=preview_batch,
    # preview_interval=5,
    enable_videos=False,  # Set to True to enable videos in tensorboard
    offset=train_dataset.offset,
    apply_softmax_for_prediction=True,
    num_classes=train_dataset.num_classes,
    ipython_shell=False,
    # TODO: Tune these:
    # preview_tile_shape=(48, 96, 96),
    # preview_overlap_shape=(48, 96, 96),
    # sample_plotting_handler=handlers._tb_log_sample_images_Synapse,
    # mixed_precision=True,  # Enable to use Apex for mixed precision training
)
    weight_decay=0.5e-4, lr=lr, amsgrad=True)
lr_sched = optim.lr_scheduler.StepLR(optimizer, lr_stepsize, lr_dec)

# criterion = LovaszLoss().to(device)
criterion = DiceLoss().to(device)

# Create and run trainer
trainer = Trainer(
    model=model,
    criterion=criterion,
    optimizer=optimizer,
    device=device,
    train_dataset=train_dataset,
    valid_dataset=valid_dataset,
    batchsize=batch_size,
    num_workers=2,
    save_root=save_root,
    exp_name=args.exp_name,
    schedulers={"lr": lr_sched},
    ipython_shell=False,
    mixed_precision=False,  # Enable to use Apex for mixed precision training
)

# Archiving training script, src folder, env info
bk = Backup(script_path=__file__, save_path=trainer.save_path).archive_backup()

trainer.run(max_steps)
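# The first line of the snippet above is the tail of the optimizer construction,
# which was cut off in this excerpt. A plausible form of that call (an
# assumption, not the original line), consistent with the visible keyword
# arguments:
import torch.optim as optim

optimizer = optim.Adam(model.parameters(), lr=lr, weight_decay=0.5e-4, amsgrad=True)
# The StepLR scheduler above then multiplies the learning rate by lr_dec every
# lr_stepsize scheduler steps.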
}

# Class weights for imbalanced dataset
class_weights = torch.tensor([0.2653, 0.7347])
# criterion = nn.CrossEntropyLoss(weight=class_weights)
criterion = DiceLoss()

# Create trainer
trainer = Trainer(
    model=model,
    criterion=criterion,
    optimizer=optimizer,
    device=device,
    train_dataset=train_dataset,
    valid_dataset=valid_dataset,
    batchsize=batch_size,
    num_workers=2,
    save_root=save_root,
    exp_name=args.exp_name,
    schedulers={"lr": lr_sched},
    valid_metrics=valid_metrics,
)

# Archiving training script, src folder, env info
Backup(script_path=__file__, save_path=trainer.save_path).archive_backup()

# Start training
trainer.train(max_steps=max_steps, max_runtime=max_runtime)

# How to re-calculate mean, std and class_weights for other datasets:
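# The recipe announced by the trailing comment above was cut off in this
# excerpt. A minimal sketch (an assumption, not the original code) of one common
# way to get these values: per-dataset mean/std from the raw data and
# inverse-frequency class weights from the labels. `raw_volume` and
# `label_volume` are hypothetical numpy arrays.
import numpy as np
import torch

dataset_mean = float(raw_volume.mean())
dataset_std = float(raw_volume.std())

counts = np.bincount(label_volume.reshape(-1), minlength=2)
inv_freq = counts.sum() / counts  # Rarer classes get larger weights
class_weights = torch.tensor(inv_freq / inv_freq.sum(), dtype=torch.float32)
# For a roughly 73.5% / 26.5% label split, the normalized inverse frequencies
# come out to about [0.2653, 0.7347], matching the hard-coded weights above.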
crossentropy = nn.CrossEntropyLoss()  # weight=torch.tensor((0.2, 0.8)))
dice = DiceLoss()  # weight=torch.tensor((0.2, 0.8)), apply_softmax=True)
criterion = CombinedLoss([crossentropy, dice], weight=[0.5, 0.5], device=device)

# Create trainer
trainer = Trainer(
    model=model,
    criterion=criterion,
    optimizer=optimizer,
    device=device,
    train_dataset=train_dataset,
    valid_dataset=valid_dataset,
    batchsize=1,
    num_workers=1,
    save_root=save_root,
    exp_name=args.exp_name,
    schedulers={'lr': lr_sched},
    valid_metrics=valid_metrics,
    enable_videos=True,
    offset=train_dataset.offset,
    apply_softmax_for_prediction=True,
    num_classes=train_dataset.num_classes,
)

# Archiving training script, src folder, env info
Backup(script_path=__file__, save_path=trainer.save_path).archive_backup()

# Start training
trainer.run(max_steps=max_steps, max_runtime=max_runtime)
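# For clarity, a hedged sketch of what a weighted combination like
# CombinedLoss([crossentropy, dice], weight=[0.5, 0.5]) computes: a weighted sum
# of the wrapped criteria evaluated on the same (output, target) pair. This
# illustrates the idea only; it is not elektronn3's implementation, and the
# class name below is hypothetical.
import torch
import torch.nn as nn


class WeightedSumLoss(nn.Module):
    """Combine criteria as sum_i weight_i * criterion_i(output, target)."""

    def __init__(self, criteria, weight):
        super().__init__()
        self.criteria = nn.ModuleList(criteria)
        self.register_buffer('weight', torch.as_tensor(weight, dtype=torch.float32))

    def forward(self, output, target):
        losses = torch.stack([crit(output, target) for crit in self.criteria])
        return (self.weight * losses).sum()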