def set_up_training(project_directory, config, data_config, load_pretrained_model):
    # Get model
    if load_pretrained_model:
        model = Trainer().load(from_directory=project_directory,
                               filename='Weights/checkpoint.pytorch').model
    else:
        model_name = config.get('model_name')
        model = getattr(models, model_name)(**config.get('model_kwargs'))

    criterion = SorensenDiceLoss()
    loss_train = LossWrapper(criterion=criterion,
                             transforms=Compose(ApplyAndRemoveMask(), InvertTarget()))
    loss_val = LossWrapper(criterion=criterion,
                           transforms=Compose(RemoveSegmentationFromTarget(),
                                              ApplyAndRemoveMask(), InvertTarget()))

    # Build trainer and validation metric
    logger.info("Building trainer.")
    smoothness = 0.95

    offsets = data_config['volume_config']['segmentation']['affinity_config']['offsets']
    metric = ArandErrorFromMulticut(average_slices=False,
                                    use_2d_ws=True,
                                    n_threads=8,
                                    weight_edges=True,
                                    offsets=offsets)

    trainer = Trainer(model)\
        .save_every((1000, 'iterations'),
                    to_directory=os.path.join(project_directory, 'Weights'))\
        .build_criterion(loss_train)\
        .build_validation_criterion(loss_val)\
        .build_optimizer(**config.get('training_optimizer_kwargs'))\
        .evaluate_metric_every('never')\
        .validate_every((100, 'iterations'), for_num_iterations=1)\
        .register_callback(SaveAtBestValidationScore(smoothness=smoothness, verbose=True))\
        .build_metric(metric)\
        .register_callback(AutoLR(factor=0.98,
                                  patience='100 iterations',
                                  monitor_while='validating',
                                  monitor_momentum=smoothness,
                                  consider_improvement_with_respect_to='previous'))\
        .register_callback(GarbageCollection())

    logger.info("Building logger.")
    # Build logger
    tensorboard = TensorboardLogger(log_scalars_every=(1, 'iteration'),
                                    log_images_every=(100, 'iterations'),
                                    log_histograms_every='never').observe_states(
        ['validation_input', 'validation_prediction', 'validation_target'],
        observe_while='validating'
    )

    trainer.build_logger(tensorboard, log_directory=os.path.join(project_directory, 'Logs'))
    return trainer
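# Import sketch (assumption, not part of the snippet above): the functions in this
# file rely on inferno and neurofire; a plausible import block for the function
# above could look roughly like this. Exact module paths may differ between
# inferno/neurofire versions, and GarbageCollection in particular may live elsewhere.
import os
import logging
import torch

from inferno.trainers.basic import Trainer
from inferno.trainers.callbacks.logging.tensorboard import TensorboardLogger
from inferno.trainers.callbacks.scheduling import AutoLR
from inferno.trainers.callbacks.essentials import SaveAtBestValidationScore, GarbageCollection
from inferno.extensions.criteria import SorensenDiceLoss
from inferno.io.transform.base import Compose

import neurofire.models as models
from neurofire.criteria.loss_wrapper import LossWrapper
from neurofire.criteria.loss_transforms import (ApplyAndRemoveMask, InvertTarget,
                                                RemoveSegmentationFromTarget)
from neurofire.metrics.arand import ArandErrorFromMulticut

logger = logging.getLogger(__name__)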
def set_up_training(project_directory, config, data_config, criterion, balance, load_pretrained_model):
    # Get model
    if load_pretrained_model:
        model = Trainer().load(from_directory=project_directory,
                               filename='Weights/checkpoint.pytorch').model
    else:
        model_name = config.get('model_name')
        model = getattr(models, model_name)(**config.get('model_kwargs'))

    # TODO
    logger.info("Using criterion: %s" % criterion)

    # TODO this should go somewhere more prominent
    affinity_offsets = data_config['volume_config']['segmentation']['affinity_offsets']

    # TODO implement affinities on gpu again ?!
    criterion = CRITERIA[criterion]
    loss = LossWrapper(criterion=criterion(),
                       transforms=Compose(MaskTransitionToIgnoreLabel(affinity_offsets),
                                          RemoveSegmentationFromTarget(),
                                          InvertTarget()),
                       weight_function=BalanceAffinities(ignore_label=0,
                                                         offsets=affinity_offsets) if balance else None)

    # Build trainer and validation metric
    logger.info("Building trainer.")
    smoothness = 0.95

    # use multicut pipeline for validation
    metric = ArandErrorFromSegmentationPipeline(
        local_affinity_multicut_from_wsdt2d(n_threads=10, time_limit=120))

    trainer = Trainer(model)\
        .save_every((1000, 'iterations'),
                    to_directory=os.path.join(project_directory, 'Weights'))\
        .build_criterion(loss)\
        .build_optimizer(**config.get('training_optimizer_kwargs'))\
        .evaluate_metric_every('never')\
        .validate_every((100, 'iterations'), for_num_iterations=1)\
        .register_callback(SaveAtBestValidationScore(smoothness=smoothness, verbose=True))\
        .build_metric(metric)\
        .register_callback(AutoLR(factor=0.98,
                                  patience='100 iterations',
                                  monitor_while='validating',
                                  monitor_momentum=smoothness,
                                  consider_improvement_with_respect_to='previous'))

    logger.info("Building logger.")
    # Build logger
    tensorboard = TensorboardLogger(log_scalars_every=(1, 'iteration'),
                                    log_images_every=(100, 'iterations')).observe_states(
        ['validation_input', 'validation_prediction', 'validation_target'],
        observe_while='validating'
    )

    trainer.build_logger(tensorboard, log_directory=os.path.join(project_directory, 'Logs'))
    return trainer
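# Sketch (assumption, not part of the snippet above): CRITERIA is not defined here.
# From its usage (looked up by name, then instantiated without arguments), it
# presumably maps criterion names to loss classes; the keys below are purely
# illustrative placeholders.
CRITERIA = {
    'SorensenDice': SorensenDiceLoss,
    'BCE': torch.nn.BCELoss,
}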
def set_up_training(project_directory, config, data_config, load_pretrained_model):
    # Get model
    if load_pretrained_model:
        model = Trainer().load(from_directory=project_directory,
                               filename='Weights/checkpoint.pytorch').model
    else:
        model_name = config.get('model_name')
        model = getattr(models, model_name)(**config.get('model_kwargs'))

    affinity_offsets = data_config['volume_config']['segmentation']['affinity_offsets']
    loss = MultiOutputLossWrapper(criterion=SorensenDiceLoss(),
                                  transforms=Compose(MaskTransitionToIgnoreLabel(affinity_offsets),
                                                     RemoveSegmentationFromTarget(),
                                                     InvertTarget()))

    # Build trainer and validation metric
    logger.info("Building trainer.")
    smoothness = 0.95

    # use multicut pipeline for validation
    # metric = ArandErrorFromSegmentationPipeline(local_affinity_multicut_from_wsdt2d(n_threads=10,
    #                                                                                 time_limit=120))
    # use damws for validation
    stride = [2, 10, 10]
    metric = ArandErrorFromSegmentationPipeline(DamWatershed(affinity_offsets, stride,
                                                             randomize_bounds=False))

    trainer = Trainer(model)\
        .save_every((1000, 'iterations'),
                    to_directory=os.path.join(project_directory, 'Weights'))\
        .build_criterion(loss)\
        .build_optimizer(**config.get('training_optimizer_kwargs'))\
        .evaluate_metric_every('never')\
        .validate_every((100, 'iterations'), for_num_iterations=1)\
        .register_callback(SaveAtBestValidationScore(smoothness=smoothness, verbose=True))\
        .build_metric(metric)\
        .register_callback(AutoLR(factor=0.98,
                                  patience='100 iterations',
                                  monitor_while='validating',
                                  monitor_momentum=smoothness,
                                  consider_improvement_with_respect_to='previous'))

    # FIXME some issues with conda tf for torch0.3 env
    # logger.info("Building logger.")
    # # Build logger
    # tensorboard = TensorboardLogger(log_scalars_every=(1, 'iteration'),
    #                                 log_images_every=(100, 'iterations')).observe_states(
    #     ['validation_input', 'validation_prediction', 'validation_target'],
    #     observe_while='validating'
    # )
    # trainer.build_logger(tensorboard, log_directory=os.path.join(project_directory, 'Logs'))
    return trainer
def set_up_training(project_directory, config, data_config):
    # Get model
    model_name = config.get('model_name')
    model = getattr(models, model_name)(**config.get('model_kwargs'))

    criterion = SorensenDiceLoss()
    loss_train = LossWrapper(criterion=criterion,
                             transforms=InvertTarget())
    loss_val = LossWrapper(criterion=criterion,
                           transforms=Compose(RemoveSegmentationFromTarget(), InvertTarget()))

    # Build trainer and validation metric
    logger.info("Building trainer.")
    smoothness = 0.75

    offsets = data_config['volume_config']['segmentation']['affinity_config']['offsets']
    strides = [1, 10, 10]
    metric = ArandErrorFromMWS(average_slices=False,
                               offsets=offsets,
                               strides=strides,
                               randomize_strides=False)

    trainer = Trainer(model)\
        .save_every((1000, 'iterations'),
                    to_directory=os.path.join(project_directory, 'Weights'))\
        .build_criterion(loss_train)\
        .build_validation_criterion(loss_val)\
        .build_optimizer(**config.get('training_optimizer_kwargs'))\
        .evaluate_metric_every('never')\
        .validate_every((100, 'iterations'), for_num_iterations=1)\
        .register_callback(SaveAtBestValidationScore(smoothness=smoothness, verbose=True))\
        .build_metric(metric)\
        .register_callback(AutoLR(factor=0.99,
                                  patience='100 iterations',
                                  monitor_while='validating',
                                  monitor_momentum=smoothness,
                                  consider_improvement_with_respect_to='previous'))

    logger.info("Building logger.")
    # Build logger
    tensorboard = TensorboardLogger(log_scalars_every=(1, 'iteration'),
                                    log_images_every=(100, 'iterations'),
                                    log_histograms_every='never').observe_states(
        ['validation_input', 'validation_prediction', 'validation_target'],
        observe_while='validating'
    )

    trainer.build_logger(tensorboard, log_directory=os.path.join(project_directory, 'Logs'))
    return trainer
def set_up_training(project_directory, config, data_config, load_pretrained_model):
    # Get model
    if load_pretrained_model:
        model = Trainer().load(from_directory=project_directory,
                               filename='Weights/checkpoint.pytorch').model
    else:
        model_name = config.get('model_name')
        model = getattr(models, model_name)(**config.get('model_kwargs'))

    loss = dice_loss()
    loss_val = dice_loss(is_val=True)
    metric = mws_metric()
    # metric = loss_val

    # Build trainer and validation metric
    logger.info("Building trainer.")
    smoothness = 0.9

    trainer = Trainer(model)\
        .save_every((1000, 'iterations'),
                    to_directory=os.path.join(project_directory, 'Weights'))\
        .build_criterion(loss)\
        .build_validation_criterion(loss_val)\
        .build_optimizer(**config.get('training_optimizer_kwargs'))\
        .evaluate_metric_every('never')\
        .validate_every((100, 'iterations'), for_num_iterations=5)\
        .register_callback(SaveAtBestValidationScore(smoothness=smoothness, verbose=True))\
        .build_metric(metric)\
        .register_callback(AutoLR(factor=0.98,
                                  patience='100 iterations',
                                  monitor_while='validating',
                                  monitor_momentum=smoothness,
                                  consider_improvement_with_respect_to='previous'))\
        .register_callback(GarbageCollection())

    logger.info("Building logger.")
    # Build logger
    tensorboard = TensorboardLogger(log_scalars_every=(1, 'iteration'),
                                    log_images_every=(100, 'iterations'),
                                    log_histograms_every='never').observe_states(
        ['validation_input', 'validation_prediction', 'validation_target'],
        observe_while='validating'
    )

    trainer.build_logger(tensorboard, log_directory=os.path.join(project_directory, 'Logs'))
    return trainer
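# Sketch (assumption): the dice_loss and mws_metric factories used above are defined
# elsewhere. If they mirror the explicit constructions in the other snippets in this
# file, dice_loss could look roughly like this; mws_metric would wrap ArandErrorFromMWS
# in the same spirit.
def dice_loss(is_val=False):
    criterion = SorensenDiceLoss()
    trafos = [ApplyAndRemoveMask(), InvertTarget()]
    if is_val:
        # the validation target additionally carries the segmentation,
        # which must be stripped before the loss is applied
        trafos = [RemoveSegmentationFromTarget()] + trafos
    return LossWrapper(criterion=criterion, transforms=Compose(*trafos))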
if not os.path.exists(logs_dir):
    os.mkdir(logs_dir)
log_info('Logs will be saved to %s' % (logs_dir))

# Build trainer
trainer = Trainer(model) \
    .build_criterion('CrossEntropyLoss') \
    .build_metric('CategoricalError') \
    .build_optimizer('Adam', lr=args.lr, betas=(0.9, 0.999), eps=1e-08) \
    .validate_every((1, 'epochs')) \
    .save_every((5, 'epochs')) \
    .save_to_directory(model_dir) \
    .set_max_num_epochs(10000) \
    .register_callback(GradChecker()) \
    .register_callback(AutoLR(0.96, (1, 'epochs'),
                              monitor_momentum=0.9,
                              monitor_while='validating',
                              consider_improvement_with_respect_to='best'))\
    .build_logger(TensorboardLogger(log_scalars_every=(1, 'iteration'),
                                    log_images_every=(1, 'epoch')),
                  log_directory=logs_dir)

# Bind loaders
trainer \
    .bind_loader('train', train_dl) \
    .bind_loader('validate', test_dl)

if torch.cuda.is_available():
    trainer.cuda()

trainer.fit()
def log_histogram(self, tag, values, bins=1000):
    pass

# Disable histogram logging by replacing the logger's log_histogram with a no-op
logger.log_histogram = log_histogram

trainer = Trainer(model)\
    .build_criterion('CrossEntropyLoss') \
    .build_metric('CategoricalError') \
    .build_optimizer('Adam', weight_decay=args.regularization) \
    .evaluate_metric_every((10, 'iterations')) \
    .validate_every((1, 'epochs')) \
    .save_every((1, 'epochs')) \
    .save_to_directory(weight_dir) \
    .set_max_num_epochs(10000) \
    .build_logger(logger, log_directory=logs_dir) \
    .register_callback(AutoLR(0.9, (1, 'epochs'),
                              consider_improvement_with_respect_to='previous'))
# .register_callback(AutoLR(0.99, (100, 'epochs'), monitor_momentum=0.95,
#                           monitor_while='validating',
#                           consider_improvement_with_respect_to='best'))

# Bind loaders
trainer \
    .bind_loader('train', train_dl) \
    .bind_loader('validate', test_dl)

if torch.cuda.is_available():
    trainer.cuda()

trainer.fit()
def set_up_training(project_directory, config, data_config, load_pretrained_model):
    # Get model
    if load_pretrained_model:
        model = Trainer().load(from_directory=project_directory,
                               filename='Weights/checkpoint.pytorch').model
    else:
        model_name = config.get('model_name')
        model = getattr(models, model_name)(**config.get('model_kwargs'))

    affinity_offsets = data_config['volume_config']['segmentation']['affinity_offsets']

    # NOTE invert target is done in the multiscale loss
    loss = LossWrapper(criterion=SorensenDiceLoss(),
                       transforms=Compose(MaskTransitionToIgnoreLabel(affinity_offsets),
                                          RemoveSegmentationFromTarget()))
    scaling_factors = 3 * [(1, 3, 3)]
    multiscale_loss = MultiScaleLossMaxPool(loss,
                                            scaling_factors,
                                            invert_target=True,
                                            retain_segmentation=True)

    # Build trainer and validation metric
    logger.info("Building trainer.")
    smoothness = 0.95

    # use multicut pipeline for validation
    # TODO fix nifty weighting schemes
    metric = ArandErrorFromSegmentationPipeline(
        local_affinity_multicut_from_wsdt2d(n_threads=10,
                                            weighting_scheme=None,
                                            time_limit=120),
        is_multiscale=True)

    trainer = Trainer(model)\
        .save_every((1000, 'iterations'),
                    to_directory=os.path.join(project_directory, 'Weights'))\
        .build_criterion(multiscale_loss)\
        .build_optimizer(**config.get('training_optimizer_kwargs'))\
        .evaluate_metric_every('never')\
        .validate_every((100, 'iterations'), for_num_iterations=1)\
        .register_callback(SaveAtBestValidationScore(smoothness=smoothness, verbose=True))\
        .build_metric(metric)\
        .register_callback(AutoLR(factor=0.98,
                                  patience='100 iterations',
                                  monitor_while='validating',
                                  monitor_momentum=smoothness,
                                  consider_improvement_with_respect_to='previous'))

    logger.info("Building logger.")
    # Build logger
    tensorboard = TensorboardLogger(log_scalars_every=(1, 'iteration'),
                                    log_images_every=(100, 'iterations')).observe_states(
        ['validation_input', 'validation_prediction', 'validation_target'],
        observe_while='validating'
    )

    trainer.build_logger(tensorboard, log_directory=os.path.join(project_directory, 'Logs'))
    return trainer
smoothness = 0.001

trainer = Trainer(model)
trainer.build_criterion(LossWrapper(p0=p0, p1=p1))
trainer.build_optimizer('Adam')  # , lr=0.0001)
trainer.validate_every((1, 'epochs'))
# trainer.save_every((4, 'epochs'))
trainer.save_to_directory(SAVE_DIRECTORY)
trainer.set_max_num_epochs(200)
trainer.register_callback(
    SaveAtBestValidationScore(smoothness=smoothness, verbose=True))
trainer.register_callback(
    AutoLR(factor=0.5,
           patience='1 epochs',
           monitor_while='validating',
           monitor='validation_loss',
           monitor_momentum=smoothness,
           consider_improvement_with_respect_to='previous',
           verbose=True))
trainer.register_callback(TQDMProgressBar())

# Bind loaders
train_loader = torch.utils.data.DataLoader(dataset=bsd_train, num_workers=8)
val_loader = torch.utils.data.DataLoader(dataset=bsd_val, num_workers=8)
num_inputs = bsd_train.num_inputs()
num_targets = bsd_train.num_targets()

# resume training state from an existing checkpoint
trainer.load()
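# Continuation sketch (assumption): the snippet above builds the data loaders but
# does not bind them. Following the pattern of the other training scripts in this
# file, the remaining steps would look roughly like this:
trainer.bind_loader('train', train_loader)
trainer.bind_loader('validate', val_loader)
if torch.cuda.is_available():
    trainer.cuda()
trainer.fit()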
trainer = Trainer(model)
trainer = trainer\
    .build_criterion('BCELoss') \
    .build_metric('CategoricalError') \
    .build_optimizer('Adam', lr=args.lr) \
    .validate_every((2, 'epochs')) \
    .save_every((5, 'epochs')) \
    .save_to_directory(weight_dir) \
    .set_max_num_epochs(10000) \
    .build_logger(logger, log_directory=logs_dir)

if args.flr:
    trainer = trainer.register_callback(
        AutoLR(args.decey, (1, 'epochs'),
               monitor_momentum=0.9,
               monitor_while='validating',
               consider_improvement_with_respect_to='best'))
else:
    trainer = trainer.register_callback(
        AutoLR(0.9, (1, 'epochs'),
               consider_improvement_with_respect_to='previous'))

if args.init_model_path != '':
    init_trainer = Trainer(model)
    if torch.cuda.is_available():
        init_trainer = init_trainer.load(
            from_directory=args.init_model_path, best=True)
    else:
        init_trainer = init_trainer.load(
            from_directory=args.init_model_path, best=True,
def set_up_training(project_directory, config):
    # Load the model to train from the configuration file ('./config/train_config.yml')
    model_name = config.get('model_name')
    model = getattr(models, model_name)(**config.get('model_kwargs'))

    # Initialize the loss: we use the SorensenDiceLoss, which has the nice property
    # of being fairly robust for unbalanced targets.
    criterion = SorensenDiceLoss()

    # Wrap the loss to apply additional transformations before the actual loss is
    # computed. During training, we apply the mask to the target and invert the
    # target (necessary for the Sorensen-Dice loss). During validation, we
    # additionally remove the segmentation from the target before the loss is
    # applied; the segmentation is only kept in the validation target so that the
    # validation metric can be computed from it.
    loss_train = LossWrapper(criterion=criterion,
                             transforms=Compose(ApplyAndRemoveMask(), InvertTarget()))
    loss_val = LossWrapper(criterion=criterion,
                           transforms=Compose(RemoveSegmentationFromTarget(),
                                              ApplyAndRemoveMask(), InvertTarget()))

    # Build the validation metric: we validate by running connected components on
    # the affinities for several thresholds.
    # metric = ArandErrorFromConnectedComponentsOnAffinities(thresholds=[.5, .6, .7, .8, .9],
    #                                                        invert_affinities=True)
    metric = ArandErrorFromConnectedComponents(thresholds=[.5, .6, .7, .8, .9],
                                               invert_input=True,
                                               average_input=True)

    logger.info("Building trainer.")
    smoothness = 0.95

    # Build the trainer object
    trainer = Trainer(model)\
        .save_every((1000, 'iterations'),
                    to_directory=os.path.join(project_directory, 'Weights'))\
        .build_criterion(loss_train)\
        .build_validation_criterion(loss_val)\
        .build_optimizer(**config.get('training_optimizer_kwargs'))\
        .evaluate_metric_every('never')\
        .validate_every((100, 'iterations'), for_num_iterations=1)\
        .register_callback(SaveAtBestValidationScore(smoothness=smoothness, verbose=True))\
        .build_metric(metric)\
        .register_callback(AutoLR(factor=0.98,
                                  patience='100 iterations',
                                  monitor_while='validating',
                                  monitor_momentum=smoothness,
                                  consider_improvement_with_respect_to='previous'))
    # .register_callback(DumpHDF5Every(frequency='99 iterations',
    #                                  to_directory=os.path.join(project_directory, 'debug')))

    logger.info("Building logger.")
    # Build tensorboard logger
    tensorboard = TensorboardLogger(log_scalars_every=(1, 'iteration'),
                                    log_images_every=(100, 'iterations')).observe_states(
        ['validation_input', 'validation_prediction', 'validation_target'],
        observe_while='validating'
    )

    trainer.build_logger(tensorboard, log_directory=os.path.join(project_directory, 'Logs'))
    return trainer
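# Usage sketch (assumption): a minimal driver for the set_up_training function above.
# The config path, the project directory, and the train_loader / validation_loader
# objects are placeholders and not taken from the original code; the bind_loader /
# cuda / fit pattern follows the other scripts in this file.
import yaml
import torch

with open('./config/train_config.yml') as f:
    config = yaml.safe_load(f)

trainer = set_up_training('./project', config)
trainer.set_max_num_epochs(10000)
trainer.bind_loader('train', train_loader)
trainer.bind_loader('validate', validation_loader)
if torch.cuda.is_available():
    trainer.cuda()
trainer.fit()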