def test_no_neptune_client(no_site_packages):
    with pytest.raises(
        RuntimeError, match=r"This contrib module requires neptune-client to be installed."
    ):
        NeptuneLogger()
def test_integration_as_context_manager():
    n_epochs = 5
    data = list(range(50))
    losses = torch.rand(n_epochs * len(data))
    losses_iter = iter(losses)

    def update_fn(engine, batch):
        return next(losses_iter)

    with NeptuneLogger(offline_mode=True) as npt_logger:
        trainer = Engine(update_fn)

        def dummy_handler(engine, logger, event_name):
            global_step = engine.state.get_event_attrib_value(event_name)
            logger.log_metric("test_value", global_step, global_step)

        npt_logger.attach(trainer, log_handler=dummy_handler, event_name=Events.EPOCH_COMPLETED)

        trainer.run(data, max_epochs=n_epochs)
def run(train_batch_size, val_batch_size, epochs, lr, momentum):
    train_loader, val_loader = get_data_loaders(train_batch_size, val_batch_size)
    model = Net()
    device = "cpu"

    if torch.cuda.is_available():
        device = "cuda"

    model.to(device)  # Move model before creating optimizer
    optimizer = SGD(model.parameters(), lr=lr, momentum=momentum)
    criterion = nn.CrossEntropyLoss()
    trainer = create_supervised_trainer(model, optimizer, criterion, device=device)
    trainer.logger = setup_logger("Trainer")

    metrics = {"accuracy": Accuracy(), "loss": Loss(criterion)}

    train_evaluator = create_supervised_evaluator(model, metrics=metrics, device=device)
    train_evaluator.logger = setup_logger("Train Evaluator")
    validation_evaluator = create_supervised_evaluator(model, metrics=metrics, device=device)
    validation_evaluator.logger = setup_logger("Val Evaluator")

    @trainer.on(Events.EPOCH_COMPLETED)
    def compute_metrics(engine):
        train_evaluator.run(train_loader)
        validation_evaluator.run(val_loader)

    npt_logger = NeptuneLogger(
        api_token="ANONYMOUS",
        project_name="shared/pytorch-ignite-integration",
        name="ignite-mnist-example",
        params={
            "train_batch_size": train_batch_size,
            "val_batch_size": val_batch_size,
            "epochs": epochs,
            "lr": lr,
            "momentum": momentum,
        },
    )

    npt_logger.attach_output_handler(
        trainer,
        event_name=Events.ITERATION_COMPLETED(every=100),
        tag="training",
        output_transform=lambda loss: {"batchloss": loss},
    )

    for tag, evaluator in [("training", train_evaluator), ("validation", validation_evaluator)]:
        npt_logger.attach_output_handler(
            evaluator,
            event_name=Events.EPOCH_COMPLETED,
            tag=tag,
            metric_names=["loss", "accuracy"],
            global_step_transform=global_step_from_engine(trainer),
        )

    npt_logger.attach_opt_params_handler(
        trainer, event_name=Events.ITERATION_COMPLETED(every=100), optimizer=optimizer
    )

    npt_logger.attach(
        trainer, log_handler=WeightsScalarHandler(model), event_name=Events.ITERATION_COMPLETED(every=100)
    )

    npt_logger.attach(
        trainer, log_handler=GradsScalarHandler(model), event_name=Events.ITERATION_COMPLETED(every=100)
    )

    def score_function(engine):
        return engine.state.metrics["accuracy"]

    handler = Checkpoint(
        {"model": model},
        NeptuneSaver(npt_logger),
        n_saved=2,
        filename_prefix="best",
        score_function=score_function,
        score_name="validation_accuracy",
        global_step_transform=global_step_from_engine(trainer),
    )
    validation_evaluator.add_event_handler(Events.COMPLETED, handler)

    # kick everything off
    trainer.run(train_loader, max_epochs=epochs)

    npt_logger.close()
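

# Minimal usage sketch: how run() above might be invoked. The hyperparameter values
# here are illustrative assumptions, not taken from the original example (the full
# example presumably reads them from the command line).
if __name__ == "__main__":
    run(train_batch_size=64, val_batch_size=1000, epochs=10, lr=0.01, momentum=0.5)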