Example #1
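These snippets appear to come from pytorch-ignite's ClearML logger test suite. The listing omits the shared imports; a minimal block that makes the test snippets runnable (module path is an assumption; recent ignite releases expose the handlers from ignite.handlers.clearml_logger rather than ignite.contrib.handlers.clearml_logger):

import math
from unittest.mock import ANY, MagicMock, call

import pytest
import torch

# Assumed module path; see the note above for newer ignite releases.
from ignite.contrib.handlers.clearml_logger import ClearMLLogger, WeightsScalarHandler
from ignite.engine import Events, State
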
def test_weights_scalar_handler_wrong_setup():

    with pytest.raises(
            TypeError,
            match="Argument model should be of type torch.nn.Module"):
        WeightsScalarHandler(None)

    model = MagicMock(spec=torch.nn.Module)
    with pytest.raises(TypeError,
                       match="Argument reduction should be callable"):
        WeightsScalarHandler(model, reduction=123)

    with pytest.raises(
            TypeError,
            match="Output of the reduction function should be a scalar"):
        WeightsScalarHandler(model, reduction=lambda x: x)

    wrapper = WeightsScalarHandler(model)
    mock_logger = MagicMock()
    mock_engine = MagicMock()
    with pytest.raises(
            RuntimeError,
            match="Handler WeightsScalarHandler works only with ClearMLLogger"
    ):
        wrapper(mock_engine, mock_logger, Events.ITERATION_STARTED)
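
Together these guards exercise the constructor's type checks (the model must be a torch.nn.Module, the reduction must be callable, and its output must be a scalar) plus the runtime check that the handler is only ever invoked with a ClearMLLogger.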
Example #2
def test_weights_scalar_handler_whitelist(dummy_model_factory):
    model = dummy_model_factory()

    wrapper = WeightsScalarHandler(model, whitelist=["fc2.weight"])
    mock_logger = MagicMock(spec=ClearMLLogger)
    mock_logger.clearml_logger = MagicMock()

    mock_engine = MagicMock()
    mock_engine.state = State()
    mock_engine.state.epoch = 5

    wrapper(mock_engine, mock_logger, Events.EPOCH_STARTED)
    mock_logger.clearml_logger.report_scalar.assert_called_once_with(
        title="weights_norm/fc2",
        value=ANY,
        series="weight",
        iteration=mock_engine.state.epoch)
    mock_logger.clearml_logger.report_scalar.reset_mock()

    wrapper = WeightsScalarHandler(model, tag="model", whitelist=["fc1"])
    wrapper(mock_engine, mock_logger, Events.EPOCH_STARTED)

    mock_logger.clearml_logger.report_scalar.assert_has_calls(
        [
            call(title="model/weights_norm/fc1",
                 value=ANY,
                 series="weight",
                 iteration=mock_engine.state.epoch),
            call(title="model/weights_norm/fc1",
                 value=ANY,
                 series="bias",
                 iteration=mock_engine.state.epoch),
        ],
        any_order=True,
    )
    assert mock_logger.clearml_logger.report_scalar.call_count == 2
    mock_logger.clearml_logger.report_scalar.reset_mock()

    def weight_selector(n, _):
        return "bias" in n

    wrapper = WeightsScalarHandler(model,
                                   tag="model",
                                   whitelist=weight_selector)
    wrapper(mock_engine, mock_logger, Events.EPOCH_STARTED)

    mock_logger.clearml_logger.report_scalar.assert_has_calls(
        [
            call(title="model/weights_norm/fc1",
                 value=ANY,
                 series="bias",
                 iteration=mock_engine.state.epoch),
            call(title="model/weights_norm/fc2",
                 value=ANY,
                 series="bias",
                 iteration=mock_engine.state.epoch),
        ],
        any_order=True,
    )
    assert mock_logger.clearml_logger.report_scalar.call_count == 2
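
The dummy_model_factory fixture itself is not part of this listing. Below is a sketch consistent with the values these tests assert: fc1 is zero-initialized so its norms come out as 0.0, while fc2 is a 12x12 linear layer filled with ones, giving a weight norm of sqrt(144) = 12.0 and a bias norm of sqrt(12). The factory signature and flag names are assumptions inferred from the call sites.

import pytest
import torch

@pytest.fixture
def dummy_model_factory():
    class DummyModel(torch.nn.Module):
        def __init__(self):
            super().__init__()
            self.fc1 = torch.nn.Linear(10, 10)
            self.fc2 = torch.nn.Linear(12, 12)
            # fc1 zeroed: its weight and bias norms are asserted as 0.0
            self.fc1.weight.data.zero_()
            self.fc1.bias.data.zero_()
            # fc2 all ones: 12x12 weight norm is sqrt(144) = 12.0, bias norm sqrt(12)
            self.fc2.weight.data.fill_(1.0)
            self.fc2.bias.data.fill_(1.0)

    def factory(with_grads=False, with_frozen_layer=False):
        model = DummyModel()
        if with_frozen_layer:
            # a frozen fc1 is what Example #5 expects the handler to skip
            for p in model.fc1.parameters():
                p.requires_grad = False
        if with_grads:
            for p in model.parameters():
                if p.requires_grad:
                    p.grad = torch.zeros_like(p)
        return model

    return factory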
Example #3
    def _test(tag=None):
        wrapper = WeightsScalarHandler(model, tag=tag)
        mock_logger = MagicMock(spec=ClearMLLogger)
        mock_logger.clearml_logger = MagicMock()

        mock_engine = MagicMock()
        mock_engine.state = State()
        mock_engine.state.epoch = 5

        wrapper(mock_engine, mock_logger, Events.EPOCH_STARTED)

        tag_prefix = f"{tag}/" if tag else ""

        assert mock_logger.clearml_logger.report_scalar.call_count == 4
        mock_logger.clearml_logger.report_scalar.assert_has_calls(
            [
                call(title=tag_prefix + "weights_norm/fc1",
                     series="weight",
                     iteration=5,
                     value=0.0),
                call(title=tag_prefix + "weights_norm/fc1",
                     series="bias",
                     iteration=5,
                     value=0.0),
                call(title=tag_prefix + "weights_norm/fc2",
                     series="weight",
                     iteration=5,
                     value=12.0),
                call(title=tag_prefix + "weights_norm/fc2",
                     series="bias",
                     iteration=5,
                     value=math.sqrt(12.0)),
            ],
            any_order=True,
        )
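
This helper is a fragment of a larger test: the enclosing function presumably builds the model from the same fixture and calls _test both with and without a tag, roughly like this (reconstruction; names are assumed):

def test_weights_scalar_handler(dummy_model_factory):
    model = dummy_model_factory(with_grads=True, with_frozen_layer=False)

    def _test(tag=None):
        ...  # body as shown above

    _test()
    _test(tag="tag")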
Example #4
def test_weights_scalar_handler_wrong_setup():

    model = MagicMock(spec=torch.nn.Module)
    wrapper = WeightsScalarHandler(model)
    mock_logger = MagicMock()
    mock_engine = MagicMock()
    with pytest.raises(
            RuntimeError,
            match="Handler WeightsScalarHandler works only with ClearMLLogger"
    ):
        wrapper(mock_engine, mock_logger, Events.ITERATION_STARTED)
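
Example #4 is a trimmed-down variant of Example #1 that keeps only the final guard: calling the handler with a logger that is not a ClearMLLogger raises a RuntimeError.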
Example #5
def test_weights_scalar_handler_frozen_layers(dummy_model_factory):

    model = dummy_model_factory(with_grads=True, with_frozen_layer=True)

    wrapper = WeightsScalarHandler(model)
    mock_logger = MagicMock(spec=ClearMLLogger)
    mock_logger.clearml_logger = MagicMock()

    mock_engine = MagicMock()
    mock_engine.state = State()
    mock_engine.state.epoch = 5

    wrapper(mock_engine, mock_logger, Events.EPOCH_STARTED)

    mock_logger.clearml_logger.report_scalar.assert_has_calls(
        [
            call(title="weights_norm/fc2",
                 series="weight",
                 iteration=5,
                 value=12.0),
            call(title="weights_norm/fc2",
                 series="bias",
                 iteration=5,
                 value=math.sqrt(12.0)),
        ],
        any_order=True,
    )

    with pytest.raises(AssertionError):
        mock_logger.clearml_logger.report_scalar.assert_has_calls(
            [
                call(title="weights_norm/fc1",
                     series="weight",
                     iteration=5,
                     value=12.0),
                call(title="weights_norm/fc1",
                     series="bias",
                     iteration=5,
                     value=math.sqrt(12.0)),
            ],
            any_order=True,
        )

    assert mock_logger.clearml_logger.report_scalar.call_count == 2
Example #6
def run(train_batch_size, val_batch_size, epochs, lr, momentum):
    train_loader, val_loader = get_data_loaders(train_batch_size,
                                                val_batch_size)
    model = Net()
    device = "cpu"

    if torch.cuda.is_available():
        device = "cuda"

    model.to(device)  # Move model before creating optimizer
    optimizer = SGD(model.parameters(), lr=lr, momentum=momentum)
    criterion = nn.CrossEntropyLoss()
    trainer = create_supervised_trainer(model,
                                        optimizer,
                                        criterion,
                                        device=device)
    trainer.logger = setup_logger("Trainer")

    metrics = {"accuracy": Accuracy(), "loss": Loss(criterion)}

    train_evaluator = create_supervised_evaluator(model,
                                                  metrics=metrics,
                                                  device=device)
    train_evaluator.logger = setup_logger("Train Evaluator")
    validation_evaluator = create_supervised_evaluator(model,
                                                       metrics=metrics,
                                                       device=device)
    validation_evaluator.logger = setup_logger("Val Evaluator")

    @trainer.on(Events.EPOCH_COMPLETED)
    def compute_metrics(engine):
        train_evaluator.run(train_loader)
        validation_evaluator.run(val_loader)

    clearml_logger = ClearMLLogger(project_name="examples", task_name="ignite")

    clearml_logger.attach_output_handler(
        trainer,
        event_name=Events.ITERATION_COMPLETED(every=100),
        tag="training",
        output_transform=lambda loss: {"batchloss": loss},
    )

    for tag, evaluator in [("training metrics", train_evaluator),
                           ("validation metrics", validation_evaluator)]:
        clearml_logger.attach_output_handler(
            evaluator,
            event_name=Events.EPOCH_COMPLETED,
            tag=tag,
            metric_names=["loss", "accuracy"],
            global_step_transform=global_step_from_engine(trainer),
        )

    clearml_logger.attach_opt_params_handler(
        trainer,
        event_name=Events.ITERATION_COMPLETED(every=100),
        optimizer=optimizer)

    clearml_logger.attach(trainer,
                          log_handler=WeightsScalarHandler(model),
                          event_name=Events.ITERATION_COMPLETED(every=100))

    clearml_logger.attach(trainer,
                          log_handler=WeightsHistHandler(model),
                          event_name=Events.EPOCH_COMPLETED(every=100))

    clearml_logger.attach(trainer,
                          log_handler=GradsScalarHandler(model),
                          event_name=Events.ITERATION_COMPLETED(every=100))

    clearml_logger.attach(trainer,
                          log_handler=GradsHistHandler(model),
                          event_name=Events.EPOCH_COMPLETED(every=100))

    handler = Checkpoint(
        {"model": model},
        ClearMLSaver(),
        n_saved=1,
        score_function=lambda e: e.state.metrics["accuracy"],
        score_name="val_acc",
        filename_prefix="best",
        global_step_transform=global_step_from_engine(trainer),
    )
    validation_evaluator.add_event_handler(Events.EPOCH_COMPLETED, handler)

    # kick everything off
    trainer.run(train_loader, max_epochs=epochs)

    clearml_logger.close()
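
For completeness, the imports this training script relies on, following pytorch-ignite's ClearML MNIST example (Net and get_data_loaders are defined elsewhere in that script, and exact module paths vary across ignite versions, so treat this as a sketch):

from torch import nn
from torch.optim import SGD

from ignite.contrib.handlers import global_step_from_engine  # assumed path
from ignite.contrib.handlers.clearml_logger import (
    ClearMLLogger,
    ClearMLSaver,
    GradsHistHandler,
    GradsScalarHandler,
    WeightsHistHandler,
    WeightsScalarHandler,
)
from ignite.engine import Events, create_supervised_evaluator, create_supervised_trainer
from ignite.handlers import Checkpoint
from ignite.metrics import Accuracy, Loss
from ignite.utils import setup_logger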