Example #1
from catalyst import dl, utils


def run_catalyst(irunner: dl.IRunner,
                 idx: int,
                 device: str = "cuda",
                 num_epochs: int = 10):
    # reproduce the shared experiment setup: seed, data, model, criterion, optimizer
    utils.set_global_seed(idx)
    loader = irunner.get_loaders()["train"]
    model = irunner.get_model().to(device)
    criterion = irunner.get_criterion()
    optimizer = irunner.get_optimizer(model)

    runner = dl.SupervisedRunner()
    runner.train(
        engine=dl.GPUEngine() if device == "cuda" else dl.CPUEngine(),
        model=model,
        criterion=criterion,
        optimizer=optimizer,
        loaders={"train": loader},
        num_epochs=num_epochs,
        verbose=False,
        callbacks=[
            dl.AccuracyCallback(
                input_key=runner._output_key,
                target_key=runner._target_key,
                topk=(1, ),
            )
        ],
    )

    return (
        runner.epoch_metrics["train"]["accuracy01"],
        runner.epoch_metrics["train"]["loss"],
        _get_used_memory(),
    )
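
Both run_catalyst above and run_pytorch below report memory through a _get_used_memory() helper that is defined elsewhere in the benchmark module. A minimal sketch of such a helper, assuming it simply reads the peak CUDA allocator counter (the actual implementation may differ):

import torch


def _get_used_memory() -> int:
    # hypothetical helper: peak bytes allocated on the current GPU so far,
    # or 0 on CPU-only machines
    return torch.cuda.max_memory_allocated() if torch.cuda.is_available() else 0
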
Example #2
import torch

from catalyst import dl, utils


def run_pytorch(irunner: dl.IRunner,
                idx: int,
                device: str = "cuda",
                num_epochs: int = 10):
    device = torch.device(device)
    utils.set_global_seed(idx)

    loader = irunner.get_loaders()["train"]
    model = irunner.get_model().to(device)
    criterion = irunner.get_criterion()
    optimizer = irunner.get_optimizer(model)

    epoch_scores = []
    epoch_losses = []
    # manual training loop mirroring the Catalyst run above
    for i in range(num_epochs):
        epoch_score = 0
        epoch_loss = 0

        for features, targets in loader:
            features = features.to(device)
            targets = targets.to(device)
            logits = model(features)
            loss = criterion(logits, targets)

            epoch_loss += loss.item()
            pred = logits.argmax(dim=1, keepdim=True)
            epoch_score += pred.eq(targets.view_as(pred)).sum().item()

            # plain PyTorch update: backward pass, optimizer step, reset gradients
            loss.backward()
            optimizer.step()
            optimizer.zero_grad()

        epoch_score /= len(loader.dataset)
        epoch_loss /= len(loader)

        print(f"Epoch {i} \t Score: {epoch_score} \t Loss: {epoch_loss}")

        epoch_scores.append(epoch_score)
        epoch_losses.append(epoch_loss)

    return epoch_scores[-1], epoch_losses[-1], _get_used_memory()
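
Example #3 below drives both training loops through a score_runs helper and a RunMode enum that are not part of this listing. A rough sketch consistent with how they are called there, assuming score_runs repeats the selected loop num_runs times with different seeds and aggregates scores, losses, wall-clock time, and memory (the aggregation details are assumptions):

import time
from enum import Enum


class RunMode(str, Enum):
    pytorch = "pytorch"
    catalyst = "catalyst"


def score_runs(irunner, mode: RunMode, device: str, num_epochs: int, num_runs: int) -> dict:
    # hypothetical aggregator: run the selected loop num_runs times and collect metrics
    run_fn = run_catalyst if mode == RunMode.catalyst else run_pytorch
    scores, losses, memory = [], [], []
    start = time.perf_counter()
    for idx in range(num_runs):
        score, loss, mem = run_fn(irunner, idx, device=device, num_epochs=num_epochs)
        scores.append(score)
        losses.append(loss)
        memory.append(mem)
    elapsed = time.perf_counter() - start
    return {
        "scores": scores,
        "losses": losses,
        "time": elapsed / num_runs,           # mean wall-clock time per run
        "memory": sum(memory) / len(memory),  # mean memory reading per run
    }
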
Example #3
import numpy as np
import torch

from catalyst import dl


def test_benchmark(
    tmpdir,
    irunner: dl.IRunner,
    device: str,
    num_epochs: int,
    num_runs: int,
    precision: int,
    max_diff_time: float,
    max_diff_memory: float,
):

    irunner = irunner()
    # prepare data
    _ = irunner.get_loaders()

    # score runs
    pytorch = score_runs(
        irunner,
        mode=RunMode.pytorch,
        device=device,
        num_epochs=num_epochs,
        num_runs=num_runs,
    )
    catalyst = score_runs(
        irunner,
        mode=RunMode.catalyst,
        device=device,
        num_epochs=num_epochs,
        num_runs=num_runs,
    )

    # check performance
    print("Scores are for... \n "
          f"PyTorch: {pytorch['scores']} \n Catalyst: {catalyst['scores']}")
    for catalyst_, pytorch_ in zip(catalyst["scores"], pytorch["scores"]):
        np.testing.assert_almost_equal(catalyst_, pytorch_, precision)

    # check loss
    print("Losses are for... \n "
          f"PyTorch: {pytorch['losses']} \n Catalyst: {catalyst['losses']}")
    for catalyst_, pytorch_ in zip(catalyst["losses"], pytorch["losses"]):
        np.testing.assert_almost_equal(catalyst_, pytorch_, precision)

    # check time
    print(
        f"Times are for... \n PyTorch: {pytorch['time']} \n Catalyst: {catalyst['time']}"
    )
    assert_absolute_equal(
        catalyst["time"],
        pytorch["time"],
        norm=num_epochs,
        max_diff=max_diff_time,
    )

    # check memory
    if torch.cuda.is_available():
        print(
            "Memory usages are for... \n "
            f"PyTorch: {pytorch['memory']} \n Catalyst: {catalyst['memory']}")
        assert_relative_equal(catalyst["memory"],
                              pytorch["memory"],
                              max_diff=max_diff_memory)
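
The time and memory checks above rely on assert_absolute_equal and assert_relative_equal, which are also defined outside this listing. A minimal sketch consistent with the call sites, assuming a simple normalized absolute tolerance and a relative tolerance:

def assert_absolute_equal(actual: float, expected: float, norm: float, max_diff: float) -> None:
    # hypothetical check: absolute difference, normalized (here per epoch),
    # must stay within the allowed budget
    assert abs(actual - expected) / norm <= max_diff


def assert_relative_equal(actual: float, expected: float, max_diff: float) -> None:
    # hypothetical check: difference relative to the reference value
    # must stay within the allowed fraction
    assert abs(actual - expected) / abs(expected) <= max_diff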