Example #1
    def _test_fit_loader(device):
        # Small end-to-end check: build loaders, train with callbacks and metrics,
        # then reload the best checkpoint and predict on the test loader.
        seed_all(42)
        save_dir = tmpdir.mkdir("/test_save")
        file_name = "test_classification.bin"
        dataset = torch.utils.data.TensorDataset(X, y)
        test_dataset = create_ds((test_data, ))
        loader = torch.utils.data.DataLoader(dataset, batch_size=32)
        test_dl = torch.utils.data.DataLoader(test_dataset, batch_size=16)

        metric_list = [
            Accuracy(num_classes=num_classes),
        ]

        callbacks = [
            cbs.EarlyStopping(monitor="val_accuracy", mode="max"),
            cbs.ModelCheckpoint(monitor="val_accuracy",
                                mode="max",
                                save_dir=save_dir,
                                file_name=file_name),
            cbs.CosineAnnealingWarmRestarts(T_0=2),
        ]

        exp = Experiment(
            num_epochs=10,
            fp16=fp16,
            device=device,
            seed=42,
        )

        exp.compile_experiment(
            model_config=config,
            metrics=metric_list,
            callbacks=callbacks,
            main_metric="accuracy",
        )

        assert isinstance(exp.state, State)
        exp.fit_loader(train_dl=loader, valid_dl=loader)
        logs = exp.get_logs()
        assert isinstance(logs, pd.DataFrame)
        outputs = []
        for op in exp.predict_on_loader(test_dl=test_dl,
                                        device=device,
                                        path_to_model=os.path.join(
                                            save_dir, file_name)):
            outputs.extend(op)

        assert len(outputs) == test_samples
Example #2
    def _test_fit_loader(device):
        # Same end-to-end check, but compiling from a bare nn.Module, optimizer,
        # and criterion instead of a ModelConfig.
        seed_all(42)
        save_dir = tmpdir.mkdir("/test_save")
        file_name = "test_classification.bin"
        dataset = torch.utils.data.TensorDataset(X, y)
        test_dataset = torch.utils.data.TensorDataset(test_data)
        loader = torch.utils.data.DataLoader(dataset, batch_size=32)
        test_dl = torch.utils.data.DataLoader(test_dataset, batch_size=16)
        model = torch.nn.Linear(num_features, num_classes)

        metric_list = [
            metrics.Accuracy(num_classes=num_classes, multilabel=False),
            metrics.F1Score(num_classes=num_classes, multilabel=False),
        ]

        callbacks = [
            cbs.EarlyStopping(monitor="accuracy", mode="max"),
            cbs.ModelCheckpoint(monitor="accuracy", mode="max", save_dir=save_dir, file_name=file_name),
            cbs.CosineAnnealingWarmRestarts(T_0=2),
        ]

        exp = Experiment(
            num_epochs=10,
            fp16=fp16,
            device=device,
            seed=42,
        )

        exp.compile_experiment(
            model=model,
            metrics=metric_list,
            callbacks=callbacks,
            main_metric="accuracy",
            optimizer=optimizer,
            optimizer_params=optimizer_params,
            criterion=criterion,
        )
        exp.fit_loader(train_dl=loader, valid_dl=loader)
        exp.plot_history(keys=["accuracy"], save_fig=False, plot_fig=False)
        outputs = []
        for op in exp.predict_on_loader(
            test_dl=test_dl, device=device, path_to_model=os.path.join(save_dir, file_name)
        ):
            outputs.extend(op)

        assert len(outputs) == test_samples
Example #3
                "mlp_params": {"projection_size": 256, "hidden_size": 1024},
            },
            "predictor": {
                "in_channels": 256,
                "projection_size": 256,
                "hidden_size": 1024,
            },
        },
        optimizer="AdamW",
        optimizer_params={"lr": 1e-4, "weight_decay": 1e-3},
        criterion=normalized_mse,
    )

    # Defining callbacks.
    callbacks = [
        cbs.ModelCheckpoint(save_dir="./", mode="min", monitor="train_loss"),
        init_target_network,
        cbs.CosineAnnealingWarmRestarts(T_0=2),
    ]

    # Compiling and running the experiment.
    byol_exp = BYOLExperiment(
        num_epochs=25,
        seed=42,
        device="cuda",
        fp16=True,
        momentum=4e-3,
        augmentation_fn=default_augmentation,
        image_size=(96, 96),
    )
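
    # The excerpt is truncated here. A minimal sketch of the remaining compile/run
    # step, assuming BYOLExperiment exposes the same compile_experiment()/fit_loader()
    # interface as Experiment in Examples #1 and #2; the `model_config` and `train_dl`
    # names are assumptions and are not shown in this excerpt.
    byol_exp.compile_experiment(
        model_config=model_config,
        callbacks=callbacks,
    )
    byol_exp.fit_loader(train_dl=train_dl)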
Example #4
    )

    valid_dl = (
        TextDataset.from_df(df=valid_df, tokenizer=tokenizer, max_len=128, input_columns=["text"])
        .targets_from_df(target_columns=["label"])
        .batch(batch_size=16, shuffle=True)
    )

    # Defining metrics
    metric_list = [torchmetrics.Accuracy(threshold=0.6)]

    # Defining Callbacks.
    callbacks = [
        cbs.EarlyStopping(monitor="val_accuracy", patience=2, mode="max"),
        cbs.ModelCheckpoint(
            monitor="val_accuracy", mode="max", save_dir="./", file_name="model.bin"
        ),
        cbs.ReduceLROnPlateau(mode="max", patience=2),
    ]

    config = ModelConfig(
        nn_module=Model,
        module_params={"dropout": 0.3, "out_features": 1},
        optimizer="AdamW",
        optimizer_params={"lr": 3e-4},
        criterion=crit.BCEWithLogitsFlat,
    )

    # Compiling and Running Experiment.
    exp = Experiment(
        num_epochs=3,
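        # --- The excerpt ends mid-call above; the lines below are a hedged sketch of
        # the usual continuation, following Examples #1 and #2. The fp16/device/seed
        # values and the `train_dl` name are assumptions, not part of the original example.
        fp16=False,
        device="cuda",
        seed=42,
    )
    exp.compile_experiment(
        model_config=config,
        metrics=metric_list,
        callbacks=callbacks,
        main_metric="accuracy",
    )
    exp.fit_loader(train_dl=train_dl, valid_dl=valid_dl)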
Example #5
        random_state=123,
        test_size=0.20)

    train_images = train_images.reshape(train_images.shape[0], 1, 28, 28)
    val_images = val_images.reshape(val_images.shape[0], 1, 28, 28)

    train_images = train_images / 255.0
    val_images = val_images / 255.0

    # Defining Metrics
    metric_list = [Accuracy(num_classes=classes, average="micro")]

    # Defining Callbacks
    callbacks = [
        cbs.EarlyStopping(monitor="val_accuracy", mode="max", patience=5),
        cbs.ModelCheckpoint(monitor="val_accuracy", mode="max"),
        cbs.ReduceLROnPlateau(mode="max", patience=2),
        cbs.DiscordNotifierCallback(
            exp_name="MNIST-EXP",
            webhook_url=os.environ.get("DISCORD_WEBHOOK")),
    ]

    # Defining ModelConfig for Experiment
    config = ModelConfig(
        nn_module=Net,  # The uninstantiated model_class for the neural network.
        module_params={"out_features": 10},
        optimizer="Adam",
        optimizer_params={"lr": 3e-4},
        criterion="cross_entropy",
    )
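
    # The excerpt ends here. A minimal sketch of the remaining steps, following the
    # Experiment pattern of Examples #1 and #2; the label arrays (train_labels /
    # val_labels) and the Experiment/DataLoader argument values are assumptions
    # that are not shown in this excerpt.
    train_ds = torch.utils.data.TensorDataset(
        torch.tensor(train_images, dtype=torch.float32),
        torch.tensor(train_labels, dtype=torch.long),
    )
    val_ds = torch.utils.data.TensorDataset(
        torch.tensor(val_images, dtype=torch.float32),
        torch.tensor(val_labels, dtype=torch.long),
    )
    train_dl = torch.utils.data.DataLoader(train_ds, batch_size=32, shuffle=True)
    val_dl = torch.utils.data.DataLoader(val_ds, batch_size=32)

    exp = Experiment(num_epochs=10, fp16=False, device="cuda", seed=42)
    exp.compile_experiment(
        model_config=config,
        metrics=metric_list,
        callbacks=callbacks,
        main_metric="accuracy",
    )
    exp.fit_loader(train_dl=train_dl, valid_dl=val_dl)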
Example #6

if __name__ == "__main__":
    # Defining ModelConfig for experiment
    exp_config = ModelConfig(
        nn_module={"discriminator": Discriminator, "generator": Generator},
        module_params={
            "discriminator": {"output_dim": 1},
            "generator": {"latent_dim": 16},
        },
        optimizer={"discriminator": "Adam", "generator": "Adam"},
        optimizer_params={"discriminator": dict(lr=1e-3), "generator": dict(lr=2e-4)},
        criterion="binary_cross_entropy",
    )
    # Some callbacks
    callbacks = [cbs.ModelCheckpoint(mode="min", monitor="train_loss_g", save_dir="./")]

    transform = tv.transforms.Compose(
        [
            tv.transforms.Grayscale(num_output_channels=1),
            tv.transforms.ToTensor(),
            tv.transforms.Normalize((0.5,), (0.5,)),
        ]
    )
    # Creating dataloaders
    dataset = ImageFolder(root=os.path.join("mnist_png", "training"), transform=transform)
    dataloader = torch.utils.data.DataLoader(dataset, batch_size=16)

    # Compiling and Running the experiment.
    exp = DCGANExperiment(
        latent_dim=16,
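        # --- The excerpt ends mid-call here; the lines below are a hedged sketch of a
        # typical continuation, reusing the compile_experiment()/fit_loader() pattern
        # from the examples above. The constructor arguments and the absence of a
        # valid_dl are assumptions, not part of the original example.
        num_epochs=5,
        fp16=False,
        device="cuda",
        seed=42,
    )
    exp.compile_experiment(
        model_config=exp_config,
        callbacks=callbacks,
    )
    exp.fit_loader(train_dl=dataloader)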