Example No. 1
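# Trains an MNIST classifier end to end: StepLR scheduler, early stopping on
# train_avg_loss, and checkpoints gated by the monitor expression
# "validate_avg_acc >= 0.75". Assumes a project-local `models` module that
# defines the MNIST network.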
from comet_ml import Experiment
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader
from torchvision import transforms
from torchvision.datasets import MNIST

from enchanter import tasks
from enchanter.callbacks import EarlyStopping


def main():
    experiment = Experiment()

    train_ds = MNIST("../tests/data/",
                     download=True,
                     train=True,
                     transform=transforms.ToTensor())
    test_ds = MNIST("../tests/data/",
                    download=True,
                    train=False,
                    transform=transforms.ToTensor())
    train_loader = DataLoader(train_ds, batch_size=64, shuffle=True)
    test_loader = DataLoader(test_ds, batch_size=64, shuffle=False)

    model = models.MNIST()
    optimizer = optim.Adam(model.parameters())
    runner = tasks.ClassificationRunner(
        model,
        optimizer,
        nn.CrossEntropyLoss(),
        experiment,
        scheduler=[
            optim.lr_scheduler.StepLR(optimizer, step_size=20, gamma=0.5)
        ],
        early_stop=EarlyStopping("train_avg_loss", min_delta=0.1, patience=1))
    runner.add_loader("train", train_loader).add_loader("test", test_loader)\
        .train_config(
        epochs=5,
        checkpoint_path="./checkpoints",
        monitor="validate_avg_acc >= 0.75"
    )

    runner.run(verbose=True)
Example No. 2
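# Smoke test relying on module-level fixtures (model, optimizer, x, y):
# fit() should accept NumPy arrays directly once cast to float32/int64.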
def test_classification_3():
    runner = tasks.ClassificationRunner(model, optimizer,
                                        nn.CrossEntropyLoss(),
                                        TensorBoardLogger())
    try:
        runner.fit(x.astype(np.float32), y.astype(np.int64))
        is_pass = True

    except Exception:
        is_pass = False

    assert is_pass is True
Example No. 3
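# Negative test: run() is expected to fail because no DataLoader was
# registered, so the assertion requires is_pass to be False.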
def test_classification_2():
    runner = tasks.ClassificationRunner(model, optimizer,
                                        nn.CrossEntropyLoss(),
                                        TensorBoardLogger())
    runner.train_config(epochs=1)

    try:
        runner.run(verbose=False)
        is_pass = True

    except Exception:
        is_pass = False

    assert is_pass is False
Example No. 4
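# Registers train/val/test loaders, trains for one epoch, and passes if
# run() raises no exception. Fixtures (model, optimizer, loaders) are
# module-level.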
def test_classification_1():
    runner = tasks.ClassificationRunner(model, optimizer,
                                        nn.CrossEntropyLoss(),
                                        TensorBoardLogger("./logs"))
    runner.add_loader("train",
                      train_loader).add_loader("val", val_loader).add_loader(
                          "test", test_loader)
    runner.train_config(epochs=1)

    try:
        runner.run(verbose=True)
        is_pass = True
    except Exception:
        is_pass = False

    assert is_pass is True
Example No. 5
def test_classification_3():
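    # Same smoke test as Example No. 2, but logging to a Comet
    # OfflineExperiment and passing explicit epochs/batch_size to fit().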
    runner = tasks.ClassificationRunner(
        model,
        optimizer,
        nn.CrossEntropyLoss(),
        OfflineExperiment(offline_directory="../tmp")
    )
    try:
        runner.fit(x.astype(np.float32), y.astype(np.int64), verbose=False, epochs=1, batch_size=32)
        is_pass = True

    except Exception as e:
        print(e)
        is_pass = False

    assert is_pass is True
Example No. 6
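# Hydra-driven entry point; in the original source main() is presumably
# decorated with @hydra.main(...), so cfg arrives as a Hydra/OmegaConf config.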
def main(cfg):
    shapes = cfg.model.shapes
    opt_params = cfg.optimizer.params

    experiment = Experiment(log_code=False)
    experiment.set_code(filename=hydra.utils.to_absolute_path(__file__))
    experiment.add_tag("with_hydra")
    experiment.log_parameters({"hydra-cfg": [cfg]})
    model = layers.MLP(shapes)
    optimizer = optim.Adam(model.parameters(), **opt_params)
    runner = tasks.ClassificationRunner(
        model,
        optimizer=optimizer,
        criterion=nn.CrossEntropyLoss(),
        experiment=experiment
    )
    runner.fit(x, y, epochs=10, checkpoint_path="./checkpoints")
    runner.save()
Example No. 7
def test_classification_1():
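    # Same flow as Example No. 4, but logging to an OfflineExperiment
    # instead of a TensorBoardLogger.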
    runner = tasks.ClassificationRunner(
        model,
        optimizer,
        nn.CrossEntropyLoss(),
        OfflineExperiment(offline_directory="./logs", display_summary_level=0)
    )
    runner.add_loader("train", train_loader).add_loader("val", val_loader).add_loader("test", test_loader)
    runner.train_config(epochs=1)

    try:
        runner.run(verbose=True)
        is_pass = True
    except Exception as e:
        print(e)
        is_pass = False

    assert is_pass is True
Example No. 8
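# Adds an ExponentialLR scheduler on top of the usual train/val/test setup;
# passes if one epoch runs without raising.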
def test_classification_1():
    runner = tasks.ClassificationRunner(
        model,
        optimizer,
        nn.CrossEntropyLoss(),
        TensorBoardLogger(),
        scheduler=[optim.lr_scheduler.ExponentialLR(optimizer, gamma=0.9)])
    runner.add_loader("train",
                      train_loader).add_loader("val", val_loader).add_loader(
                          "test", test_loader)
    runner.train_config(epochs=1)

    try:
        runner.run(verbose=True)
        is_pass = True
    except Exception as e:
        is_pass = False
        print(e)

    assert is_pass is True
Example No. 9
def test_classification_2():
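    # Negative test: run() with no loaders registered should raise
    # ValueError; any other outcome makes the final assertion fail.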
    runner = tasks.ClassificationRunner(
        model,
        optimizer,
        nn.CrossEntropyLoss(),
        OfflineExperiment(offline_directory="./logs", display_summary_level=0)
    )
    runner.train_config(epochs=1)

    try:
        runner.run(verbose=False)
        is_pass = True

    except ValueError:
        is_pass = False

    except Exception as e:
        print(e)
        is_pass = True

    assert is_pass is False
Example No. 10
def test_classification_4():
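    # Negative test: run() before train_config() should raise TypeError;
    # the assertion only holds if that specific error occurs.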
    runner = tasks.ClassificationRunner(
        model,
        optimizer,
        nn.CrossEntropyLoss(),
        OfflineExperiment(offline_directory="./logs", display_summary_level=0)
    )
    runner.add_loader("train", train_loader).add_loader("val", val_loader).add_loader("test", test_loader)

    try:
        runner.run(verbose=False)
        runner.train_config(epochs=1, checkpoint_path="../tmp/checkpoints", monitor="train_avg_acc >= 0.6")
        is_pass = True

    except TypeError:
        is_pass = False

    except Exception as e:
        print(e)
        is_pass = True

    assert is_pass is False
Example No. 11
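# Chains two LR schedulers (CosineAnnealingLR, then ExponentialLR) on a small
# MLP trained on iris; the snippet is truncated after building train_ds.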
from comet_ml import Experiment
import torch.nn as nn
import torch.optim as optim
from torch.optim.lr_scheduler import CosineAnnealingLR, ExponentialLR
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split

from enchanter import addons
from enchanter import tasks
from enchanter.addons import layers
from enchanter.engine.modules import fix_seed, get_dataset

fix_seed(0)

experiment = Experiment()
model = layers.MLP([4, 512, 128, 3], addons.mish)
optimizer = optim.Adam(model.parameters())
runner = tasks.ClassificationRunner(model,
                                    optimizer=optimizer,
                                    criterion=nn.CrossEntropyLoss(),
                                    experiment=experiment,
                                    scheduler=[
                                        CosineAnnealingLR(optimizer,
                                                          T_max=10,
                                                          eta_min=1e-10),
                                        ExponentialLR(optimizer, gamma=0.9),
                                    ])

x, y = load_iris(return_X_y=True)
x = x.astype("float32")
y = y.astype("int64")

x_train, x_test, y_train, y_test = train_test_split(x, y, random_state=0)
x_train, x_val, y_train, y_val = train_test_split(x_train,
                                                  y_train,
                                                  random_state=0)

train_ds = get_dataset(x_train, y_train)
Example No. 12
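# Hyperparameter search with Comet's Optimizer: TunerConfigGenerator sets up
# a Bayesian search over activation functions, and each trial trains one run.
# The sampled string is turned into a callable with eval(), so every module
# named in the choices (addons, torch) must be importable.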
import torch  # needed so that eval() can resolve "torch.relu" / "torch.sigmoid"
import torch.nn as nn
import torch.optim as optim
from comet_ml import Optimizer
from sklearn.datasets import load_iris
import enchanter.tasks as tasks
import enchanter.addons as addons       # pylint: disable=W0611
import enchanter.addons.layers as layers
from enchanter.utils import comet

config = comet.TunerConfigGenerator(
    algorithm="bayes",
    metric="train_avg_loss",
    objective="minimize",
    seed=0,
    trials=5
)

config.suggest_categorical("activation", ["addons.mish", "torch.relu", "torch.sigmoid"])

opt = Optimizer(config.generate())

for experiment in opt.get_experiments():
    model = layers.MLP([4, 512, 128, 3], eval(experiment.get_parameter("activation")))
    optimizer = optim.Adam(model.parameters())
    runner = tasks.ClassificationRunner(
        model, optimizer=optimizer, criterion=nn.CrossEntropyLoss(), experiment=experiment
    )
    x, y = load_iris(return_X_y=True)
    x = x.astype("float32")
    y = y.astype("int64")

    runner.fit(x, y, epochs=1, batch_size=32)
Example No. 13
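# Feeds tf.data.Dataset pipelines to the runner through add_loader(). The
# snippet starts mid-script: x_train/x_val/x_test and their labels are
# assumed to come from an earlier train_test_split.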
import tensorflow as tf
import torch.nn as nn
import torch.optim as optim
from comet_ml import Experiment
from sklearn.model_selection import train_test_split

import enchanter.addons as addons
import enchanter.addons.layers  # noqa: F401 -- exposes addons.layers
import enchanter.tasks as tasks

x_train, x_val, y_train, y_val = train_test_split(x_train,
                                                  y_train,
                                                  random_state=0)

batch_size = 32

train_loader = \
    tf.data.Dataset.from_tensor_slices(
        (x_train, y_train)
    ).shuffle(batch_size).batch(batch_size)

val_loader = \
    tf.data.Dataset.from_tensor_slices(
        (x_val, y_val)
    ).shuffle(batch_size).batch(batch_size)
test_loader = \
    tf.data.Dataset.from_tensor_slices(
        (x_test, y_test)
    ).shuffle(batch_size).batch(batch_size)

model = addons.layers.MLP([4, 16, 3], activation=addons.Mish())
optimizer = optim.Adam(model.parameters())

runner = tasks.ClassificationRunner(model, optimizer, nn.CrossEntropyLoss(),
                                    Experiment())
runner.add_loader("train", train_loader).add_loader("val",
                                                    val_loader).add_loader(
                                                        "test", test_loader)
runner.train_config(epochs=10)
runner.run()
Example No. 14
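# Manual training loop driving the runner as a context manager and calling
# train_step()/val_step() directly. `model` and `experiment` are assumed to
# be defined earlier in the original script.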
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader
from tqdm import tqdm
from sklearn.datasets import load_iris

from enchanter import tasks
from enchanter.engine import modules

optimizer = optim.Adam(model.parameters())

x, y = load_iris(return_X_y=True)
x = x.astype("float32")
y = y.astype("int64")

train_ds = modules.get_dataset(x, y)
val_ds = modules.get_dataset(x, y)
test_ds = modules.get_dataset(x, y)

train_loader = DataLoader(train_ds, batch_size=32)
val_loader = DataLoader(val_ds, batch_size=32)
test_loader = DataLoader(test_ds, batch_size=32)


with tasks.ClassificationRunner(model, optimizer, nn.CrossEntropyLoss(), experiment) as runner:
    for epoch in tqdm(range(10)):
        with runner.experiment.train():
            for train_batch in train_loader:
                runner.optimizer.zero_grad()
                train_out = runner.train_step(train_batch)
                runner.backward(train_out["loss"])
                runner.update_optimizer()

                with runner.experiment.validate(), torch.no_grad():
                    for val_batch in val_loader:
                        val_out = runner.val_step(val_batch)["loss"]
                        runner.experiment.log_metric("val_loss", val_out)

        with runner.experiment.test(), torch.no_grad():
            for test_batch in test_loader:
                # The original snippet is cut off here; mirroring the
                # validation loop above, a plausible completion would be:
                test_out = runner.test_step(test_batch)["loss"]
                runner.experiment.log_metric("test_loss", test_out)
Example No. 15
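# Uses the callbacks API: EarlyStopping monitors val_avg_acc in "max" mode
# with patience 5 (contrast with the early_stop argument in Example No. 1).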
from comet_ml import Experiment
import torch.nn as nn
import torch.optim as optim
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split

from enchanter import addons
from enchanter import tasks
from enchanter.addons import layers
from enchanter.engine.modules import fix_seed, get_dataset
from enchanter.callbacks import EarlyStopping

fix_seed(0)

experiment = Experiment()
model = layers.MLP([4, 512, 128, 3], addons.mish)
optimizer = optim.Adam(model.parameters())
runner = tasks.ClassificationRunner(
    model,
    optimizer=optimizer,
    criterion=nn.CrossEntropyLoss(),
    experiment=experiment,
    callbacks=[EarlyStopping(monitor="val_avg_acc", patience=5, mode="max")])

x, y = load_iris(return_X_y=True)
x = x.astype("float32")
y = y.astype("int64")

x_train, x_test, y_train, y_test = train_test_split(x, y, random_state=0)
x_train, x_val, y_train, y_val = train_test_split(x_train,
                                                  y_train,
                                                  random_state=0)

train_ds = get_dataset(x_train, y_train)
val_ds = get_dataset(x_val, y_val)
test_ds = get_dataset(x_test, y_test)