Exemplo n.º 1
0
    def fit(self, x: ndarray, y: ndarray, **kwargs):
        """
        Scikit-Learn style training entry point.

        Builds train/validation loaders over ``(x, y)`` using disjoint
        random subset samplers, registers them on the runner and runs
        the training loop.

        Args:
            x: Training data
            y: Label
            **kwargs: optional settings: ``val_size`` (default 0.1),
                ``num_workers``, ``batch_size``, ``pin_memory``,
                ``verbose``, ``epochs``, ``checkpoint_path``, ``monitor``.

        Returns:
            self, so calls can be chained scikit-learn style.
        """
        val_size: float = kwargs.get("val_size", 0.1)
        verbose: bool = kwargs.get("verbose", True)
        checkpoint_path = kwargs.get("checkpoint_path", None)
        monitor = kwargs.get("monitor", None)
        # Options shared by both DataLoaders below.
        loader_opts = {
            "batch_size": kwargs.get("batch_size", 1),
            "num_workers": kwargs.get("num_workers", os.cpu_count()),
            "pin_memory": kwargs.get("pin_memory", False),
        }

        # A non-zero configured epoch count takes priority; the kwarg
        # (default 1) is honoured only when the configuration says 0.
        configured_epochs = self.configures["epochs"]
        epochs = kwargs.get("epochs", 1) if configured_epochs == 0 else configured_epochs

        # NOTE(review): both datasets wrap the full (x, y); the disjoint
        # index subsets below are what keep train and val separate.
        train_ds = get_dataset(x, y)
        val_ds = get_dataset(x, y)
        indices = list(range(len(train_ds)))
        cut = int(floor(val_size * len(train_ds)))

        train_loader = DataLoader(
            train_ds,
            sampler=SubsetRandomSampler(indices[cut:]),
            **loader_opts,
        )
        val_loader = DataLoader(
            val_ds,
            sampler=SubsetRandomSampler(indices[:cut]),
            **loader_opts,
        )

        self.add_loader("train", train_loader)
        self.add_loader("val", val_loader)
        self.train_config(epochs,
                          checkpoint_path=checkpoint_path,
                          monitor=monitor)
        self.run(verbose=verbose)

        return self
Exemplo n.º 2
0
def train_nn(
    dataset: str, batch_size: int, depth: int, epochs: int
) -> Tuple[CNN, Tuple[np.ndarray, np.ndarray], Tuple[np.ndarray, np.ndarray]]:
    """
    Train a CNN classifier on a time-series dataset, logging to Comet.

    Args:
        dataset: name of the dataset passed to ``fetch_dataset``; also
            recorded as an experiment tag.
        batch_size: mini-batch size for both the train and test loaders.
        depth: depth of the CNN, recorded as an experiment tag.
        epochs: number of training epochs.

    Returns:
        A tuple of the trained model (switched to eval mode), the scaled
        inputs ``(x_train, x_test)`` and the labels ``(y_train, y_test)``.
    """
    # Fix: the original annotation spelled each array as
    # Union[np.ndarray, np.ndarray], which is just np.ndarray.
    experiment = Experiment(project_name="cphap", auto_output_logging=False)
    experiment.add_tag(dataset)
    experiment.add_tag("NN-depth-{}".format(depth))

    (x_train, y_train), (x_test, y_test) = fetch_dataset(dataset)

    # Standardise each series; the scaler is fit on the training split only.
    scaler = TimeSeriesScalerMeanVariance()
    x_train: np.ndarray = scaler.fit_transform(x_train)
    x_test: np.ndarray = scaler.transform(x_test)

    # (n_samples, timesteps, channels) -> (n_samples, channels, timesteps),
    # the layout the CNN expects, as float32 for torch.
    x_train = x_train.transpose((0, 2, 1)).astype(np.float32)
    x_test = x_test.transpose((0, 2, 1)).astype(np.float32)

    n_features = x_train.shape[1]
    n_targets = len(np.unique(y_train))

    train_ds = get_dataset(x_train, y_train)
    test_ds = get_dataset(x_test, y_test)

    # Only the training loader shuffles; evaluation order stays fixed.
    train_loader = DataLoader(train_ds, batch_size=batch_size, shuffle=True)
    test_loader = DataLoader(test_ds, batch_size=batch_size, shuffle=False)

    model = CNN(n_features, 32, n_targets, depth=depth)
    optimizer = optim.Adam(model.parameters())
    criterion = nn.CrossEntropyLoss()

    runner = ClassificationRunner(model, optimizer, criterion, experiment)
    runner.add_loader("train", train_loader)
    runner.add_loader("test", test_loader)
    runner.train_config(epochs=epochs)
    runner.run()
    runner.quite()

    return runner.model.eval(), (x_train, x_test), (y_train, y_test)
Exemplo n.º 3
0
from torch.utils.data import DataLoader
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split

import enchanter.tasks as tasks
import enchanter.addons as addons
from enchanter.callbacks import TensorBoardLogger
from enchanter.engine.modules import get_dataset

# Iris data: the 4 features / 3 classes match the MLP's 4-in / 3-out
# layout built below.
x, y = load_iris(return_X_y=True)
# First hold out a test set, then carve a validation set out of the
# remaining training data (fixed seeds for reproducibility).
x_train, x_test, y_train, y_test = train_test_split(x, y, random_state=0)
x_train, x_val, y_train, y_val = train_test_split(x_train,
                                                  y_train,
                                                  random_state=0)

# NOTE(review): `np` and `optim` are used below but are not imported in
# this snippet's visible import block — presumably imported elsewhere in
# the full file; confirm.
train_ds = get_dataset(x_train.astype(np.float32), y_train.astype(np.int64))
val_ds = get_dataset(x_val.astype(np.float32), y_val.astype(np.int64))
test_ds = get_dataset(x_test.astype(np.float32), y_test.astype(np.int64))

# Only the training loader shuffles; evaluation order stays fixed.
train_loader = DataLoader(train_ds, batch_size=32, shuffle=True)
val_loader = DataLoader(val_ds, batch_size=32, shuffle=False)
test_loader = DataLoader(test_ds, batch_size=32, shuffle=False)

model = addons.layers.MLP([4, 16, 3], activation=addons.Mish())
optimizer = optim.Adam(model.parameters())

def test_classification_1():
    runner = tasks.ClassificationRunner(
        model,
        optimizer,
Exemplo n.º 4
0
                                        CosineAnnealingLR(optimizer,
                                                          T_max=10,
                                                          eta_min=1e-10),
                                        ExponentialLR(optimizer, gamma=0.9),
                                    ])

# Iris data, cast to the dtypes the model and criterion expect.
x, y = load_iris(return_X_y=True)
x = x.astype("float32")
y = y.astype("int64")

# Hold out a test split first, then a validation split (seed 0 for both).
x_train, x_test, y_train, y_test = train_test_split(x, y, random_state=0)
x_train, x_val, y_train, y_val = train_test_split(
    x_train, y_train, random_state=0
)

train_ds = get_dataset(x_train, y_train)
val_ds = get_dataset(x_val, y_val)
test_ds = get_dataset(x_test, y_test)

# One loader per split, all with the same batch size and no shuffling.
train_loader, val_loader, test_loader = (
    DataLoader(ds, batch_size=8) for ds in (train_ds, val_ds, test_ds)
)

# Register each split's loader on the runner under its split name.
for split_name, loader in (
    ("train", train_loader),
    ("val", val_loader),
    ("test", test_loader),
):
    runner.add_loader(split_name, loader)

runner.train_config(epochs=50)

runner.run()
Exemplo n.º 5
0
import enchanter.tasks as tasks
import enchanter.engine.modules as modules
import enchanter.addons as addons
import enchanter.addons.layers as layers


# Comet experiment logger plus a simple MLP classifier for Iris
# (4 features in, 3 classes out, mish activations).
experiment = Experiment()
model = layers.MLP([4, 512, 128, 3], addons.mish)
optimizer = optim.Adam(model.parameters())

x, y = load_iris(return_X_y=True)
x = x.astype("float32")
y = y.astype("int64")

# NOTE(review): all three datasets wrap the SAME full (x, y) — there is
# no real train/val/test separation in this snippet.
train_ds = modules.get_dataset(x, y)
val_ds = modules.get_dataset(x, y)
test_ds = modules.get_dataset(x, y)

train_loader = DataLoader(train_ds, batch_size=32)
val_loader = DataLoader(val_ds, batch_size=32)
test_loader = DataLoader(test_ds, batch_size=32)


# Manual training loop driven through the runner as a context manager:
# 10 epochs, logging each epoch under the experiment's train context.
# NOTE(review): the loop body looks truncated here — gradients are
# computed via runner.backward but no optimizer step is visible;
# confirm against the full example.
with tasks.ClassificationRunner(model, optimizer, nn.CrossEntropyLoss(), experiment) as runner:
    for epoch in tqdm(range(10)):
        with runner.experiment.train():
            for train_batch in train_loader:
                runner.optimizer.zero_grad()
                train_out = runner.train_step(train_batch)
                runner.backward(train_out["loss"])
Exemplo n.º 6
0
def test_get_dataset_1():
    """get_dataset must wrap a (features, labels) pair in a torch Dataset."""
    features = torch.randn(32, 128)
    labels = torch.randint(0, 9, (32, ))

    assert isinstance(modules.get_dataset(features, labels), Dataset)
Exemplo n.º 7
0
def test_is_tfds_1():
    """A dataset built from torch tensors must not register as a TF dataset."""
    features = torch.randn(32, 128)
    labels = torch.randint(0, 9, (32, ))
    dataset = modules.get_dataset(features, labels)

    assert modules.is_tfds(dataset) is False
Exemplo n.º 8
0
def test_get_dataset_2():
    """get_dataset must also accept features alone, without labels."""
    features = torch.randn(32, 128)

    assert isinstance(modules.get_dataset(features), Dataset)