Example #1
def test_logger():
    logger = set_logger("Loglevel_DEBUG", "DEBUG")
    _record(logger)
    logger = set_logger("Loglevel_INFO", "INFO")
    _record(logger)
    logger = set_logger("Loglevel_WARNING", "WARNING")
    _record(logger)
    logger = set_logger("Loglevel_ERROR", "ERROR")
    _record(logger)
    logger = set_logger("Loglevel_CRITICAL", "CRITICAL")
    _record(logger)

    with pytest.raises(ValueError) as excinfo:
        set_logger("Loglevel_INVALID", "INVALID")
    assert ("INVALID" in str(excinfo.value))
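# The test above calls a module-level `_record` helper and relies on imports
# that are not included in this excerpt. A minimal sketch of such a helper
# (an assumption, not the library's actual code) simply emits one message at
# each log level:
import pytest
from torchensemble.utils.logging import set_logger


def _record(logger):
    logger.debug("Debug message")
    logger.info("Info message")
    logger.warning("Warning message")
    logger.error("Error message")
    logger.critical("Critical message")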
Example #2
import torch
import pytest
import numpy as np
import torch.nn as nn
from torch.utils.data import TensorDataset, DataLoader

import torchensemble
from torchensemble.utils.logging import set_logger

parallel = [
    torchensemble.FusionClassifier,
    torchensemble.VotingClassifier,
    torchensemble.BaggingClassifier,
]

set_logger("pytest_training_params")


# Base estimator
class MLP(nn.Module):
    def __init__(self):
        super(MLP, self).__init__()
        self.linear1 = nn.Linear(2, 2)
        self.linear2 = nn.Linear(2, 2)

    def forward(self, X):
        X = X.view(X.size()[0], -1)
        output = self.linear1(X)
        output = self.linear2(output)
        return output
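# A minimal sketch of how the `parallel` list above is typically exercised:
# fit each ensemble on a tiny toy dataset. The toy data (X_toy/y_toy) and the
# parametrize wiring are assumptions, not part of the original test file.
X_toy = torch.Tensor(np.array(([1, 1], [2, 2], [3, 3], [4, 4])))
y_toy = torch.LongTensor(np.array(([0, 0, 1, 1])))


@pytest.mark.parametrize("clf", parallel)
def test_training_params_sketch(clf):
    model = clf(estimator=MLP, n_estimators=2, cuda=False)
    model.set_optimizer("Adam", lr=1e-3, weight_decay=5e-4)
    loader = DataLoader(TensorDataset(X_toy, y_toy), batch_size=2)
    model.fit(loader, epochs=1)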
Example #3
import torch
import pytest
import numpy as np
import torch.nn as nn
from torch.utils.data import TensorDataset, DataLoader

import torchensemble
from torchensemble.utils.logging import set_logger

set_logger("pytest_adversarial_training")

X_train = torch.Tensor(np.array(([1, 1], [2, 2], [3, 3], [4, 4])))
y_train_clf = torch.LongTensor(np.array(([0, 0, 1, 1])))


# Base estimator
class MLP_clf(nn.Module):
    def __init__(self):
        super(MLP_clf, self).__init__()
        self.linear1 = nn.Linear(2, 2)
        self.linear2 = nn.Linear(2, 2)

    def forward(self, X):
        X = X.view(X.size()[0], -1)
        output = self.linear1(X)
        output = self.linear2(output)
        return output


def test_adversarial_training_range():
    """
Example #4
    # Hyper-parameters
    n_estimators = 10
    lr = 1e-3
    weight_decay = 5e-4
    epochs = 50

    # Utils
    batch_size = 512
    records = []
    torch.manual_seed(0)

    # Load data
    train_loader, test_loader = load_data(batch_size)
    print("Finish loading data...\n")

    logger = set_logger("regression_YearPredictionMSD_mlp")

    # FusionRegressor
    model = FusionRegressor(estimator=MLP,
                            n_estimators=n_estimators,
                            cuda=True)

    # Set the optimizer
    model.set_optimizer("Adam", lr=lr, weight_decay=weight_decay)

    tic = time.time()
    model.fit(train_loader, epochs=epochs)
    toc = time.time()
    training_time = toc - tic

    tic = time.time()
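    # The excerpt stops while timing evaluation. The example presumably
    # continues along these lines (for regressors, model.evaluate returns the
    # testing MSE); the print format is an assumption:
    testing_mse = model.evaluate(test_loader)
    toc = time.time()
    evaluating_time = toc - tic
    print("Testing MSE: {:.3f} | Time: {:.2f}s".format(testing_mse, evaluating_time))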
Example #5
    train_loader = DataLoader(
        datasets.CIFAR10(
            data_dir, train=True, download=True, transform=train_transformer
        ),
        batch_size=batch_size,
        shuffle=True,
    )

    test_loader = DataLoader(
        datasets.CIFAR10(data_dir, train=False, transform=test_transformer),
        batch_size=batch_size,
        shuffle=True,
    )

    # Set the Logger
    logger = set_logger("snapshot_ensemble_cifar10_resnet18")

    # Choose the Ensemble Method
    model = SnapshotEnsembleClassifier(
        estimator=ResNet,
        estimator_args={"block": BasicBlock, "num_blocks": [2, 2, 2, 2]},
        n_estimators=n_estimators,
        cuda=True,
    )

    # Set the Optimizer
    model.set_optimizer(
        "SGD", lr=lr, weight_decay=weight_decay, momentum=momentum
    )

    # Train and Evaluate
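    # The excerpt ends before the training step. A hedged sketch of the usual
    # continuation with the torchensemble API (epochs comes from the
    # hyper-parameters of the full script, which are not shown here):
    model.fit(train_loader, epochs=epochs, test_loader=test_loader)
    acc = model.evaluate(test_loader)
    print("Testing Acc: {:.3f}".format(acc))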
Example #6
all_reg = [
    torchensemble.FusionRegressor,
    torchensemble.VotingRegressor,
    torchensemble.BaggingRegressor,
    torchensemble.GradientBoostingRegressor,
    torchensemble.SnapshotEnsembleRegressor,
    torchensemble.AdversarialTrainingRegressor,
    torchensemble.FastGeometricRegressor,
    torchensemble.SoftGradientBoostingRegressor,
]


np.random.seed(0)
torch.manual_seed(0)
device = torch.device("cpu")
logger = set_logger("pytest_all_models_multiple_input")


# Base estimator
class MLP_clf(nn.Module):
    def __init__(self):
        super(MLP_clf, self).__init__()
        self.linear1 = nn.Linear(2, 2)
        self.linear2 = nn.Linear(2, 2)

    def forward(self, X_1, X_2):
        X_1 = X_1.view(X_1.size()[0], -1)
        X_2 = X_2.view(X_2.size()[0], -1)
        output_1 = self.linear1(X_1)
        output_1 = self.linear2(output_1)
        output_2 = self.linear1(X_2)
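# The two-input forward pass above is cut off, and the excerpt does not show
# how such an estimator is trained. A minimal self-contained sketch, assuming
# torchensemble unpacks each batch as (X_1, X_2, target) the way the
# multiple-input test name suggests; the completed forward, the toy data and
# the choice of VotingClassifier are assumptions:
import torch
import numpy as np
import torch.nn as nn
from torch.utils.data import TensorDataset, DataLoader

import torchensemble


class TwoInputMLP(nn.Module):
    def __init__(self):
        super(TwoInputMLP, self).__init__()
        self.linear1 = nn.Linear(2, 2)
        self.linear2 = nn.Linear(2, 2)

    def forward(self, X_1, X_2):
        # Run both inputs through the same layers and sum the outputs.
        output_1 = self.linear2(self.linear1(X_1.view(X_1.size(0), -1)))
        output_2 = self.linear2(self.linear1(X_2.view(X_2.size(0), -1)))
        return output_1 + output_2


def fit_multiple_input_sketch():
    X = torch.Tensor(np.array(([0.1, 0.1], [0.2, 0.2], [0.3, 0.3], [0.4, 0.4])))
    y = torch.LongTensor(np.array(([0, 0, 1, 1])))
    # Each batch from this loader carries two input tensors and one target.
    loader = DataLoader(TensorDataset(X, X, y), batch_size=2)

    model = torchensemble.VotingClassifier(
        estimator=TwoInputMLP, n_estimators=2, cuda=False
    )
    model.set_optimizer("Adam", lr=1e-3)
    model.fit(loader, epochs=1)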
Example #7
    # Hyper-parameters
    n_estimators = 10
    lr = 1e-3
    weight_decay = 5e-4
    epochs = 50

    # Utils
    batch_size = 512
    records = []
    torch.manual_seed(0)

    # Load data
    train_loader, test_loader = load_data(batch_size)
    print("Finish loading data...\n")

    logger = set_logger("regression_YearPredictionMSD_mlp", use_tb_logger=True)

    # FusionRegressor
    model = FusionRegressor(
        estimator=MLP, n_estimators=n_estimators, cuda=True
    )

    # Set the optimizer
    model.set_optimizer("Adam", lr=lr, weight_decay=weight_decay)

    tic = time.time()
    model.fit(train_loader, epochs=epochs)
    toc = time.time()
    training_time = toc - tic

    tic = time.time()
Example #8
    train_loader = DataLoader(
        datasets.CIFAR10(
            data_dir, train=True, download=True, transform=train_transformer
        ),
        batch_size=batch_size,
        shuffle=True,
    )

    test_loader = DataLoader(
        datasets.CIFAR10(data_dir, train=False, transform=test_transformer),
        batch_size=batch_size,
        shuffle=True,
    )

    logger = set_logger("classification_cifar10_cnn", use_tb_logger=True)

    # FusionClassifier
    model = FusionClassifier(
        estimator=LeNet5, n_estimators=n_estimators, cuda=True
    )

    # Set the optimizer
    model.set_optimizer("Adam", lr=lr, weight_decay=weight_decay)

    # Training
    tic = time.time()
    model.fit(train_loader, epochs=epochs)
    toc = time.time()
    training_time = toc - tic
Example #9
import torch
import pytest
import numpy as np
import torch.nn as nn
from torch.utils.data import TensorDataset, DataLoader

from torchensemble import FastGeometricClassifier as clf
from torchensemble import FastGeometricRegressor as reg
from torchensemble.utils.logging import set_logger

set_logger("pytest_fast_geometric")

# Testing data
X_test = torch.Tensor(np.array(([0.5, 0.5], [0.6, 0.6])))

y_test_clf = torch.LongTensor(np.array(([1, 0])))
y_test_reg = torch.FloatTensor(np.array(([0.5, 0.6])))
y_test_reg = y_test_reg.view(-1, 1)


# Base estimator
class MLP_clf(nn.Module):
    def __init__(self):
        super(MLP_clf, self).__init__()
        self.linear1 = nn.Linear(2, 2)
        self.linear2 = nn.Linear(2, 2)

    def forward(self, X):
        X = X.view(X.size()[0], -1)
        output = self.linear1(X)
        output = self.linear2(output)
        return output
Example #10
# All regressors
all_reg = [
    torchensemble.FusionRegressor,
    torchensemble.VotingRegressor,
    torchensemble.BaggingRegressor,
    torchensemble.GradientBoostingRegressor,
    torchensemble.SnapshotEnsembleRegressor,
    torchensemble.AdversarialTrainingRegressor,
    torchensemble.FastGeometricRegressor,
]


np.random.seed(0)
torch.manual_seed(0)
set_logger("pytest_all_models", use_tb_logger=True)


# Base estimator
class MLP_clf(nn.Module):
    def __init__(self):
        super(MLP_clf, self).__init__()
        self.linear1 = nn.Linear(2, 2)
        self.linear2 = nn.Linear(2, 2)

    def forward(self, X):
        X = X.view(X.size()[0], -1)
        output = self.linear1(X)
        output = self.linear2(output)
        return output
Example #11
        datasets.CIFAR10(data_dir,
                         train=True,
                         download=True,
                         transform=train_transformer),
        batch_size=batch_size,
        shuffle=True,
    )

    test_loader = DataLoader(
        datasets.CIFAR10(data_dir, train=False, transform=test_transformer),
        batch_size=batch_size,
        shuffle=True,
    )

    # Set the Logger
    logger = set_logger("snapshot_ensemble_cifar10_resnet18",
                        use_tb_logger=True)

    # Choose the Ensemble Method
    model = SnapshotEnsembleClassifier(
        estimator=ResNet,
        estimator_args={
            "block": BasicBlock,
            "num_blocks": [2, 2, 2, 2]
        },
        n_estimators=n_estimators,
        cuda=True,
    )

    # Set the Optimizer
    model.set_optimizer("SGD",
                        lr=lr,
Example #12
    train_loader = DataLoader(
        datasets.CIFAR10(
            data_dir, train=True, download=True, transform=train_transformer
        ),
        batch_size=batch_size,
        shuffle=True,
    )

    test_loader = DataLoader(
        datasets.CIFAR10(data_dir, train=False, transform=test_transformer),
        batch_size=batch_size,
        shuffle=True,
    )

    # Set the Logger
    logger = set_logger("FastGeometricClassifier_cifar10_resnet")

    # Choose the Ensemble Method
    model = FastGeometricClassifier(
        estimator=ResNet,
        estimator_args={"block": BasicBlock, "num_blocks": [2, 2, 2, 2]},
        n_estimators=n_estimators,
        cuda=True,
    )

    # Set the Optimizer
    model.set_optimizer(
        "SGD", lr=lr, weight_decay=weight_decay, momentum=momentum
    )

    # Set the Scheduler
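    # The excerpt stops at the scheduler. A hedged sketch of a plausible
    # continuation; the scheduler choice and its arguments are assumptions,
    # not taken from the original script:
    model.set_scheduler("CosineAnnealingLR", T_max=epochs)

    # Train and Evaluate
    model.fit(train_loader, epochs=epochs, test_loader=test_loader)
    acc = model.evaluate(test_loader)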
Example #13
    test_loader = torch.utils.data.DataLoader(
        datasets.MNIST(
            data_dir,
            train=False,
            download=True,
            transform=transforms.Compose([
                transforms.ToTensor(),
                transforms.Normalize((0.1307, ), (0.3081, )),
            ]),
        ),
        batch_size=batch_size,
        shuffle=True,
    )

    logger = set_logger("classification_mnist_tree_ensemble",
                        use_tb_logger=False)

    model = NeuralForestClassifier(
        n_estimators=n_estimators,
        depth=depth,
        lamda=lamda,
        cuda=cuda,
        n_jobs=-1,
    )

    model.set_optimizer("Adam", lr=lr, weight_decay=weight_decay)

    tic = time.time()
    model.fit(train_loader, epochs=epochs, test_loader=test_loader)
    toc = time.time()
    training_time = toc - tic
Example #14
    train_loader = DataLoader(
        datasets.CIFAR10(data_dir,
                         train=True,
                         download=True,
                         transform=train_transformer),
        batch_size=batch_size,
        shuffle=True,
    )

    test_loader = DataLoader(
        datasets.CIFAR10(data_dir, train=False, transform=test_transformer),
        batch_size=batch_size,
        shuffle=True,
    )

    logger = set_logger("classification_cifar10_cnn")

    # FusionClassifier
    model = FusionClassifier(estimator=LeNet5,
                             n_estimators=n_estimators,
                             cuda=True)

    # Set the optimizer
    model.set_optimizer("Adam", lr=lr, weight_decay=weight_decay)

    # Training
    tic = time.time()
    model.fit(train_loader, epochs=epochs)
    toc = time.time()
    training_time = toc - tic
Example #15
        datasets.CIFAR10(data_dir,
                         train=True,
                         download=True,
                         transform=train_transformer),
        batch_size=batch_size,
        shuffle=True,
    )

    test_loader = DataLoader(
        datasets.CIFAR10(data_dir, train=False, transform=test_transformer),
        batch_size=batch_size,
        shuffle=True,
    )

    # Set the Logger
    logger = set_logger("FastGeometricClassifier_cifar10_resnet",
                        use_tb_logger=True)

    # Choose the Ensemble Method
    model = FastGeometricClassifier(
        estimator=ResNet,
        estimator_args={
            "block": BasicBlock,
            "num_blocks": [2, 2, 2, 2]
        },
        n_estimators=n_estimators,
        cuda=True,
    )

    # Set the Optimizer
    model.set_optimizer("SGD",
                        lr=lr,
Example #16
]

all_reg = [
    torchensemble.FusionRegressor,
    torchensemble.VotingRegressor,
    torchensemble.BaggingRegressor,
    torchensemble.GradientBoostingRegressor,
    torchensemble.SnapshotEnsembleRegressor,
    torchensemble.AdversarialTrainingRegressor,
]

# Remove randomness
np.random.seed(0)
torch.manual_seed(0)

set_logger("pytest_all_models")


# Base estimator
class MLP_clf(nn.Module):
    def __init__(self):
        super(MLP_clf, self).__init__()
        self.linear1 = nn.Linear(2, 2)
        self.linear2 = nn.Linear(2, 2)

    def forward(self, X):
        X = X.view(X.size()[0], -1)
        output = self.linear1(X)
        output = self.linear2(output)
        return output
Example #17
import torch
import numpy as np
from torch.utils.data import TensorDataset, DataLoader

from torchensemble.utils import io
from torchensemble.utils.logging import set_logger
from torchensemble import NeuralForestClassifier, NeuralForestRegressor

np.random.seed(0)
torch.manual_seed(0)
set_logger("pytest_neural_tree_ensemble")

# Training data
X_train = torch.Tensor(
    np.array(([0.1, 0.1], [0.2, 0.2], [0.3, 0.3], [0.4, 0.4])))

y_train_clf = torch.LongTensor(np.array(([0, 0, 1, 1])))
y_train_reg = torch.FloatTensor(np.array(([0.1, 0.2, 0.3, 0.4])))
y_train_reg = y_train_reg.view(-1, 1)

# Testing data
numpy_X_test = np.array(([0.5, 0.5], [0.6, 0.6]))
X_test = torch.Tensor(numpy_X_test)

y_test_clf = torch.LongTensor(np.array(([1, 0])))
y_test_reg = torch.FloatTensor(np.array(([0.5, 0.6])))
y_test_reg = y_test_reg.view(-1, 1)


def test_neural_forest_classifier():
    """