Example #1
    def fit(self, X, y=None, **fit_params):
        """Fit the estimator to data.

        This derives the number of object features from the data and then
        delegates to ``skorch.NeuralNet.fit``. See the documentation of that
        method for more details.

        Parameters
        ----------
        X : input data
            May take various forms, such as numpy arrays or torch datasets. See
            the documentation of ``skorch.NeuralNet.fit`` for more details.

        y : target data
            May take the same forms as ``X``. This is optional since the target
            data may already be included in the data structure that is passed
            as ``X``. See the documentation of ``skorch.NeuralNet.fit`` for
            more details.

        **fit_params : dict
            Additional fit parameters. See the documentation of
            ``skorch.NeuralNet.fit`` for more details.
        """
        dataset = self.get_dataset(X, y)
        (_n_objects, self.n_features_) = dataset[0][0].shape
        NeuralNet.fit(self, X=dataset, y=None, **fit_params)
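A hedged usage sketch (FeatureNet and MyModule are hypothetical stand-ins for the subclass and module this method belongs to):

import numpy as np

net = FeatureNet(module=MyModule, max_epochs=5)   # hypothetical names
X = np.random.rand(32, 7, 4).astype(np.float32)   # (n_samples, n_objects, n_features)
net.fit(X)                                        # y may be omitted
assert net.n_features_ == 4                       # derived from dataset[0][0].shape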
Example #2
def test_loss_lowers_on_each_epoch():
    torch.manual_seed(SEED)
    num_classes = 5
    num_features = 5
    size = 200
    y = torch.randint(0, num_classes, (size, 1), dtype=torch.long)
    X = torch.rand((size, num_features))

    predictor = nn.Sequential(nn.Linear(num_features, num_features), nn.ReLU(),
                              nn.Linear(num_features, 1))

    skorch_model = NeuralNet(
        module=OrdinalLogisticModel,
        module__predictor=predictor,
        module__num_classes=num_classes,
        criterion=CumulativeLinkLoss,
        max_epochs=10,
        optimizer=torch.optim.Adam,
        lr=0.01,
        train_split=None,
        callbacks=[
            ('ascension', AscensionCallback()),
        ],
    )

    skorch_model.fit(X, y)
    losses = [epoch['train_loss'] for epoch in skorch_model.history]
    for idx, loss in enumerate(losses[:-1]):
        # Next epoch's loss is less than this epoch's loss.
        assert losses[idx + 1] < loss, 'Loss lowers on each epoch'
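Side note: skorch's History also supports column slicing, so the list comprehension above has a one-line equivalent:

losses = skorch_model.history[:, 'train_loss']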
Example #3
def train(data_folder: str, out_model: str):
    out_model = Path(out_model)
    out_model.mkdir()

    data_paths = list(Path(data_folder).rglob("*.npy"))
    train_paths, valid_paths = train_test_split(data_paths, train_size=0.7)

    train_dataset = LibriSpeechDataset(
        train_paths,
        Path(data_folder).parent / "SPEAKERS.TXT",
        Compose([ExtractStft(),
                 RandomCrop(constants.STFT_CROP_WIDTH)]))

    valid_dataset = LibriSpeechDataset(
        valid_paths,
        Path(data_folder).parent / "SPEAKERS.TXT",
        Compose([ExtractStft(),
                 RandomCrop(constants.STFT_CROP_WIDTH)]))

    net = NeuralNet(Classifier,
                    module__n_classes=constants.NUMBER_OF_CLASSES,
                    criterion=nn.CrossEntropyLoss,
                    batch_size=8,
                    max_epochs=100,
                    optimizer=optim.Adam,
                    lr=0.001,
                    iterator_train__shuffle=True,
                    iterator_train__num_workers=2,
                    iterator_valid__shuffle=False,
                    iterator_valid__num_workers=2,
                    train_split=predefined_split(valid_dataset),
                    device="cuda",
                    callbacks=[
                        Checkpoint(
                            f_params=(out_model / "params.pt").as_posix(),
                            f_optimizer=(out_model / "optim.pt").as_posix(),
                            f_history=(out_model / "history.pt").as_posix()),
                        ProgressBar(postfix_keys=["train_loss", "train_acc"]),
                        EarlyStopping(),
                        EpochScoring(acc,
                                     name="val_acc",
                                     lower_is_better=False,
                                     on_train=False),
                        EpochScoring(acc,
                                     name="train_acc",
                                     lower_is_better=False,
                                     on_train=True),
                        Tensorboard((out_model / "train").as_posix(),
                                    metrics={"acc": acc_as_metric},
                                    is_training=True),
                        Tensorboard((out_model / "valid").as_posix(),
                                    metrics={"acc": acc_as_metric},
                                    is_training=False),
                    ])

    net.fit(train_dataset)
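For reference, predefined_split(valid_dataset) pins a fixed validation set instead of letting skorch carve one out of the training data. A minimal self-contained sketch with toy data:

import numpy as np
import torch.nn as nn
from skorch import NeuralNet
from skorch.dataset import Dataset
from skorch.helper import predefined_split

X_train = np.random.rand(200, 10).astype(np.float32)
y_train = np.random.rand(200, 1).astype(np.float32)
X_valid = np.random.rand(50, 10).astype(np.float32)
y_valid = np.random.rand(50, 1).astype(np.float32)

net = NeuralNet(
    module=nn.Linear,          # toy module, just for the sketch
    module__in_features=10,
    module__out_features=1,
    criterion=nn.MSELoss,
    max_epochs=2,
    train_split=predefined_split(Dataset(X_valid, y_valid)),
)
net.fit(X_train, y_train)      # validates on the fixed set after every epoch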
Example #5
    def __init__(self, data: PreprocessedData) -> None:
        embedding_dim = data.X_train[0].shape[-1]
        num_classes = data.y_train.shape[-1]

        multi_class = num_classes > 1
        multi_label = np.sum(data.y_train, axis=-1).max() > 1

        metric = f1_micro_score if multi_class or multi_label else metrics.accuracy_score
        criterion = nn.BCEWithLogitsLoss if multi_label else nn.CrossEntropyLoss
        # Note: the lambda must be parenthesized, otherwise the conditional
        # becomes part of the lambda body and "auto" is never selected.
        predict_nonlinearity = ((lambda x: (torch.sigmoid(x) >= 0.5).float())
                                if multi_label else "auto")  # noqa: E731
        estimator = NeuralNet(
            module=MLP,
            criterion=criterion,
            optimizer=torch.optim.Adam,
            max_epochs=10,
            train_split=None,
            predict_nonlinearity=predict_nonlinearity,
            module__embedding_dim=embedding_dim,
            module__num_classes=num_classes,
        )

        super().__init__(
            estimator=estimator,
            data=data,
            metric=metric,
            param_grid=MLPClassifier.param_grid,
        )
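Note on predict_nonlinearity: skorch applies it to the module output inside predict/predict_proba, and "auto" resolves to softmax for CrossEntropyLoss and sigmoid for BCEWithLogitsLoss. Under the custom lambda above, predict therefore returns hard multi-label decisions, roughly:

hard = estimator.predict(X_test)   # X_test assumed from context; entries are 0.0/1.0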
Example #6
    def hyperparameter_tunning(self):
        net = NeuralNet(network.SiameseNetV2,
                        max_epochs=2,
                        batch_size=128,
                        criterion=BCELoss,
                        optimizer=Adam,
                        iterator_train__num_workers=4,
                        iterator_train__pin_memory=False,
                        iterator_valid__num_workers=4,
                        verbose=2,
                        device='cuda',
                        iterator_train__shuffle=True,
                        callbacks=[PrintLog(), ProgressBar()])

        net.set_params(train_split=False)
        params = {'lr': [0.01, 0.001], 'module__num_dims': [128, 256]}
        gs = GridSearchCV(net, params, refit=False, cv=3, scoring='f1')
        X_sl = SliceDataset(self.train_set, idx=0)
        Y_sl = SliceDataset(self.train_set, idx=1)
        gs.fit(X_sl, Y_sl)
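Two hedged caveats, since the surrounding classes are not shown: train_split=False disables skorch's internal split so GridSearchCV alone controls the folds, and the 'f1' scorer expects class labels while plain NeuralNet.predict returns raw module output. One way to bridge that gap, assuming the module emits probabilities:

from sklearn.metrics import f1_score, make_scorer

def f1_from_probs(y_true, y_prob):
    # threshold raw probabilities into class labels for the F1 metric
    return f1_score(y_true, (y_prob.ravel() > 0.5).astype(int))

gs = GridSearchCV(net, params, refit=False, cv=3,
                  scoring=make_scorer(f1_from_probs))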
Example #7
def calorie_model(val_ds):

    lrscheduler = LRScheduler(policy='StepLR', step_size=7, gamma=0.1)
    checkpoint = Checkpoint(f_params='models/calorie_net.pt',
                            monitor='valid_acc_best')

    return NeuralNet(CalorieNet,
                     criterion=nn.MSELoss,  # pass the criterion class; skorch instantiates it
                     lr=0.001,
                     batch_size=64,
                     max_epochs=25,
                     optimizer=optim.SGD,
                     optimizer__momentum=0.9,
                     iterator_train__shuffle=True,
                     iterator_train__num_workers=4,
                     iterator_valid__shuffle=True,
                     iterator_valid__num_workers=4,
                     train_split=predefined_split(val_ds),
                     callbacks=[lrscheduler, checkpoint],
                     device='cuda')
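One caveat: 'valid_acc_best' exists only if some callback records 'valid_acc' every epoch, and accuracy is an odd monitor for an MSE regression. A safer variant using the 'valid_loss_best' flag skorch records automatically:

checkpoint = Checkpoint(f_params='models/calorie_net.pt',
                        monitor='valid_loss_best')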
Example #8
from skorch import NeuralNet
from skorch.callbacks import EarlyStopping
from torch.optim import SGD
import numpy as np

import sys
sys.path.append('..')
from helpers import load_data_in_chunks, save_model
from model import Net
from RelativeEntropyLoss import RelativeEntropyLoss

(Xs, Ys) = load_data_in_chunks('survival', 'train', chunk_size=5)
Xs = Xs.astype(np.float32)
Ys = Ys.astype(np.float32)

regr = NeuralNet(Net,
                 max_epochs=10000000000,  # effectively unbounded; EarlyStopping ends the run
                 batch_size=100,
                 iterator_train__shuffle=True,
                 criterion=RelativeEntropyLoss,
                 optimizer=SGD,
                 optimizer__lr=1e-5,
                 optimizer__momentum=0.9,
                 optimizer__nesterov=True,
                 optimizer__dampening=0,
                 verbose=5,
                 callbacks=[('early_stop', EarlyStopping())])

regr.fit(Xs, Ys)

save_model(regr, 'conv-survival')
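The astronomically large max_epochs leans entirely on EarlyStopping. For reference, the same callback with skorch's defaults spelled out:

from skorch.callbacks import EarlyStopping

early_stop = EarlyStopping(monitor='valid_loss', patience=5,
                           threshold=1e-4, threshold_mode='rel')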
Example #9
def acc(net: NeuralNet, ds: Dataset, y: torch.Tensor) -> float:
    predict_values = net.predict(ds)
    return acc_as_metric(predict_values, y)
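This function follows the (net, X, y) signature skorch expects of an EpochScoring callable, so it can be wired in directly (mirroring Example #3):

from skorch.callbacks import EpochScoring

val_acc = EpochScoring(acc, name='val_acc', lower_is_better=False, on_train=False)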
Example #10
    def get_loss(self, y_pred, y_true, *args, **kwargs):
        if not isinstance(self.criterion_, torch.nn.NLLLoss):
            raise RuntimeError('Use NLLLoss')
        return NeuralNet.get_loss(self, y_pred, y_true, *args, **kwargs)
Example #11
lbl17 = OR.getY()
x17 = OR.getAbstract()
x17_t = OR.getTitle()

x = np.concatenate((x18_t, x17_t))
lbl = np.concatenate((lbl18, lbl17))
x_encoded = ohe().fit_transform(x)  # encoded with the entire dataset


class torch_cv(torch.utils.data.Dataset):
    def __init__(self, x_, lbl_, dtype=(torch.LongTensor, torch.LongTensor)):
        x_ = util.padding(x_, 256)

        # torch.autograd.Variable is deprecated; plain tensors work directly.
        self.X = dtype[0](x_)
        self.Y = dtype[1](lbl_)

    def __getitem__(self, idx):
        return self.X[idx], self.Y[idx]

    def __len__(self):
        return len(self.X)

train_ds = torch_cv(x_encoded, lbl)
mlp = models.MLP(100, 2)  # renamed from `nn`, which shadowed torch.nn above
# Note: skorch conventionally takes an uninstantiated criterion class here.
net = NeuralNet(mlp, criterion=mlp.loss_function())
net.fit(x_encoded, lbl)
Example #12
net = NeuralNet(
    # Module
    module=BaselineNN,
    # Module settings
    module__hidden_dim=best.hidden_units,
    module__p_dropout=best.dropout,
    module__use_batch_norm=best.use_batch_norm,
    module__weights=FTEMB,
    module__num_classes=len(category_map),
    # Epochs & learning rate
    max_epochs=best.iterations,
    lr=best.learning_rate,
    # Optimizer
    optimizer=best.optimizer,
    # Loss function
    criterion=nn.CrossEntropyLoss,
    criterion__weight=cw,
    # Shuffle training data on each epoch
    iterator_train__shuffle=True,
    # Batch size
    batch_size=best.batch_size,
    train_split=CVSplit(cv=5),
    # Device
    device=device,
    # Callbacks
    callbacks=[
        skorch.callbacks.EpochScoring(f1_score,
                                      use_caching=True,
                                      name="valid_f1"),
        skorch.callbacks.EpochScoring(precision_score,
                                      use_caching=True,
                                      name="valid_precision"),
        skorch.callbacks.EpochScoring(recall_score,
                                      use_caching=True,
                                      name="valid_recall"),
        skorch.callbacks.EpochScoring(accuracy_score,
                                      use_caching=True,
                                      name="valid_accuracy")
    ])
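A caveat on the EpochScoring callbacks above: skorch calls a scoring callable as scoring(net, X, y), while the sklearn metrics of the same names take (y_true, y_pred). The names here presumably refer to wrappers defined elsewhere in the project, along these lines (the averaging choice is an assumption):

from sklearn import metrics

def f1_score(net, X, y):
    # (net, X, y) signature expected by EpochScoring
    return metrics.f1_score(y, net.predict(X), average='micro')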
Example #13
class CovClassifier(BaseEstimator):
    def __init__(
        self,
        weight_decay=1e-4,
        lr=0.01,
        seed=42,
        device="cpu",
        tuning=False,
        momentum=0.9,
        opt="sgd",
    ):
        self.seed = seed
        self.tuning = tuning
        self.weight_decay = weight_decay
        self.lr = lr
        self.device = device
        self.momentum = momentum
        self.opt = opt

        if opt == "adagrad":
            kwargs = dict(
                optimizer=optim.Adagrad,
                optimizer__weight_decay=self.weight_decay,
                optimizer__lr=lr,
            )
        elif opt == "asgd":
            kwargs = dict(
                optimizer=optim.ASGD,
                optimizer__weight_decay=self.weight_decay,
                optimizer__lr=lr,
                optimizer__t0=1e3,
            )
        else:
            kwargs = dict(
                optimizer=optim.SGD,
                optimizer__weight_decay=self.weight_decay,
                optimizer__lr=lr,
                optimizer__momentum=self.momentum,
                optimizer__nesterov=True,
            )
        self.model = NeuralNet(
            module=Linear,
            lr=lr,
            criterion=nn.CrossEntropyLoss,
            warm_start=True,
            max_epochs=1,
            batch_size=-1,
            train_split=None,
            device=device,
            **kwargs,
        )
        super().__init__()

    def _set_seed(self):
        seed = self.seed
        assert seed is not None, "Specify seed, don't leave seed=None"
        s = str(seed) * 10
        sha = sha256(bytes(s, "ascii"))
        randint = int("0x" + sha.hexdigest(), 0)
        capped = randint % (2**32 - 1)

        torch.manual_seed(capped)
        random.seed(capped)
        return np.random.RandomState(capped)

    def initialize(self):
        self.rng_ = self._set_seed()
        if hasattr(self.model, "initialized_") and self.model.initialized_:
            raise ValueError("Reinitializing!")
        self.model.initialize()
        #         self.model_ = Net()
        #         self.optimizer_ = optim.AdaGrad(weight_decay=self.weight_decay)
        assert self.model.initialized_
        self.initialized_ = True

        self.history_ = []
        self.models_ = []
        self.meta_ = {
            "model_updates": 0,
            "num_examples": 0,
            "len_dataset": int(200e3),
            **self.get_params(),
        }
        # [1]:https://www.kaggle.com/c/forest-cover-type-prediction/data
        if self.tuning:
            self.meta_["len_dataset"] *= 0.8
        return True
Example #14
import numpy as np
import torch
from torch.utils.data import DataLoader
from skorch import NeuralNet
from skorch.dataset import CVSplit
from sklearn.model_selection import GridSearchCV

from assignment_two.src.models.neural_network import GenericNN
from assignment_two.src.utils.data_loaders import FrankeDataSet

franke_data_train = FrankeDataSet(num_points=2000)
train = DataLoader(franke_data_train)
X, y = franke_data_train.xy, franke_data_train.z_noise
print(X.shape, y.shape)
net = NeuralNet(
    GenericNN,
    module__num_input_features=2,
    module__num_output_features=1,
    module__num_hidden_layers=3,
    module__num_hidden_features=20,
    module__activation=torch.nn.functional.relu,
    criterion=torch.nn.MSELoss,
    optimizer=torch.optim.SGD,
    max_epochs=100,
    optimizer__nesterov=True,
    optimizer__momentum=0.9,
    # batch_size=20,
    train_split=CVSplit(cv=5),
)

params = {
    'lr': np.logspace(-5, 0, 20, endpoint=False),
}

reg = GridSearchCV(estimator=net, param_grid=params, scoring='neg_mean_squared_error', n_jobs=12)
reg.fit(X, y)
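A small follow-up, not part of the original script, to inspect the winner:

print(reg.best_params_)   # best learning rate, e.g. {'lr': ...}
print(-reg.best_score_)   # its mean CV MSE (the scorer is negated)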
Example #15
def run():
    parser = get_arg_parser()
    cmd_args = parser.parse_args()

    if cmd_args.gpu is not None:
        os.environ['CUDA_VISIBLE_DEVICES'] = str(cmd_args.gpu)
        gpunum = os.getenv('CUDA_VISIBLE_DEVICES')
        logging.info("GPU has been set to {}".format(gpunum))

    logging.info("Model used for the regression network: {}"
                 .format(cmd_args.model_name))

    # 1. Dataset retrieval
    # --------------------

    tab_printer(constants.Dataset)
    dataset = Dataset(nrows=constants.Dataset.nrows,
                      augment_labels=constants.Dataset.augment_labels,
                      top_n=constants.Dataset.top_n)

    logging.info("Going to create vocabulary and fit a preprocessing pipeline"
                 "using {} samples. Settings will be listed below"
                 .format(len(dataset.X_train)))

    # 2. Preprocessing
    # -----------------

    tab_printer(constants.NLP)
    preprocessor = Preprocessing(dataset.X_train)

    # Preprocess documents
    X_train = preprocessor.transform_documents(dataset.X_train)
    X_test = preprocessor.transform_documents(dataset.X_test)

    # 3. Word embeddings with word2vec
    # --------------------------------

    # Train word2vec embeddings if train_word2vec option is selected
    if cmd_args.train_word2vec: utils.embeddings.main()
    weights = get_embedding_tensor(preprocessor)

    # 4. Node embeddings with AttentionWalk
    # -------------------------------------
    args = _generate_deepwalk_parameters(dataset.y_train_graph)
    if cmd_args.train_attentionwalk: train_attention_walk(args)

    graph_embeddings = pd.read_csv(args.embedding_path).iloc[:, 1:].values

    # Get document representations using node embeddings
    y_embedded = _get_label_embeddings(dataset.y_train, graph_embeddings)
    y_test_embedded = _get_label_embeddings(dataset.y_test, graph_embeddings)

    # 5. Regressor Training
    # ---------------------

    device = 'cuda:' + str(os.getenv("CUDA_VISIBLE_DEVICES")) \
        if torch.cuda.is_available() else 'cpu'

    regressor_nn = NeuralNet(
        get_network_class(cmd_args.model_name),
        max_epochs=constants.NeuralNetworkTraining.epochs,
        lr=constants.NeuralNetworkTraining.learning_rate,
        batch_size=constants.NeuralNetworkTraining.batch_size,
        optimizer=torch.optim.Adam,
        criterion=torch.nn.MSELoss,

        module__output_dim=args.dimensions,
        module__embedding=weights,
        module__embedding_dim=constants.NLP.embedding_size,

        device=device,
        train_split=None,
    )

    # Train the regressor neural network
    regressor_nn.fit(X_train, y_embedded.astype(np.float32))

    # 6. Train Multi-label KNN algorithm
    # ----------------------------------

    tab_printer(constants.MLKNN)

    # Train multi-label KNN to turn label embeddings into label predictions
    classifier = MLkNN(k=constants.MLKNN.k, s=constants.MLKNN.s)
    classifier.fit(y_embedded, dataset.y_train)

    # 7. Evaluation
    # -------------

    # Label prediction with documents
    y_test_pred = regressor_nn.predict(X_test)
    preds = classifier.predict(y_test_pred)
    preds_raw = classifier.predict_proba(y_test_pred)

    # Label prediction with label embeddings
    preds_w_labels = classifier.predict(y_test_embedded)
    preds_w_labels_raw = classifier.predict_proba(y_test_embedded)

    # Log evaluation result with label embeddings
    eval_metrics_w_labels = evaluation \
        .all_metrics(preds_w_labels.toarray(),
                     dataset.y_test,
                     yhat_raw=preds_w_labels_raw.toarray())

    logging.info(str(eval_metrics_w_labels))

    # Log evaluation result with documents
    report_evaluation(preds.toarray(),
                      dataset.y_test,
                      yhat_raw=preds_raw.toarray())
Example #16
def baselineNN_search(parameters):
    """Set up, run and evaluate a baseline neural network"""
    # CV with skorch
    net = NeuralNet(
        # Module
        module=BaselineNN,
        # Module settings
        module__hidden_dim=parameters["hidden_units"],
        module__p_dropout=parameters["dropout"],
        module__use_batch_norm=parameters["use_batch_norm"],
        module__weights=FTEMB,  # These are word embeddings
        module__num_classes=len(category_map),
        # Epochs & learning rate
        max_epochs=25,
        lr=parameters["learning_rate"],
        # Optimizer
        optimizer=optim.Adam
        if parameters["optimizer"] == "Adam" else optim.RMSprop,
        # Loss function
        criterion=nn.CrossEntropyLoss,
        criterion__weight=cw,
        # Shuffle training data on each epoch
        iterator_train__shuffle=True,
        # Batch size
        batch_size=128,
        train_split=CVSplit(cv=5),
        # Device
        device=device,
        # Callbacks
        callbacks=[
            skorch.callbacks.EpochScoring(f1_score,
                                          use_caching=True,
                                          name="valid_f1"),
            skorch.callbacks.EpochScoring(precision_score,
                                          use_caching=True,
                                          name="valid_precision"),
            skorch.callbacks.EpochScoring(recall_score,
                                          use_caching=True,
                                          name="valid_recall"),
            skorch.callbacks.EpochScoring(accuracy_score,
                                          use_caching=True,
                                          name="valid_accuracy")
        ])
    # Set verbosity (1 prints the per-epoch table)
    net.verbose = 1
    # Fit
    net = net.fit(WD)
    # Get train / validation history
    train_loss = net.history[:, "train_loss"]
    val_loss = net.history[:, "valid_loss"]
    val_accuracy = net.history[:, "valid_accuracy"]
    val_f1 = net.history[:, "valid_f1"]
    val_precision = net.history[:, "valid_precision"]
    val_recall = net.history[:, "valid_recall"]
    # Min loss
    which_min = np.argmin(val_loss)
    # Write to file
    with open(args.out_file, 'a') as of_connection:
        writer = csv.writer(of_connection)
        writer.writerow([
            parameters, which_min,
            np.round(train_loss[which_min], 4),
            np.round(val_accuracy[which_min], 4),
            np.round(val_loss[which_min], 4),
            np.round(val_f1[which_min], 4),
            np.round(val_precision[which_min], 4),
            np.round(val_recall[which_min], 4)
        ])
    # Return cross-validation loss
    return ({
        "loss": val_loss[which_min],
        "parameters": parameters,
        "iteration": which_min,
        'status': STATUS_OK
    })
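Since the function returns a hyperopt-style dict with STATUS_OK, it is presumably driven by fmin. A hedged sketch of that driver; the search-space names are illustrative:

from hyperopt import fmin, tpe, hp, Trials

space = {
    'hidden_units': hp.choice('hidden_units', [64, 128, 256]),
    'dropout': hp.uniform('dropout', 0.1, 0.5),
    'use_batch_norm': hp.choice('use_batch_norm', [True, False]),
    'learning_rate': hp.loguniform('learning_rate', -9, -3),
    'optimizer': hp.choice('optimizer', ['Adam', 'RMSprop']),
}
best = fmin(baselineNN_search, space, algo=tpe.suggest,
            max_evals=50, trials=Trials())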
Example #17
# Alternative module choices from earlier experiments (the last assignment wins):
# model = UNet3d_assembled.UNet3d(channels)
# model = net2
# model = mr.BN20channels()
model = UNet2d_assembled.UNet2D(channels)
# print(type(model))

model = NeuralNet(
    module=model,
    criterion=nn.MSELoss,
    max_epochs=EPOCHS,
    batch_size=BS,
    iterator_train__shuffle=True,
    lr=LR,
    optimizer__weight_decay=LAMBDA,
    device=DEVICE,
    callbacks=[
        cp_best_model,
        cp_best_train,
        progressbar,
        # cyclicLR,
        epoch_MAE_train,
    ])

#######################################################################
# TRAIN MODEL
#######################################################################
print("############################################################")
print("\n\t\tTRAINING MODEL\n")
print("############################################################\n")
Example #18
def run():
    parser = get_arg_parser(embedding_classifier=False)
    cmd_args = parser.parse_args()

    if cmd_args.gpu is not None:
        os.environ['CUDA_VISIBLE_DEVICES'] = str(cmd_args.gpu)
        gpunum = os.getenv('CUDA_VISIBLE_DEVICES')
        logging.info("GPU has been set to {}".format(gpunum))

    logging.info("Model used for the classification network: {}".format(
        cmd_args.model_name))

    # 1. Dataset retrieval
    # --------------------

    tab_printer(constants.Dataset)
    dataset = Dataset(nrows=constants.Dataset.nrows,
                      augment_labels=constants.Dataset.augment_labels,
                      top_n=constants.Dataset.top_n)

    logging.info("Going to create vocabulary and fit a preprocessing pipeline"
                 "using {} samples. Settings will be listed below".format(
                     len(dataset.X_train)))

    # 2. Preprocessing
    # -----------------

    tab_printer(constants.NLP)
    preprocessor = Preprocessing(dataset.X_train)

    # Preprocess documents
    X_train = preprocessor.transform_documents(dataset.X_train)
    X_test = preprocessor.transform_documents(dataset.X_test)

    # 3. Word embeddings with word2vec
    # --------------------------------

    # Train word2vec embeddings if train_word2vec option
    # is selected
    if cmd_args.train_word2vec: utils.embeddings.main()
    weights = get_embedding_tensor(preprocessor)

    logging.info("Word embeddings are loaded.")

    # 4. Label Network Optim
    # -----------------------

    device = 'cuda:' + str(os.getenv("CUDA_VISIBLE_DEVICES")) \
        if torch.cuda.is_available() else 'cpu'
    logging.info("Going to run on device: {}".format(device))

    args = _generate_deepwalk_parameters(dataset.y_train_graph)
    label_embeddings = np.array(
        pd.read_csv(args.embedding_path).iloc[:, 1:].values)
    label_embeddings_weights = torch.FloatTensor(label_embeddings)

    label_network = NeuralNet(
        CAML,
        max_epochs=50,
        lr=constants.NeuralNetworkTraining.learning_rate,
        batch_size=constants.NeuralNetworkTraining.batch_size,
        optimizer=torch.optim.Adam,
        criterion=torch.nn.BCEWithLogitsLoss,
        module__output_dim=dataset.y_train.shape[1],
        module__embedding=label_embeddings_weights,
        module__embedding_dim=args.dimensions,
        module__kernel_size=1,
        device=device,
        train_split=skorch.dataset.CVSplit(stratified=False),
    )

    label_network.fit(dataset.y_train, dataset.y_train.astype(np.float32))

    # 5. Evaluation
    # -------------

    yhat_test_raw_logits = label_network.predict_proba(dataset.y_test)
    yhat_test_raw = torch.sigmoid(torch.Tensor(yhat_test_raw_logits)).numpy()
    yhat_test = np.array(yhat_test_raw >=
                         constants.NeuralNetworkTraining.threshold) \
        .astype(np.int64)

    report_evaluation(yhat_test, dataset.y_test, yhat_raw=yhat_test_raw)
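Version note, hedged since no skorch version is pinned here: from around skorch 0.9 on, NeuralNet.predict_proba applies predict_nonlinearity, which under "auto" is a sigmoid when the criterion is BCEWithLogitsLoss, making the manual sigmoid above redundant:

# equivalent on newer skorch versions
yhat_test_raw = label_network.predict_proba(dataset.y_test)
yhat_test = (yhat_test_raw >= constants.NeuralNetworkTraining.threshold).astype(np.int64)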
Example #19
from skorch import NeuralNet
from skorch.callbacks import EarlyStopping
from torch.nn import MSELoss
from torch.optim import SGD
import numpy as np

import sys
sys.path.append('..')
from helpers import load_data_in_chunks, save_model
from model import Net
from CustomLoss import CustomLoss

(Xs, Ys) = load_data_in_chunks('train', chunk_size=5)
Xs = Xs.astype(np.float32)
Ys = Ys.astype(np.float32)

regr = NeuralNet(Net,
                 max_epochs=10000000000,  # effectively unbounded; EarlyStopping ends the run
                 batch_size=100,
                 iterator_train__shuffle=True,
                 criterion=MSELoss,
                 optimizer=SGD,
                 optimizer__lr=1e-5,
                 optimizer__momentum=0.95,
                 verbose=5,
                 callbacks=[('early_stop', EarlyStopping())])
regr.fit(Xs, Ys / 5000)

save_model(regr, 'lstm-mse')
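Because the targets were scaled by 1/5000 before fitting, predictions come back in the scaled space; a small follow-up to invert it:

y_pred = regr.predict(Xs) * 5000   # undo the target scaling used in fit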
Example #20
        config = {
            f"module__{name}": value
            for name, value in model_config.items()
        }

    with open(options.configdir / "train.json") as f:
        config.update(json.load(f))

    net = NeuralNet(module=Model,
                    module__input_dim=dataset.input_dim,
                    module__output_dim=dataset.output_dim,
                    criterion=nn.BCEWithLogitsLoss,
                    iterator_train__collate_fn=dataset.data_collator,
                    iterator_valid__collate_fn=dataset.data_collator,
                    callbacks=[
                        EpochScoring(scoring=metrics.make_scorer(error_rate),
                                     lower_is_better=True),
                        EpochScoring(scoring=metrics.make_scorer(accuracy),
                                     lower_is_better=False),
                        EarlyStopping(monitor="valid_loss", patience=5),
                        LRScheduler(policy="ReduceLROnPlateau", patience=3)
                    ],
                    device=options.device,
                    **config)

    net.fit(dataset)
    logits = net.forward(dataset)
    probits = torch.sigmoid(logits)
    preds = (probits > .5).long().numpy()
    labels = np.stack(dataset.labels)
    correct = (preds == labels)
    positive = (labels == 1)
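The snippet ends mid-computation; a hedged sketch of the metrics it appears to be building toward, using only the arrays defined above:

accuracy = correct.mean()            # overall fraction of correct label decisions
recall = correct[positive].mean()    # accuracy restricted to positive labels
print(f'accuracy={accuracy:.3f}  recall={recall:.3f}')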