# Example #1
# 0
def train_nn(
    dataset: str, batch_size: int, depth: int, epochs: int
) -> Tuple[CNN, Tuple[np.ndarray, np.ndarray], Tuple[np.ndarray, np.ndarray]]:
    """Train a CNN classifier on a time-series dataset and log to Comet.

    Args:
        dataset: Name of the dataset passed to ``fetch_dataset`` (also used
            as a Comet experiment tag).
        batch_size: Mini-batch size for both the train and test loaders.
        depth: Depth passed to ``CNN`` (also logged as a tag).
        epochs: Number of training epochs.

    Returns:
        A tuple of ``(model, (x_train, x_test), (y_train, y_test))`` where
        the model is returned in eval mode and the inputs are standardized,
        transposed to ``(batch, channels, length)`` and cast to float32.
    """
    experiment = Experiment(project_name="cphap", auto_output_logging=False)
    experiment.add_tag(dataset)
    experiment.add_tag("NN-depth-{}".format(depth))

    (x_train, y_train), (x_test, y_test) = fetch_dataset(dataset)

    # Standardize each series; the scaler is fit on the training split only,
    # so the test split is transformed without leaking its statistics.
    scaler = TimeSeriesScalerMeanVariance()
    x_train: np.ndarray = scaler.fit_transform(x_train)
    x_test: np.ndarray = scaler.transform(x_test)

    # tslearn produces (n, length, channels); Conv1d-style models expect
    # (n, channels, length) — hence the (0, 2, 1) transpose.
    x_train = x_train.transpose((0, 2, 1)).astype(np.float32)
    x_test = x_test.transpose((0, 2, 1)).astype(np.float32)

    n_features = x_train.shape[1]
    n_targets = len(np.unique(y_train))

    train_ds = get_dataset(x_train, y_train)
    test_ds = get_dataset(x_test, y_test)

    train_loader = DataLoader(train_ds, batch_size=batch_size, shuffle=True)
    test_loader = DataLoader(test_ds, batch_size=batch_size, shuffle=False)

    model = CNN(n_features, 32, n_targets, depth=depth)
    optimizer = optim.Adam(model.parameters())
    criterion = nn.CrossEntropyLoss()

    runner = ClassificationRunner(model, optimizer, criterion, experiment)
    runner.add_loader("train", train_loader)
    runner.add_loader("test", test_loader)
    runner.train_config(epochs=epochs)
    runner.run()
    # NOTE(review): "quite" reads like a typo for "quit", but it may be the
    # runner library's actual method name — confirm against ClassificationRunner.
    runner.quite()

    return runner.model.eval(), (x_train, x_test), (y_train, y_test)
def tsleanr_scaler(data):
    """Standardize *data* with tslearn's mean/variance scaler.

    ``data.values`` is converted to a nested list, a
    ``TimeSeriesScalerMeanVariance`` is fit on it, and the transformed
    array is returned.
    """
    series = data.values.tolist()
    return TimeSeriesScalerMeanVariance().fit(series).transform(series)