Example no. 1
    # Excerpt of an agent's init_parameters method. Assumes numpy (as np),
    # DataStreamMET4FOF, bae_set_seed and Liveline_DataStream (defined in
    # Example no. 4) are imported at module level.
    def init_parameters(self,
                        random_state=123,
                        input_stage=1,
                        target_stage=1,
                        train_size=0.5,
                        simulate_batch_size=200):
        self.input_stage = input_stage
        self.target_stage = target_stage
        self.train_size = train_size
        self.random_state = random_state
        bae_set_seed(random_state)  # fix RNG seeds for reproducibility
        # build the multi-stage data stream and unpack the input splits
        liveline_datastream = Liveline_DataStream(output_stage=target_stage,
                                                  input_stage=input_stage,
                                                  train_size=train_size)
        self.datastream = liveline_datastream
        self.x_train = liveline_datastream._quantities["train"]
        self.x_test = liveline_datastream._quantities["test"]
        self.x_ood = liveline_datastream._quantities["ood"]  # out-of-distribution split

        self.y_train = liveline_datastream._target["train"]
        self.y_test = liveline_datastream._target["test"]
        self.y_ood = liveline_datastream._target["ood"]

        # for simulation: stream the test and out-of-distribution data as one source
        self.datastream_simulate = DataStreamMET4FOF()
        self.datastream_simulate.set_data_source(
            quantities=np.concatenate((self.x_test, self.x_ood)))
        self.simulate_batch_size = simulate_batch_size
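
# Usage sketch (not from the original source). ExampleAgent is a hypothetical
# agent class containing the init_parameters method above; next_sample is
# assumed to follow agentMET4FOF's DataStreamMET4FOF API, whose exact return
# format may differ between versions.
agent = ExampleAgent()
agent.init_parameters(random_state=123, simulate_batch_size=200)
batch = agent.datastream_simulate.next_sample(agent.simulate_batch_size)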
Example no. 2
import torch
import pandas as pd

from baetorch.baetorch.plotting import *
from baetorch.baetorch.util.seed import bae_set_seed
from baetorch.baetorch.util.misc import save_csv_pd
from _train_models_images import train_model_images

# set seed for reproducibility
bae_set_seed(2020)
use_cuda = torch.cuda.is_available()

print("USE CUDA:" + str(use_cuda))

#----STATIC GLOBAL PARAMETERS---
#available choices -- static
decoder_sigma_choices = ["infer", "dense"]
latent_dim_multiplier_choices = [0.2, 0.5, 1, 2, 5]
capacity_multiplier_choices = [0.2, 0.5, 1, 2, 5]

# set likelihood and model hyperparameters
latent_dim_multiplier = latent_dim_multiplier_choices[
    2]  # for easy scaling of latent dims & capacity
capacity_multiplier = capacity_multiplier_choices[2]
decoder_sigma_choice = decoder_sigma_choices[
    1]  # aleatoric uncertainty, unused in these experiment runs
num_epoch_sigma = 0  # aleatoric uncertainty, unused

# Change here:
model_types_choices = ["vanilla", "ensemble", "mcdropout", "vi", "vae"]
likelihood_choices = ["bernoulli", "cbernoulli", "gaussian_none"]
train_set_choices = ["FashionMNIST", "MNIST", "SVHN", "CIFAR"]
num_epoch_mu = 1  # number of training epochs
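
# Sketch (not in the original script): the choice lists above define a grid of
# experiment runs. Assuming train_model_images accepts them as keyword
# arguments (its real signature is not shown in this excerpt), a sweep could
# look like this:
from itertools import product

for model_type, likelihood, train_set in product(model_types_choices,
                                                 likelihood_choices,
                                                 train_set_choices):
    print("Run:", model_type, likelihood, train_set)
    # train_model_images(model_type=model_type, likelihood=likelihood,
    #                    train_set=train_set, num_epoch=num_epoch_mu,
    #                    use_cuda=use_cuda)  # hypothetical call, signature assumed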
Example no. 3
import torch
import numpy as np
from sklearn.preprocessing import MinMaxScaler
import matplotlib.pyplot as plt

from baetorch.baetorch.util.convert_dataloader import convert_dataloader
from baetorch.baetorch.lr_range_finder import run_auto_lr_range_v2
from baetorch.baetorch.models.bae_hydra import BAE_Hydra, Hydra_Autoencoder
from baetorch.baetorch.models.base_autoencoder import Encoder, DenseLayers, infer_decoder
from baetorch.baetorch.models.cholesky_layer import CholLayer
from baetorch.baetorch.plotting import plot_contour, get_grid2d_latent
from baetorch.baetorch.util.seed import bae_set_seed

bae_set_seed(321)

#=========load data
from sklearn.datasets import make_blobs

# iris_data = load_iris()
# x_train = iris_data['data']
# y_train = iris_data['target']
# x_train, y_train = make_moons(n_samples=500)
x_train, y_train = make_blobs(n_samples=100, centers=5, n_features=2)

x_train = MinMaxScaler().fit_transform(x_train)

x_train_torch = torch.tensor(x_train).float()
y_train_torch = torch.tensor(y_train).int()

#=======AutoEncoder
latent_dim = 100
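
# Sketch (not from the original script): a plain-PyTorch autoencoder standing
# in for the baetorch Hydra_Autoencoder, whose construction is not shown in
# this excerpt. Layer sizes are illustrative assumptions.
import torch.nn as nn
import torch.nn.functional as F

encoder = nn.Sequential(nn.Linear(2, 50), nn.ReLU(), nn.Linear(50, latent_dim))
decoder = nn.Sequential(nn.Linear(latent_dim, 50), nn.ReLU(),
                        nn.Linear(50, 2), nn.Sigmoid())  # outputs in [0, 1], matching MinMaxScaler
autoencoder = nn.Sequential(encoder, decoder)

recon = autoencoder(x_train_torch)       # reconstruct the scaled blob data
loss = F.mse_loss(recon, x_train_torch)  # reconstruction error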
Example no. 4
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import MinMaxScaler

from agentMET4FOF_ml_extension.agentMET4FOF.agentMET4FOF.streams import DataStreamMET4FOF
from baetorch.baetorch.util.seed import bae_set_seed
from util.calc_outlier import get_num_outliers_df

bae_set_seed(1231)


class Liveline_DataStream(DataStreamMET4FOF):
    def __init__(self,
                 dataset_folder="multi-stage-dataset/",
                 output_stage=1,
                 input_stage=1,
                 upper_quartile=80,
                 train_size=0.5,
                 apply_scaling=True):

        # upper_quartile/lower_quartile are percentile bounds (e.g. 80 and 20)
        lower_quartile = 100 - upper_quartile
        df = pd.read_csv(dataset_folder + "continuous_factory_process.csv")

        # drop the timestamp column
        df = df.drop(["time_stamp"], axis=1)
        column_names = df.columns

        # filter Y columns
        if output_stage == 1:
            Y_df_actual = df[[