def evaluate_sa_cnn2d(x_train: List, x_test: List, y_test: np.array) -> Dict:
    """Score the SA-CNN2D model on the test split.

    The scaler is fitted on the training data only and then applied to the
    test data; evaluation settings are read from the saved experiment file.

    :param x_train: training inputs used solely to fit the scaler
    :param x_test: test inputs to be scaled and scored
    :param y_test: ground-truth test labels
    :return: score dictionary produced by ``evaluate``
    """
    scaler = CustomMinMaxScaler()
    scaler.fit(x_train)
    x_test = scaler.transform(x_test)

    stats = load_experiment('../../models/sa_cnn2d/experiments.json')
    return evaluate(x_test, y_test, stats['experiments'])
def evaluate_hybrid_model_ae(x_train: np.ndarray, x_test: np.ndarray,
                             y_test: np.array) -> Dict:
    """Score the hybrid autoencoder model on the test split.

    Standardization statistics come from the training data only; the
    evaluation configuration is loaded from the stored experiment file.

    :param x_train: training inputs used solely to fit the scaler
    :param x_test: test inputs to be standardized and scored
    :param y_test: ground-truth test labels
    :return: score dictionary produced by ``evaluate``
    """
    scaler = StandardScaler()
    scaler.fit(x_train)
    x_test = scaler.transform(x_test)

    stats = load_experiment(
        '../../models/hybrid_ae_small/experiments.json')
    return evaluate(x_test, y_test, stats['experiments'])
def evaluate_hybrid_model_if(x_train: np.ndarray, x_test: np.ndarray,
                             y_test: np.array) -> Dict:
    """Score the hybrid isolation-forest model on the test split.

    The scaler is fitted on the training data only; scoring uses the
    unsupervised evaluation path with the saved experiment configuration.

    :param x_train: training inputs used solely to fit the scaler
    :param x_test: test inputs to be standardized and scored
    :param y_test: ground-truth test labels
    :return: result dictionary produced by ``evaluate_unsupervised``
    """
    scaler = StandardScaler()
    scaler.fit(x_train)
    x_test = scaler.transform(x_test)

    stats = load_experiment(
        '../../models/hybrid_if_small/experiments.json')
    return evaluate_unsupervised(
        x_test, y_test, stats['experiments'])
# Example no. 4
def train_aetcnn(x_train: List, x_test: List, y_train: np.array, y_test: np.array) -> Dict:
    """Run a random hyperparameter search for the AETCN model.

    Inputs are min-max scaled (scaler fitted on training data). Only
    samples labelled 0 in ``y_train`` are used for fitting — a
    semi-supervised setup where the model trains on normal data only.

    :param x_train: training inputs
    :param x_test: test inputs
    :param y_train: training labels (0 presumably marks normal samples)
    :param y_test: test labels
    :return: result dictionary produced by ``random_search``
    """
    scaler = CustomMinMaxScaler()
    x_train = scaler.fit_transform(x_train)
    x_test = scaler.transform(x_test)

    experiments = load_experiment('../../models/AETCN-hyperparameters-embeddings-clipping-HDFS1.json')
    # Train on normal samples only; validation labels are not needed (None).
    normal_only = x_train[y_train == 0]
    return random_search((normal_only, x_test, None, y_test), AETCN(), experiments)
# Example no. 5
def train_cnn1d(x_train: List, x_test: List, y_train: np.array, y_test: np.array) -> Dict:
    """Run a random hyperparameter search for the CNN1D model.

    Inputs are min-max scaled (scaler fitted on training data). Only
    samples labelled 0 in ``y_train`` are used for fitting — a
    semi-supervised setup where the model trains on normal data only.

    :param x_train: training inputs
    :param x_test: test inputs
    :param y_train: training labels (0 presumably marks normal samples)
    :param y_test: test labels
    :return: result dictionary produced by ``random_search``
    """
    scaler = CustomMinMaxScaler()
    x_train = scaler.fit_transform(x_train)
    x_test = scaler.transform(x_test)

    experiments = load_experiment('../../models/CNN1D-inverse-bottleneck-hyperparameters-embeddings-HDFS1.json')
    # Train on normal samples only; validation labels are not needed (None).
    normal_only = x_train[y_train == 0]
    return random_search((normal_only, x_test, None, y_test), CNN1D(), experiments)
# Example no. 6
def train_hybrid_model_if(x_train: List, x_test: List, y_train: np.array, y_test: np.array) -> Dict:
    """Run a random hyperparameter search for the hybrid isolation forest.

    Inputs are standardized (scaler fitted on training data). Unlike the
    semi-supervised trainers, the full training set is passed through —
    ``y_train`` is accepted but not used for filtering.

    :param x_train: training inputs
    :param x_test: test inputs
    :param y_train: training labels (unused here)
    :param y_test: test labels
    :return: result dictionary produced by ``random_search_unsupervised``
    """
    scaler = StandardScaler()
    x_train = scaler.fit_transform(x_train)
    x_test = scaler.transform(x_test)

    forest = IsolationForest(bootstrap=True, n_jobs=1, random_state=SEED)

    experiments = load_experiment('../../models/IF-AETCN-hybrid-hyperparameters-HDFS1.json')
    return random_search_unsupervised((x_train, x_test, None, y_test), forest, experiments)
# Example no. 7
def train_hybrid_model_ae(x_train: List, x_test: List, y_train: np.array, y_test: np.array) -> Dict:
    """Run a random hyperparameter search for the hybrid autoencoder.

    Inputs are standardized (scaler fitted on training data). Only
    samples labelled 0 in ``y_train`` are used for fitting — a
    semi-supervised setup where the model trains on normal data only.

    :param x_train: training inputs
    :param x_test: test inputs
    :param y_train: training labels (0 presumably marks normal samples)
    :param y_test: test labels
    :return: result dictionary produced by ``random_search``
    """
    scaler = StandardScaler()
    x_train = scaler.fit_transform(x_train)
    x_test = scaler.transform(x_test)

    experiments = load_experiment('../../models/AE-AETCN-hybrid-hyperparameters-HDFS1.json')
    # Train on normal samples only; validation labels are not needed (None).
    normal_only = x_train[y_train == 0]
    return random_search((normal_only, x_test, None, y_test), AutoEncoder(), experiments)
def evaluate_lof(x_train: Dict, x_test: Dict, y_test: np.array) -> Dict:
    """Score the LOF baseline on the test split.

    Builds TF-IDF features with mean pooling; the extractor is fitted on
    the training data only (the transformed training matrix itself is not
    needed for evaluation), then the test data is transformed and scored
    with the saved experiment configuration.

    :param x_train: training blocks, used solely to fit the extractor
    :param x_test: test blocks to be featurized and scored
    :param y_test: test labels (CSV form, resolved via block keys)
    :return: result dictionary produced by ``evaluate_unsupervised``
    """
    extractor = FeatureExtractor(method='tf-idf', preprocessing='mean')
    y_test = get_labels_from_csv(y_test, x_test.keys())
    # Fit only — the returned training features are intentionally discarded.
    extractor.fit_transform(x_train)
    x_test = extractor.transform(x_test)

    stats = load_experiment(
        '../../models/lof_baseline/experiments.json')
    return evaluate_unsupervised(x_test, y_test, stats['experiments'])
# Example no. 9
def train_iso_forest(x_train: Dict, x_test: Dict,  y_train: pd.DataFrame, y_test: pd.DataFrame) -> Dict:
    """Run a random hyperparameter search for the isolation-forest baseline.

    Builds TF-IDF features with mean pooling (extractor fitted on training
    data) and searches hyperparameters with the unsupervised driver.

    :param x_train: training blocks
    :param x_test: test blocks
    :param y_train: training labels (unused here)
    :param y_test: test labels (CSV form, resolved via block keys)
    :return: result dictionary produced by ``random_search_unsupervised``
    """
    extractor = FeatureExtractor(method='tf-idf', preprocessing='mean')
    y_test = get_labels_from_csv(y_test, x_test.keys())
    x_train = extractor.fit_transform(x_train)
    x_test = extractor.transform(x_test)

    forest = IsolationForest(bootstrap=True, n_jobs=os.cpu_count(), random_state=SEED)

    experiments = load_experiment('../../models/IF-hyperparameters-Drain3-HDFS1.json')
    return random_search_unsupervised((x_train, x_test, None, y_test), forest, experiments)
# Example no. 10
def train_lof(x_train: Dict, x_test: Dict,  y_train: pd.DataFrame, y_test: pd.DataFrame) -> Dict:
    """Run a random hyperparameter search for the LOF baseline.

    Builds TF-IDF features with mean pooling; the extractor is fitted on
    the training data only (the transformed training matrix is discarded —
    the search receives ``None`` for the training split), then the test
    data is transformed and searched with the unsupervised driver.

    :param x_train: training blocks, used solely to fit the extractor
    :param x_test: test blocks
    :param y_train: training labels (unused here)
    :param y_test: test labels (CSV form, resolved via block keys)
    :return: result dictionary produced by ``random_search_unsupervised``
    """
    extractor = FeatureExtractor(method='tf-idf', preprocessing='mean')
    y_test = get_labels_from_csv(y_test, x_test.keys())
    # Fit only — the returned training features are intentionally discarded.
    extractor.fit_transform(x_train)
    x_test = extractor.transform(x_test)

    detector = LocalOutlierFactor(n_jobs=os.cpu_count())

    experiments = load_experiment('../../models/LOF-hyperparameters-Drain3-HDFS1.json')
    return random_search_unsupervised((None, x_test, None, y_test), detector, experiments)
# Example no. 11
def train_autoencoder(x_train: Dict, x_test: Dict, y_train: pd.DataFrame, y_test: pd.DataFrame) -> Dict:
    """Run a random hyperparameter search for the autoencoder baseline.

    Builds TF-IDF features with mean pooling (extractor fitted on training
    data). Only samples labelled 0 in ``y_train`` are used for fitting — a
    semi-supervised setup where the model trains on normal data only.

    :param x_train: training blocks
    :param x_test: test blocks
    :param y_train: training labels (CSV form; 0 presumably marks normal samples)
    :param y_test: test labels (CSV form, resolved via block keys)
    :return: result dictionary produced by ``random_search``
    """
    extractor = FeatureExtractor(method='tf-idf', preprocessing='mean')
    y_train = get_labels_from_csv(y_train, x_train.keys())
    y_test = get_labels_from_csv(y_test, x_test.keys())
    x_train = extractor.fit_transform(x_train)
    x_test = extractor.transform(x_test)

    experiments = load_experiment('../../models/AE-hyperparameters-Drain3-HDFS1.json')
    # Train on normal samples only; validation labels are not needed (None).
    normal_only = x_train[y_train == 0]
    return random_search((normal_only, x_test, None, y_test), AutoEncoder(), experiments)