def __init__(self,
                 data_name,
                 hidden_dim=256,
                 seed=0,
                 learning_rate=3e-4,
                 batch_size=128,
                 training_ratio=0.8,
                 validation_ratio=0.1,
                 max_epochs=100):
        np.random.seed(seed)
        torch.manual_seed(seed)
        torch.cuda.manual_seed(seed)
        use_cuda = torch.cuda.is_available()

        # if data_name == 'shuttle'
        self.device = torch.device("cuda" if use_cuda else "cpu")
        self.result_path = "./results/{}/0.0/RobustRealNVP/{}/".format(
            data_name, seed)
        data_path = "./data/" + data_name + ".npy"
        self.model_save_path = "./trained_model/{}/RobustRealNVP/{}/".format(
            data_name, seed)

        os.makedirs(self.model_save_path, exist_ok=True)

        self.learning_rate = learning_rate

        # self.dataset = RealGraphDataset(data_path, missing_ratio=0, radius=2)
        self.dataset = RealDataset(data_path, missing_ratio=0)
        self.seed = seed
        self.hidden_dim = hidden_dim
        self.max_epochs = max_epochs

        self.data_path = data_path
        self.data_anomaly_ratio = self.dataset.__anomalyratio__()
        self.batch_size = batch_size
        self.input_dim = self.dataset.__dim__()
        self.data_normaly_ratio = 1 - self.data_anomaly_ratio
        n_sample = self.dataset.__len__()
        self.n_train = int(n_sample * training_ratio)
        self.n_test = n_sample - self.n_train
        print('|data dimension: {}|data noise ratio:{}'.format(
            self.dataset.__dim__(), self.data_anomaly_ratio))

        training_data, testing_data = data.random_split(
            dataset=self.dataset, lengths=[self.n_train, self.n_test])

        self.training_loader = data.DataLoader(training_data,
                                               batch_size=batch_size,
                                               shuffle=True,
                                               drop_last=True)
        self.testing_loader = data.DataLoader(testing_data,
                                              batch_size=self.n_test,
                                              shuffle=False)
        self.ae = None
        self.discriminator = None
        self.build_model()
        self.print_network()
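Every solver in this listing depends on a RealDataset class that is not shown. Below is a minimal sketch of the interface those calls assume; the class name, attribute names and the .npy layout are inferences from the usage above, not the original implementation.

import numpy as np
from torch.utils import data

class RealDatasetSketch(data.Dataset):
    # Hypothetical stand-in for RealDataset, inferred from how it is used above.
    def __init__(self, data_path, missing_ratio=0.0, knn_impute=False):
        arr = np.load(data_path, allow_pickle=True).item()  # assumed {"X": ..., "y": ...} layout
        self.x = arr["X"].astype(np.float32)
        self.y = arr["y"].astype(np.float32)
        # m marks observed entries; 0 means the value was dropped to simulate missingness
        self.m = (np.random.rand(*self.x.shape) >= missing_ratio).astype(np.float32)

    def __len__(self):
        return self.x.shape[0]

    def __dim__(self):
        return self.x.shape[1]

    def __anomalyratio__(self):
        return float(self.y.mean())  # fraction of samples labelled anomalous

    def __getitem__(self, idx):
        return self.x[idx], self.y[idx], self.m[idx]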
Example #2
    def __init__(
        self,
        data_name,
        seed=0,
        learning_rate=1e-3,
        training_ratio=0.8,
        validation_ratio=0.1,
        missing_ratio=0.5,
    ):
        # Data loader
        # read data here
        np.random.seed(seed)
        data_path = "./data/" + data_name + ".npy"
        self.result_path = "./results/{}/{}/LOF/{}/".format(data_name, missing_ratio, seed)

        self.learning_rate = learning_rate
        self.dataset = RealDataset(data_path, missing_ratio=missing_ratio)
        self.seed = seed

        self.data_path = data_path
        self.data_anomaly_ratio = self.dataset.__anomalyratio__()
        self.input_dim = self.dataset.__dim__()
        self.data_normaly_ratio = 1 - self.data_anomaly_ratio
        n_sample = self.dataset.__len__()
        self.n_train = int(n_sample * training_ratio)
        self.n_validation = int(n_sample * validation_ratio)
        self.n_test = n_sample - self.n_train - self.n_validation
        self.best_model = None
        if missing_ratio == 0.0:
            self.X_train, self.X_test, self.y_train, self.y_test = train_test_split(
                self.dataset.x,
                self.dataset.y,
                test_size=1 - training_ratio - validation_ratio,
                random_state=seed,
            )
        if missing_ratio > 0.0:
            # TODO: impute
            x = self.dataset.x
            m = self.dataset.m
            x_with_missing = x.copy()  # copy so the original data is not overwritten with NaNs
            x_with_missing[m == 0] = np.nan
            imputer = KNNImputer(n_neighbors=2)
            x = imputer.fit_transform(x_with_missing)
            self.X_train, self.X_test, self.y_train, self.y_test = train_test_split(
                x,
                self.dataset.y,
                test_size=1 - training_ratio - validation_ratio,
                random_state=seed,
            )

        print(
            "|data dimension: {}|data noise ratio:{}".format(
                self.dataset.__dim__(), self.data_anomaly_ratio
            )
        )
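The imputation branch above relies on scikit-learn's KNNImputer. A standalone illustration of the mask-then-impute step, with toy values rather than the real dataset:

import numpy as np
from sklearn.impute import KNNImputer

x = np.array([[1.0, 2.0], [2.0, 4.0], [3.0, 6.0]])
m = np.array([[1, 1], [1, 0], [1, 1]])         # 0 marks a missing entry
x_masked = x.copy()
x_masked[m == 0] = np.nan                       # copy first so the raw data stays intact
x_imputed = KNNImputer(n_neighbors=2).fit_transform(x_masked)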
Example #3
    def __init__(
        self,
        data_name,
        seed=0,
        learning_rate=1e-3,
        training_ratio=0.8,
        validation_ratio=0.1,
        missing_ratio=0.5,
        max_epochs=100,
        z_dim=10,
        batch_size=64,
    ):
        # Data loader
        # read data here
        self.max_epochs = max_epochs
        self.z_dim = z_dim
        self.batch_size = batch_size
        np.random.seed(seed)
        data_path = "./data/" + data_name + ".npy"
        self.result_path = "./results/{}/{}/SO_GAAL/{}/".format(
            data_name, missing_ratio, seed
        )

        self.learning_rate = learning_rate
        self.dataset = RealDataset(data_path, missing_ratio=missing_ratio)
        self.seed = seed

        self.data_path = data_path
        self.data_anomaly_ratio = self.dataset.__anomalyratio__()
        self.input_dim = self.dataset.__dim__()
        self.data_normaly_ratio = 1 - self.data_anomaly_ratio
        n_sample = self.dataset.__len__()
        self.n_train = int(n_sample * training_ratio)
        self.n_validation = int(n_sample * validation_ratio)
        self.n_test = n_sample - self.n_train - self.n_validation
        self.best_model = None

        self.X_train, self.X_test, self.y_train, self.y_test = train_test_split(
            self.dataset.x,
            self.dataset.y,
            test_size=1 - training_ratio - validation_ratio,
            random_state=seed,
        )

        print(
            "|data dimension: {}|data noise ratio:{}".format(
                self.dataset.__dim__(), self.data_anomaly_ratio
            )
        )
Example #4
class Solver_OCSVM:
    def __init__(
        self,
        data_name,
        missing_ratio=0.0,
        seed=0,
        learning_rate=1e-3,
        training_ratio=0.8,
    ):
        # Data loader
        # read data here
        np.random.seed(seed)
        data_path = "./data/" + data_name + ".npy"
        self.result_path = "./results/{}/{}/OCSVM/{}/".format(
            data_name, missing_ratio, seed)
        self.missing_ratio = missing_ratio
        self.learning_rate = learning_rate
        self.dataset = RealDataset(data_path, missing_ratio=missing_ratio)
        self.seed = seed

        self.data_path = data_path
        self.data_anomaly_ratio = self.dataset.__anomalyratio__()
        self.input_dim = self.dataset.__dim__()
        self.data_normaly_ratio = 1 - self.data_anomaly_ratio
        n_sample = self.dataset.__len__()
        self.n_train = int(n_sample * training_ratio)
        self.n_test = n_sample - self.n_train
        if missing_ratio == 0.0:
            self.X_train, self.X_test, self.y_train, self.y_test = train_test_split(
                self.dataset.x,
                self.dataset.y,
                test_size=1 - training_ratio,
                random_state=seed,
            )
        if missing_ratio > 0.0:
            x = self.dataset.x
            m = self.dataset.m
            x_with_missing = x.copy()  # copy so the original data is not overwritten with NaNs
            x_with_missing[m == 0] = np.nan
            # imputer = KNNImputer(n_neighbors=2)
            imputer = SimpleImputer()
            x = imputer.fit_transform(x_with_missing)
            self.X_train, self.X_test, self.y_train, self.y_test = train_test_split(
                x,
                self.dataset.y,
                test_size=1 - training_ratio,
                random_state=seed,
            )

        print("|data dimension: {}|data noise ratio:{}".format(
            self.dataset.__dim__(), self.data_anomaly_ratio))

    def train(self):
        model = OneClassSVM()
        model.fit(self.X_train)
        self.best_model = model

    def test(self):
        print("======================TEST MODE======================")
        # pred = self.best_model.predict(self.X_test)
        score = self.best_model.score_samples(self.X_test)
        thresh = np.percentile(score, self.data_anomaly_ratio * 100)
        print("Threshold :", thresh)

        pred = (score < thresh).astype(int)
        # pred = pred < 0
        gt = self.y_test.astype(int)

        from sklearn.metrics import (precision_recall_fscore_support as prf,
                                     accuracy_score, roc_auc_score)
        auc = roc_auc_score(gt,
                            -self.best_model.decision_function(self.X_test))

        accuracy = accuracy_score(gt, pred)
        precision, recall, f_score, support = prf(gt, pred, average="binary")

        print(
            "Accuracy : {:0.4f}, Precision : {:0.4f}, Recall : {:0.4f}, F-score : {:0.4f}, AUC: {:0.4f}"
            .format(accuracy, precision, recall, f_score, auc))

        os.makedirs(self.result_path, exist_ok=True)

        np.save(
            self.result_path + "result.npy",
            {
                "auc": auc,
                "accuracy": accuracy,
                "precision": precision,
                "recall": recall,
                "f1": f_score,
            },
        )
        return accuracy, precision, recall, f_score
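Assuming the module-level imports (numpy, os, scikit-learn, RealDataset) and a data file in the expected ./data/ location, the solver is driven end to end as in this sketch; the dataset name is only a placeholder.

solver = Solver_OCSVM(data_name="thyroid", missing_ratio=0.0, seed=0)
solver.train()
accuracy, precision, recall, f_score = solver.test()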
Example #5
class Solver_LOF:
    def __init__(
        self,
        data_name,
        seed=0,
        learning_rate=1e-3,
        training_ratio=0.8,
        validation_ratio=0.1,
        missing_ratio=0.5,
    ):
        # Data loader
        # read data here
        np.random.seed(seed)
        data_path = "./data/" + data_name + ".npy"
        self.result_path = "./results/{}/{}/LOF/{}/".format(data_name, missing_ratio, seed)

        self.learning_rate = learning_rate
        self.dataset = RealDataset(data_path, missing_ratio=missing_ratio)
        self.seed = seed

        self.data_path = data_path
        self.data_anomaly_ratio = self.dataset.__anomalyratio__()
        self.input_dim = self.dataset.__dim__()
        self.data_normaly_ratio = 1 - self.data_anomaly_ratio
        n_sample = self.dataset.__len__()
        self.n_train = int(n_sample * training_ratio)
        self.n_validation = int(n_sample * validation_ratio)
        self.n_test = n_sample - self.n_train - self.n_validation
        self.best_model = None
        if missing_ratio == 0.0:
            self.X_train, self.X_test, self.y_train, self.y_test = train_test_split(
                self.dataset.x,
                self.dataset.y,
                test_size=1 - training_ratio - validation_ratio,
                random_state=seed,
            )
        if missing_ratio > 0.0:
            # TODO: impute
            x = self.dataset.x
            m = self.dataset.m
            x_with_missing = x.copy()  # copy so the original data is not overwritten with NaNs
            x_with_missing[m == 0] = np.nan
            imputer = KNNImputer(n_neighbors=2)
            x = imputer.fit_transform(x_with_missing)
            self.X_train, self.X_test, self.y_train, self.y_test = train_test_split(
                x,
                self.dataset.y,
                test_size=1 - training_ratio - validation_ratio,
                random_state=seed,
            )

        print(
            "|data dimension: {}|data noise ratio:{}".format(
                self.dataset.__dim__(), self.data_anomaly_ratio
            )
        )

    def train(self):
        model = LocalOutlierFactor(
            n_neighbors=5, contamination=self.data_anomaly_ratio
        )
        model.fit(self.X_train)

        self.best_model = model

    def test(self):
        print("======================TEST MODE======================")
        pred = self.best_model.fit_predict(self.X_test)

        gt = self.y_test.astype(int)

        from sklearn.metrics import (
            precision_recall_fscore_support as prf,
            accuracy_score,
            roc_auc_score,
        )

        auc = roc_auc_score(gt, -pred)
        pred = pred < 0
        accuracy = accuracy_score(gt, pred)
        precision, recall, f_score, support = prf(gt, pred, average="binary")

        print(
            "Accuracy : {:0.4f}, Precision : {:0.4f}, Recall : {:0.4f}, F-score : {:0.4f}".format(
                accuracy, precision, recall, f_score
            )
        )

        os.makedirs(self.result_path, exist_ok=True)

        np.save(
            self.result_path + "result.npy",
            {
                "accuracy": accuracy,
                "precision": precision,
                "recall": recall,
                "f1": f_score,
            },
        )
        return accuracy, precision, recall, f_score
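Note that with the default novelty=False, LocalOutlierFactor.fit_predict on the test set refits the estimator on that set rather than reusing the training fit. If out-of-sample scoring is wanted, scikit-learn's novelty mode is the usual alternative; a hedged sketch with stand-in data, not what the example above does:

import numpy as np
from sklearn.neighbors import LocalOutlierFactor

X_train = np.random.randn(200, 5)           # stand-in for self.X_train
X_test = np.random.randn(50, 5)             # stand-in for self.X_test

lof = LocalOutlierFactor(n_neighbors=5, contamination=0.1, novelty=True)
lof.fit(X_train)
pred = lof.predict(X_test)                  # -1 = outlier, +1 = inlier
scores = -lof.score_samples(X_test)         # larger value = more anomalous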
Example #6
class Solver_SO_GAAL:
    def __init__(
        self,
        data_name,
        seed=0,
        learning_rate=1e-3,
        training_ratio=0.8,
        validation_ratio=0.1,
        missing_ratio=0.5,
        max_epochs=100,
        z_dim=10,
        batch_size=64,
    ):
        # Data loader
        # read data here
        self.max_epochs = max_epochs
        self.z_dim = z_dim
        self.batch_size = batch_size
        np.random.seed(seed)
        data_path = "./data/" + data_name + ".npy"
        self.result_path = "./results/{}/{}/SO_GAAL/{}/".format(
            data_name, missing_ratio, seed
        )

        self.learning_rate = learning_rate
        self.dataset = RealDataset(data_path, missing_ratio=missing_ratio)
        self.seed = seed

        self.data_path = data_path
        self.data_anomaly_ratio = self.dataset.__anomalyratio__()
        self.input_dim = self.dataset.__dim__()
        self.data_normaly_ratio = 1 - self.data_anomaly_ratio
        n_sample = self.dataset.__len__()
        self.n_train = int(n_sample * training_ratio)
        self.n_validation = int(n_sample * validation_ratio)
        self.n_test = n_sample - self.n_train - self.n_validation
        self.best_model = None

        self.X_train, self.X_test, self.y_train, self.y_test = train_test_split(
            self.dataset.x,
            self.dataset.y,
            test_size=1 - training_ratio - validation_ratio,
            random_state=seed,
        )

        print(
            "|data dimension: {}|data noise ratio:{}".format(
                self.dataset.__dim__(), self.data_anomaly_ratio
            )
        )

    def train(self):
        model = SO_GAAL(
            stop_epochs=self.max_epochs, contamination=self.data_anomaly_ratio
        )
        model.fit(self.X_train)

        self.best_model = model

    def test(self):
        print("======================TEST MODE======================")
        pred = self.best_model.predict(self.X_test)

        gt = self.y_test.astype(int)

        from sklearn.metrics import (
            precision_recall_fscore_support as prf,
            accuracy_score,
            roc_auc_score,
        )

        auc = roc_auc_score(gt, -self.best_model.decision_function(self.X_test))
        pred = pred < 0
        accuracy = accuracy_score(gt, pred)
        precision, recall, f_score, support = prf(gt, pred, average="binary")

        print(
            "Accuracy : {:0.4f}, Precision : {:0.4f}, Recall : {:0.4f}, F-score : {:0.4f}, AUC-score : {:0.4f}".format(
                accuracy, precision, recall, f_score, auc
            )
        )

        os.makedirs(self.result_path, exist_ok=True)

        np.save(
            self.result_path + "result.npy",
            {
                "accuracy": accuracy,
                "precision": precision,
                "recall": recall,
                "f1": f_score,
                "auc": auc,
            },
        )
        return accuracy, precision, recall, f_score, auc
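The same construct, train, test pattern applies here; a short usage sketch in which the dataset name and epoch count are placeholders:

solver = Solver_SO_GAAL(data_name="arrhythmia", missing_ratio=0.0, max_epochs=20)
solver.train()
accuracy, precision, recall, f_score, auc = solver.test()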
Example #7
    def __init__(self,
                 data_name,
                 start_ratio=0.0,
                 decay_ratio=0.01,
                 hidden_dim=128,
                 z_dim=10,
                 seed=0,
                 learning_rate=1e-3,
                 batch_size=128,
                 training_ratio=0.8,
                 validation_ratio=0.1,
                 max_epochs=100,
                 coteaching=0.0,
                 knn_impute=False,
                 missing_ratio=0.0):
        # Data loader
        # read data here
        np.random.seed(seed)
        torch.manual_seed(seed)
        torch.cuda.manual_seed(seed)

        use_cuda = torch.cuda.is_available()
        self.data_name = data_name
        self.device = torch.device("cuda" if use_cuda else "cpu")
        data_path = "./data/" + data_name + ".npy"
        self.model_save_path = "./trained_model/{}/{}/SVDD/{}/".format(
            data_name, missing_ratio, seed)
        self.result_path = "./results/{}/{}/SVDD/{}/".format(
            data_name, missing_ratio, seed)
        os.makedirs(self.model_save_path, exist_ok=True)
        self.learning_rate = learning_rate
        self.missing_ratio = missing_ratio
        self.dataset = RealDataset(data_path,
                                   missing_ratio=self.missing_ratio,
                                   knn_impute=knn_impute)
        self.seed = seed
        self.start_ratio = start_ratio
        self.decay_ratio = decay_ratio
        self.hidden_dim = hidden_dim
        self.z_dim = z_dim
        self.max_epochs = max_epochs
        self.coteaching = coteaching

        self.data_path = data_path
        self.data_anomaly_ratio = self.dataset.__anomalyratio__()
        self.input_dim = self.dataset.__dim__()
        self.data_normaly_ratio = 1 - self.data_anomaly_ratio
        n_sample = self.dataset.__len__()
        self.n_train = int(n_sample * (training_ratio + validation_ratio))
        # self.n_validation = int(n_sample * validation_ratio)
        self.n_test = n_sample - self.n_train
        print("|data dimension: {}|data noise ratio:{}".format(
            self.dataset.__dim__(), self.data_anomaly_ratio))

        self.decay_ratio = abs(self.start_ratio -
                               (1 - self.data_anomaly_ratio)) / (
                                   self.max_epochs / 2)
        training_data, testing_data = data.random_split(
            dataset=self.dataset, lengths=[self.n_train, self.n_test])

        self.training_loader = data.DataLoader(training_data,
                                               batch_size=batch_size,
                                               shuffle=True)

        self.testing_loader = data.DataLoader(testing_data,
                                              batch_size=self.n_test,
                                              shuffle=False)
        self.ae = None
        self.discriminator = None
        self.build_model()
        self.print_network()
Example #8
class Solver_AE:
    def __init__(self,
                 data_name,
                 start_ratio=0.0,
                 decay_ratio=0.01,
                 hidden_dim=128,
                 z_dim=10,
                 seed=0,
                 learning_rate=1e-3,
                 batch_size=128,
                 training_ratio=0.8,
                 validation_ratio=0.1,
                 max_epochs=100,
                 coteaching=0.0,
                 knn_impute=False,
                 missing_ratio=0.0):
        # Data loader
        # read data here
        np.random.seed(seed)
        torch.manual_seed(seed)
        torch.cuda.manual_seed(seed)

        use_cuda = torch.cuda.is_available()
        self.data_name = data_name
        self.device = torch.device("cuda" if use_cuda else "cpu")
        data_path = "./data/" + data_name + ".npy"
        self.model_save_path = "./trained_model/{}/{}/SVDD/{}/".format(
            data_name, missing_ratio, seed)
        self.result_path = "./results/{}/{}/SVDD/{}/".format(
            data_name, missing_ratio, seed)
        os.makedirs(self.model_save_path, exist_ok=True)
        self.learning_rate = learning_rate
        self.missing_ratio = missing_ratio
        self.dataset = RealDataset(data_path,
                                   missing_ratio=self.missing_ratio,
                                   knn_impute=knn_impute)
        self.seed = seed
        self.start_ratio = start_ratio
        self.decay_ratio = decay_ratio
        self.hidden_dim = hidden_dim
        self.z_dim = z_dim
        self.max_epochs = max_epochs
        self.coteaching = coteaching

        self.data_path = data_path
        self.data_anomaly_ratio = self.dataset.__anomalyratio__()
        self.input_dim = self.dataset.__dim__()
        self.data_normaly_ratio = 1 - self.data_anomaly_ratio
        n_sample = self.dataset.__len__()
        self.n_train = int(n_sample * (training_ratio + validation_ratio))
        # self.n_validation = int(n_sample * validation_ratio)
        self.n_test = n_sample - self.n_train
        print("|data dimension: {}|data noise ratio:{}".format(
            self.dataset.__dim__(), self.data_anomaly_ratio))

        self.decay_ratio = abs(self.start_ratio -
                               (1 - self.data_anomaly_ratio)) / (
                                   self.max_epochs / 2)
        training_data, testing_data = data.random_split(
            dataset=self.dataset, lengths=[self.n_train, self.n_test])

        self.training_loader = data.DataLoader(training_data,
                                               batch_size=batch_size,
                                               shuffle=True)

        self.testing_loader = data.DataLoader(testing_data,
                                              batch_size=self.n_test,
                                              shuffle=False)
        self.ae = None
        self.discriminator = None
        self.build_model()
        self.print_network()

    def build_model(self):
        self.ae = SVDD(input_dim=self.input_dim,
                       hidden_dim=self.hidden_dim,
                       z_dim=self.z_dim)
        self.ae = self.ae.to(self.device)

    def print_network(self):
        num_params = 0
        for p in self.ae.parameters():
            num_params += p.numel()
        print("The number of parameters: {}".format(num_params))

    def train(self):
        optimizer = torch.optim.Adam(self.ae.parameters(),
                                     lr=self.learning_rate)
        # scheduler = StepLR(optimizer, step_size=30, gamma=0.1)
        """
        pretrain autoencoder
        """
        mse_loss = torch.nn.MSELoss()

        if self.data_name == 'optdigits':
            mse_loss = torch.nn.BCELoss()
        min_val_error = 1e10
        for epoch in tqdm(range(50)):  # pretrain the autoencoder for 50 epochs
            for i, (x, y, m) in enumerate(self.training_loader):
                x = x.to(self.device).float()
                m = m.to(self.device).float()

                # x_missing = x * m + (1-m) * -10
                n = x.shape[0]
                optimizer.zero_grad()
                self.ae.train()
                z1, xhat1, _ = self.ae(x.float(), m)

                loss = mse_loss(xhat1, x)
                loss.backward()
                optimizer.step()
            # scheduler.step()
        # SVDD stage: initialise the hypersphere centre c from the pretrained embeddings
        svm_loss = SVMLoss()
        z = []
        with torch.no_grad():
            self.ae.eval()
            for i, (x, y, m) in enumerate(self.training_loader):
                x = x.to(self.device).float()
                m = m.to(self.device).float()

                z1, _, _ = self.ae(x.float(), m.float())
                z.append(z1)
                # x_intersect = x[index_intersect, :]
            z = torch.cat(z).mean(dim=0)
            center = self.ae.init_c(z)

        self.ae.train()
        for epoch in range(self.max_epochs):
            for i, (x, y, m) in enumerate(self.training_loader):
                x = x.to(self.device).float()
                m = m.to(self.device).float()

                # x_missing = x * m + (1-m) * -10
                n = x.shape[0]
                optimizer.zero_grad()

                z1, _, _ = self.ae(x.float(), m)

                loss = svm_loss(z1, center)
                loss.backward()
                optimizer.step()

            # validation pass: no gradient updates, accumulate the loss over all test batches
            valerror = 0
            self.ae.eval()
            with torch.no_grad():
                for i, (x, y, m) in enumerate(self.testing_loader):
                    x = x.to(self.device).float()
                    m = m.to(self.device).float()

                    z1, _, _ = self.ae(x.float(), m)
                    loss = svm_loss(z1, center)
                    valerror = valerror + loss.item()
            self.ae.train()

            if valerror < min_val_error:
                min_val_error = valerror
                torch.save(
                    self.ae.state_dict(),
                    os.path.join(self.model_save_path, "parameter.pth"),
                )

    def test(self):
        print("======================TEST MODE======================")
        self.ae.load_state_dict(
            torch.load(self.model_save_path + "parameter.pth"))
        self.ae.eval()
        loss = SVMLoss()

        for _, (x, y, m) in enumerate(self.testing_loader):
            y = y.data.cpu().numpy()
            x = x.to(self.device).float()
            m = m.to(self.device).float()

            z1, _, _ = self.ae(x.float(), m)
            error = ((z1 - self.ae.c1)**2)
            error = error.sum(dim=1)
        error = error.data.cpu().numpy()
        thresh = np.percentile(error, self.data_normaly_ratio * 100)
        print("Threshold :", thresh)

        pred = (error > thresh).astype(int)
        gt = y.astype(int)

        from sklearn.metrics import (precision_recall_fscore_support as prf,
                                     accuracy_score, roc_auc_score)
        gt = gt.squeeze()
        auc = roc_auc_score(gt, error)
        accuracy = accuracy_score(gt, pred)
        precision, recall, f_score, support = prf(gt, pred, average="binary")

        print(
            "Accuracy : {:0.4f}, Precision : {:0.4f}, Recall : {:0.4f}, F-score : {:0.4f}"
            .format(accuracy, precision, recall, f_score))

        os.makedirs(self.result_path, exist_ok=True)

        np.save(
            self.result_path + "result.npy",
            {
                "auc": auc,
                "accuracy": accuracy,
                "precision": precision,
                "recall": recall,
                "f1": f_score,
            },
        )
        return accuracy, precision, recall, f_score, auc
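SVMLoss is not defined in this listing. In Deep SVDD-style training it is typically the mean squared distance of the embeddings to the hypersphere centre; a hedged sketch of such a loss, an assumption rather than the original class:

import torch

class SVMLossSketch(torch.nn.Module):
    # Hypothetical Deep SVDD-style objective: pull embeddings towards a fixed centre.
    def forward(self, z, center):
        return ((z - center) ** 2).sum(dim=1).mean()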
Example #9
    def __init__(
        self,
        data_name,
        hidden_dim=128,  # number of hidden neurons in RCA
        z_dim=10,  # bottleneck dimension
        seed=0,  # random seed
        learning_rate=1e-3,  # learning rate
        batch_size=128,  #  batchsize
        training_ratio=0.8,  #  training data percentage
        max_epochs=100,  #  training epochs
        coteaching=1.0,  #  whether selects sample based on loss value
        oe=0.0,  # how much we overestimate the ground-truth anomaly ratio
        missing_ratio=0.0,  # missing ratio in the data
    ):
        # Data loader
        # read data here
        np.random.seed(seed)
        torch.manual_seed(seed)
        torch.cuda.manual_seed(seed)

        use_cuda = torch.cuda.is_available()
        self.data_name = data_name
        self.device = torch.device("cuda" if use_cuda else "cpu")
        data_path = "./data/" + data_name + ".npy"
        self.missing_ratio = missing_ratio
        self.model_save_path = "./trained_model/{}/{}/RCA/{}/".format(
            data_name, missing_ratio, seed
        )
        if oe == 0.0:
            self.result_path = "./results/{}/{}/RCA/{}/".format(
                data_name, missing_ratio, seed
            )
        else:
            self.result_path = "./results/{}/{}/RCA_{}/{}/".format(
                data_name, missing_ratio, oe, seed
            )

        os.makedirs(self.model_save_path, exist_ok=True)
        self.learning_rate = learning_rate
        self.dataset = RealDataset(
            data_path, missing_ratio=self.missing_ratio
        )
        self.seed = seed
        self.hidden_dim = hidden_dim
        self.z_dim = z_dim
        self.max_epochs = max_epochs
        self.coteaching = coteaching
        self.beta = 0.0  # initially, select all data
        self.alpha = 0.5
        self.data_path = data_path

        self.data_anomaly_ratio = self.dataset.__anomalyratio__() + oe

        self.input_dim = self.dataset.__dim__()
        self.data_normaly_ratio = 1 - self.data_anomaly_ratio

        n_sample = self.dataset.__len__()
        self.n_train = int(n_sample * (training_ratio))
        self.n_test = n_sample - self.n_train
        print(
            "|data dimension: {}|data noise ratio:{}".format(
                self.dataset.__dim__(), self.data_anomaly_ratio
            )
        )

        self.decay_ratio = abs(self.beta - (1 - self.data_anomaly_ratio)) / (
            self.max_epochs / 2
        )
        training_data, testing_data = data.random_split(
            dataset=self.dataset, lengths=[self.n_train, self.n_test]
        )

        self.training_loader = data.DataLoader(
            training_data, batch_size=batch_size, shuffle=True
        )

        self.testing_loader = data.DataLoader(
            testing_data, batch_size=self.n_test, shuffle=False
        )
        self.ae = None
        self.discriminator = None
        self.build_model()
        self.print_network()
Example #10
class Solver_RCA:
    def __init__(
        self,
        data_name,
        hidden_dim=128,  # number of hidden neurons in RCA
        z_dim=10,  # bottleneck dimension
        seed=0,  # random seed
        learning_rate=1e-3,  # learning rate
        batch_size=128,  #  batchsize
        training_ratio=0.8,  #  training data percentage
        max_epochs=100,  #  training epochs
        coteaching=1.0,  #  whether selects sample based on loss value
        oe=0.0,  # how much we overestimate the ground-truth anomaly ratio
        missing_ratio=0.0,  # missing ratio in the data
    ):
        # Data loader
        # read data here
        np.random.seed(seed)
        torch.manual_seed(seed)
        torch.cuda.manual_seed(seed)

        use_cuda = torch.cuda.is_available()
        self.data_name = data_name
        self.device = torch.device("cuda" if use_cuda else "cpu")
        data_path = "./data/" + data_name + ".npy"
        self.missing_ratio = missing_ratio
        self.model_save_path = "./trained_model/{}/{}/RCA/{}/".format(
            data_name, missing_ratio, seed
        )
        if oe == 0.0:
            self.result_path = "./results/{}/{}/RCA/{}/".format(
                data_name, missing_ratio, seed
            )
        else:
            self.result_path = "./results/{}/{}/RCA_{}/{}/".format(
                data_name, missing_ratio, oe, seed
            )

        os.makedirs(self.model_save_path, exist_ok=True)
        self.learning_rate = learning_rate
        self.dataset = RealDataset(
            data_path, missing_ratio=self.missing_ratio
        )
        self.seed = seed
        self.hidden_dim = hidden_dim
        self.z_dim = z_dim
        self.max_epochs = max_epochs
        self.coteaching = coteaching
        self.beta = 0.0  # initially, select all data
        self.alpha = 0.5
        self.data_path = data_path

        self.data_anomaly_ratio = self.dataset.__anomalyratio__() + oe

        self.input_dim = self.dataset.__dim__()
        self.data_normaly_ratio = 1 - self.data_anomaly_ratio

        n_sample = self.dataset.__len__()
        self.n_train = int(n_sample * (training_ratio))
        self.n_test = n_sample - self.n_train
        print(
            "|data dimension: {}|data noise ratio:{}".format(
                self.dataset.__dim__(), self.data_anomaly_ratio
            )
        )

        self.decay_ratio = abs(self.beta - (1 - self.data_anomaly_ratio)) / (
            self.max_epochs / 2
        )
        training_data, testing_data = data.random_split(
            dataset=self.dataset, lengths=[self.n_train, self.n_test]
        )

        self.training_loader = data.DataLoader(
            training_data, batch_size=batch_size, shuffle=True
        )

        self.testing_loader = data.DataLoader(
            testing_data, batch_size=self.n_test, shuffle=False
        )
        self.ae = None
        self.discriminator = None
        self.build_model()
        self.print_network()

    def build_model(self):
        self.ae = AE(
            input_dim=self.input_dim, hidden_dim=self.hidden_dim, z_dim=self.z_dim
        )
        self.ae = self.ae.to(self.device)

    def print_network(self):
        num_params = 0
        for p in self.ae.parameters():
            num_params += p.numel()
        print("The number of parameters: {}".format(num_params))

    def train(self):
        optimizer = torch.optim.Adam(self.ae.parameters(), lr=self.learning_rate)
        self.ae.eval()
        loss_mse = torch.nn.MSELoss(reduction='none')
        if self.data_name == 'optdigits':
            loss_mse = torch.nn.BCELoss(reduction='none')

        for epoch in tqdm(range(self.max_epochs)):  # alternately select low-error samples and update both autoencoders
            for i, (x, y) in enumerate(self.training_loader):
                x = x.to(self.device).float()
                n = x.shape[0]
                n_selected = int(n * (1 - self.beta))

                if self.coteaching == 0.0:
                    n_selected = n
                if i == 0:
                    current_ratio = "{}/{}".format(n_selected, n)

                optimizer.zero_grad()

                with torch.no_grad():
                    self.ae.eval()
                    z1, z2, xhat1, xhat2 = self.ae(x.float(), x.float())

                    error1 = loss_mse(xhat1, x)
                    error2 = loss_mse(xhat2, x)

                    error1 = error1.sum(dim=1)
                    error2 = error2.sum(dim=1)
                    _, index1 = torch.sort(error1)
                    _, index2 = torch.sort(error2)

                    index1 = index1[:n_selected]
                    index2 = index2[:n_selected]

                    x1 = x[index2, :]
                    x2 = x[index1, :]


                self.ae.train()
                z1, z2, xhat1, xhat2 = self.ae(x1.float(), x2.float())
                loss = loss_mse(xhat1, x1) + loss_mse(xhat2, x2)
                loss = loss.sum()
                loss.backward()
                optimizer.step()

            if self.beta < self.data_anomaly_ratio:
                self.beta = min(
                    self.data_anomaly_ratio, self.beta + self.decay_ratio
                )

    def test(self):
        print("======================TEST MODE======================")
        self.ae.train()
        mse_loss = torch.nn.MSELoss(reduction='none')
        if self.data_name == 'optdigits':
            mse_loss = torch.nn.BCELoss(reduction='none')

        error_list = []
        for _ in range(1000):  # ensemble the anomaly score over 1000 stochastic forward passes
            with torch.no_grad():
                for _, (x, y) in enumerate(self.testing_loader):  # the test loader yields a single batch of size n_test; image data would need a different loop
                    y = y.data.cpu().numpy()
                    x = x.to(self.device).float()
                    _, _, xhat1, xhat2 = self.ae(x.float(), x.float())
                    error = mse_loss(xhat1, x) + mse_loss(xhat2, x)
                    error = error.mean(dim=1)
                error = error.data.cpu().numpy()
                error_list.append(error)
        error_list = np.array(error_list)
        error = error_list.mean(axis=0)
        from sklearn.metrics import (
            precision_recall_fscore_support as prf,
            accuracy_score,
            roc_auc_score,
        )
        thresh = np.percentile(error, (1 - self.dataset.__anomalyratio__()) * 100)
        print("Threshold :", thresh)

        pred = (error > thresh).astype(int)
        gt = y.astype(int)
        auc = roc_auc_score(gt, error)
        accuracy = accuracy_score(gt, pred)
        precision, recall, f_score, support = prf(gt, pred, average="binary")

        print(
            "Accuracy : {:0.4f}, Precision : {:0.4f}, Recall : {:0.4f}, F-score : {:0.4f}, AUC : {:0.4f}".format(
                accuracy, precision, recall, f_score, auc
            )
        )

        os.makedirs(self.result_path, exist_ok=True)

        np.save(
            self.result_path + "result.npy",
            {
                "accuracy": accuracy,
                "precision": precision,
                "recall": recall,
                "f1": f_score,
                "auc": auc,
            },
        )
        print("result save to {}".format(self.result_path))
        return accuracy, precision, recall, f_score, auc
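The hard predictions above come from thresholding the averaged reconstruction error at a quantile tied to the assumed anomaly ratio. As a standalone step, that logic is simply:

import numpy as np

def threshold_by_ratio(scores, anomaly_ratio):
    # Flag the highest-scoring fraction of points as anomalies.
    thresh = np.percentile(scores, (1 - anomaly_ratio) * 100)
    return (scores > thresh).astype(int)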
Example #11
class Solver_RealNVP():
    def __init__(self,
                 data_name,
                 hidden_dim=256,
                 seed=0,
                 learning_rate=3e-4,
                 batch_size=128,
                 training_ratio=0.8,
                 validation_ratio=0.1,
                 max_epochs=100):
        np.random.seed(seed)
        torch.manual_seed(seed)
        torch.cuda.manual_seed(seed)
        use_cuda = torch.cuda.is_available()

        # if data_name == 'shuttle'
        self.device = torch.device("cuda" if use_cuda else "cpu")
        self.result_path = "./results/{}/0.0/RobustRealNVP/{}/".format(
            data_name, seed)
        data_path = "./data/" + data_name + ".npy"
        self.model_save_path = "./trained_model/{}/RobustRealNVP/{}/".format(
            data_name, seed)

        os.makedirs(self.model_save_path, exist_ok=True)

        self.learning_rate = learning_rate

        # self.dataset = RealGraphDataset(data_path, missing_ratio=0, radius=2)
        self.dataset = RealDataset(data_path, missing_ratio=0)
        self.seed = seed
        self.hidden_dim = hidden_dim
        self.max_epochs = max_epochs

        self.data_path = data_path
        self.data_anomaly_ratio = self.dataset.__anomalyratio__()
        self.batch_size = batch_size
        self.input_dim = self.dataset.__dim__()
        self.data_normaly_ratio = 1 - self.data_anomaly_ratio
        n_sample = self.dataset.__len__()
        self.n_train = int(n_sample * training_ratio)
        self.n_test = n_sample - self.n_train
        print('|data dimension: {}|data noise ratio:{}'.format(
            self.dataset.__dim__(), self.data_anomaly_ratio))

        training_data, testing_data = data.random_split(
            dataset=self.dataset, lengths=[self.n_train, self.n_test])

        self.training_loader = data.DataLoader(training_data,
                                               batch_size=batch_size,
                                               shuffle=True,
                                               drop_last=True)
        self.testing_loader = data.DataLoader(testing_data,
                                              batch_size=self.n_test,
                                              shuffle=False)
        self.ae = None
        self.discriminator = None
        self.build_model()
        self.print_network()

    def build_model(self):
        nets = lambda: nn.Sequential(
            nn.Linear(self.input_dim, self.hidden_dim), nn.LeakyReLU(),
            nn.Linear(self.hidden_dim, self.hidden_dim), nn.LeakyReLU(),
            nn.Linear(self.hidden_dim, self.input_dim), nn.Tanh())
        nett = lambda: nn.Sequential(
            nn.Linear(self.input_dim, self.hidden_dim), nn.LeakyReLU(),
            nn.Linear(self.hidden_dim, self.hidden_dim), nn.LeakyReLU(),
            nn.Linear(self.hidden_dim, self.input_dim))
        first_mask = np.array([0] * self.input_dim)
        second_mask = np.array([0] * self.input_dim)
        first_mask[int(self.input_dim / 2):] = 1
        second_mask[:(self.input_dim - int(self.input_dim / 2))] = 1

        masks = torch.from_numpy(
            np.array([first_mask, second_mask] * 3).astype(
                np.float32))  # 3 is the number of layers
        prior = distributions.MultivariateNormal(torch.zeros(self.input_dim),
                                                 torch.eye(self.input_dim))
        self.ae = Model_RealNVP(nets, nett, masks, prior)
        self.ae = self.ae.to(self.device)

    def print_network(self):
        num_params = 0
        for p in self.ae.parameters():
            num_params += p.numel()
        print("The number of parameters: {}".format(num_params))

    def train(self):
        optimizer_ae = Adam(self.ae.parameters(), lr=self.learning_rate)
        for block in self.ae.s:
            for layer in block:
                if isinstance(layer, nn.Linear):
                    # nn.utils.spectral_norm(layer)
                    spectral_norm(layer, L=1.5)

        for block in self.ae.t:
            for layer in block:
                if isinstance(layer, nn.Linear):
                    # nn.utils.spectral_norm(layer)
                    spectral_norm(layer, L=1.5)
        self.ae.train()
        select_rate = 1.0
        for epoch in range(self.max_epochs):
            self.ae.train()
            training_loss = 0.0

            for batch_idx, (x, _, _) in enumerate(self.training_loader):
                """ train RealNVP"""
                x = to_var(x)
                x = x.float()
                optimizer_ae.zero_grad()
                self.ae.zero_grad()
                loss = -self.ae.log_prob(x)
                # loss = loss.mean()
                with torch.no_grad():
                    _, index = torch.sort(loss)
                loss = loss[index[:int(self.batch_size * select_rate)]].mean()
                training_loss = training_loss + loss.item()
                loss.backward()
                optimizer_ae.step()
            select_rate = max(
                select_rate - (self.data_anomaly_ratio /
                               (0.05 * self.max_epochs)),
                1 - self.data_anomaly_ratio)
            # self.test()
            print("training epoch: {}| training loss: {:0.3f}".format(
                epoch, training_loss))

    def test(self):
        log_density_test = []
        y_test = []

        self.ae.eval()
        for batch_idx, (x, y, _) in enumerate(self.testing_loader):
            x = to_var(x)
            x = x.float()
            y = y.float()
            log_density = self.ae.log_prob(x)
            y_test.append(y)

            log_density_test.append(log_density)

        log_density_test = torch.cat(log_density_test)
        y_test = torch.cat(y_test)

        y_test = y_test.data.cpu().numpy()
        log_density_test = log_density_test.data.cpu().numpy()

        clean_index = np.where(y_test.squeeze() == 0)
        anomaly_index = np.where(y_test.squeeze() == 1)

        thresh = np.percentile(log_density_test,
                               (1 - self.data_normaly_ratio) * 100)
        print("Threshold :", thresh)

        pred = (log_density_test < thresh).astype(int)
        gt = y_test.astype(int)
        auc = roc_auc_score(gt, -log_density_test)

        from sklearn.metrics import precision_recall_fscore_support as prf, accuracy_score

        accuracy = accuracy_score(gt, pred)
        precision, recall, f_score, support = prf(gt, pred, average='binary')

        print(
            "Accuracy : {:0.4f}, Precision : {:0.4f}, Recall : {:0.4f}, F-score : {:0.4f}, AUC:{:0.4f}"
            .format(accuracy, precision, recall, f_score, auc))

        os.makedirs(self.result_path, exist_ok=True)

        np.save(
            self.result_path + "result.npy",
            {
                "accuracy": accuracy,
                "precision": precision,
                "recall": recall,
                "f1": f_score,
                "auc": auc,
            },
        )
        print("result save to {}".format(self.result_path))
        return accuracy, precision, recall, f_score, auc
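The to_var helper used in train() and test() is not defined in this listing; a common minimal implementation, offered here only as an assumption about the missing helper:

import torch

def to_var(x):
    # Hypothetical helper matching how to_var is called above: move the tensor to GPU when available.
    return x.cuda() if torch.cuda.is_available() else x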
Example #12
class Solver_AE_Coteaching:
    def __init__(
        self,
        data_name,
        start_ratio=0.0,
        decay_ratio=0.01,
        hidden_dim=128,
        z_dim=10,
        seed=0,
        learning_rate=1e-3,
        batch_size=128,
        training_ratio=0.8,
        validation_ratio=0.1,
        max_epochs=100,
        coteaching=1.0,
        missing_ratio=0.0,
        knn_impute=False,
    ):
        # Data loader
        # read data here
        np.random.seed(seed)
        torch.manual_seed(seed)
        torch.cuda.manual_seed(seed)

        use_cuda = torch.cuda.is_available()
        self.knn_impute = knn_impute
        self.device = torch.device("cuda" if use_cuda else "cpu")
        data_path = "./data/" + data_name + ".npy"
        self.missing_ratio = missing_ratio
        self.model_save_path = "./trained_model/{}/{}/Coteaching_VAE/{}/".format(
            data_name, missing_ratio, seed)
        self.result_path = "./results/{}/{}/Coteaching_VAE/{}/".format(
            data_name, missing_ratio, seed)
        os.makedirs(self.model_save_path, exist_ok=True)
        self.learning_rate = learning_rate
        self.dataset = RealDataset(data_path,
                                   missing_ratio=self.missing_ratio,
                                   knn_impute=knn_impute)
        self.data_name = data_name
        self.seed = seed
        self.start_ratio = start_ratio

        self.decay_ratio = decay_ratio
        self.hidden_dim = hidden_dim
        self.z_dim = z_dim
        self.max_epochs = max_epochs
        self.coteaching = coteaching
        self.start_ratio = start_ratio
        self.data_path = data_path
        self.data_anomaly_ratio = self.dataset.__anomalyratio__()

        self.input_dim = self.dataset.__dim__()
        self.data_normaly_ratio = 1 - self.data_anomaly_ratio

        n_sample = self.dataset.__len__()
        self.n_train = int(n_sample * (training_ratio + validation_ratio))
        # self.n_validation = int(n_sample * validation_ratio)
        self.n_test = n_sample - self.n_train
        print("|data dimension: {}|data noise ratio:{}".format(
            self.dataset.__dim__(), self.data_anomaly_ratio))

        self.decay_ratio = abs(self.start_ratio -
                               (1 - self.data_anomaly_ratio)) / (
                                   self.max_epochs / 2)
        training_data, testing_data = data.random_split(
            dataset=self.dataset, lengths=[self.n_train, self.n_test])

        self.training_loader = data.DataLoader(training_data,
                                               batch_size=batch_size,
                                               shuffle=True)
        self.testing_loader = data.DataLoader(testing_data,
                                              batch_size=self.n_test,
                                              shuffle=False)
        self.ae = None
        self.discriminator = None
        self.build_model()
        self.print_network()

    def build_model(self):
        self.ae = AE(input_dim=self.input_dim,
                     hidden_dim=self.hidden_dim,
                     z_dim=self.z_dim)
        self.ae = self.ae.to(self.device)
        self.ae.float()

    def print_network(self):
        num_params = 0
        for p in self.ae.parameters():
            num_params += p.numel()
        print("The number of parameters: {}".format(num_params))

    def train(self):
        if self.data_name == 'optdigits':
            loss_type = 'BCE'
        else:
            loss_type = 'MSE'
        optimizer = torch.optim.Adam(self.ae.parameters(),
                                     lr=self.learning_rate)
        mse_loss = torch.nn.MSELoss()
        vae_loss = VAE_LOSS()
        vae_score = VAE_LOSS_SCORE()

        min_val_error = 1e10
        for epoch in tqdm(range(self.max_epochs)):  # co-teaching VAE training loop
            for i, (x, y, m) in enumerate(self.training_loader):
                x = x.to(self.device)
                x = x.float()
                m = m.to(self.device).float()
                n = x.shape[0]
                n_selected = n

                if self.coteaching == 0.0:
                    n_selected = n
                if i == 0:
                    current_ratio = "{}/{}".format(n_selected, n)
                optimizer.zero_grad()
                with torch.no_grad():
                    self.ae.eval()

                    z1, z2, xhat1, xhat2, mu1, mu2, logvar1, logvar2 = self.ae(
                        x.float(), x.float())
                    error1 = vae_score(xhat1, x, mu1, logvar1)
                    error2 = vae_score(xhat2, x, mu2, logvar2)

                    _, index1 = torch.sort(error1)
                    _, index2 = torch.sort(error2)

                    index1 = index1[:n_selected]
                    index2 = index2[:n_selected]

                    x1 = x[index2, :]
                    x2 = x[index1, :]
                    m1 = m[index2, :]
                    m2 = m[index1, :]

                self.ae.train()
                z1, z2, xhat1, xhat2, mu1, logvar1, mu2, logvar2 = self.ae(
                    x1.float(), x2.float())
                loss = vae_loss(xhat1, x1, mu1, logvar1, loss_type) + vae_loss(
                    xhat2, x2, mu2, logvar2, loss_type)
                loss.backward()
                optimizer.step()

            # checkpoint the parameters each epoch so test() can reload them
            # (the validation/early-stopping block below is commented out)
            torch.save(
                self.ae.state_dict(),
                os.path.join(self.model_save_path, "parameter.pth"),
            )
            #
            # if self.start_ratio < self.data_anomaly_ratio:
            #     self.start_ratio = min(
            #         self.data_anomaly_ratio, self.start_ratio + self.decay_ratio
            #     )
            # if self.start_ratio > self.data_anomaly_ratio:
            #     self.start_ratio = max(
            #         self.data_anomaly_ratio, self.start_ratio - self.decay_ratio
            #     )  # 0.0005 for 0.1 anomaly, 0.0001 for 0.001 anomaly

            # with torch.no_grad():
            #     self.ae.eval()
            #     for i, (x, y, m) in enumerate(self.testing_loader):
            #         x = x.to(self.device)
            #         m = m.to(self.device).float()
            #         # y = y.to(device)
            #         x = x.float()
            #         _, _, xhat1, xhat2, mu1, mu2, logvar1, logvar2 = self.ae(x, x, m, m)
            #         error1 = vae_score(xhat1, x, mu1, logvar1, loss_type)
            #         error2 = vae_score(xhat2, x, mu2, logvar2, loss_type)
            #
            #         n_non_missing = m.sum(dim=1)
            #         error1 = error1 / n_non_missing
            #         error2 = error2 / n_non_missing
            #
            #         n_val = x.shape[0]
            #         n_selected = int(n_val * (1 - self.data_anomaly_ratio))
            #         if self.coteaching == 0.0:
            #             n_selected = n
            #         _, index1 = torch.sort(error1)
            #         _, index2 = torch.sort(error2)
            #         index1 = index1[:n_selected]
            #         index2 = index2[:n_selected]
            #
            #         x1 = x[index2, :]
            #         x2 = x[index1, :]
            #         m1 = m[index2, :]
            #         m2 = m[index1, :]
            #         z1, z2, xhat1, xhat2, mu1, mu2, logvar1, logvar2 = self.ae(x1, x2, m1, m2)
            #         val_loss = vae_loss(xhat1, x1, mu1, logvar1, loss_type) + vae_loss(xhat2, x2, mu2, logvar2, loss_type)
            #
            #         if val_loss < min_val_error:
            #             min_val_error = val_loss
            #             torch.save(
            #                 self.ae.state_dict(),
            #                 os.path.join(self.model_save_path, "parameter.pth"),
            #             )

    def test(self):
        print("======================TEST MODE======================")
        # self.dagmm.load_stat
        self.ae.load_state_dict(
            torch.load(self.model_save_path + "parameter.pth"))
        self.ae.eval()
        vae_loss = VAE_LOSS()
        vae_score = VAE_Outlier_SCORE()

        if self.data_name == 'optdigits':
            loss_type = 'BCE'
        else:
            loss_type = 'MSE'

        for _, (x, y, m) in enumerate(self.testing_loader):
            y = y.data.cpu().numpy()
            x = x.to(self.device).float()
            m = m.to(self.device).float()
            _, _, xhat1, xhat2, mu1, mu2, logvar1, logvar2 = self.ae(
                x.float(), x.float(), m, m)
            error1 = vae_score(xhat1, x, mu1, logvar1, loss_type)
            error2 = vae_score(xhat2, x, mu2, logvar2, loss_type)
            n_non_missing = m.sum(dim=1)
            error = (error1 / n_non_missing + error2 / n_non_missing)

        error = error.data.cpu().numpy()
        thresh = np.percentile(error, self.data_normaly_ratio * 100)
        print("Threshold :", thresh)

        pred = (error > thresh).astype(int)
        gt = y.astype(int)

        from sklearn.metrics import (
            precision_recall_fscore_support as prf,
            accuracy_score,
            roc_auc_score,
        )

        auc = roc_auc_score(gt, error)
        accuracy = accuracy_score(gt, pred)
        precision, recall, f_score, support = prf(gt, pred, average="binary")

        print(
            "Accuracy : {:0.4f}, Precision : {:0.4f}, Recall : {:0.4f}, F-score : {:0.4f}, AUC : {:0.4f}"
            .format(accuracy, precision, recall, f_score, auc))

        os.makedirs(self.result_path, exist_ok=True)

        np.save(
            self.result_path + "result.npy",
            {
                "accuracy": accuracy,
                "precision": precision,
                "recall": recall,
                "f1": f_score,
                "auc": auc,
            },
        )
        return accuracy, precision, recall, f_score, auc
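VAE_LOSS and VAE_LOSS_SCORE are external to this listing; the usual VAE objective they presumably wrap is a reconstruction term plus a KL divergence. A hedged per-batch sketch, not the original classes:

import torch
import torch.nn.functional as F

def vae_loss_sketch(xhat, x, mu, logvar, loss_type="MSE"):
    # Standard ELBO-style objective: reconstruction term + KL divergence to N(0, I).
    if loss_type == "BCE":
        recon = F.binary_cross_entropy(xhat, x, reduction="sum")
    else:
        recon = F.mse_loss(xhat, x, reduction="sum")
    kld = -0.5 * torch.sum(1 + logvar - mu.pow(2) - logvar.exp())
    return recon + kld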
Example #13
    def __init__(self,
                 data_name,
                 lambda_energy=0.1,
                 lambda_cov_diag=0.005,
                 hidden_dim=128,
                 z_dim=10,
                 seed=0,
                 learning_rate=1e-3,
                 gmm_k=2,
                 batch_size=128,
                 training_ratio=0.8,
                 validation_ratio=0.1,
                 max_epochs=100,
                 missing_ratio=0.0):
        # Data loader
        self.gmm_k = gmm_k
        self.lambda_energy = lambda_energy
        self.lambda_cov_diag = lambda_cov_diag
        # read data here
        np.random.seed(seed)
        torch.manual_seed(seed)
        torch.cuda.manual_seed(seed)
        use_cuda = torch.cuda.is_available()
        self.device = torch.device("cuda" if use_cuda else "cpu")
        data_path = "./data/" + data_name + ".npy"
        self.model_save_path = "./trained_model/{}/{}/DAGMM/{}/".format(
            data_name, missing_ratio, seed)
        self.result_path = "./results/{}/{}/DAGMM/{}/".format(
            data_name, missing_ratio, seed)
        os.makedirs(self.model_save_path, exist_ok=True)

        self.learning_rate = learning_rate
        self.missing_ratio = missing_ratio
        self.dataset = RealDataset(data_path, missing_ratio=self.missing_ratio)
        self.seed = seed
        self.hidden_dim = hidden_dim
        self.z_dim = z_dim
        self.max_epochs = max_epochs

        self.data_path = data_path
        self.data_anomaly_ratio = self.dataset.__anomalyratio__()
        self.input_dim = self.dataset.__dim__()
        self.data_normaly_ratio = 1 - self.data_anomaly_ratio
        n_sample = self.dataset.__len__()
        self.n_train = int(n_sample * (training_ratio + validation_ratio))
        # self.n_validation = int(n_sample * validation_ratio)
        self.n_test = n_sample - self.n_train
        print('|data dimension: {}|data noise ratio:{}'.format(
            self.dataset.__dim__(), self.data_anomaly_ratio))

        training_data, testing_data = data.random_split(
            dataset=self.dataset, lengths=[self.n_train, self.n_test])

        self.training_loader = data.DataLoader(training_data,
                                               batch_size=batch_size,
                                               shuffle=True)
        # self.validation_loader = data.DataLoader(validation_data, batch_size=self.n_validation, shuffle=False)
        self.testing_loader = data.DataLoader(testing_data,
                                              batch_size=self.n_test,
                                              shuffle=False)
        self.build_model()
        self.print_network()
Example #14
class Solver():
    DEFAULTS = {}

    def __init__(self,
                 data_name,
                 lambda_energy=0.1,
                 lambda_cov_diag=0.005,
                 hidden_dim=128,
                 z_dim=10,
                 seed=0,
                 learning_rate=1e-3,
                 gmm_k=2,
                 batch_size=128,
                 training_ratio=0.8,
                 validation_ratio=0.1,
                 max_epochs=100,
                 missing_ratio=0.0):
        # Data loader
        self.gmm_k = gmm_k
        self.lambda_energy = lambda_energy
        self.lambda_cov_diag = lambda_cov_diag
        # read data here
        np.random.seed(seed)
        torch.manual_seed(seed)
        torch.cuda.manual_seed(seed)
        use_cuda = torch.cuda.is_available()
        self.device = torch.device("cuda" if use_cuda else "cpu")
        data_path = "./data/" + data_name + ".npy"
        self.model_save_path = "./trained_model/{}/{}/DAGMM/{}/".format(
            data_name, missing_ratio, seed)
        self.result_path = "./results/{}/{}/DAGMM/{}/".format(
            data_name, missing_ratio, seed)
        os.makedirs(self.model_save_path, exist_ok=True)

        self.learning_rate = learning_rate
        self.missing_ratio = missing_ratio
        self.dataset = RealDataset(data_path, missing_ratio=self.missing_ratio)
        self.seed = seed
        self.hidden_dim = hidden_dim
        self.z_dim = z_dim
        self.max_epochs = max_epochs

        self.data_path = data_path
        self.data_anomaly_ratio = self.dataset.__anomalyratio__()
        self.input_dim = self.dataset.__dim__()
        self.data_normaly_ratio = 1 - self.data_anomaly_ratio
        n_sample = self.dataset.__len__()
        self.n_train = int(n_sample * (training_ratio + validation_ratio))
        # self.n_validation = int(n_sample * validation_ratio)
        self.n_test = n_sample - self.n_train
        print('|data dimension: {}|data noise ratio:{}'.format(
            self.dataset.__dim__(), self.data_anomaly_ratio))

        training_data, testing_data = data.random_split(
            dataset=self.dataset, lengths=[self.n_train, self.n_test])

        self.training_loader = data.DataLoader(training_data,
                                               batch_size=batch_size,
                                               shuffle=True)
        # self.validation_loader = data.DataLoader(validation_data, batch_size=self.n_validation, shuffle=False)
        self.testing_loader = data.DataLoader(testing_data,
                                              batch_size=self.n_test,
                                              shuffle=False)
        self.build_model()
        self.print_network()

    def build_model(self):
        # Define model
        self.dagmm = DaGMM(input_dim=self.input_dim,
                           hidden_dim=self.hidden_dim,
                           z_dim=self.z_dim,
                           n_gmm=self.gmm_k)
        # Optimizers
        self.optimizer = torch.optim.Adam(self.dagmm.parameters(),
                                          lr=self.learning_rate)
        # Print networks
        self.print_network()

        if torch.cuda.is_available():
            self.dagmm.cuda()

    def print_network(self):
        num_params = 0
        for p in self.dagmm.parameters():
            num_params += p.numel()
        # print(name)
        # print(model)
        print("The number of parameters: {}".format(num_params))

    def reset_grad(self):
        self.dagmm.zero_grad()

    def to_var(self, x, volatile=False):
        # `volatile` has no effect on PyTorch >= 0.4; this helper simply moves
        # the tensor to the GPU when one is available.
        if torch.cuda.is_available():
            x = x.cuda()
        return Variable(x, volatile=volatile)

    def train(self):
        iters_per_epoch = len(self.training_loader)

        # # Start with trained model if exists
        # if self.pretrained_model:
        #     start = int(self.pretrained_model.split('_')[0])
        # else:
        #     start = 0

        start = 0
        # Start training
        iter_ctr = 0
        start_time = time.time()
        min_val_loss = 1e+15
        # self.ap_global_train = np.array([0, 0, 0])
        for e in tqdm(range(start, self.max_epochs)):
            for i, (input_data, labels, _) in enumerate(self.training_loader):
                iter_ctr += 1
                start_time = time.time()

                input_data = self.to_var(input_data)

                # training
                total_loss, sample_energy, recon_error, cov_diag = self.dagmm_step(
                    input_data)
                # Logging
                loss = {}
                loss['total_loss'] = total_loss.data.item()
                loss['sample_energy'] = sample_energy.item()
                loss['recon_error'] = recon_error.item()
                loss['cov_diag'] = cov_diag.item()

                self.dagmm.eval()

            for i, (input_data, labels, _) in enumerate(self.testing_loader):
                iter_ctr += 1
                start_time = time.time()

                input_data = self.to_var(input_data)

                # validation
                self.dagmm.eval()
                total_loss, sample_energy, recon_error, cov_diag = self.dagmm_step(
                    input_data, validation_flag=True)
                # Logging
                loss = {}
                loss['total_loss'] = total_loss.data.item()
                loss['sample_energy'] = sample_energy.item()
                loss['recon_error'] = recon_error.item()
                loss['cov_diag'] = cov_diag.item()
                # Print out log info
                # if (i + 1) % self.log_step == 0:
                #     elapsed = time.time() - start_time
                #     total_time = ((self.num_epochs * iters_per_epoch) - (e * iters_per_epoch + i)) * elapsed / (
                #                 e * iters_per_epoch + i + 1)
                #     epoch_time = (iters_per_epoch - i) * elapsed / (e * iters_per_epoch + i + 1)
                #
                #     epoch_time = str(datetime.timedelta(seconds=epoch_time))
                #     total_time = str(datetime.timedelta(seconds=total_time))
                #     elapsed = str(datetime.timedelta(seconds=elapsed))
                #
                #     lr_tmp = []
                #     for param_group in self.optimizer.param_groups:
                #         lr_tmp.append(param_group['lr'])
                #     tmplr = np.squeeze(np.array(lr_tmp))
                #
                #     log = "Elapsed {}/{} -- {} , Epoch [{}/{}], Iter [{}/{}], lr {}".format(
                #         elapsed, epoch_time, total_time, e + 1, self.num_epochs, i + 1, iters_per_epoch, tmplr)
                #
                #     for tag, value in loss.items():
                #         log += ", {}: {:.4f}".format(tag, value)
                #
                #     IPython.display.clear_output()
                #     print(log)
                #
                #     if self.use_tensorboard:
                #         for tag, value in loss.items():
                #             self.logger.scalar_summary(tag, value, e * iters_per_epoch + i + 1)
                #     else:
                #         plt_ctr = 1
                #         if not hasattr(self, "loss_logs"):
                #             self.loss_logs = {}
                #             for loss_key in loss:
                #                 self.loss_logs[loss_key] = [loss[loss_key]]
                #                 plt.subplot(2, 2, plt_ctr)
                #                 plt.plot(np.array(self.loss_logs[loss_key]), label=loss_key)
                #                 plt.legend()
                #                 plt_ctr += 1
                #         else:
                #             for loss_key in loss:
                #                 self.loss_logs[loss_key].append(loss[loss_key])
                #                 plt.subplot(2, 2, plt_ctr)
                #                 plt.plot(np.array(self.loss_logs[loss_key]), label=loss_key)
                #                 plt.legend()
                #                 plt_ctr += 1
                #
                #         plt.show()
                #
                #     print("phi", self.dagmm.phi, "mu", self.dagmm.mu, "cov", self.dagmm.cov)
                # Save model checkpoints

            if loss['total_loss'] < min_val_loss:
                min_val_loss = loss['total_loss']
                torch.save(self.dagmm.state_dict(),
                           os.path.join(self.model_save_path, 'parameter.pth'))

    def dagmm_step(self, input_data, validation_flag=False):
        input_data = input_data.float()
        if not validation_flag:
            self.optimizer.zero_grad()
            self.dagmm.train()

            enc, dec, z, gamma = self.dagmm(input_data)
            if torch.isnan(z.sum()):
                for p in self.dagmm.parameters():
                    print(p)
                print("pause")
            total_loss, sample_energy, recon_error, cov_diag = self.dagmm.loss_function(
                input_data, dec, z, gamma, self.lambda_energy,
                self.lambda_cov_diag)

            # self.reset_grad()
            total_loss.backward()

            torch.nn.utils.clip_grad_norm_(self.dagmm.parameters(), 5)
            self.optimizer.step()

        else:
            self.dagmm.eval()
            enc, dec, z, gamma = self.dagmm(input_data)

            total_loss, sample_energy, recon_error, cov_diag = self.dagmm.loss_function(
                input_data, dec, z, gamma, self.lambda_energy,
                self.lambda_cov_diag)

        return total_loss, sample_energy, recon_error, cov_diag

    def test(self):
        print("======================TEST MODE======================")
        # self.dagmm.load_stat
        self.dagmm.load_state_dict(
            torch.load(self.model_save_path + 'parameter.pth'))
        self.dagmm.eval()
        # self.data_loader.dataset.mode = "train"

        # compute the parameter of density estimation by using training and validation set
        N = 0
        mu_sum = 0
        cov_sum = 0
        gamma_sum = 0

        for it, (input_data, labels, _) in enumerate(self.training_loader):

            input_data = self.to_var(input_data)
            input_data = input_data.float()
            enc, dec, z, gamma = self.dagmm(input_data)
            phi, mu, cov = self.dagmm.compute_gmm_params(z, gamma)

            batch_gamma_sum = torch.sum(gamma, dim=0)

            gamma_sum += batch_gamma_sum
            mu_sum += mu * batch_gamma_sum.unsqueeze(
                -1)  # keep sums of the numerator only
            cov_sum += cov * batch_gamma_sum.unsqueeze(-1).unsqueeze(
                -1)  # keep sums of the numerator only

            N += input_data.size(0)

        train_phi = gamma_sum / N
        train_mu = mu_sum / gamma_sum.unsqueeze(-1)
        train_cov = cov_sum / gamma_sum.unsqueeze(-1).unsqueeze(-1)

        print("N:", N)
        print("phi :\n", train_phi)
        print("mu :\n", train_mu)
        print("cov :\n", train_cov)

        train_energy = []
        train_labels = []
        train_z = []
        for it, (input_data, labels, _) in enumerate(self.training_loader):
            input_data = self.to_var(input_data)
            input_data = input_data.float()
            enc, dec, z, gamma = self.dagmm(input_data)
            sample_energy, cov_diag = self.dagmm.compute_energy(
                z,
                phi=train_phi,
                mu=train_mu,
                cov=train_cov,
                size_average=False)

            train_energy.append(sample_energy.data.cpu().numpy())
            train_z.append(z.data.cpu().numpy())
            train_labels.append(labels.numpy())

        train_energy = np.concatenate(train_energy, axis=0)
        train_z = np.concatenate(train_z, axis=0)
        train_labels = np.concatenate(train_labels, axis=0)

        test_energy = []
        test_labels = []
        test_z = []
        for it, (input_data, labels, _) in enumerate(self.testing_loader):
            input_data = self.to_var(input_data)
            input_data = input_data.float()
            enc, dec, z, gamma = self.dagmm(input_data)
            sample_energy, cov_diag = self.dagmm.compute_energy(
                z, size_average=False)
            test_energy.append(sample_energy.data.cpu().numpy())
            test_z.append(z.data.cpu().numpy())
            test_labels.append(labels.numpy())

        test_energy = np.concatenate(test_energy, axis=0)
        test_z = np.concatenate(test_z, axis=0)
        test_labels = np.concatenate(test_labels, axis=0)

        combined_energy = np.concatenate([train_energy, test_energy], axis=0)
        combined_labels = np.concatenate([train_labels, test_labels], axis=0)

        # thresh = np.percentile(combined_energy, 100 - 20)

        thresh = np.percentile(combined_energy, self.data_normaly_ratio * 100)
        # thresh = np.percentile(test_energy, self.data_normaly_ratio * 100)
        print("Threshold :", thresh)

        pred = (test_energy > thresh).astype(int)
        gt = test_labels.astype(int)

        from sklearn.metrics import precision_recall_fscore_support as prf, accuracy_score
        from sklearn.metrics import roc_auc_score

        auc = roc_auc_score(gt, test_energy)
        accuracy = accuracy_score(gt, pred)
        precision, recall, f_score, support = prf(gt, pred, average='binary')

        os.makedirs(self.result_path, exist_ok=True)

        np.save(
            self.result_path + "result.npy", {
                'auc': auc,
                'accuracy': accuracy,
                'precision': precision,
                'recall': recall,
                'f1': f_score
            })
        print(
            "Accuracy : {:0.4f}, Precision : {:0.4f}, Recall : {:0.4f}, F-score : {:0.4f}"
            .format(accuracy, precision, recall, f_score))
        return accuracy, precision, recall, f_score, auc
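
The decision rule in test() above boils down to one percentile cut: energies above the data_normaly_ratio percentile of the pooled train and test energies are flagged as anomalies. A self-contained NumPy restatement with made-up numbers (illustration only; the 2/6 anomaly ratio is assumed):

import numpy as np

combined_energy = np.array([0.10, 0.15, 0.20, 0.30, 4.2, 5.0])  # hypothetical energies
data_normaly_ratio = 1 - 2 / 6          # pretend two of the six samples are anomalous
thresh = np.percentile(combined_energy, data_normaly_ratio * 100)
pred = (combined_energy > thresh).astype(int)   # 1 = predicted anomaly
print(thresh, pred)                             # flags the two largest energies
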
Example no. 15
class Solver():
    DEFAULTS = {}

    def __init__(self,
                 data_name,
                 lambda_energy=0.1,
                 lambda_cov_diag=0.005,
                 hidden_dim=128,
                 z_dim=10,
                 seed=0,
                 learning_rate=1e-3,
                 gmm_k=2,
                 batch_size=128,
                 training_ratio=0.8,
                 validation_ratio=0.1,
                 max_epochs=100,
                 missing_ratio=0.0):
        # Data loader
        self.gmm_k = gmm_k
        self.lambda_energy = lambda_energy
        self.lambda_cov_diag = lambda_cov_diag
        # read data here
        np.random.seed(seed)
        torch.manual_seed(seed)
        torch.cuda.manual_seed(seed)
        use_cuda = torch.cuda.is_available()
        self.device = torch.device("cuda" if use_cuda else "cpu")
        data_path = "./data/" + data_name + ".npy"
        self.model_save_path = "./trained_model/{}/{}/DAGMM/{}/".format(
            data_name, missing_ratio, seed)
        self.result_path = "./results/{}/{}/DAGMM/{}/".format(
            data_name, missing_ratio, seed)
        os.makedirs(self.model_save_path, exist_ok=True)

        self.learning_rate = learning_rate
        self.missing_ratio = missing_ratio
        self.dataset = RealDataset(data_path, missing_ratio=self.missing_ratio)
        self.seed = seed
        self.hidden_dim = hidden_dim
        self.z_dim = z_dim
        self.max_epochs = max_epochs

        self.data_path = data_path
        self.data_anomaly_ratio = self.dataset.__anomalyratio__()
        self.input_dim = self.dataset.__dim__()
        self.data_normaly_ratio = 1 - self.data_anomaly_ratio
        n_sample = self.dataset.__len__()
        self.n_train = int(n_sample * (training_ratio))
        # self.n_validation = int(n_sample * validation_ratio)
        self.n_test = n_sample - self.n_train
        print('|data dimension: {}|data noise ratio:{}'.format(
            self.dataset.__dim__(), self.data_anomaly_ratio))

        training_data, testing_data = data.random_split(
            dataset=self.dataset, lengths=[self.n_train, self.n_test])

        self.training_loader = data.DataLoader(training_data,
                                               batch_size=batch_size,
                                               shuffle=True)
        # self.validation_loader = data.DataLoader(validation_data, batch_size=self.n_validation, shuffle=False)
        self.testing_loader = data.DataLoader(testing_data,
                                              batch_size=self.n_test,
                                              shuffle=False)
        self.build_model()
        self.print_network()

    def build_model(self):
        # Define model
        self.dagmm = DaGMM(input_dim=self.input_dim,
                           hidden_dim=self.hidden_dim,
                           z_dim=self.z_dim,
                           n_gmm=self.gmm_k)
        # Optimizers
        self.optimizer = torch.optim.Adam(self.dagmm.parameters(),
                                          lr=self.learning_rate)
        # Print networks
        self.print_network()

        if torch.cuda.is_available():
            self.dagmm.cuda()

    def print_network(self):
        num_params = 0
        for p in self.dagmm.parameters():
            num_params += p.numel()
        # print(name)
        # print(model)
        print("The number of parameters: {}".format(num_params))

    def reset_grad(self):
        self.dagmm.zero_grad()

    def to_var(self, x, volatile=False):
        if torch.cuda.is_available():
            x = x.cuda()
        return Variable(x, volatile=volatile)

    def train(self):
        iters_per_epoch = len(self.training_loader)

        start = 0
        # Start training
        iter_ctr = 0
        start_time = time.time()
        min_val_loss = 1e+15

        for e in tqdm(range(start, self.max_epochs)):
            for i, (input_data, labels) in enumerate(self.training_loader):
                iter_ctr += 1
                start_time = time.time()

                input_data = self.to_var(input_data)

                # training
                total_loss, sample_energy, recon_error, cov_diag = self.dagmm_step(
                    input_data)
                # Logging
                loss = {}
                loss['total_loss'] = total_loss.data.item()
                loss['sample_energy'] = sample_energy.item()
                loss['recon_error'] = recon_error.item()
                loss['cov_diag'] = cov_diag.item()

                self.dagmm.eval()

    def dagmm_step(self, input_data, validation_flag=False):
        input_data = input_data.float()
        if not validation_flag:
            self.optimizer.zero_grad()
            self.dagmm.train()

            enc, dec, z, gamma = self.dagmm(input_data)
            if torch.isnan(z.sum()):
                for p in self.dagmm.parameters():
                    print(p)
                print("pause")
            total_loss, sample_energy, recon_error, cov_diag = self.dagmm.loss_function(
                input_data, dec, z, gamma, self.lambda_energy,
                self.lambda_cov_diag)

            total_loss.backward()

            torch.nn.utils.clip_grad_norm_(self.dagmm.parameters(), 5)
            self.optimizer.step()

        else:
            self.dagmm.eval()
            enc, dec, z, gamma = self.dagmm(input_data)

            total_loss, sample_energy, recon_error, cov_diag = self.dagmm.loss_function(
                input_data, dec, z, gamma, self.lambda_energy,
                self.lambda_cov_diag)

        return total_loss, sample_energy, recon_error, cov_diag

    def test(self):
        print("======================TEST MODE======================")
        # self.dagmm.load_stat
        # self.dagmm.load_state_dict(torch.load(self.model_save_path + 'parameter.pth'))
        self.dagmm.eval()
        # self.data_loader.dataset.mode = "train"

        # compute the parameter of density estimation by using training and validation set
        N = 0
        mu_sum = 0
        cov_sum = 0
        gamma_sum = 0

        for it, (input_data, labels) in enumerate(self.training_loader):

            input_data = self.to_var(input_data)
            input_data = input_data.float()
            enc, dec, z, gamma = self.dagmm(input_data)
            phi, mu, cov = self.dagmm.compute_gmm_params(z, gamma)

            batch_gamma_sum = torch.sum(gamma, dim=0)

            gamma_sum += batch_gamma_sum
            mu_sum += mu * batch_gamma_sum.unsqueeze(
                -1)  # keep sums of the numerator only
            cov_sum += cov * batch_gamma_sum.unsqueeze(-1).unsqueeze(
                -1)  # keep sums of the numerator only

            N += input_data.size(0)

        train_phi = gamma_sum / N
        train_mu = mu_sum / gamma_sum.unsqueeze(-1)
        train_cov = cov_sum / gamma_sum.unsqueeze(-1).unsqueeze(-1)

        print("N:", N)
        print("phi :\n", train_phi)
        print("mu :\n", train_mu)
        print("cov :\n", train_cov)

        train_energy = []
        train_labels = []
        train_z = []
        for it, (input_data, labels) in enumerate(self.training_loader):
            input_data = self.to_var(input_data)
            input_data = input_data.float()
            enc, dec, z, gamma = self.dagmm(input_data)
            sample_energy, cov_diag = self.dagmm.compute_energy(
                z,
                phi=train_phi,
                mu=train_mu,
                cov=train_cov,
                size_average=False)

            train_energy.append(sample_energy.data.cpu().numpy())
            train_z.append(z.data.cpu().numpy())
            train_labels.append(labels.numpy())

        train_energy = np.concatenate(train_energy, axis=0)
        train_z = np.concatenate(train_z, axis=0)
        train_labels = np.concatenate(train_labels, axis=0)

        test_energy = []
        test_labels = []
        test_z = []
        for it, (input_data, labels) in enumerate(self.testing_loader):
            input_data = self.to_var(input_data)
            input_data = input_data.float()
            enc, dec, z, gamma = self.dagmm(input_data)
            sample_energy, cov_diag = self.dagmm.compute_energy(
                z, size_average=False)
            test_energy.append(sample_energy.data.cpu().numpy())
            test_z.append(z.data.cpu().numpy())
            test_labels.append(labels.numpy())

        test_energy = np.concatenate(test_energy, axis=0)
        test_z = np.concatenate(test_z, axis=0)
        test_labels = np.concatenate(test_labels, axis=0)

        combined_energy = np.concatenate([train_energy, test_energy], axis=0)
        combined_labels = np.concatenate([train_labels, test_labels], axis=0)

        thresh = np.percentile(combined_energy, self.data_normaly_ratio * 100)
        print("Threshold :", thresh)

        pred = (test_energy > thresh).astype(int)
        gt = test_labels.astype(int)

        from sklearn.metrics import precision_recall_fscore_support as prf, accuracy_score
        from sklearn.metrics import roc_auc_score

        auc = roc_auc_score(gt, test_energy)
        accuracy = accuracy_score(gt, pred)
        precision, recall, f_score, support = prf(gt, pred, average='binary')

        os.makedirs(self.result_path, exist_ok=True)

        np.save(
            self.result_path + "result.npy", {
                'auc': auc,
                'accuracy': accuracy,
                'precision': precision,
                'recall': recall,
                'f1': f_score
            })
        print(
            "Accuracy : {:0.4f}, Precision : {:0.4f}, Recall : {:0.4f}, F-score : {:0.4f}, AUC : {:0.4f}"
            .format(accuracy, precision, recall, f_score, auc))
        return accuracy, precision, recall, f_score, auc
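
For reference, the batch-wise accumulation of phi, mu and cov in test() above implements the usual responsibility-weighted GMM estimates. A compact NumPy sketch over a single in-memory batch (gamma is an [n, K] responsibility matrix, z an [n, d] latent code matrix; aggregate_gmm_params is an illustrative helper, not part of the DaGMM module):

import numpy as np

def aggregate_gmm_params(z, gamma):
    # phi_k : average responsibility of component k
    # mu_k  : responsibility-weighted mean of z for component k
    # cov_k : responsibility-weighted covariance of z around mu_k
    n = gamma.shape[0]
    nk = gamma.sum(axis=0)                                  # [K]
    phi = nk / n                                            # [K]
    mu = (gamma.T @ z) / nk[:, None]                        # [K, d]
    diff = z[None, :, :] - mu[:, None, :]                   # [K, n, d]
    cov = np.einsum("nk,knd,kne->kde", gamma, diff, diff) / nk[:, None, None]
    return phi, mu, cov
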
Example no. 16
class Solver_RCA_Multi:
    def __init__(
        self,
        data_name,
        n_member=2,
        start_ratio=0.0,
        decay_ratio=0.01,
        hidden_dim=128,
        z_dim=10,
        seed=0,
        learning_rate=1e-3,
        batch_size=128,
        training_ratio=0.8,
        validation_ratio=0.1,
        max_epochs=100,
        coteaching=1.0,
        oe=0.0,
        missing_ratio=0.0,
        knn_impute=False,
    ):
        # Data loader
        # read data here
        np.random.seed(seed)
        torch.manual_seed(seed)
        torch.cuda.manual_seed(seed)

        use_cuda = torch.cuda.is_available()
        self.data_name = data_name
        self.knn_impute = knn_impute
        self.device = torch.device("cuda" if use_cuda else "cpu")
        data_path = "./data/" + data_name + ".npy"
        self.missing_ratio = missing_ratio
        self.model_save_path = "./trained_model/{}/{}/{}-RCA/{}/".format(
            data_name, missing_ratio, n_member, seed
        )
        if oe == 0.0:
            self.result_path = "./results/{}/{}/{}-RCA/{}/".format(
                data_name, missing_ratio, n_member, seed
            )
        else:
            self.result_path = "./results/{}/{}/{}-RCA_{}/{}/".format(
                data_name, missing_ratio, n_member, oe, seed
            )

        os.makedirs(self.model_save_path, exist_ok=True)
        self.learning_rate = learning_rate
        self.dataset = RealDataset(data_path, missing_ratio=self.missing_ratio)
        self.seed = seed
        self.start_ratio = start_ratio

        self.decay_ratio = decay_ratio
        self.hidden_dim = hidden_dim
        self.z_dim = z_dim
        self.max_epochs = max_epochs
        self.coteaching = coteaching
        self.start_ratio = start_ratio
        self.data_path = data_path

        self.data_anomaly_ratio = self.dataset.__anomalyratio__() + oe

        self.input_dim = self.dataset.__dim__()
        self.data_normaly_ratio = 1 - self.data_anomaly_ratio

        n_sample = self.dataset.__len__()
        self.n_train = int(n_sample * (training_ratio))
        # self.n_validation = int(n_sample * validation_ratio)
        self.n_test = n_sample - self.n_train
        print(
            "|data dimension: {}|data noise ratio:{}".format(
                self.dataset.__dim__(), self.data_anomaly_ratio
            )
        )

        self.decay_ratio = abs(self.start_ratio - (1 - self.data_anomaly_ratio)) / (
            self.max_epochs / 2
        )
        training_data, testing_data = data.random_split(
            dataset=self.dataset, lengths=[self.n_train, self.n_test]
        )

        self.training_loader = data.DataLoader(
            training_data, batch_size=batch_size, shuffle=True
        )

        self.testing_loader = data.DataLoader(
            testing_data, batch_size=self.n_test, shuffle=False
        )
        self.n_member = n_member
        self.ae = None
        self.discriminator = None
        self.build_model()
        self.print_network()

    def build_model(self):
        self.ae = []
        for _ in range(self.n_member):
            ae = SingleAE(
                input_dim=self.input_dim, hidden_dim=self.hidden_dim, z_dim=self.z_dim
            )
            ae = ae.to(self.device)
            self.ae.append(ae)

    def print_network(self):
        num_params = 0
        for p in self.ae[0].parameters():
            num_params += p.numel()
        print(
            "The number of parameters: {}, number of networks: {}".format(
                num_params, self.n_member
            )
        )

    def train(self):
        optimizer = []
        for i in range(self.n_member):
            optimizer.append(
                torch.optim.Adam(self.ae[i].parameters(), lr=self.learning_rate)
            )
            self.ae[i].eval()

        loss_mse = torch.nn.MSELoss(reduction="none")
        if self.data_name == "optdigits":
            loss_mse = torch.nn.BCELoss(reduction="none")

        min_val_error = 1e10
        for epoch in tqdm(range(self.max_epochs)):  # train 3 time classifier
            for i, (x, y) in enumerate(self.training_loader):
                x = x.to(self.device).float()
                # m = m.to(self.device).float()
                n = x.shape[0]
                n_selected = int(n * (1 - self.start_ratio))

                if self.coteaching == 0.0:  # no co-teaching: keep the whole batch
                    n_selected = n
                if i == 0:
                    current_ratio = "{}/{}".format(n_selected, n)

                selected_all_model = []
                with torch.no_grad():
                    for model_idx in range(self.n_member):
                        self.ae[model_idx].eval()
                        xhat = self.ae[model_idx](x.float())
                        error = loss_mse(xhat, x)
                        error = error.sum(dim=1)
                        _, index = torch.sort(error)
                        index = index[:n_selected]
                        selected_all_model.append(index)

                    random.shuffle(selected_all_model)

                for model_idx in range(self.n_member):
                    optimizer[model_idx].zero_grad()
                    self.ae[model_idx].train()
                    xhat = self.ae[model_idx](x[selected_all_model[model_idx]])
                    error = loss_mse(xhat, x[selected_all_model[model_idx]])
                    error = error.mean()
                    error.backward()
                    optimizer[model_idx].step()

            if self.start_ratio < self.data_anomaly_ratio:
                self.start_ratio = min(
                    self.data_anomaly_ratio, self.start_ratio + self.decay_ratio
                )
            if self.start_ratio > self.data_anomaly_ratio:
                self.start_ratio = max(
                    self.data_anomaly_ratio, self.start_ratio - self.decay_ratio
                )  # 0.0005 for 0.1 anomaly, 0.0001 for 0.001 anomaly

            # with torch.no_grad():
            #     self.ae.eval()
            #     for i, (x, y, m) in enumerate(self.testing_loader):
            #         x = x.to(self.device).float()
            #         m = m.to(self.device).float()
            #         # y = y.to(device)
            #         x = x.float()
            #         _, _, xhat1, xhat2 = self.ae(x, x, m, m)
            #         error1 = loss_mse(xhat1, x)
            #         error2 = loss_mse(xhat2, x)
            #         error1 = error1.sum(dim=1)
            #         error2 = error2.sum(dim=1)
            #
            #         n_val = x.shape[0]
            #         n_selected = int(n_val * (1 - self.data_anomaly_ratio))
            #         if self.coteaching == 0.0:
            #             n_selected = n
            #         # n_selected = n_val
            #         _, index1 = torch.sort(error1)
            #         _, index2 = torch.sort(error2)
            #         index1 = index1[:n_selected]
            #         index2 = index2[:n_selected]
            #
            #         x1 = x[index2, :]
            #         x2 = x[index1, :]
            #         m1 = m[index2, :]
            #         m2 = m[index1, :]
            #         z1, z2, xhat1, xhat2 = self.ae(x1, x2, m1, m2)
            #         val_loss = loss_mse(x1, xhat1) + loss_mse(x2, xhat2)
            #         val_loss = val_loss.sum()
            #         if val_loss < min_val_error:
            #             # print(epoch)
            #             min_val_error = val_loss
            #             torch.save(
            #                 self.ae.state_dict(),
            #                 os.path.join(self.model_save_path, "parameter.pth"),
            #             )

            # scheduler.step()

    def test(self):
        print("======================TEST MODE======================")
        # self.dagmm.load_stat
        # self.ae.load_state_dict(torch.load(self.model_save_path + "parameter.pth"))
        # self.ae.eval()
        mse_loss = torch.nn.MSELoss(reduction="none")
        if self.data_name == "optdigits":
            mse_loss = torch.nn.BCELoss(reduction="none")

        error_list = []
        for _ in range(1000):  # ensemble the anomaly score over 1000 stochastic forward passes
            with torch.no_grad():
                error_average = torch.zeros(self.n_test).to(self.device)
                for model in self.ae:
                    model.train()
                    for _, (x, y) in enumerate(self.testing_loader):
                        y = y.data.cpu().numpy()
                        x = x.to(self.device).float()
                        # m = m.to(self.device).float()
                        xhat = model(x.float())
                        error = mse_loss(xhat, x)
                        error = error.sum(dim=1)
                        error_average = error_average + error

                error = error_average.data.cpu().numpy()
                error_list.append(error)
        error_list = np.array(error_list)
        # error_list = np.percentile(error, )
        error = error_list.mean(axis=0)
        from sklearn.metrics import (
            precision_recall_fscore_support as prf,
            accuracy_score,
            roc_auc_score,
        )

        thresh = np.percentile(error, self.data_normaly_ratio * 100)  # normal-ratio cut, as in the solvers above
        print("Threshold :", thresh)

        pred = (error > thresh).astype(int)
        gt = y.astype(int)
        auc = roc_auc_score(gt, error)
        accuracy = accuracy_score(gt, pred)
        precision, recall, f_score, support = prf(gt, pred, average="binary")

        print(
            "Accuracy : {:0.4f}, Precision : {:0.4f}, Recall : {:0.4f}, F-score : {:0.4f}, AUC : {:0.4f}".format(
                accuracy, precision, recall, f_score, auc
            )
        )

        os.makedirs(self.result_path, exist_ok=True)

        np.save(
            self.result_path + "result.npy",
            {
                "accuracy": accuracy,
                "precision": precision,
                "recall": recall,
                "f1": f_score,
                "auc": auc,
            },
        )
        print("result save to {}".format(self.result_path))
        return accuracy, precision, recall, f_score, auc
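
The core of Solver_RCA_Multi.train above is the trimming step: each member ranks the batch by its own reconstruction error, only the lowest-error fraction is kept, and the per-member selections are shuffled so that members are updated on each other's picks. A minimal sketch of that selection step (select_low_error is an illustrative helper, not part of the class; shapes and numbers are made up):

import torch

def select_low_error(x, xhat, keep_ratio):
    # Per-sample squared reconstruction error; the lowest-error samples are
    # assumed clean and kept for the parameter update.
    error = ((xhat - x) ** 2).sum(dim=1)
    n_selected = int(x.shape[0] * keep_ratio)
    return torch.argsort(error)[:n_selected]

# Tiny synthetic check: corrupt two of six rows and verify they are dropped.
x = torch.zeros(6, 3)
xhat = torch.zeros(6, 3)
xhat[4:] += 10.0                                  # large error on rows 4 and 5
idx = select_low_error(x, xhat, keep_ratio=4 / 6)
print(sorted(idx.tolist()))                       # -> [0, 1, 2, 3]
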