Example 1
    def __init__(self,
                 name: str = 'LSTM-ED',
                 num_epochs: int = 10,
                 batch_size: int = 20,
                 lr: float = 1e-3,
                 hidden_size: int = 5,
                 sequence_length: int = 30,
                 train_gaussian_percentage: float = 0.25,
                 n_layers: tuple = (1, 1),
                 use_bias: tuple = (True, True),
                 dropout: tuple = (0, 0),
                 seed: int = None,
                 gpu: int = None,
                 details=True,
                 contamination=0.05):
        deepBase.__init__(self, __name__, name, seed, details=details)
        PyTorchUtils.__init__(self, seed, gpu)
        self.num_epochs = num_epochs
        self.batch_size = batch_size
        self.lr = lr

        self.hidden_size = hidden_size
        self.sequence_length = sequence_length
        self.train_gaussian_percentage = train_gaussian_percentage  # fraction of training data held out to fit the error Gaussian

        self.n_layers = n_layers
        self.use_bias = use_bias
        self.dropout = dropout

        self.lstmed = None                # encoder-decoder module, created later
        self.mean, self.cov = None, None  # Gaussian over reconstruction errors, fitted later
        self.contamination = contamination
Example 2
    def __init__(self, n_features: int, hidden_size: int, n_layers: tuple,
                 use_bias: tuple, dropout: tuple, seed: int, gpu: int):
        super().__init__()
        PyTorchUtils.__init__(self, seed, gpu)
        self.n_features = n_features
        self.hidden_size = hidden_size

        self.n_layers = n_layers
        self.use_bias = use_bias
        self.dropout = dropout

        self.encoder = nn.LSTM(self.n_features,
                               self.hidden_size,
                               batch_first=True,
                               num_layers=self.n_layers[0],
                               bias=self.use_bias[0],
                               dropout=self.dropout[0])
        self.to_device(self.encoder)
        self.decoder = nn.LSTM(self.n_features,
                               self.hidden_size,
                               batch_first=True,
                               num_layers=self.n_layers[1],
                               bias=self.use_bias[1],
                               dropout=self.dropout[1])
        self.to_device(self.decoder)
        # Projects the decoder's hidden state back to the feature space.
        self.hidden2output = nn.Linear(self.hidden_size, self.n_features)
        self.to_device(self.hidden2output)
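
Because both LSTMs are built with batch_first=True, inputs are laid out as (batch, seq_len, n_features). A minimal, self-contained shape check with plain PyTorch may help; it omits the PyTorchUtils device handling, and all sizes are illustrative:

import torch
import torch.nn as nn

n_features, hidden_size, seq_len, batch = 3, 5, 30, 4
encoder = nn.LSTM(n_features, hidden_size, batch_first=True,
                  num_layers=1, bias=True, dropout=0.0)

x = torch.randn(batch, seq_len, n_features)  # batch_first=True layout
output, (h_n, c_n) = encoder(x)
print(output.shape)  # torch.Size([4, 30, 5]) - hidden state per time step
print(h_n.shape)     # torch.Size([1, 4, 5])  - final hidden state per layer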
Example 3
    def __init__(self, len_in=1, len_out=10, num_epochs=10, lr=1e-3, batch_size=1,
                 seed: int = None, gpu: int = None, details=True, contamination=0.05):
        deepBase.__init__(self, __name__, 'LSTM-AD', seed, details=details)
        PyTorchUtils.__init__(self, seed, gpu)
        self.num_epochs = num_epochs
        self.lr = lr
        self.batch_size = batch_size

        self.len_in = len_in
        self.len_out = len_out

        self.mean, self.cov = None, None
        self.contamination = contamination
Example 4
    def __init__(self,
                 num_epochs=10,
                 lambda_energy=0.1,
                 lambda_cov_diag=0.005,
                 lr=1e-3,
                 batch_size=50,
                 gmm_k=3,
                 normal_percentile=80,
                 sequence_length=30,
                 autoencoder_type=AutoEncoderModule,
                 autoencoder_args=None,
                 hidden_size: int = 5,
                 seed: int = None,
                 gpu: int = None,
                 details=True,
                 contamination=0.05):
        _name = 'LSTM-DAGMM' if autoencoder_type == LSTMEDModule else 'DAGMM'
        deepBase.__init__(self, __name__, _name, seed, details=details)
        PyTorchUtils.__init__(self, seed, gpu)
        self.num_epochs = num_epochs
        self.lambda_energy = lambda_energy
        self.lambda_cov_diag = lambda_cov_diag
        self.lr = lr
        self.batch_size = batch_size
        self.sequence_length = sequence_length
        self.gmm_k = gmm_k  # Number of Gaussian mixtures
        self.normal_percentile = normal_percentile  # Up to which percentile data should be considered normal
        self.autoencoder_type = autoencoder_type
        if autoencoder_type == AutoEncoderModule:
            self.autoencoder_args = {'sequence_length': self.sequence_length}
        elif autoencoder_type == LSTMEDModule:
            self.autoencoder_args = {
                'n_layers': (1, 1),
                'use_bias': (True, True),
                'dropout': (0.0, 0.0)
            }
        else:
            # Fallback so the update() calls below never hit an unset attribute
            self.autoencoder_args = {}
        self.autoencoder_args.update({'seed': seed, 'gpu': gpu})
        if autoencoder_args is not None:
            self.autoencoder_args.update(autoencoder_args)
        self.hidden_size = hidden_size

        self.dagmm, self.optimizer, self.train_energy, self._threshold = None, None, None, None
        self.contamination = contamination
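
The three update() calls define a clear precedence for autoencoder_args: type-specific defaults first, then seed/gpu, then caller-supplied overrides, which win. A plain-dict illustration of that merging (the override values are made up):

# Precedence of the autoencoder_args merging above, on plain dicts.
args = {'n_layers': (1, 1), 'use_bias': (True, True), 'dropout': (0.0, 0.0)}
args.update({'seed': 42, 'gpu': None})  # always injected
args.update({'dropout': (0.2, 0.2)})    # hypothetical user override wins
print(args['dropout'])                  # (0.2, 0.2)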
Example 5
    def __init__(self, autoencoder, n_gmm, latent_dim, seed: int, gpu: int):
        super(DAGMMModule, self).__init__()
        PyTorchUtils.__init__(self, seed, gpu)

        self.add_module('autoencoder', autoencoder)

        layers = [
            nn.Linear(latent_dim, 10),
            nn.Tanh(),
            nn.Dropout(p=0.5),
            nn.Linear(10, n_gmm),
            nn.Softmax(dim=1)
        ]
        self.estimation = nn.Sequential(*layers)
        self.to_device(self.estimation)

        # GMM parameters kept as buffers so they move with the module and are
        # saved in its state dict: mixture weights, means and covariances.
        self.register_buffer('phi', self.to_var(torch.zeros(n_gmm)))
        self.register_buffer('mu', self.to_var(torch.zeros(n_gmm, latent_dim)))
        self.register_buffer(
            'cov', self.to_var(torch.zeros(n_gmm, latent_dim, latent_dim)))
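
The estimation network itself is a standalone classifier from the latent representation to soft mixture memberships. A self-contained sketch of just that part, without the DAGMM plumbing (latent_dim and n_gmm values are illustrative):

import torch
import torch.nn as nn

latent_dim, n_gmm, batch = 4, 3, 8
estimation = nn.Sequential(
    nn.Linear(latent_dim, 10),
    nn.Tanh(),
    nn.Dropout(p=0.5),
    nn.Linear(10, n_gmm),
    nn.Softmax(dim=1),
)

z = torch.randn(batch, latent_dim)  # stand-in latent representation
gamma = estimation(z)               # soft mixture-membership probabilities
print(gamma.shape)                  # torch.Size([8, 3])
print(gamma.sum(dim=1))             # each row sums to 1 (softmax over mixtures)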
Example 6
    def __init__(self, n_features: int, sequence_length: int, hidden_size: int,
                 seed: int, gpu: int):
        super().__init__()
        PyTorchUtils.__init__(self, seed, gpu)
        input_length = n_features * sequence_length

        # Intermediate layer widths: powers of two strictly between hidden_size
        # and input_length; the decoder widens from hidden_size to input_length
        # and the encoder mirrors it.
        dec_steps = 2**np.arange(max(np.ceil(np.log2(hidden_size)), 2),
                                 np.log2(input_length))[1:]
        dec_setup = np.concatenate([[hidden_size],
                                    dec_steps.repeat(2), [input_length]])
        enc_setup = dec_setup[::-1]

        # Linear/Tanh pairs for each consecutive width pair; [:-1] drops the
        # trailing Tanh so the final layer's output stays linear.
        layers = np.array([[nn.Linear(int(a), int(b)),
                            nn.Tanh()] for a, b in enc_setup.reshape(-1, 2)
                           ]).flatten()[:-1]
        self._encoder = nn.Sequential(*layers)
        self.to_device(self._encoder)

        layers = np.array([[nn.Linear(int(a), int(b)),
                            nn.Tanh()] for a, b in dec_setup.reshape(-1, 2)
                           ]).flatten()[:-1]
        self._decoder = nn.Sequential(*layers)
        self.to_device(self._decoder)
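
A worked example of the layer-size arithmetic, numpy only; the concrete numbers assume hidden_size=5 and input_length=30 (i.e. one feature with the default sequence length of 30):

import numpy as np

hidden_size, input_length = 5, 30
dec_steps = 2 ** np.arange(max(np.ceil(np.log2(hidden_size)), 2),
                           np.log2(input_length))[1:]
dec_setup = np.concatenate([[hidden_size], dec_steps.repeat(2), [input_length]])
enc_setup = dec_setup[::-1]
print(dec_steps)  # [16.] - powers of two strictly between hidden and input size
print(dec_setup)  # [ 5. 16. 16. 30.] -> decoder: Linear(5,16), Tanh, Linear(16,30)
print(enc_setup)  # [30. 16. 16.  5.] -> encoder: the mirrored stack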
Example 7
    def __init__(self,
                 name: str = 'AutoEncoder',
                 num_epochs: int = 10,
                 batch_size: int = 20,
                 lr: float = 1e-3,
                 hidden_size: int = 5,
                 sequence_length: int = 30,
                 train_gaussian_percentage: float = 0.25,
                 seed: int = None,
                 gpu: int = None,
                 details=True,
                 contamination=0.05):
        deepBase.__init__(self, __name__, name, seed, details=details)
        PyTorchUtils.__init__(self, seed, gpu)
        self.num_epochs = num_epochs
        self.batch_size = batch_size
        self.lr = lr

        self.hidden_size = hidden_size
        self.sequence_length = sequence_length
        self.train_gaussian_percentage = train_gaussian_percentage
        self.contamination = contamination
        self.aed = None
        self.mean, self.cov = None, None
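
The mean/cov pair, together with train_gaussian_percentage, points at the pattern shared by the detectors in these examples: fit a multivariate Gaussian to reconstruction errors on a held-out slice of the training data, then score new points by how unlikely their error is. The following sketch is an assumption about the unshown fit/predict code, not the library's actual implementation:

# Sketch of the presumed role of self.mean / self.cov; all data is synthetic.
import numpy as np
from scipy.stats import multivariate_normal

rng = np.random.default_rng(0)
errors = rng.normal(size=(100, 3))  # stand-in reconstruction errors
mean, cov = errors.mean(axis=0), np.cov(errors, rowvar=False)

new_errors = rng.normal(size=(5, 3))
scores = -multivariate_normal.logpdf(new_errors, mean=mean, cov=cov)
print(scores)  # higher score = less likely under the training-error Gaussian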