Example #1
def get_novelties(train_data, something_else, pos, neg):
    """
    :param train_data: vector embeddings of text train data
    :param something_else: vector embeddings of "something else" user utterances
    :param pos: vector embeddings of user utterances resulting in positive feedback
    :param neg: vector embeddings of user utterances resulting in negative feedback
    :return: novelty scores of all data passed in
    """
    clf = LocalOutlierFactor(n_neighbors=20,
                             novelty=True,
                             contamination='auto')
    clf.fit(train_data)
    y_train_scores = clf.negative_outlier_factor_
    y_train_scores = pd.DataFrame(y_train_scores, columns=['score'])
    y_train_scores['dataset'] = 'train'

    something_else_scores = clf.score_samples(something_else)  # outlier scores
    something_else_scores = pd.Series(something_else_scores, name='score')
    something_else_scores = something_else_scores.to_frame()
    something_else_scores['dataset'] = 'something else'

    pos_scores = clf.score_samples(pos)  # outlier scores
    pos_scores = pd.Series(pos_scores, name='score')
    pos_scores = pos_scores.to_frame()
    pos_scores['dataset'] = 'positive feedback'

    neg_scores = clf.score_samples(neg)  # outlier scores
    neg_scores = pd.Series(neg_scores, name='score')
    neg_scores = neg_scores.to_frame()
    neg_scores['dataset'] = 'negative feedback'

    scores = pd.concat(
        [y_train_scores, something_else_scores, pos_scores,
         neg_scores]).reset_index(drop=True)
    return scores
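Below is a minimal usage sketch for get_novelties, assuming pandas and LocalOutlierFactor are imported as in the original module; the embedding arrays and their shapes are hypothetical.

import numpy as np

rng = np.random.default_rng(0)
train_emb = rng.normal(size=(200, 64))           # in-scope training utterances
something_else_emb = rng.normal(size=(30, 64))   # "something else" utterances
pos_emb = rng.normal(size=(40, 64))              # positive-feedback utterances
neg_emb = rng.normal(size=(25, 64))              # negative-feedback utterances

scores = get_novelties(train_emb, something_else_emb, pos_emb, neg_emb)
print(scores.groupby('dataset')['score'].describe())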
def test_pipeline_score_samples_pca_lof():
    X, y = make_classification(
        n_classes=2,
        class_sep=2,
        weights=[0.3, 0.7],
        n_informative=3,
        n_redundant=1,
        flip_y=0,
        n_features=20,
        n_clusters_per_class=1,
        n_samples=500,
        random_state=0,
    )
    # Test that the score_samples method is implemented on a pipeline.
    # Test that the score_samples method on pipeline yields same results as
    # applying transform and score_samples steps separately.
    rus = RandomUnderSampler(random_state=42)
    pca = PCA(svd_solver="full", n_components="mle", whiten=True)
    lof = LocalOutlierFactor(novelty=True)
    pipe = Pipeline([("rus", rus), ("pca", pca), ("lof", lof)])
    pipe.fit(X, y)
    # Check the shapes
    assert pipe.score_samples(X).shape == (X.shape[0], )
    # Check the values
    X_res, _ = rus.fit_resample(X, y)
    lof.fit(pca.fit_transform(X_res))
    assert_allclose(pipe.score_samples(X), lof.score_samples(pca.transform(X)))
Example #3
class LOF(AnomalyDetector):
    """
        Anomaly detector based on local outlier factor
    """
    def __init__(self):
        self._model = LocalOutlierFactor(novelty=True)

    def learn(self, data):
        self._model.fit(data)

    def predict(self, data, obs):
        return self._model.predict(obs) == -1

    def get_score(self, data, epoch=None):
        return self._model.score_samples(data)

    def anomalies_have_high_score(self):
        return False

    def get_memory_size(self):
        return 0

    def save(self, filename):
        joblib.dump(self._model, filename)

    def load(self, filename):
        self._model = joblib.load(filename)
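A quick usage sketch for the LOF detector above, assuming the AnomalyDetector base class can be instantiated without extra setup and joblib is importable; the arrays are hypothetical.

import numpy as np

rng = np.random.RandomState(0)
train = rng.normal(size=(100, 4))
test = np.vstack([rng.normal(size=(9, 4)), [[10.0, 10.0, 10.0, 10.0]]])

det = LOF()
det.learn(train)
print(det.predict(train, test))  # True where a point is flagged as an anomaly
print(det.get_score(test))       # raw score_samples values (lower = more abnormal)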
Example #4
class StrangenessLOF:
    '''Strangeness based on the local outlier factor (LOF)
    
    Attributes:
    -----------
    k : int
        Parameter to find the distance to the k-nearest-neighbours
    
    X : array-like, shape (n_samples, n_features)
    '''
    def __init__(self, k=10):
        utils.validate_int_higher(k, 0)

        self.k = k
        self.fitted = False
        self.lof = LocalOutlierFactor(n_neighbors=k,
                                      novelty=True,
                                      contamination="auto")

    def is_fitted(self):
        return self.fitted

    def fit(self, X):
        '''Fits the LOF model on X (padded with the last row so that at least k samples are present)
        
        Parameters:
        -----------
        X : array-like, shape (n_samples, n_features)
        '''

        X_ = list(X) + [X[-1] for _ in range(self.k - len(X))]
        self.lof.fit(X_)
        self.fitted = True
        return self

    def get_fit_scores(self):
        return -1 * self.lof.negative_outlier_factor_

    def get(self, x):
        '''Computes the strangeness of x with respect to X
        
        Parameters:
        -----------
        x : array-like, shape (n_features,)
            Sample for which the strangeness is computed.
            
        Returns:
        --------
        outlier_score : float
            The local outlier factor score
        '''

        outlier_score = -1 * self.lof.score_samples([x])[0]
        return outlier_score
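A short usage sketch for StrangenessLOF, assuming the module's utils.validate_int_higher helper is available as in the original code; X and the query point are hypothetical.

import numpy as np

X = np.random.RandomState(0).normal(size=(50, 3))
strg = StrangenessLOF(k=10)
strg.fit(X)
print(strg.get_fit_scores()[:5])             # strangeness of the first training points
print(strg.get(np.array([5.0, 5.0, 5.0])))   # strangeness of a far-away point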
Example #5
def test_pipeline_score_samples_pca_lof():
    X = iris.data
    # Test that the score_samples method is implemented on a pipeline.
    # Test that the score_samples method on pipeline yields same results as
    # applying transform and score_samples steps separately.
    pca = PCA(svd_solver='full', n_components='mle', whiten=True)
    lof = LocalOutlierFactor(novelty=True)
    pipe = Pipeline([('pca', pca), ('lof', lof)])
    pipe.fit(X)
    # Check the shapes
    assert pipe.score_samples(X).shape == (X.shape[0], )
    # Check the values
    lof.fit(pca.fit_transform(X))
    assert_allclose(pipe.score_samples(X), lof.score_samples(pca.transform(X)))
def _lof_stdev(data, sensitivity, n_neighbors, trigger_on='low'):
    if data.shape[0] <= n_neighbors:
        # Not enough data to find outliers...
        return []

    if data.ndim == 1:
        data = data.reshape(-1, 1)

    clf = LocalOutlierFactor(novelty=True,
                             contamination=0.1,
                             n_neighbors=n_neighbors)
    clf.fit(data)
    lofs = clf.score_samples(data)

    return _stdev(lofs, sensitivity=sensitivity, trigger_on=trigger_on)
Example #7
def d_lof(X_seen,
          X_unseen=None,
          n_neighbors=20,
          algorithm='auto',
          metric='minkowski'):
    lof = LocalOutlierFactor(n_neighbors=n_neighbors,
                             algorithm=algorithm,
                             metric=metric,
                             novelty=not (X_unseen is None),
                             n_jobs=-1)
    lof.fit(X_seen)
    if X_unseen is None:
        return -lof.negative_outlier_factor_
    else:
        return -lof.score_samples(X_unseen)
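A brief sketch of calling d_lof in both modes, assuming X_seen and X_unseen are hypothetical arrays; with no unseen data it returns the training LOF scores, otherwise novelty scores for the unseen points.

import numpy as np

rng = np.random.RandomState(42)
X_seen = rng.normal(size=(100, 5))
X_unseen = rng.normal(loc=3.0, size=(10, 5))

train_scores = d_lof(X_seen)            # -negative_outlier_factor_ of the training data
novel_scores = d_lof(X_seen, X_unseen)  # -score_samples for the unseen data
print(train_scores.mean(), novel_scores.mean())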
Example #8
    def schedule(self, event_input_name, event_input_value,  data_from_pickle, X_predict, X_train, y_train,
                 n_neighbors, algorithm, leaf_size, metric, p, metric_params, contamination, novelty, n_jobs):

        if event_input_name == 'INIT':

            return [event_input_value, None, self.classifier, self.prediction, self.score_samples]

        elif event_input_name == 'RUN':

            if data_from_pickle is None:
                # default values or not
                if n_neighbors is not None:
                    self.n_neighbors = int(n_neighbors)
                if algorithm is not None:
                    self.algorithm = algorithm
                if leaf_size is not None:
                    self.leaf_size = int(leaf_size)
                if metric is not None:
                    self.metric = metric
                if p is not None:
                    self.p = int(p)
                if metric_params is not None:
                    self.metric_params = metric_params
                if contamination is not None:
                    if contamination == 'auto':
                        self.contamination = 'auto'
                    else:
                        self.contamination = float(contamination)
                if novelty is not None:
                    self.novelty = novelty
                if n_jobs is not None:
                    self.n_jobs = int(n_jobs)

                classif = LocalOutlierFactor(n_neighbors=self.n_neighbors, algorithm=self.algorithm, leaf_size=self.leaf_size,
                                           metric=self.metric, p=self.p, metric_params=self.metric_params,
                                           contamination=self.contamination, novelty=self.novelty, n_jobs=self.n_jobs)

                classif.fit(np.array(X_train).astype(np.float64), np.array(y_train).astype(np.float64))
                self.classifier = classif

                return [None, event_input_value, self.classifier, self.prediction, self.score_samples]
            else:
                classif = data_from_pickle
                self.classifier = classif
                self.prediction = classif.predict(np.array(X_predict).astype(np.float64).reshape(1, -1))
                self.score_samples = classif.score_samples(np.array(X_predict).astype(np.float64).reshape(1, -1))

                return [None, event_input_value, self.classifier,  self.prediction, self.score_samples]
def lof_nd(x_train, x_test, y_test, plot_roc=False):
    # lof novelty detection
    auc_lof = []
    for n_neighbors in trange(10, 30):
        for algorithm in ['auto', 'ball_tree', 'kd_tree', 'brute']:
            clf = LocalOutlierFactor(novelty=True,
                                     n_neighbors=n_neighbors,
                                     algorithm=algorithm)
            clf.fit(x_train)
            y_scores = clf.score_samples(x_test)
            lof = roc.area(y_test=y_test,
                           y_scores=y_scores,
                           pos_label=1,
                           title='LOF - ',
                           plot_roc=plot_roc)
            auc_lof.append([(n_neighbors, algorithm), lof])
    return auc_lof
def test_pipeline_score_samples_pca_lof():
    X, y = load_iris(return_X_y=True)
    sampling_strategy = {0: 50, 1: 30, 2: 20}
    X, y = make_imbalance(X, y, sampling_strategy=sampling_strategy)
    # Test that the score_samples method is implemented on a pipeline.
    # Test that the score_samples method on pipeline yields same results as
    # applying transform and score_samples steps separately.
    rus = RandomUnderSampler()
    pca = PCA(svd_solver='full', n_components='mle', whiten=True)
    lof = LocalOutlierFactor(novelty=True)
    pipe = Pipeline([('rus', rus), ('pca', pca), ('lof', lof)])
    pipe.fit(X, y)
    # Check the shapes
    assert pipe.score_samples(X).shape == (X.shape[0], )
    # Check the values
    lof.fit(pca.fit_transform(X))
    assert_allclose(pipe.score_samples(X), lof.score_samples(pca.transform(X)))
Example #11
 def test_local_outlier_factor_score_samples(self):
     lof = LocalOutlierFactor(n_neighbors=2, novelty=True)
     data = np.array([[-1.1, -1.2], [0.3, 0.2], [0.5, 0.4], [100., 99.]],
                     dtype=np.float32)
     model = lof.fit(data)
     model_onnx = to_onnx(model,
                          data,
                          target_opset=TARGET_OPSET,
                          options={'score_samples': True})
     sess = InferenceSession(model_onnx.SerializeToString())
     names = [o.name for o in sess.get_outputs()]
     self.assertEqual(names, ['label', 'scores', 'score_samples'])
     got = sess.run(None, {'X': data})
     self.assertEqual(len(got), 3)
     expected_label = lof.predict(data)
     expected_decif = lof.decision_function(data)
     expected_score = lof.score_samples(data)
     assert_almost_equal(expected_label, got[0].ravel())
     assert_almost_equal(expected_decif, got[1].ravel(), decimal=5)
     assert_almost_equal(expected_score, got[2].ravel(), decimal=5)
Example #12
class StrangenessLOF(Strangeness):
    '''Strangeness based on the local outlier factor (LOF)'''
    def __init__(self, k=10):
        super().__init__()
        utils.validate_int_higher(k, 0)
        self.k = k
        self.lof = LocalOutlierFactor(n_neighbors=k,
                                      novelty=True,
                                      contamination="auto")

    def fit(self, X):
        super().fit(X)
        X_ = list(X) + [X[-1] for _ in range(self.k - len(X))]
        self.lof.fit(X_)
        self.scores = -1 * self.lof.negative_outlier_factor_

    def predict(self, x):
        super().predict(x)
        outlier_score = -1 * self.lof.score_samples([x])[0]
        med = np.median(self.X, axis=0)  # FIXME: temporary hack
        diff = x - med
        return outlier_score, diff, med
def _lof(data, sensitivity, n_neighbors, trigger_on='low'):

    if data.shape[0] <= n_neighbors:
        # Not enough data to find outliers...
        return []

    if data.ndim == 1:
        data = data.reshape(-1, 1)

    clf = LocalOutlierFactor(novelty=True,
                             contamination=0.1,
                             n_neighbors=n_neighbors)
    clf.fit(data)
    lofs = clf.score_samples(data)

    sensitivity /= 100

    index = np.argsort(lofs)

    if trigger_on == 'low':
        return index[:int(len(index) * sensitivity)]

    return index[-int(len(index) * sensitivity):]
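A small sketch of _lof, assuming data is a hypothetical 1-D series of metric values and sensitivity is the percentage of points to flag.

import numpy as np

rng = np.random.RandomState(1)
data = np.concatenate([rng.normal(size=95), rng.normal(loc=8.0, size=5)])
flagged = _lof(data, sensitivity=5, n_neighbors=20, trigger_on='low')
print(flagged)  # indices of the ~5% of points with the lowest (most abnormal) LOF scores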
def predict_new_points(bi_df_pca_unique, test_dataset, mrs, cutoff):
    test_bi_pca_all = data_utils.get_test_transformed(test_dataset, mrs)
    true_labels = test_bi_pca_all[['label']]
    test_bi_pca = test_bi_pca_all.drop(['label'], axis=1)
    train_bi_pca = bi_df_pca_unique.drop(['label'], axis=1)

    # see what happened
    ot = test_bi_pca_all[test_bi_pca_all['label'] == -1]
    plt.scatter(ot[['pca_1']],
                ot[['pca_2']],
                s=50,
                linewidth=0,
                c='yellow',
                alpha=1,
                label='Test outliers')
    noot = test_bi_pca_all[test_bi_pca_all['label'] != -1]
    plt.scatter(noot[['pca_1']],
                noot[['pca_2']],
                s=50,
                linewidth=0,
                c='blue',
                alpha=1,
                label='Test data points')
    legend = plt.legend(loc='upper left')
    legend.legendHandles[2]._sizes = [30]
    legend.legendHandles[3]._sizes = [40]
    #

    clf_predict = LocalOutlierFactor(n_neighbors=k_neighbors,
                                     contamination=outliers_fraction,
                                     novelty=True)
    clf_predict.fit(train_bi_pca)
    outlier_scores = -clf_predict.score_samples(test_bi_pca)
    test_labels = (outlier_scores > cutoff).astype(int) * -1
    sensitivity, specificity, accuracy = data_utils.show_performance(
        true_labels, test_labels)
    return sensitivity, specificity, accuracy
Example #15
class myLocalOutlierFactor():
    def __init__(self, n_neighbors=20, p=2):
        self.n_neighbors_ = n_neighbors
        metric = 'manhattan' if p == 1 else 'sqeuclidean'
        self.lof_ = LocalOutlierFactor(n_neighbors=n_neighbors,
                                       metric=metric,
                                       novelty=False)
        self.lof_novel_ = LocalOutlierFactor(n_neighbors=n_neighbors,
                                             metric=metric,
                                             novelty=True)

    def fit(self, X_tr):
        # self.weight_ = X_tr.std(axis=0) ** (-1)
        self.weight_ = np.ones(X_tr.shape[1])
        self.X_tr_ = X_tr * self.weight_
        self.lof_ = self.lof_.fit(X_tr)
        self.lof_novel_ = self.lof_novel_.fit(X_tr)
        return self

    def k_distance(self, ind_tr):
        return self.lof_._distances_fit_X_[ind_tr, self.n_neighbors_ - 1]

    def local_reachability_density(self, ind_tr):
        return self.lof_._lrd[ind_tr]

    def get_params(self, subsample=-1, kernel='rbf'):
        if (subsample > 1):
            prototypes = prototype_selection(self.X_tr_,
                                             subsample=subsample,
                                             kernel=kernel)
        else:
            prototypes = np.array(range(self.X_tr_.shape[0]))
        return prototypes, self.k_distance(
            prototypes), self.local_reachability_density(prototypes)

    def local_outlier_factor(self, X_ts):
        return -self.lof_novel_.score_samples(X_ts * self.weight_)
def predictAnomalies(trainFeatures, testFeatures):
    nAttr = len(trainFeatures.dtype)
    nCatAttr = 3
    nRealAttr = nAttr - nCatAttr
    nTrainSamples = np.size(trainFeatures)
    nTestSamples = np.size(testFeatures)

    # Get list of names of categorical attributes
    catAttrNames = list(trainFeatures.dtype.names[-nCatAttr:])

    # Convert categorical features to binary using 1-of-K representation
    trainCat = trainFeatures[catAttrNames]
    trainCatDict = catFeatureDict(trainCat, catAttrNames)
    dv = DictVectorizer()
    trainCatEncoded = dv.fit_transform(trainCatDict).toarray()
    testCat = testFeatures[catAttrNames]
    testCatDict = catFeatureDict(testCat, catAttrNames)
    testCatEncoded = dv.transform(testCatDict).toarray()

    # Extract real features and convert all to float type
    trainReal = np.zeros((nTrainSamples, nRealAttr))
    testReal = np.zeros((nTestSamples, nRealAttr))
    for attr in range(nRealAttr):
        trainReal[:, attr] = trainFeatures['f' + str(attr)].astype(float)
        testReal[:, attr] = testFeatures['f' + str(attr)].astype(float)

    # Combine real features and encoded categorical features (now all of type
    # float)
    trainAll = np.c_[trainReal, trainCatEncoded]
    testAll = np.c_[testReal, testCatEncoded]

    # Anomaly detection: run three detectors (LOF, one-class SVM, Isolation
    # Forest) on the combined feature matrix and average their normalized scores

    #LOF "Large values corespond to inliers, abs makes lower more normal (need to normalize data)
    outlierFactor = LocalOutlierFactor(n_neighbors=10,
                                       novelty=True,
                                       contamination="auto")
    outlierFactor.fit(trainAll)
    outlierScore = outlierFactor.score_samples(testAll)
    outlierScore = np.abs(outlierScore)
    outlierScore = (outlierScore - min(outlierScore)) / (max(outlierScore) -
                                                         min(outlierScore))

    # One-class SVM: scores are min-max normalized to the range 0 to 1
    SVMfunction = svm.OneClassSVM(kernel="rbf", gamma="auto")
    SVMfunction.fit(trainAll)
    SVMScore = SVMfunction.score_samples(testAll)
    SVMScore = np.abs(SVMScore)
    SVMScore = (SVMScore - min(SVMScore)) / (max(SVMScore) - min(SVMScore))

    # Isolation Forest: more negative scores are more abnormal; after np.abs, higher values indicate more abnormal samples
    isolateForest = IsolationForest(contamination="auto", behaviour="new")
    isolateForest.fit(trainAll)
    isoScore = isolateForest.score_samples(testAll)
    isoScore = np.abs(isoScore)
    isoScore = (isoScore - min(isoScore)) / (max(isoScore) - min(isoScore))

    AverageScore = (outlierScore + SVMScore + isoScore) / 3

    return AverageScore
tsvd = TruncatedSVD(n_components=100)
tsvd.fit(train_x)
_train_x = tsvd.transform(train_x)

# print('explained variance', tsvd.singular_values_)
clf = IsolationForest(max_samples=10000,
                      contamination=0.0,
                      n_estimators=1000,
                      behaviour='old')
clf.fit(_train_x)

clf_1 = LocalOutlierFactor(n_neighbors=20, novelty=True)
clf_1.fit(_train_x)

_test_x = tsvd.transform(test_x)
result = clf_1.score_samples(_test_x)

res = list(result)
_id_score_dict = {id: _res for id, _res in zip(all_ids, res)}
tmp = sorted(_id_score_dict.items(), key=operator.itemgetter(1))
sorted_id_score_dict = OrderedDict()

for e in tmp:
    sorted_id_score_dict[e[0]] = e[1]

bounds = []
# training_pos_scores = clf.score_samples(
#     _train_x
# )
# training_pos_scores = [_[0] for _ in training_pos_scores]
from pprint import pprint
Example #18
class LOF(BaseDetector):
    """Wrapper of scikit-learn LOF Class with more functionalities.
    Unsupervised Outlier Detection using Local Outlier Factor (LOF).

    The anomaly score of each sample is called Local Outlier Factor.
    It measures the local deviation of density of a given sample with
    respect to its neighbors.
    It is local in that the anomaly score depends on how isolated the object
    is with respect to the surrounding neighborhood.
    More precisely, locality is given by k-nearest neighbors, whose distance
    is used to estimate the local density.
    By comparing the local density of a sample to the local densities of
    its neighbors, one can identify samples that have a substantially lower
    density than their neighbors. These are considered outliers.
    See :cite:`breunig2000lof` for details.

    Parameters
    ----------
    n_neighbors : int, optional (default=20)
        Number of neighbors to use by default for `kneighbors` queries.
        If n_neighbors is larger than the number of samples provided,
        all samples will be used.

    algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, optional
        Algorithm used to compute the nearest neighbors:

        - 'ball_tree' will use BallTree
        - 'kd_tree' will use KDTree
        - 'brute' will use a brute-force search.
        - 'auto' will attempt to decide the most appropriate algorithm
          based on the values passed to :meth:`fit` method.

        Note: fitting on sparse input will override the setting of
        this parameter, using brute force.

    leaf_size : int, optional (default=30)
        Leaf size passed to `BallTree` or `KDTree`. This can
        affect the speed of the construction and query, as well as the memory
        required to store the tree. The optimal value depends on the
        nature of the problem.

    metric : string or callable, default 'minkowski'
        metric used for the distance computation. Any metric from scikit-learn
        or scipy.spatial.distance can be used.

        If 'precomputed', the training input X is expected to be a distance
        matrix.

        If metric is a callable function, it is called on each
        pair of instances (rows) and the resulting value recorded. The callable
        should take two arrays as input and return one value indicating the
        distance between them. This works for Scipy's metrics, but is less
        efficient than passing the metric name as a string.

        Valid values for metric are:

        - from scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2',
          'manhattan']

        - from scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev',
          'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski',
          'mahalanobis', 'matching', 'minkowski', 'rogerstanimoto',
          'russellrao', 'seuclidean', 'sokalmichener', 'sokalsneath',
          'sqeuclidean', 'yule']

        See the documentation for scipy.spatial.distance for details on these
        metrics:
        http://docs.scipy.org/doc/scipy/reference/spatial.distance.html

    p : integer, optional (default = 2)
        Parameter for the Minkowski metric from
        sklearn.metrics.pairwise.pairwise_distances. When p = 1, this is
        equivalent to using manhattan_distance (l1), and euclidean_distance
        (l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
        See http://scikit-learn.org/stable/modules/generated/sklearn.metrics.pairwise.pairwise_distances

    metric_params : dict, optional (default = None)
        Additional keyword arguments for the metric function.

    contamination : float in (0., 0.5), optional (default=0.1)
        The amount of contamination of the data set, i.e. the proportion
        of outliers in the data set. When fitting this is used to define the
        threshold on the decision function.

    n_jobs : int, optional (default = 1)
        The number of parallel jobs to run for neighbors search.
        If ``-1``, then the number of jobs is set to the number of CPU cores.
        Affects only kneighbors and kneighbors_graph methods.

    novelty : bool (default=False)
        By default, LocalOutlierFactor is only meant to be used for outlier
        detection (novelty=False). Set novelty to True if you want to use
        LocalOutlierFactor for novelty detection. In this case, be aware that
        you should only use predict, decision_function and score_samples
        on new unseen data and not on the training set.

    Attributes
    ----------
    n_neighbors_ : int
        The actual number of neighbors used for `kneighbors` queries.

    decision_scores_ : numpy array of shape (n_samples,)
        The outlier scores of the training data.
        The higher, the more abnormal. Outliers tend to have higher
        scores. This value is available once the detector is
        fitted.

    threshold_ : float
        The threshold is based on ``contamination``. It is the
        ``n_samples * contamination`` most abnormal samples in
        ``decision_scores_``. The threshold is calculated for generating
        binary outlier labels.

    labels_ : int, either 0 or 1
        The binary labels of the training data. 0 stands for inliers
        and 1 for outliers/anomalies. It is generated by applying
        ``threshold_`` on ``decision_scores_``.
    """
    def __init__(self,
                 n_neighbors=20,
                 algorithm='auto',
                 leaf_size=30,
                 metric='minkowski',
                 p=2,
                 metric_params=None,
                 contamination=0.1,
                 n_jobs=1,
                 novelty=True):
        super(LOF, self).__init__(contamination=contamination)
        self.n_neighbors = n_neighbors
        self.algorithm = algorithm
        self.leaf_size = leaf_size
        self.metric = metric
        self.p = p
        self.metric_params = metric_params
        self.n_jobs = n_jobs
        self.novelty = novelty

    # noinspection PyIncorrectDocstring
    def fit(self, X, y=None):
        """Fit detector. y is ignored in unsupervised methods.

        Parameters
        ----------
        X : numpy array of shape (n_samples, n_features)
            The input samples.

        y : Ignored
            Not used, present for API consistency by convention.

        Returns
        -------
        self : object
            Fitted estimator.
        """
        # validate inputs X and y (optional)
        X = check_array(X)
        self._set_n_classes(y)

        self.detector_ = LocalOutlierFactor(n_neighbors=self.n_neighbors,
                                            algorithm=self.algorithm,
                                            leaf_size=self.leaf_size,
                                            metric=self.metric,
                                            p=self.p,
                                            metric_params=self.metric_params,
                                            contamination=self.contamination,
                                            n_jobs=self.n_jobs,
                                            novelty=self.novelty)
        self.detector_.fit(X=X, y=y)

        # Invert decision_scores_. Outliers come with higher outlier scores
        self.decision_scores_ = invert_order(
            self.detector_.negative_outlier_factor_)
        self._process_decision_scores()
        return self

    def decision_function(self, X):
        """Predict raw anomaly score of X using the fitted detector.

        The anomaly score of an input sample is computed based on different
        detector algorithms. For consistency, outliers are assigned with
        larger anomaly scores.

        Parameters
        ----------
        X : numpy array of shape (n_samples, n_features)
            The training input samples. Sparse matrices are accepted only
            if they are supported by the base estimator.

        Returns
        -------
        anomaly_scores : numpy array of shape (n_samples,)
            The anomaly score of the input samples.
        """

        check_is_fitted(self, ['decision_scores_', 'threshold_', 'labels_'])

        # Invert outlier scores. Outliers come with higher outlier scores
        # noinspection PyProtectedMember
        try:
            return invert_order(self.detector_._score_samples(X))
        except AttributeError:
            try:
                return invert_order(self.detector_._decision_function(X))
            except AttributeError:
                return invert_order(self.detector_.score_samples(X))

    @property
    def n_neighbors_(self):
        """The actual number of neighbors used for kneighbors queries.
        Decorator for scikit-learn LOF attributes.
        """
        return self.detector_.n_neighbors_
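A minimal sketch of using this PyOD-style wrapper, assuming the BaseDetector plumbing it relies on (check_array, invert_order, check_is_fitted, _process_decision_scores) is available as in the original package; the arrays are hypothetical.

import numpy as np

rng = np.random.RandomState(0)
X_train = rng.normal(size=(200, 2))
X_test = np.vstack([rng.normal(size=(20, 2)), [[6.0, 6.0]]])

det = LOF(n_neighbors=20, contamination=0.1)
det.fit(X_train)
print(det.labels_[:10])                # 0 = inlier, 1 = outlier on the training data
print(det.decision_function(X_test))   # higher scores mean more abnormal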
Example #19
class LOF(BaseOutlierDetector):
    """Local Outlier Factor.

    Parameters
    ----------
    algorithm : str, default 'auto'
        Tree algorithm to use. Valid algorithms are
        ['kd_tree'|'ball_tree'|'auto'].

    contamination : float, default 'auto'
        Proportion of outliers in the data set. Used to define the threshold.

    leaf_size : int, default 30
        Leaf size of the underlying tree.

    metric : str or callable, default 'minkowski'
        Distance metric to use.

    novelty : bool, default False
        If True, you can use predict, decision_function and anomaly_score on
        new unseen data and not on the training data.

    n_jobs : int, default 1
        Number of jobs to run in parallel. If -1, then the number of jobs is
        set to the number of CPU cores.

    n_neighbors : int, default 20
        Number of neighbors.

    p : int, default 2
        Power parameter for the Minkowski metric.

    metric_params : dict, default None
        Additional parameters passed to the requested metric.

    Attributes
    ----------
    anomaly_score_ : array-like of shape (n_samples,)
        Anomaly score for each training sample.

    contamination_ : float
        Actual proportion of outliers in the data set.

    threshold_ : float
        Threshold.

    References
    ----------
    .. [#breunig00] Breunig, M. M., Kriegel, H.-P., Ng, R. T., and Sander, J.,
        "LOF: identifying density-based local outliers,"
        In Proceedings of SIGMOD, pp. 93-104, 2000.

    .. [#kriegel11] Kriegel, H.-P., Kroger, P., Schubert, E., and Zimek, A.,
        "Interpreting and unifying outlier scores,"
        In Proceedings of SDM, pp. 13-24, 2011.

    Examples
    --------
    >>> import numpy as np
    >>> from kenchi.outlier_detection import LOF
    >>> X = np.array([
    ...     [0., 0.], [1., 1.], [2., 0.], [3., -1.], [4., 0.],
    ...     [5., 1.], [6., 0.], [7., -1.], [8., 0.], [1000., 1.]
    ... ])
    >>> det = LOF(n_neighbors=3)
    >>> det.fit_predict(X)
    array([ 1,  1,  1,  1,  1,  1,  1,  1,  1, -1])
    """
    @property
    def negative_outlier_factor_(self):
        """array-like of shape (n_samples,): Opposite LOF of the training
        samples.
        """

        return self.estimator_.negative_outlier_factor_

    @property
    def n_neighbors_(self):
        """int: Actual number of neighbors used for ``kneighbors`` queries.
        """

        return self.estimator_.n_neighbors_

    @property
    def X_(self):
        """array-like of shape (n_samples, n_features): Training data.
        """

        return self.estimator_._fit_X

    def __init__(self,
                 algorithm='auto',
                 contamination='auto',
                 leaf_size=30,
                 metric='minkowski',
                 novelty=False,
                 n_jobs=1,
                 n_neighbors=20,
                 p=2,
                 metric_params=None):
        self.algorithm = algorithm
        self.contamination = contamination
        self.leaf_size = leaf_size
        self.metric = metric
        self.novelty = novelty
        self.n_jobs = n_jobs
        self.n_neighbors = n_neighbors
        self.p = p
        self.metric_params = metric_params

    def _check_is_fitted(self):
        super()._check_is_fitted()

        check_is_fitted(self,
                        ['negative_outlier_factor_', 'n_neighbors_', 'X_'])

    def _get_threshold(self):
        return -self.estimator_.offset_ - 1.

    def _fit(self, X):
        self.estimator_ = LocalOutlierFactor(
            algorithm=self.algorithm,
            contamination=self.contamination,
            leaf_size=self.leaf_size,
            metric=self.metric,
            novelty=self.novelty,
            n_jobs=self.n_jobs,
            n_neighbors=self.n_neighbors,
            p=self.p,
            metric_params=self.metric_params).fit(X)

        return self

    def _anomaly_score(self, X, regularize=True):
        lof = self._lof(X)

        if regularize:
            return np.maximum(0., lof - 1.)
        else:
            return lof

    def _lof(self, X):
        """Compute the Local Outlier Factor (LOF) for each sample."""

        if X is self.X_:
            return -self.negative_outlier_factor_
        else:
            return -self.estimator_.score_samples(X)
"""
train_data = [tuple(values) for values in df.iloc[:100, 0:columns].values]

test_data=[tuple(values) for values in df.iloc[1:10, 0:columns].values]

"""
train_data = [
    tuple(values) for values in df.iloc[:train_len, 0:columns].values
]

test_data = [tuple(values) for values in df.iloc[train_len:, 0:columns].values]

clf = LocalOutlierFactor(n_neighbors=15, novelty=True, contamination=0.00001)
y_pred_X = clf.fit(train_data)
y_pred = clf.predict(test_data)
y_score = clf.score_samples(test_data)
y_list = list(y_score)

#print(type(y_list))
for td in range(len(test_data)):
    value = y_list[td]
    #print(value)
    if value < 1:
        th = 1
    else:
        th = 0
    t = list(test_data[td])
    t.append(th)
    test_data[td] = t

count_outlier = 0
for i in range(len(test_data)):
ax = plt.figure(figsize=(40, 5)).gca()
ax.xaxis.set_major_locator(mticker.MaxNLocator(integer=True))
'''
PCA + LOF(20)
'''

pca_20 = PCA(n_components=20)
data_reduced_20 = pca_20.fit_transform(data)

lof = LocalOutlierFactor(n_neighbors=5, novelty=True, metric='euclidean')
lof.fit(data_reduced_20)

data_test_reduced = pca_20.transform(data_test)  # reuse the PCA fitted on the training data

scores = -lof.score_samples(
    data_test_reduced
)  # we take the opposite since we want the higher the more abnormal

# np.savetxt('/content/gdrive/My Drive/scores_anomaly_pca_lof.csv', scores, fmt = '%1.6f', delimiter=',')
'''
Average + ADF
'''

a = np.zeros((data_test.shape[0], int(data_test.shape[1] / 1024)))
s = 0
for i in range(data_test.shape[0]):
    k = 0
    for j in range(data_test.shape[1]):
        s += data_test[i, j]
        if (j % 1024 != 0):
            continue
data = np.load(data_np_path + '.npz')
review = data['review']
novel = data['novel']

print(review.shape)
print(novel.shape)

lof = LocalOutlierFactor(n_neighbors=20, contamination='auto', n_jobs=-1, novelty=True)
res = lof.fit(review)

data_review = pd.read_csv(data_review_csv_path)
ngf = lof.negative_outlier_factor_
data_outliers = data_review.iloc[np.argsort(ngf)[0:10]]
data_outliers.to_csv(data_outliers_csv_path, header=1, index=0)

print(np.sort(ngf))

res = lof.predict(novel)
scores = lof.score_samples(novel)
print("LOF results:")
print(res)
print(scores)

clf = OneClassSVM(nu=0.1, kernel="rbf", gamma=0.1)
clf.fit(review)
res = clf.predict(novel)
scores = clf.score_samples(novel)
print("oc-SVM results:")
print(res)
print(scores)
Example #23
lof = LocalOutlierFactor(n_neighbors=20,
                         contamination='auto',
                         n_jobs=-1,
                         novelty=True)
res = lof.fit(feats)

# find the outliers of the dataset
'''
data_review = pd.read_csv(data_review_csv_path)
outliers = np.where(res == -1)
ngf = lof.negative_outlier_factor_
data_outliers = data_review.iloc[np.argsort(ngf)[0:outliers[0].shape[0]]]
data_outliers.to_csv(data_outliers_csv_path, header=1, index=0)
print("The outliers are saved!")
'''

# novelty
res = lof.predict(novel_feats)
scores = lof.score_samples(novel_feats)
print("results:")
print(res)
print(scores)

clf = OneClassSVM(nu=0.1, kernel="rbf", gamma=0.1)
clf.fit(feats)
res = clf.predict(novel_feats)
scores = clf.score_samples(novel_feats)
print("results:")
print(res)
print(scores)
Example #24
"""
train_data = [
    tuple(values) for values in df.iloc[:train_len, 0:columns].values
]

test_data = [tuple(values) for values in df.iloc[train_len:, 0:columns].values]

min_max_scaler = preprocessing.MinMaxScaler()
X_train_minmax = min_max_scaler.fit_transform(train_data)
X_test_minmax = min_max_scaler.transform(test_data)  # reuse the scaler fitted on the training data

clf = LocalOutlierFactor(n_neighbors=15, novelty=True, contamination=0.05)
y_pred_X = clf.fit(X_train_minmax)
y_pred = clf.predict(X_test_minmax)
y_score = clf.score_samples(X_train_minmax)
y_score = list(y_score)
#X_test_minmax=list(X_test_minmax)
#y_neg=clf.negative_outlier_factor_(test_data)

#print(type(y_score))

y_list = y_score

count = 0
#print(type(y_list))
for td in range(len(y_list)):
    value = y_list[td]
    print(value)
    if value < -1.5:
        th = 1
Example #25
print("Local Outlier Factor has processing time = {} and  AUC = {}".format(
    processing_time, roc_auc_score(ground_truth, y_pred)))

# Question 2. (Isolation Forest)
# Remove rows with missing values. Perform unsupervised outlier detection using Isolation Forest
# with number of trees = 100, sub-sampling size = 256. Use default values for other parameters. Plot
# the ROC curve. Report the processing time and AUC.

from sklearn.ensemble import IsolationForest

clf = IsolationForest(behaviour='new',
                      max_samples=256,
                      random_state=0,
                      contamination='auto')
tstart = time()
clf.fit(X)
y_pred = clf.score_samples(X)
processing_time = time() - tstart

from sklearn.metrics import roc_curve

fpr, tpr, thresholds = roc_curve(Y, y_pred)
import matplotlib.pyplot as plt

plt.plot(fpr, tpr, 'k-', lw=2)
plt.xlabel('FPR')
plt.ylabel('TPR')
plt.show()
print("Isolation Forest has processing time = {} and AUC = {}".format(
    processing_time, roc_auc_score(ground_truth, y_pred)))
Example #26
def evaluate(X,
             Z_list,
             n_classes,
             dataset,
             D=None,
             max_nbr_dimensions=100,
             max_nbr_instances_lof=1000,
             verbose=False):
    n_clusters = 5 * n_classes

    if D is not None:
        vectorizer = D['vectorizer']
        X = vectorizer.transform(X).toarray()

    is_image = False
    if X.ndim > 2:
        is_image = True
        s0, s1, s2 = X.shape
        X_r = X.reshape(s0, s1 * s2)
    else:
        X_r = X

    if verbose:
        print(datetime.datetime.now(), 'Scaling')
    scaler = StandardScaler()
    X_r_s = scaler.fit_transform(X_r)

    pca = None
    if max_nbr_dimensions is not None and X_r_s.shape[1] > max_nbr_dimensions:
        if verbose:
            print(datetime.datetime.now(), 'PCA')
        pca = PCA(n_components=max_nbr_dimensions)
        pca.fit(X_r_s)
        X_r_s = pca.transform(X_r_s)

    if verbose:
        print(datetime.datetime.now(), 'K-Means')
    kmeans = KMeans(n_clusters=n_clusters,
                    init='k-means++',
                    n_init=10,
                    max_iter=250,
                    tol=1e-4,
                    n_jobs=-1)
    kmeans.fit(X_r_s)

    if max_nbr_instances_lof is not None and len(
            X_r_s) > max_nbr_instances_lof:
        idx = np.random.choice(len(X_r_s),
                               size=max_nbr_instances_lof,
                               replace=False)
        X_r_s_lof = X_r_s[idx]
    else:
        X_r_s_lof = X_r_s

    if verbose:
        print(datetime.datetime.now(), 'LOF')

    lof = LocalOutlierFactor(n_neighbors=5, novelty=True, n_jobs=8)
    lof.fit(X_r_s_lof)

    # if len(X) > n_samples:
    #     idx = np.random.choice(len(X), size=n_samples, replace=False)
    #     X_real = X[idx]
    # else:
    #     X_real = X
    # y_real = [1] * len(X_real)

    distances_list = list()
    silhouette_list = list()
    lof_list = list()
    nbr_clusters_list = list()
    cluster_purity_list = list()
    nbr_classes_list = list()
    class_purity_list = list()
    accuracy_dict = defaultdict(list)
    deltas_list = defaultdict(list)

    real_mean = np.mean(X_r_s)
    real_std = np.std(X_r_s)
    real_min = np.min(X_r_s)
    real_max = np.max(X_r_s)
    real_median = np.median(X_r_s)

    # rus = RandomUnderSampler()

    for i, Z in enumerate(Z_list):
        # print(datetime.datetime.now(), i)

        if D is not None:
            vectorizer = D['vectorizer']
            Z = vectorizer.transform(Z).toarray()

        # if isinstance(Z, np.ndarray) and Z.ndim > 2:
        if is_image:
            if isinstance(Z, list):
                Z = np.array(Z)
            s0, s1, s2 = Z.shape
            Z_r = Z.reshape(s0, s1 * s2)
        else:
            Z_r = Z

        Z_s = scaler.transform(Z_r)
        Z_s_o = Z_s
        if pca is not None:
            Z_s = pca.transform(Z_s)

        labels = kmeans.predict(Z_s)
        dist = kmeans.transform(Z_s)

        # print('qui')
        if verbose:
            print(datetime.datetime.now(), 'distances', i)
        distances = [d[l] for l, d in zip(labels, dist)]
        distances_list.append([
            np.mean(distances),
            np.std(distances),
            np.sum(distances),
            np.median(distances),
            np.min(distances),
            np.max(distances)
        ])

        # print('quo')
        if verbose:
            print(datetime.datetime.now(), 'silhouette', i)
        if 1 < len(np.unique(labels)) < len(labels):
            sil_values = silhouette_samples(Z_s, labels)
            silhouette_list.append([
                np.mean(sil_values),
                np.std(sil_values),
                np.sum(sil_values),
                np.median(sil_values),
                np.min(sil_values),
                np.max(sil_values)
            ])
        else:
            silhouette_list.append([0.0, 0.0, 0.0, 0.0, 0.0, 0.0])

        # print('qua')
        if verbose:
            print(datetime.datetime.now(), 'lof', i)
        lof_values = -lof.score_samples(Z_s)
        lof_list.append([
            np.mean(lof_values),
            np.std(lof_values),
            np.sum(lof_values),
            np.median(lof_values),
            np.min(lof_values),
            np.max(lof_values)
        ])

        nbr_clusters_list.append(len(np.unique(labels)))

        _, counts = np.unique(labels, return_counts=True)
        cluster_purity = np.max(counts) / np.sum(counts)
        cluster_purity_list.append(cluster_purity)

        if verbose:
            print(datetime.datetime.now(), 'classes', i)
        clf = pickle.load(
            open(path_clf + '%s_%s.pickle' % (dataset, 'RF'), 'rb'))
        y_pred = clf.predict(Z_s_o)

        nbr_classes_list.append(len(np.unique(y_pred)))

        _, counts = np.unique(y_pred, return_counts=True)
        class_purity = np.max(counts) / np.sum(counts)
        class_purity_list.append(class_purity)

        # X_fake = Z_s
        # y_fake = [0] * len(X_fake)
        #
        # X_rf = np.concatenate([X_real, X_fake])
        # y_rf = np.concatenate([y_real, y_fake])
        #
        # X_rf, y_rf = rus.fit_resample(X_rf, y_rf)
        #
        # X_train, X_test, y_train, y_test = train_test_split(X_rf, y_rf, train_size=0.7, stratify=y_rf)
        #
        # for clf_name, clf in clf_list.items():
        #     # print(clf_name)
        #     clf.fit(X_train, y_train)
        #     y_pred_train = clf.predict(X_train)
        #     y_pred_test = clf.predict(X_test)
        #     acc_train = accuracy_score(y_train, y_pred_train)
        #     acc_test = accuracy_score(y_test, y_pred_test)
        #     accuracy_dict['%s_acc_train' % clf_name].append(acc_train)
        #     accuracy_dict['%s_acc_test' % clf_name].append(acc_test)

        fake_mean = np.mean(Z_s)
        fake_std = np.std(Z_s)
        fake_min = np.min(Z_s)
        fake_max = np.max(Z_s)
        fake_median = np.median(Z_s)

        if verbose:
            print(datetime.datetime.now(), 'deltas', i)
        deltas_list['delta_mean'].append(np.abs(fake_mean - real_mean))
        deltas_list['delta_std'].append(np.abs(fake_std - real_std))
        deltas_list['delta_min'].append(np.abs(fake_min - real_min))
        deltas_list['delta_max'].append(np.abs(fake_max - real_max))
        deltas_list['delta_median'].append(np.abs(fake_median - real_median))

        y_true = np.zeros(len(Z_s))

        if verbose:
            print(datetime.datetime.now(), 'discriminator', i)

        for clf_name in clf_list:
            clf = pickle.load(
                open(path_discr + '%s_%s.pickle' % (dataset, clf_name), 'rb'))
            data_type = datasets[dataset]
            if data_type == 'img':
                Z_s_o = Z_s_o * 255.0
                Z_s_o = (Z_s_o - 127.5) / 127.5
            y_pred = clf.predict(Z_s_o)
            acc = accuracy_score(y_true, y_pred)
            accuracy_dict['%s_accuracy' % clf_name].append(acc)

    distances_list = np.array(distances_list)
    silhouette_list = np.array(silhouette_list)
    lof_list = np.array(lof_list)

    eval_ng = {
        'dist_mean': np.mean(distances_list[:, 0]),
        'dist_std': np.mean(distances_list[:, 1]),
        'dist_sum': np.mean(distances_list[:, 2]),
        'dist_median': np.mean(distances_list[:, 3]),
        'dist_min': np.mean(distances_list[:, 4]),
        'dist_max': np.mean(distances_list[:, 5]),
        'sil_mean': np.mean(silhouette_list[:, 0]),
        'sil_std': np.mean(silhouette_list[:, 1]),
        'sil_sum': np.mean(silhouette_list[:, 2]),
        'sil_median': np.mean(silhouette_list[:, 3]),
        'sil_min': np.mean(silhouette_list[:, 4]),
        'sil_max': np.mean(silhouette_list[:, 5]),
        'lof_mean': np.mean(lof_list[:, 0]),
        'lof_std': np.mean(lof_list[:, 1]),
        'lof_sum': np.mean(lof_list[:, 2]),
        'lof_median': np.mean(lof_list[:, 3]),
        'lof_min': np.mean(lof_list[:, 4]),
        'lof_max': np.mean(lof_list[:, 5]),
        'cluster_purity_mean': float(np.mean(cluster_purity_list)),
        'cluster_purity_std': float(np.std(cluster_purity_list)),
        'cluster_purity_sum': float(np.sum(cluster_purity_list)),
        'cluster_purity_median': float(np.median(cluster_purity_list)),
        'cluster_purity_min': float(np.min(cluster_purity_list)),
        'cluster_purity_max': float(np.max(cluster_purity_list)),
        'nbr_clus_mean': float(np.mean(nbr_clusters_list)),
        'nbr_clus_std': float(np.std(nbr_clusters_list)),
        'nbr_clus_sum': float(np.sum(nbr_clusters_list)),
        'nbr_clus_median': float(np.median(nbr_clusters_list)),
        'nbr_clus_min': float(np.min(nbr_clusters_list)),
        'nbr_clus_max': float(np.max(nbr_clusters_list)),
        'nbr_clusters': n_clusters,
        'class_purity_mean': float(np.mean(class_purity_list)),
        'class_purity_std': float(np.std(class_purity_list)),
        'class_purity_sum': float(np.sum(class_purity_list)),
        'class_purity_median': float(np.median(class_purity_list)),
        'class_purity_min': float(np.min(class_purity_list)),
        'class_purity_max': float(np.max(class_purity_list)),
        'nbr_class_mean': float(np.mean(nbr_classes_list)),
        'nbr_class_std': float(np.std(nbr_classes_list)),
        'nbr_class_sum': float(np.sum(nbr_classes_list)),
        'nbr_class_median': float(np.median(nbr_classes_list)),
        'nbr_class_min': float(np.min(nbr_classes_list)),
        'nbr_class_max': float(np.max(nbr_classes_list)),
        'nbr_classs': n_classes,
    }

    for acc_name in accuracy_dict:
        eval_ng[acc_name] = np.mean(accuracy_dict[acc_name])

    for delta_name in deltas_list:
        eval_ng[delta_name] = np.mean(deltas_list[delta_name])

    return eval_ng