Example #1
  def testSerialization(self):
    """serialization using pickle"""
    # instances to test
    aDef = Anomaly()
    aLike = Anomaly(mode=Anomaly.MODE_LIKELIHOOD)
    aWeig = Anomaly(mode=Anomaly.MODE_WEIGHTED)
    # test Anomaly with all the bells and whistles (MovingAverage, Likelihood, ...)
    aAll = Anomaly(mode=Anomaly.MODE_LIKELIHOOD, slidingWindowSize=5)
    inst = [aDef, aLike, aWeig, aAll] 

    for a in inst:
      stored = pickle.dumps(a)
      restored = pickle.loads(stored)
      self.assertEqual(a, restored)
Example #2
    def testEquals(self):
        an = Anomaly()
        anP = Anomaly()
        self.assertEqual(an, anP, "default constructors equal")

        anN = Anomaly(mode=Anomaly.MODE_LIKELIHOOD)
        self.assertNotEqual(an, anN)
        an = Anomaly(mode=Anomaly.MODE_LIKELIHOOD)
        self.assertEqual(an, anN)

        an = Anomaly(slidingWindowSize=5,
                     mode=Anomaly.MODE_WEIGHTED,
                     binaryAnomalyThreshold=0.9)
        anP = Anomaly(slidingWindowSize=5,
                      mode=Anomaly.MODE_WEIGHTED,
                      binaryAnomalyThreshold=0.9)
        anN = Anomaly(slidingWindowSize=4,
                      mode=Anomaly.MODE_WEIGHTED,
                      binaryAnomalyThreshold=0.9)
        self.assertEqual(an, anP)
        self.assertNotEqual(an, anN)
        anN = Anomaly(slidingWindowSize=5,
                      mode=Anomaly.MODE_WEIGHTED,
                      binaryAnomalyThreshold=0.5)
        self.assertNotEqual(an, anN)
Example #3
from nupic.algorithms.anomaly import Anomaly


def getAnomalyScores(activeColumnList, predictiveColumnList):
    """Calculate the anomaly scores from the lists of active and predicted
    columns at each step (deprecated)."""
    anomalyScoreList = []
    an = Anomaly()
    for i in range(len(activeColumnList)):
        score = an.computeRawAnomalyScore(activeColumnList[i],
                                          predictiveColumnList[i])
        anomalyScoreList.append(score)
    return anomalyScoreList
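
A minimal sketch of the raw score behind computeRawAnomalyScore, assuming the
standard NuPIC definition (the helper name rawAnomalyScore below is ours, not
the library's): the score is the fraction of currently active columns that
were not predicted on the previous step.

def rawAnomalyScore(activeColumns, prevPredictedColumns):
    # 1.0 if nothing active was predicted, 0.0 if everything active was
    active = set(activeColumns)
    if not active:
        return 0.0
    predicted = set(prevPredictedColumns)
    return 1.0 - len(active & predicted) / float(len(active))

# e.g. rawAnomalyScore([1, 2, 3, 4], [2, 4, 6]) gives 0.5:
# two of the four active columns (2 and 4) were predicted.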
Example #4
import numpy as np

from nupic.algorithms.anomaly import Anomaly
from nupic.algorithms.anomaly_likelihood import AnomalyLikelihood


def definir_AnomDetect(N_DATA):
    """
    Return the anomaly-score object, the anomaly-likelihood object, and the
    arrays that will hold the anomaly score and the anomaly log-likelihood.
    """

    anom_score_txt = np.zeros((N_DATA+1,))
    anom_logscore_txt = np.zeros((N_DATA+1,))

    anomaly_score = Anomaly(slidingWindowSize=25)

    anomaly_likelihood = AnomalyLikelihood(learningPeriod=600, historicWindowSize=313)

    return anomaly_score, anomaly_likelihood, anom_score_txt, anom_logscore_txt
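
A hedged usage sketch for the objects this helper returns. The loop variables
(data, active_cols, pred_cols) are placeholders for the caller's input stream
and HTM output; the compute, anomalyProbability and computeLogLikelihood calls
follow the nupic Anomaly / AnomalyLikelihood APIs.

anomaly_score, anomaly_likelihood, anom_score_txt, anom_logscore_txt = \
    definir_AnomDetect(len(data))

for i, value in enumerate(data):
    # raw score, smoothed over the 25-sample sliding window
    score = anomaly_score.compute(active_cols[i], pred_cols[i])
    # probability that this score is anomalous given recent history
    likelihood = anomaly_likelihood.anomalyProbability(value, score)
    anom_score_txt[i] = score
    anom_logscore_txt[i] = anomaly_likelihood.computeLogLikelihood(likelihood)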
Example #5
    # On each active segment, the permanence of every active synapse is
    # increased, the permanence of every inactive synapse is decreased, and
    # new synapses are created to cells that were active in the previous state.
    predictedSegmentDecrement=0.0004,  # punishment of SEGMENTS for incorrect predictions
    # From the nupic documentation: a good value for predictedSegmentDecrement
    # is just a bit larger than (column-level sparsity * permanenceIncrement).
    # So, if column-level sparsity is 2% and permanenceIncrement is 0.01, this
    # parameter should be something like 4% * 0.01 = 0.0004.
    seed=1960,
    maxSegmentsPerCell=255,
    maxSynapsesPerSegment=255)
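
The predictedSegmentDecrement comment above encodes a rule of thumb; a short
sketch of that arithmetic (the 2% sparsity and the doubling that makes the
value "a bit larger" are assumptions taken from the quoted docstring):

columnSparsity = 0.02        # 2% of columns active per step
permanenceIncrement = 0.01
predictedSegmentDecrement = 2 * columnSparsity * permanenceIncrement  # 0.0004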

############## ANOMALY DETECTION ##############

anom_score = np.zeros((N_DATA + 1, ))
anom_logscore = np.zeros((N_DATA + 1, ))

anomaly_score = Anomaly(slidingWindowSize=25)

anomaly_likelihood = AnomalyLikelihood(learningPeriod=500,
                                       historicWindowSize=213)

dd = 0

for i, linha in enumerate(teste):

    #####################################################

    scalar_encoder.encodeIntoArray(linha[1], bits_scalar)
    time_encoder.encodeIntoArray(linha[0], bits_time)

    encoder_output = np.concatenate((bits_time, bits_scalar))