def runNetwork(network):
    """
    """
    sensorRegion = network.regions["sensor"]
    spatialPoolerRegion  = network.regions["spatialPoolerRegion"]
    temporalPoolerRegion = network.regions["temporalPoolerRegion"]
    classifierRegion     = network.regions["classifierRegion"]

    prevPredictedColumns = []

    for i, data in enumerate(get_input_data() * 100):
        # add data
        sensorRegion.getSelf().dataSource.push(data)

        # Run the network for a single iteration
        network.run(1)

        # Calculate the anomaly score using the active columns
        # and previous predicted columns
        activeColumns = spatialPoolerRegion.getOutputData("bottomUpOut").nonzero()[0]
        anomalyScore = computeAnomalyScore(activeColumns, prevPredictedColumns)
        prevPredictedColumns = copy.deepcopy(activeColumns)

        # Classifier
        activeCells = temporalPoolerRegion.getOutputData("bottomUpOut").nonzero()[0]
        res = classifierRegion.getSelf().customCompute(
                        recordNum=i,
                        patternNZ=activeCells,
                        classification={
                            'bucketIdx': createClassifierEncoder().getBucketIndices(data)[0],
                            'actValue': data['y']}
                        )
        predict = res['actualValues'][res[0].tolist().index(max(res[0]))]
        rate    = max(res[0])
        print '%s  x:%d  y:%s  p:%s  rate:%5.2f  anomaly:%5.2f' % (i, data['x'], data['y'], predict, rate, anomalyScore)
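The last three lines scan for the maximum likelihood by hand. An equivalent, more direct lookup with numpy.argmax (a sketch, assuming numpy is imported and res is the dict returned by customCompute, keyed by step number plus 'actualValues'):

# Equivalent lookup, assuming res[0] is the per-bucket likelihood array
# for step 0 and res['actualValues'] maps bucket indices to values:
bestBucket = numpy.argmax(res[0])
predict = res['actualValues'][bestBucket]
rate = res[0][bestBucket]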
Example #2
def runNetwork(network, writer):
  """Run the network and write output to writer.

  :param network: a Network instance to run
  :param writer: a csv.writer instance to write output to
  """
  sensorRegion = network.regions["sensor"]
  spatialPoolerRegion = network.regions["spatialPoolerRegion"]
  temporalPoolerRegion = network.regions["temporalPoolerRegion"]

  prevPredictedColumns = []

  for i in xrange(_NUM_RECORDS):
    # Run the network for a single iteration
    network.run(1)

    activeColumns = spatialPoolerRegion.getOutputData(
        "bottomUpOut").nonzero()[0]

    # Calculate the anomaly score using the active columns
    # and previous predicted columns
    anomalyScore = computeAnomalyScore(activeColumns, prevPredictedColumns)

    # Write out the anomaly score along with the record number and consumption
    # value.
    consumption = sensorRegion.getOutputData("sourceOut")[0]
    writer.writerow((i, consumption, anomalyScore))

    # Store the predicted columns for the next timestep
    predictedColumns = temporalPoolerRegion.getOutputData(
        "topDownOut").nonzero()[0]
    prevPredictedColumns = copy.deepcopy(predictedColumns)

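A hypothetical driver for the function above, assuming the surrounding module defines createNetwork and imports csv and FileRecordStream; _INPUT_FILE_PATH and _OUTPUT_PATH are illustrative names, not from the source:

# Hypothetical usage sketch; createNetwork, _INPUT_FILE_PATH and _OUTPUT_PATH
# are assumed to be defined by the surrounding module.
if __name__ == "__main__":
    dataSource = FileRecordStream(streamID=_INPUT_FILE_PATH)
    network = createNetwork(dataSource)
    with open(_OUTPUT_PATH, "wb") as outputFile:
        runNetwork(network, csv.writer(outputFile))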
Example #3
    def _calc_anomaly(self):
        """
        Compute the anomaly score for each layer.
        """

        score = 0
        anomalyScore = {}
        for name in self.dest_region_params.keys():
            #sp_bottomUpOut = self.network.regions["sp_"+name].getOutputData("bottomUpOut").nonzero()[0]
            sp_bottomUpOut = self.network.regions["tp_"+name].getInputData("bottomUpIn").nonzero()[0]

            if name in self.prevPredictedColumns:
                score = computeAnomalyScore(sp_bottomUpOut, self.prevPredictedColumns[name])
            #topdown_predict = self.network.regions["TP"].getSelf()._tfdr.topDownCompute().copy().nonzero()[0]
            topdown_predict = self.network.regions["tp_"+name].getSelf()._tfdr.topDownCompute().nonzero()[0]
            self.prevPredictedColumns[name] = copy.deepcopy(topdown_predict)

            anomalyScore[name] = score

        return anomalyScore
Example #4
    def _calc_anomaly(self):
        """
        Compute the anomaly score for each layer.
        """

        score = 0
        anomalyScore = {}
        for name in set(itertools.chain.from_iterable(self.net_structure.values())):
            #sp_bottomUpOut = self.network.regions["sp_"+name].getOutputData("bottomUpOut").nonzero()[0]
            sp_bottomUpOut = self.network.regions["tp_"+name].getInputData("bottomUpIn").nonzero()[0]

            if name in self.prevPredictedColumns:
                score = computeAnomalyScore(sp_bottomUpOut, self.prevPredictedColumns[name])
            #topdown_predict = self.network.regions["TP"].getSelf()._tfdr.topDownCompute().copy().nonzero()[0]
            topdown_predict = self.network.regions["tp_"+name].getSelf()._tfdr.topDownCompute().nonzero()[0]
            self.prevPredictedColumns[name] = copy.deepcopy(topdown_predict)

            anomalyScore[name] = score

        return anomalyScore
Example #5
    def _calc_anomaly(self):
        """
        Compute the anomaly score for each layer.
        """
        import copy
        import itertools
        from nupic.algorithms.anomaly import computeAnomalyScore

        score = 0
        anomalyScore = {}
        for name in set(itertools.chain.from_iterable(self.net_structure.values())):
            sp_bottomUpOut = self.network.regions["sp_"+name].getOutputData("bottomUpOut").nonzero()[0]
            if name in self.prevPredictedColumns:
                score = computeAnomalyScore(sp_bottomUpOut, self.prevPredictedColumns[name])
            #topdown_predict = self.network.regions["TP"].getSelf()._tfdr.topDownCompute().copy().nonzero()[0]
            topdown_predict = self.network.regions["tp_"+name].getSelf()._tfdr.topDownCompute().nonzero()[0]
            self.prevPredictedColumns[name] = copy.deepcopy(topdown_predict)

            anomalyScore[name] = score

        return anomalyScore
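The three _calc_anomaly variants above share the same per-layer state. A minimal, hypothetical sketch of the container they assume (attribute names taken from the snippets; the first variant reads its layer names from self.dest_region_params instead of net_structure):

# Hypothetical container class; only the attributes the snippets touch.
class MultiLayerModel(object):
    def __init__(self, network, net_structure):
        self.network = network              # nupic Network with "sp_<name>"/"tp_<name>" regions
        self.net_structure = net_structure  # layer wiring; values() are lists of layer names
        self.prevPredictedColumns = {}      # layer name -> columns predicted on the previous step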
Example #6
def runNetwork(network):
    """
    """
    sensorRegion = network.regions["sensor"]
    spatialPoolerRegion = network.regions["spatialPoolerRegion"]
    temporalPoolerRegion = network.regions["temporalPoolerRegion"]
    classifierRegion = network.regions["classifierRegion"]

    prevPredictedColumns = []

    for i, data in enumerate(get_input_data() * 100):
        # add data
        sensorRegion.getSelf().dataSource.push(data)

        # Run the network for a single iteration
        network.run(1)

        # Calculate the anomaly score using the active columns
        # and previous predicted columns
        activeColumns = spatialPoolerRegion.getOutputData(
            "bottomUpOut").nonzero()[0]
        anomalyScore = computeAnomalyScore(activeColumns, prevPredictedColumns)
        prevPredictedColumns = copy.deepcopy(activeColumns)

        # Classifier
        activeCells = temporalPoolerRegion.getOutputData(
            "bottomUpOut").nonzero()[0]
        res = classifierRegion.getSelf().customCompute(
            recordNum=i,
            patternNZ=activeCells,
            classification={
                'bucketIdx':
                createClassifierEncoder().getBucketIndices(data)[0],
                'actValue': data['y']
            })
        predict = res['actualValues'][res[0].tolist().index(max(res[0]))]
        rate = max(res[0])
        print '%s  x:%d  y:%s  p:%s  rate:%5.2f  anomaly:%5.2f' % (
            i, data['x'], data['y'], predict, rate, anomalyScore)
Example #7
  def constructClassificationRecord(self, inputs):
    """
    Construct a _CLAClassificationRecord based on the state of the model
    passed in through the inputs.

    Types for self.classificationVectorType:
      1 - TP active cells in learn state
      2 - SP columns concatenated with error from TP column predictions and SP
    """
    # Count the number of unpredicted columns
    allSPColumns = inputs["spBottomUpOut"]
    activeSPColumns = allSPColumns.nonzero()[0]

    score = computeAnomalyScore(activeSPColumns, self._prevPredictedColumns)

    spSize = len(allSPColumns)


    allTPCells = inputs['tpTopDownOut']
    tpSize = len(inputs['tpLrnActiveStateT'])

    classificationVector = numpy.array([])

    if self.classificationVectorType == 1:
      # Classification Vector: [---TP Cells---]
      classificationVector = numpy.zeros(tpSize)
      activeCellMatrix = inputs["tpLrnActiveStateT"].reshape(tpSize, 1)
      activeCellIdx = numpy.where(activeCellMatrix > 0)[0]
      if activeCellIdx.shape[0] > 0:
        classificationVector[numpy.array(activeCellIdx, dtype=numpy.uint16)] = 1
    elif self.classificationVectorType == 2:
      # Classification Vector: [---SP---|---(TP-SP)----]
      classificationVector = numpy.zeros(spSize+spSize)
      if activeSPColumns.shape[0] > 0:
        classificationVector[activeSPColumns] = 1.0

      errorColumns = numpy.setdiff1d(self._prevPredictedColumns,
          activeSPColumns)
      if errorColumns.shape[0] > 0:
        errorColumnIndexes = ( numpy.array(errorColumns, dtype=numpy.uint16) +
          spSize )
        classificationVector[errorColumnIndexes] = 1.0
    else:
      raise TypeError("Classification vector type must be either 'tpc' or"
        " 'sp_tpe', current value is %s" % (self.classificationVectorType))

    # Store the state for next time step
    numPredictedCols = len(self._prevPredictedColumns)
    predictedColumns = allTPCells.nonzero()[0]
    self._prevPredictedColumns = copy.deepcopy(predictedColumns)

    if self._anomalyVectorLength is None:
      self._anomalyVectorLength = len(classificationVector)

    result = _CLAClassificationRecord(
      ROWID=self._iteration,  # __numRunCalls, incremented at the
                              # beginning of model.run
      anomalyScore=score,
      anomalyVector=classificationVector.nonzero()[0].tolist(),
      anomalyLabel=[]
    )
    return result
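To make the type-2 layout concrete, here is a small self-contained walkthrough of the [---SP---|---(TP-SP)---] construction above, with toy sizes (illustrative values, not from the source):

import numpy

spSize = 8
activeSPColumns = numpy.array([1, 4])       # currently active SP columns
prevPredictedColumns = numpy.array([4, 6])  # columns predicted on the previous step

classificationVector = numpy.zeros(spSize + spSize)
classificationVector[activeSPColumns] = 1.0
# Columns that were predicted but did not become active ("error" columns)
# land in the second half, offset by spSize.
errorColumns = numpy.setdiff1d(prevPredictedColumns, activeSPColumns)
classificationVector[errorColumns + spSize] = 1.0
print classificationVector.nonzero()[0]     # -> [ 1  4 14]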
Example #8
 def testComputeAnomalyScorePartialMatch(self):
     score = anomaly.computeAnomalyScore(array([2, 3, 6]), array([3, 5, 7]))
     self.assertAlmostEqual(score, 2.0 / 3.0)
Example #9
 def testComputeAnomalyScoreNoMatch(self):
     score = anomaly.computeAnomalyScore(array([2, 4, 6]), array([3, 5, 7]))
     self.assertAlmostEqual(score, 1.0)
Example #10
 def testComputeAnomalyScorePerfectMatch(self):
     score = anomaly.computeAnomalyScore(array([3, 5, 7]), array([3, 5, 7]))
     self.assertAlmostEqual(score, 0.0)
Example #11
 def testComputeAnomalyScoreNoActive(self):
     score = anomaly.computeAnomalyScore(array([]), array([3, 5]))
     self.assertAlmostEqual(score, 1.0)
Example #12
 def testComputeAnomalyScoreNoActiveOrPredicted(self):
   score = anomaly.computeAnomalyScore(array([]), array([]))
   self.assertAlmostEqual(score, 0.0)
Example #13
    def constructClassificationRecord(self, inputs):
        """
    Construct a _CLAClassificationRecord based on the state of the model
    passed in through the inputs.

    Types for self.classificationVectorType:
      1 - TP active cells in learn state
      2 - SP columns concatenated with error from TP column predictions and SP
    """
        # Count the number of unpredicted columns
        allSPColumns = inputs["spBottomUpOut"]
        activeSPColumns = allSPColumns.nonzero()[0]

        score = computeAnomalyScore(activeSPColumns,
                                    self._prevPredictedColumns)

        spSize = len(allSPColumns)

        allTPCells = inputs['tpTopDownOut']
        tpSize = len(inputs['tpLrnActiveStateT'])

        classificationVector = numpy.array([])

        if self.classificationVectorType == 1:
            # Classification Vector: [---TP Cells---]
            classificationVector = numpy.zeros(tpSize)
            activeCellMatrix = inputs["tpLrnActiveStateT"].reshape(tpSize, 1)
            activeCellIdx = numpy.where(activeCellMatrix > 0)[0]
            if activeCellIdx.shape[0] > 0:
                classificationVector[numpy.array(activeCellIdx,
                                                 dtype=numpy.uint16)] = 1
        elif self.classificationVectorType == 2:
            # Classification Vector: [---SP---|---(TP-SP)----]
            classificationVector = numpy.zeros(spSize + spSize)
            if activeSPColumns.shape[0] > 0:
                classificationVector[activeSPColumns] = 1.0

            errorColumns = numpy.setdiff1d(self._prevPredictedColumns,
                                           activeSPColumns)
            if errorColumns.shape[0] > 0:
                errorColumnIndexes = (
                    numpy.array(errorColumns, dtype=numpy.uint16) + spSize)
                classificationVector[errorColumnIndexes] = 1.0
        else:
            raise TypeError(
                "Classification vector type must be either 'tpc' or"
                " 'sp_tpe', current value is %s" %
                (self.classificationVectorType))

        # Store the state for next time step
        numPredictedCols = len(self._prevPredictedColumns)
        predictedColumns = allTPCells.nonzero()[0]
        self._prevPredictedColumns = copy.deepcopy(predictedColumns)

        if self._anomalyVectorLength is None:
            self._anomalyVectorLength = len(classificationVector)

        result = _CLAClassificationRecord(
            ROWID=self._iteration,  # __numRunCalls, incremented at the
                                    # beginning of model.run
            anomalyScore=score,
            anomalyVector=classificationVector.nonzero()[0].tolist(),
            anomalyLabel=[])
        return result
Example #14
 def testComputeAnomalyScorePartialMatch(self):
   score = anomaly.computeAnomalyScore(array([2, 3, 6]), array([3, 5, 7]))
   self.assertAlmostEqual(score, 2.0 / 3.0)
Example #15
 def testComputeAnomalyScoreNoMatch(self):
   score = anomaly.computeAnomalyScore(array([2, 4, 6]), array([3, 5, 7]))
   self.assertAlmostEqual(score, 1.0)
Example #16
 def testComputeAnomalyScorePerfectMatch(self):
   score = anomaly.computeAnomalyScore(array([3, 5, 7]), array([3, 5, 7]))
   self.assertAlmostEqual(score, 0.0)
Example #17
 def testComputeAnomalyScoreNoActive(self):
   score = anomaly.computeAnomalyScore(array([]), array([3, 5]))
   self.assertAlmostEqual(score, 1.0)
Example #18
 def testComputeAnomalyScoreNoActiveOrPredicted(self):
     score = anomaly.computeAnomalyScore(array([]), array([]))
     self.assertAlmostEqual(score, 0.0)
Example #19
def main():
    def calc(sensor, sp, tp):
        sensor.prepareInputs()
        sensor.compute()
        sp.prepareInputs()
        sp.compute()
        tp.prepareInputs()
        tp.compute()
    # If we're only passing in a file, this just converts the path to an absolute one.
    #trainFile = findDataset(_DATA_PATH)
    trainFile = os.path.abspath(_DATA_PATH)
    print trainFile

    # TODO: the sensor layer needs its input in the form of a dataSource.
    # To push records in afterwards, it should be enough to create an instance
    # with a getNextRecordDict method, like DataBuffer in nupic/frameworks/opf/clamodel.py.
    dataSource = FileRecordStream(streamID=trainFile)
    # print dataSource
    # for i in range(2000):
    #     print dataSource.getNextRecordDict()

    network = createNetwork(dataSource)

    #
    sensorRegion = network.regions["sensor"]
    SPRegion     = network.regions["SP"]
    TPRegion     = network.regions["TP"]
    classifier   = network.regions["Classifier"]

    prevPredictedColumns = []


    # TODO: pin down exactly how this differs from the OPF.
    #for calc_num in xrange(10000):
    f = open(trainFile, "rb")
    csvReader = csv.reader(f)
    csvReader.next()
    csvReader.next()
    csvReader.next()

    #for calc_num in xrange(10000):
    for calc_num, row in enumerate(csvReader):
        gym = row[0]
        timestamp = datetime.datetime.strptime(row[1], "%Y-%m-%d %H:%M:%S.0")
        consumption = float(row[3])
        input_data = {
                'timestamp': timestamp,
                'consumption': consumption,
                'gym': gym
                }

        sensorRegion.getSelf().dataSource.push(input_data)

        # TODO: what does network.run() actually do, and where is it implemented?
        #       Probably it just executes what each region's _compute does, in sequence.
        network.run(1)
        #calc(sensorRegion, SPRegion, TPRegion)

        #
        print
        print "####################################"
        print
        print "==== EC layer ===="
        consumption = sensorRegion.getOutputData("sourceOut")[0]
        print 'sourceOut', consumption
        categoryOut = sensorRegion.getOutputData("categoryOut")[0]
        print 'categoryOut', categoryOut
        # spatialTopDownOut = sensorRegion.getOutputData("spatialTopDownOut")[0]
        # print 'spatialTopDownOut',spatialTopDownOut
        # temporalTopDownOut = sensorRegion.getOutputData("temporalTopDownOut")[0]
        # print 'temporalTopDownOut',temporalTopDownOut

        # Input to the SP (after encoding)
        print
        print "==== SP layer ===="
        sp_bottomUpIn  = SPRegion.getInputData("bottomUpIn").nonzero()[0]
        print 'bottomUpIn', sp_bottomUpIn[:10]
        sp_bottomUpOut = SPRegion.getOutputData("bottomUpOut").nonzero()[0]
        print 'bottomUpOut', sp_bottomUpOut[:10]
        # sp_input  = SPRegion.getInputData("topDownIn").nonzero()[0]
        # print 'topDownIn',sp_input[:10]
        # topDownOut = SPRegion.getOutputData("topDownOut").nonzero()[0]
        # print 'topDownOut', topDownOut[:10]
        # spatialTopDownOut = SPRegion.getOutputData("spatialTopDownOut").nonzero()[0]
        # print 'spatialTopDownOut', spatialTopDownOut[:10]
        # temporalTopDownOut = SPRegion.getOutputData("temporalTopDownOut").nonzero()[0]
        # print 'temporalTopDownOut', temporalTopDownOut[:10]
        # anomalyScore = SPRegion.getOutputData("anomalyScore").nonzero()[0]
        # print 'anomalyScore', anomalyScore[:10]

        # anomaly

        # Before input to the TP
        print
        print "==== TP layer ===="
        # After input to the TP (note: this only worked after setting topDownMode to false...)
        tp_bottomUpIn = TPRegion.getInputData("bottomUpIn").nonzero()[0]
        print 'bottomUpIn', sorted(tp_bottomUpIn)[:10]
        tp_bottomUpOut = TPRegion.getOutputData("bottomUpOut").nonzero()[0]
        print 'bottomUpOut', tp_bottomUpOut[:10]
        # tp_topDownOut = TPRegion.getOutputData("topDownOut").nonzero()[0]
        # print 'topDownOut', tp_topDownOut[:10]
        # predictedColumns = TPRegion.getOutputData("lrnActiveStateT").nonzero()[0]
        # print 'lrnActiveStateT',predictedColumns[:10]
        # predictedColumns = TPRegion.getOutputData("anomalyScore").nonzero()[0]
        # print 'anomalyScore',predictedColumns[:10]

        #
        print
        print "==== Anomaly ===="
        print 'sp_bottomUpOut', sp_bottomUpOut[:10]
        print 'prevPredictedColumns', prevPredictedColumns[:10]
        anomalyScore = computeAnomalyScore(sp_bottomUpOut, prevPredictedColumns)
        print 'anomalyScore', anomalyScore

        print
        print "==== Predict ===="
        topdown_predict = TPRegion.getSelf()._tfdr.topDownCompute().copy().nonzero()[0]
        print 'tp_topdown_predict', sorted(topdown_predict)[:10]
        prevPredictedColumns = copy.deepcopy(topdown_predict)

        categoryIn = classifier.getInputData("categoryIn").nonzero()[0]
        print 'categoryIn', categoryIn[:10]

        cl_bottomUpIn = classifier.getInputData("bottomUpIn").nonzero()[0]
        print 'bottomUpIn', cl_bottomUpIn[:10]

        # predict consumption
        # enc_list  = sensorRegion.getSelf().encoder.getEncoderList()
        # bucketIdx = enc_list[0].getBucketIndices(consumption)[0]
        # classificationIn = {
        #         'bucketIdx': bucketIdx,
        #         'actValue': float(consumption)
        #         }

        # predict gym
        enc_list  = sensorRegion.getSelf().disabledEncoder.getEncoderList()
        bucketIdx = enc_list[0].getBucketIndices(gym)[0]
        classificationIn = {
                'bucketIdx': bucketIdx,
                'actValue': gym
                }
        clResults = classifier.getSelf().customCompute(
                recordNum=calc_num,
                patternNZ=tp_bottomUpOut,
                classification=classificationIn
                )
        max_index = [i for i, j in enumerate(clResults[1]) if j == max(clResults[1])]
        print 'predict value: ', clResults['actualValues'][max_index[0]]
        print 'actual value: ', gym
        print


        """