def __init__(self, steps=(1,), alpha=0.001, actValueAlpha=0.3, verbosity=0,
             callsPerSerialize=CALLS_PER_SERIALIZE):
  """Create the paired classifiers whose outputs will be diffed.

  Both the pure-python and the C++ implementation are constructed with
  identical parameters so any divergence between them is a bug.

  @param steps Sequence of prediction step counts.
  @param alpha Learning rate for the duty cycles.
  @param actValueAlpha Learning rate for the actual-value moving average.
  @param verbosity Verbosity level passed through to both classifiers.
  @param callsPerSerialize How often (in compute calls) to serialize.
  """
  sharedArgs = (steps, alpha, actValueAlpha, verbosity)
  self._claClassifier = CLAClassifier(*sharedArgs)
  self._fastCLAClassifier = FastCLAClassifier(*sharedArgs)
  # Bookkeeping for periodic serialization checks.
  self._callsPerSerialize = callsPerSerialize
  self._calls = 0
def testWriteRead(self):
  """Round-trip a CLAClassifier through its Cap'n Proto serialization.

  Trains a classifier on one record, writes it to a proto, reads the
  proto back into a fresh instance, and verifies that (a) all internal
  state matches and (b) both instances produce the same inference for
  the next record.
  """
  c1 = CLAClassifier([1], 0.1, 0.1, 0)

  # Create a vector of input bit indices
  input1 = [1, 5, 9]
  result = c1.compute(recordNum=0, patternNZ=input1,
                      classification={'bucketIdx': 4, 'actValue': 34.7},
                      learn=True, infer=True)

  proto1 = ClaClassifier_capnp.ClaClassifierProto.new_message()
  c1.write(proto1)

  # Write the proto to a temp file and read it back into a new proto
  with tempfile.TemporaryFile() as f:
    proto1.write(f)
    f.seek(0)
    proto2 = ClaClassifier_capnp.ClaClassifierProto.read(f)

  # Load the deserialized proto
  c2 = CLAClassifier.read(proto2)

  # Scalar parameters must survive the round trip exactly (alpha values
  # are floats, so compare approximately).
  self.assertEqual(c1.steps, c2.steps)
  self.assertAlmostEqual(c1.alpha, c2.alpha)
  self.assertAlmostEqual(c1.actValueAlpha, c2.actValueAlpha)
  self.assertEqual(c1._learnIteration, c2._learnIteration)
  self.assertEqual(c1._recordNumMinusLearnIteration,
                   c2._recordNumMinusLearnIteration)
  self.assertEqual(c1._patternNZHistory, c2._patternNZHistory)

  # Per-(bit, nSteps) history objects must match field by field.
  self.assertEqual(c1._activeBitHistory.keys(), c2._activeBitHistory.keys())
  for bit, nSteps in c1._activeBitHistory.keys():
    c1BitHistory = c1._activeBitHistory[(bit, nSteps)]
    c2BitHistory = c2._activeBitHistory[(bit, nSteps)]
    self.assertEqual(c1BitHistory._id, c2BitHistory._id)
    self.assertEqual(c1BitHistory._stats, c2BitHistory._stats)
    self.assertEqual(c1BitHistory._lastTotalUpdate,
                     c2BitHistory._lastTotalUpdate)
    self.assertEqual(c1BitHistory._learnIteration,
                     c2BitHistory._learnIteration)

  self.assertEqual(c1._maxBucketIdx, c2._maxBucketIdx)

  # Actual values are floats; compare element-wise to 5 places.
  self.assertEqual(len(c1._actualValues), len(c2._actualValues))
  for i in xrange(len(c1._actualValues)):
    self.assertAlmostEqual(c1._actualValues[i], c2._actualValues[i], 5)

  self.assertEqual(c1._version, c2._version)
  self.assertEqual(c1.verbosity, c2.verbosity)

  # Both instances must now produce identical inferences for the same
  # subsequent input.
  result1 = c1.compute(recordNum=1, patternNZ=input1,
                       classification={'bucketIdx': 4, 'actValue': 34.7},
                       learn=True, infer=True)
  result2 = c2.compute(recordNum=1, patternNZ=input1,
                       classification={'bucketIdx': 4, 'actValue': 34.7},
                       learn=True, infer=True)

  self.assertEqual(result1.keys(), result2.keys())
  for key in result1.keys():
    for i in xrange(len(c1._actualValues)):
      self.assertAlmostEqual(result1[key][i], result2[key][i], 5)
def create(*args, **kwargs):
  """Factory for CLA classifier implementations.

  Extra positional/keyword arguments are forwarded to the chosen
  implementation's constructor.

  @param implementation (keyword-only, popped from kwargs) One of
         'py', 'cpp', or 'diff'. Defaults to the value of the
         'nupic.opf.claClassifier.implementation' configuration key.
  @returns A CLAClassifier, FastCLAClassifier, or CLAClassifierDiff.
  @raises ValueError If the implementation name is not recognized.
  """
  impl = kwargs.pop('implementation', None)
  if impl is None:
    impl = Configuration.get('nupic.opf.claClassifier.implementation')
  if impl == 'py':
    return CLAClassifier(*args, **kwargs)
  elif impl == 'cpp':
    return FastCLAClassifier(*args, **kwargs)
  elif impl == 'diff':
    return CLAClassifierDiff(*args, **kwargs)
  else:
    # Bug fix: the message previously claimed only "py" or "cpp" were
    # valid even though "diff" is also accepted above.
    raise ValueError('Invalid classifier implementation (%r). Value must be '
                     '"py", "cpp" or "diff".' % impl)
def initializeClassifiers(Nelements, encoder):
  """Create a CLA and an SDR classifier primed with the highest bucket.

  Both classifiers are shown the pattern for value ``Nelements - 1`` once
  (with learning and inference enabled) before being returned.

  @param Nelements Number of distinct elements/buckets.
  @param encoder Encoder whose ``encode`` output supplies the input bits.
  @returns (claClassifier, sdrClassifier) tuple.
  """
  topBucket = Nelements - 1
  claClassifier = CLAClassifier(steps=[0])
  sdrClassifier = SDRClassifier(steps=[0], alpha=0.1)

  activeBits = list(numpy.where(encoder.encode(topBucket))[0])
  targetClassification = {'bucketIdx': topBucket, 'actValue': topBucket}

  # Feed in the pattern with the highest bucket index so both classifiers
  # have seen the full output range before use.
  claClassifier.compute(0, activeBits, targetClassification,
                        learn=True, infer=True)
  sdrClassifier.compute(0, activeBits, targetClassification,
                        learn=True, infer=True)

  return claClassifier, sdrClassifier
def initialize(self):
  """Initialize this node.

  Builds the CLA classifier covering prediction steps 1..maxFutureSteps
  and sizes the value-history machine state according to whether
  inference is enabled.
  """
  self.minProbabilityThreshold = 0.0001

  # Predict every step from 1 up to maxFutureSteps inclusive.
  self.steps = [offset + 1 for offset in range(maxFutureSteps)]
  self.classifier = CLAClassifier(steps=self.steps)

  # History is longer when inference is enabled.
  maxLen = (maxPreviousStepsWithInference if self.enableInference
            else maxPreviousSteps)
  if self.enableInference:
    self.bestPredictedValue = MachineState(0, maxLen)
  self.currentValue = MachineState(0, maxLen)
def __init__(self, params):
  """Build a minimal HTM pipeline (encoders -> SP -> TM -> classifier).

  :param params: A dict of modelParams in the format
      {'clParams':{'alpha':float,'steps':'1,2,3'},
       'sensorParams':{'encoders':{}
  """
  modelParams = params['modelParams']

  # Instantiate one encoder per configured field; 'type' selects the
  # encoder class from nupic.encoders and the remaining args (minus
  # 'type'/'fieldname') are its constructor kwargs. Fields whose config
  # is None are skipped.
  self._encoders = {
      field: getattr(nupic.encoders, args['type'])(**dict(
          (arg, val) for arg, val in args.items()
          if arg not in ['type', 'fieldname']))
      for field, args in modelParams['sensorParams']['encoders'].items()
      if args is not None
  }
  self.predicted_field = modelParams['predictedField']

  # SP input width is the concatenated width of all encoder outputs.
  modelParams['spParams']['inputWidth'] = sum(
      map(lambda x: x.getWidth(), self._encoders.values()))
  self.sp = SpatialPooler(**modelParams['spParams'])
  self.sp.initialize(None, None)
  self.tm = TemporalMemory(**modelParams['tpParams'])
  self.tm.initialize(None, None)
  self.classifier = CLAClassifier(**modelParams['clParams'])

  # Preallocated output buffers reused on every compute call.
  self.spOutputs = {
      'bottomUpOut': np.zeros(modelParams['spParams']['columnCount'],
                              dtype=np.float32),
      'anomalyScore': np.zeros(modelParams['spParams']['columnCount'],
                               dtype=np.float32)
  }
  # TM output is one value per cell (columns * cellsPerColumn).
  self.tmOutputs = {
      'bottomUpOut': np.zeros(modelParams['tpParams']['columnCount'] *
                              modelParams['tpParams']['cellsPerColumn'],
                              dtype=np.float32)
  }
  # Running count of records fed to compute().
  self.recordNum = 0
maxBoost=10.0, seed=42, spVerbosity=0) tm = TemporalMemory(columnDimensions=(20, ), cellsPerColumn=(6), initialPermanence=0.2, connectedPermanence=0.8, minThreshold=5, maxNewSynapseCount=6, permanenceDecrement=0.1, permanenceIncrement=0.1, activationThreshold=4) classifier = CLAClassifier(steps=[1], alpha=0.1, actValueAlpha=0.3, verbosity=0) sp.printParameters() print "" layer = Layer(encoder, sp, tm, classifier) firstWeek = 0 i = 1 for x in range(2000): if i == 1: tm.reset() if firstWeek == 0 and layer.getWeeksAnomaly( ) > 0 and layer.getWeeksAnomaly() < 7.0: