def testWriteRead(self):
  """Round-trip an SDRClassifier through capnp serialization and verify the
  deserialized copy matches the original, both in internal state and in the
  results of a subsequent compute() call.
  """
  c1 = SDRClassifier([0], 0.1, 0.1, 0)

  # Create a vector of input bit indices
  input1 = [1, 5, 9]
  # Run one compute so the classifier carries non-trivial state into
  # serialization.  (The return value is not needed here.)
  c1.compute(recordNum=0, patternNZ=input1,
             classification={'bucketIdx': 4, 'actValue': 34.7},
             learn=True, infer=True)

  proto1 = SdrClassifier_capnp.SdrClassifierProto.new_message()
  c1.write(proto1)

  # Write the proto to a temp file and read it back into a new proto
  with tempfile.TemporaryFile() as f:
    proto1.write(f)
    f.seek(0)
    proto2 = SdrClassifier_capnp.SdrClassifierProto.read(f)

  # Load the deserialized proto
  c2 = SDRClassifier.read(proto2)

  # Scalar and simple container state must survive the round trip.
  self.assertEqual(c1.steps, c2.steps)
  self.assertAlmostEqual(c1.alpha, c2.alpha)
  self.assertAlmostEqual(c1.actValueAlpha, c2.actValueAlpha)
  self.assertEqual(c1._learnIteration, c2._learnIteration)
  self.assertEqual(c1._recordNumMinusLearnIteration,
                   c2._recordNumMinusLearnIteration)
  self.assertEqual(c1._patternNZHistory, c2._patternNZHistory)

  # Weight matrices must match element-wise for every step.
  self.assertEqual(c1._weightMatrix.keys(), c2._weightMatrix.keys())
  for step in c1._weightMatrix.keys():
    c1Weight = c1._weightMatrix[step]
    c2Weight = c2._weightMatrix[step]
    self.assertSequenceEqual(list(c1Weight.flatten()),
                             list(c2Weight.flatten()))

  self.assertEqual(c1._maxBucketIdx, c2._maxBucketIdx)
  self.assertEqual(c1._maxInputIdx, c2._maxInputIdx)
  self.assertEqual(len(c1._actualValues), len(c2._actualValues))
  for i in xrange(len(c1._actualValues)):
    self.assertAlmostEqual(c1._actualValues[i], c2._actualValues[i], 5)
  self.assertEqual(c1._version, c2._version)
  self.assertEqual(c1.verbosity, c2.verbosity)

  # Both classifiers must keep producing identical results after the
  # round trip.
  result1 = c1.compute(recordNum=1, patternNZ=input1,
                       classification={'bucketIdx': 4, 'actValue': 34.7},
                       learn=True, infer=True)
  result2 = c2.compute(recordNum=1, patternNZ=input1,
                       classification={'bucketIdx': 4, 'actValue': 34.7},
                       learn=True, infer=True)
  self.assertEqual(result1.keys(), result2.keys())
  for key in result1.keys():
    for i in xrange(len(c1._actualValues)):
      self.assertAlmostEqual(result1[key][i], result2[key][i], 5)
class SDRClassifierDiff(object):
  """Classifier-like object that diffs the output from different classifiers.

  Instances of each version of the SDR classifier are created and each call to
  compute is passed to each version of the classifier. The results are diffed
  to make sure there are no differences.

  Optionally, the classifiers can be serialized and deserialized after a
  specified number of calls to compute to ensure that serialization does not
  cause discrepancies between the results.

  TODO: Check internal state as well.
  TODO: Provide option to write output to a file.
  TODO: Provide record differences without throwing an exception.
  """

  __VERSION__ = 'SDRClassifierDiffV1'

  def __init__(self, steps=(1,), alpha=0.001, actValueAlpha=0.3, verbosity=0,
               callsPerSerialize=CALLS_PER_SERIALIZE):
    self._sdrClassifier = SDRClassifier(steps, alpha, actValueAlpha, verbosity)
    self._sdrClassifierCpp = SDRClassifierCpp(steps, alpha, actValueAlpha,
                                              verbosity)
    self._calls = 0
    self._callsPerSerialize = callsPerSerialize

  @staticmethod
  def _serializationRoundTrip(classifier):
    """Serialize `classifier` to capnp bytes and return the deserialized copy.

    Shared by both the Python and C++ classifier paths (the duplicated
    inline round-trip code was factored out here).
    """
    schema = classifier.getSchema()
    proto = schema.new_message()
    classifier.write(proto)
    proto = schema.from_bytes(proto.to_bytes())
    return type(classifier).read(proto)

  def compute(self, recordNum, patternNZ, classification, learn, infer):
    """Run both classifiers on the same input and assert their results match.

    Returns the Python classifier's result.  Raises AssertionError on any
    difference between the Python and C++ outputs.
    """
    result1 = self._sdrClassifier.compute(recordNum, patternNZ, classification,
                                          learn, infer)
    result2 = self._sdrClassifierCpp.compute(recordNum, patternNZ,
                                             classification, learn, infer)
    self._calls += 1
    # Check if it is time to serialize and deserialize.
    if self._calls % self._callsPerSerialize == 0:
      self._sdrClassifier = self._serializationRoundTrip(self._sdrClassifier)
      self._sdrClassifierCpp = self._serializationRoundTrip(
          self._sdrClassifierCpp)
    # Assert both results are the same type.
    assert type(result1) == type(result2)
    # Assert that the keys match.
    assert set(result1.keys()) == set(result2.keys()), "diff detected: " \
        "py result=%s, C++ result=%s" % (result1, result2)
    # Assert that the values match.
    for k, l in result1.iteritems():
      assert type(l) == type(result2[k])
      for i in xrange(len(l)):
        if isinstance(classification['actValue'], numbers.Real):
          # Numeric actual values: compare within a small tolerance since the
          # two implementations may differ in floating-point rounding.
          assert abs(float(l[i]) - float(result2[k][i])) < 0.0000001, (
              'Python SDRClassifier has value %f and C++ SDRClassifierCpp has '
              'value %f.' % (l[i], result2[k][i]))
        else:
          assert l[i] == result2[k][i], (
              'Python SDRClassifier has value %s and C++ SDRClassifierCpp has '
              'value %s.' % (str(l[i]), str(result2[k][i])))
    return result1
def _doWriteReadChecks(self, computeBeforeSerializing):
  """Serialize an SDRClassifier via capnp, read it back, and verify the
  restored instance matches the original in state and in the results of a
  subsequent compute() call.
  """
  original = SDRClassifier([0], 0.1, 0.1, 0)

  # A vector of input bit indices used for every compute call below.
  activeBits = [1, 5, 9]

  if computeBeforeSerializing:
    original.compute(recordNum=0,
                     patternNZ=activeBits,
                     classification={
                         'bucketIdx': 4,
                         'actValue': 34.7
                     },
                     learn=True,
                     infer=True)

  protoOut = SdrClassifier_capnp.SdrClassifierProto.new_message()
  original.write(protoOut)

  # Round-trip the proto through a temporary file.
  with tempfile.TemporaryFile() as f:
    protoOut.write(f)
    f.seek(0)
    protoIn = SdrClassifier_capnp.SdrClassifierProto.read(f)

  # Reconstruct a classifier from the deserialized proto.
  restored = SDRClassifier.read(protoIn)

  self.assertEqual(original.steps, restored.steps)
  self.assertEqual(original._maxSteps, restored._maxSteps)
  self.assertAlmostEqual(original.alpha, restored.alpha)
  self.assertAlmostEqual(original.actValueAlpha, restored.actValueAlpha)
  self.assertEqual(original._patternNZHistory, restored._patternNZHistory)

  # Weight matrices must agree element-wise for every step.
  self.assertEqual(list(original._weightMatrix.keys()),
                   list(restored._weightMatrix.keys()))
  for step in original._weightMatrix:
    self.assertSequenceEqual(list(original._weightMatrix[step].flatten()),
                             list(restored._weightMatrix[step].flatten()))

  self.assertEqual(original._maxBucketIdx, restored._maxBucketIdx)
  self.assertEqual(original._maxInputIdx, restored._maxInputIdx)
  self.assertEqual(len(original._actualValues), len(restored._actualValues))
  for valueA, valueB in zip(original._actualValues, restored._actualValues):
    self.assertAlmostEqual(valueA, valueB, 5)
  self.assertEqual(original._version, restored._version)
  self.assertEqual(original.verbosity, restored.verbosity)

  # NOTE: the previous step's actual values determine the size of lists in
  # results
  expectedActualValuesLen = len(original._actualValues)

  result1 = original.compute(recordNum=1,
                             patternNZ=activeBits,
                             classification={
                                 'bucketIdx': 4,
                                 'actValue': 34.7
                             },
                             learn=True,
                             infer=True)
  result2 = restored.compute(recordNum=1,
                             patternNZ=activeBits,
                             classification={
                                 'bucketIdx': 4,
                                 'actValue': 34.7
                             },
                             learn=True,
                             infer=True)

  self.assertEqual(list(result1.keys()), list(result2.keys()))
  for key in result1:
    self.assertEqual(len(result1[key]), len(result2[key]))
    self.assertEqual(len(result1[key]), expectedActualValuesLen)
    for i in range(expectedActualValuesLen):
      self.assertAlmostEqual(result1[key][i], result2[key][i], 5)
class HTMusicModel(object):
    """HTM-based music model.

    Wraps an HTM Network with three scalar encoders (duration, pitch,
    velocity) feeding a spatial pooler and temporal memory, plus three
    external SDRClassifiers used to decode the temporal memory output back
    into note attributes.  Supports training on note events, generating new
    MIDI output from a seed event, and capnp-based save/load of all
    components.
    """

    def __init__(self, model_params):
        # Init an HTM network
        self.network = Network()

        # Getting parameters for network regions
        self.sensor_params = model_params['Sensor']
        self.spatial_pooler_params = model_params['SpatialPooler']
        self.temporal_memory_params = model_params['TemporalMemory']
        self.classifiers_params = model_params['Classifiers']
        self.encoders_params = model_params['Encoders']

        # Adding regions to HTM network.
        # BUG FIX: the velocity and pitch encoders previously received each
        # other's parameter sets; each encoder now gets its own.
        self.network.addRegion('DurationEncoder', 'ScalarSensor',
                               json.dumps(self.encoders_params['duration']))
        self.network.addRegion('VelocityEncoder', 'ScalarSensor',
                               json.dumps(self.encoders_params['velocity']))
        self.network.addRegion('PitchEncoder', 'ScalarSensor',
                               json.dumps(self.encoders_params['pitch']))

        self.network.addRegion('SpatialPooler', 'py.SPRegion',
                               json.dumps(self.spatial_pooler_params))
        self.network.addRegion('TemporalMemory', 'py.TMRegion',
                               json.dumps(self.temporal_memory_params))

        # Creating outer classifiers for multifield prediction.
        # BUG FIX: the pitch and velocity classifier parameter sets were
        # swapped; each classifier now uses its own parameters.
        dclp = self.classifiers_params['duration']
        pclp = self.classifiers_params['pitch']
        vclp = self.classifiers_params['velocity']

        self.duration_classifier = SDRClassifier(
            steps=(1, ),
            verbosity=dclp['verbosity'],
            alpha=dclp['alpha'],
            actValueAlpha=dclp['actValueAlpha'])
        self.velocity_classifier = SDRClassifier(
            steps=(1, ),
            verbosity=vclp['verbosity'],
            alpha=vclp['alpha'],
            actValueAlpha=vclp['actValueAlpha'])
        self.pitch_classifier = SDRClassifier(
            steps=(1, ),
            verbosity=pclp['verbosity'],
            alpha=pclp['alpha'],
            actValueAlpha=pclp['actValueAlpha'])

        self._link_all_regions()
        self._enable_learning()
        self._enable_inference()

        self.network.initialize()

    def _link_all_regions(self):
        # Linking regions: every encoder feeds the spatial pooler, which in
        # turn feeds the temporal memory.
        self.network.link('DurationEncoder', 'SpatialPooler', 'UniformLink',
                          '')
        self.network.link('VelocityEncoder', 'SpatialPooler', 'UniformLink',
                          '')
        self.network.link('PitchEncoder', 'SpatialPooler', 'UniformLink', '')
        self.network.link('SpatialPooler', 'TemporalMemory', 'UniformLink',
                          '', srcOutput='bottomUpOut', destInput='bottomUpIn')

    def _enable_learning(self):
        # Enable learning for all regions.
        self.network.regions["SpatialPooler"].setParameter("learningMode", 1)
        self.network.regions["TemporalMemory"].setParameter("learningMode", 1)

    def _enable_inference(self):
        # Enable inference for all regions.
        self.network.regions["SpatialPooler"].setParameter("inferenceMode", 1)
        self.network.regions["TemporalMemory"].setParameter("inferenceMode", 1)

    def train(self, duration, pitch, velocity):
        """Feed one (duration, pitch, velocity) event through the network and
        train the three classifiers on the resulting TM activity."""
        records_total = self.network.regions['SpatialPooler'].getSelf(
        ).getAlgorithmInstance().getIterationNum()

        self.network.regions['DurationEncoder'].setParameter(
            'sensedValue', duration)
        self.network.regions['PitchEncoder'].setParameter('sensedValue', pitch)
        self.network.regions['VelocityEncoder'].setParameter(
            'sensedValue', velocity)
        self.network.run(1)

        # Getting active cells of TM and bucket indicies of encoders to feed
        # classifiers.
        active_cells = numpy.array(
            self.network.regions['TemporalMemory'].getOutputData(
                'bottomUpOut')).nonzero()[0]
        duration_bucket = numpy.array(
            self.network.regions['DurationEncoder'].getOutputData('bucket'))
        pitch_bucket = numpy.array(
            self.network.regions['PitchEncoder'].getOutputData('bucket'))
        velocity_bucket = numpy.array(
            self.network.regions['VelocityEncoder'].getOutputData('bucket'))

        # Train each classifier; inference is off so the return values are
        # not needed (the originals were assigned to unused locals).
        self.duration_classifier.compute(
            recordNum=records_total,
            patternNZ=active_cells,
            classification={
                'bucketIdx': duration_bucket[0],
                'actValue': duration
            },
            learn=True,
            infer=False)
        self.pitch_classifier.compute(
            recordNum=records_total,
            patternNZ=active_cells,
            classification={
                'bucketIdx': pitch_bucket[0],
                'actValue': pitch
            },
            learn=True,
            infer=False)
        self.velocity_classifier.compute(
            recordNum=records_total,
            patternNZ=active_cells,
            classification={
                'bucketIdx': velocity_bucket[0],
                'actValue': velocity
            },
            learn=True,
            infer=False)

    def generate(self, seed, output_dir, event_amount):
        """Generate `event_amount` note events starting from `seed` and write
        the result to a timestamped MIDI file in `output_dir`.

        `seed` is a mutable [duration, pitch, velocity] triple; each
        prediction is fed back into it for the next iteration.
        """
        records_total = self.network.regions['SpatialPooler'].getSelf(
        ).getAlgorithmInstance().getIterationNum()

        midi = pretty_midi.PrettyMIDI()
        midi_program = pretty_midi.instrument_name_to_program(
            'Acoustic Grand Piano')
        piano = pretty_midi.Instrument(program=midi_program)
        clock = 0

        for _ in tqdm(range(records_total, records_total + event_amount)):
            duration = seed[0]
            pitch = seed[1]
            velocity = seed[2]

            self.network.regions['DurationEncoder'].setParameter(
                'sensedValue', duration)
            self.network.regions['PitchEncoder'].setParameter(
                'sensedValue', pitch)
            self.network.regions['VelocityEncoder'].setParameter(
                'sensedValue', velocity)
            self.network.run(1)

            # Getting active cells of TM and bucket indicies of encoders to
            # feed classifiers.
            active_cells = numpy.array(
                self.network.regions['TemporalMemory'].getOutputData(
                    'bottomUpOut')).nonzero()[0]
            duration_bucket = numpy.array(
                self.network.regions['DurationEncoder'].getOutputData(
                    'bucket'))
            pitch_bucket = numpy.array(
                self.network.regions['PitchEncoder'].getOutputData('bucket'))
            velocity_bucket = numpy.array(
                self.network.regions['VelocityEncoder'].getOutputData(
                    'bucket'))

            # Getting up classifiers result.
            # NOTE(review): recordNum stays at records_total on every
            # iteration; with learn=False this appears harmless, but confirm
            # whether it should advance with the loop.
            duration_classifier_result = self.duration_classifier.compute(
                recordNum=records_total,
                patternNZ=active_cells,
                classification={
                    'bucketIdx': duration_bucket[0],
                    'actValue': duration
                },
                learn=False,
                infer=True)
            pitch_classifier_result = self.pitch_classifier.compute(
                recordNum=records_total,
                patternNZ=active_cells,
                classification={
                    'bucketIdx': pitch_bucket[0],
                    'actValue': pitch
                },
                learn=False,
                infer=True)
            velocity_classifier_result = self.velocity_classifier.compute(
                recordNum=records_total,
                patternNZ=active_cells,
                classification={
                    'bucketIdx': velocity_bucket[0],
                    'actValue': velocity
                },
                learn=False,
                infer=True)

            # Index of the most likely bucket at the 1-step horizon.
            du = duration_classifier_result[1].argmax()
            pi = pitch_classifier_result[1].argmax()
            ve = velocity_classifier_result[1].argmax()

            predicted_duration = duration_classifier_result['actualValues'][du]
            predicted_pitch = pitch_classifier_result['actualValues'][pi]
            predicted_velocity = velocity_classifier_result['actualValues'][ve]

            note = pretty_midi.Note(velocity=int(predicted_velocity),
                                    pitch=int(predicted_pitch),
                                    start=float(clock),
                                    end=float(clock + predicted_duration))
            piano.notes.append(note)
            clock = clock + 0.25

            # Feed the prediction back in as the next seed event.
            seed[0] = predicted_duration
            seed[1] = predicted_pitch
            seed[2] = predicted_velocity

        midi.instruments.append(piano)
        midi.remove_invalid_notes()
        # BUG FIX: '%H:%m:%S' used %m (month) where minutes were intended.
        time = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
        midi.write(output_dir + time + '.mid')

    def load_model(self, load_path):
        """Restore all serialized components from files under `load_path`."""
        # Loading SpatialPooler
        print('Loading SpatialPooler')
        with open(load_path + 'sp.bin', 'rb') as sp:
            sp_builder = SpatialPoolerProto.read(
                sp, traversal_limit_in_words=2**61)
        self.network.regions['SpatialPooler'].getSelf(
        )._sfdr = self.network.regions['SpatialPooler'].getSelf()._sfdr.read(
            sp_builder)

        # Loading TemporalMemory
        print('Loading TemporalMemory')
        self.network.regions['TemporalMemory'].getSelf().getAlgorithmInstance(
        ).loadFromFile(load_path + 'tm.bin')

        # Loading duration classifier
        print('Loading duration classifier')
        with open(load_path + 'dcl.bin', 'rb') as dcl:
            dcl_builder = SdrClassifierProto.read(
                dcl, traversal_limit_in_words=2**61)
        self.duration_classifier = self.duration_classifier.read(dcl_builder)

        # Loading pitch classifier
        print('Loading pitch classifier')
        with open(load_path + 'pcl.bin', 'rb') as pcl:
            pcl_builder = SdrClassifierProto.read(
                pcl, traversal_limit_in_words=2**61)
        self.pitch_classifier = self.pitch_classifier.read(pcl_builder)

        # Loading velocity classifier
        print('Loading velocity classifier')
        with open(load_path + 'vcl.bin', 'rb') as vcl:
            vcl_builder = SdrClassifierProto.read(
                vcl, traversal_limit_in_words=2**61)
        self.velocity_classifier = self.velocity_classifier.read(vcl_builder)

    def save_model(self, save_path):
        """Serialize all components to files under `save_path`."""
        # Saving SpatialPooler
        print('Saving SpatialPooler')
        sp_builder = SpatialPoolerProto.new_message()
        self.network.regions['SpatialPooler'].getSelf().getAlgorithmInstance(
        ).write(sp_builder)
        with open(save_path + 'sp.bin', 'w+b') as sp:
            sp_builder.write(sp)

        # Saving TemporalMemory
        print('Saving TemporalMemory')
        self.network.regions['TemporalMemory'].getSelf().getAlgorithmInstance(
        ).saveToFile(save_path + 'tm.bin')

        # Saving duration classifier
        print('Saving duration classifier')
        dcl_builder = SdrClassifierProto.new_message()
        self.duration_classifier.write(dcl_builder)
        with open(save_path + 'dcl.bin', 'w+b') as dcl:
            dcl_builder.write(dcl)

        # Saving pitch classifier
        print('Saving pitch classifier')
        pcl_builder = SdrClassifierProto.new_message()
        self.pitch_classifier.write(pcl_builder)
        with open(save_path + 'pcl.bin', 'w+b') as pcl:
            pcl_builder.write(pcl)

        # Saving velocity classifier
        print('Saving velocity classifier')
        vcl_builder = SdrClassifierProto.new_message()
        self.velocity_classifier.write(vcl_builder)
        with open(save_path + 'vcl.bin', 'w+b') as vcl:
            vcl_builder.write(vcl)