def test_sequence_learner():
    e = ScalarEncoder(min_val=0.0, max_val=1.0, num_s=64, num_as=8)

    sl = SequenceLearner(num_spc=10, num_dps=10, num_rpd=12, d_thresh=6,
                         perm_thr=1, perm_inc=1, perm_dec=0)

    sl.input.add_child(e.output)

    data = [
        0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0,
        0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0,
        0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0]

    expect_scores = np.array([
        1.0, 1.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 0.0, 0.0,
        1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
        0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0])

    actual_scores = np.array([0.0 for i in range(len(data))])

    for i in range(len(data)):
        e.compute(data[i])
        sl.compute(True)
        actual_scores[i] = sl.get_score()

    np.testing.assert_array_equal(actual_scores, expect_scores)
def get_scalability(num_detectors):
    process = psutil.Process(os.getpid())
    scores = []

    # Setup Blocks
    transformers = []
    pattern_poolers = []
    sequence_learners = []

    for _ in range(num_detectors):
        transformers.append(
            ScalarTransformer(min_val=0.0, max_val=1.0, num_s=1024,
                              num_as=128))
        pattern_poolers.append(
            PatternPooler(num_s=512, num_as=8, pct_pool=0.8, pct_conn=0.5,
                          pct_learn=0.3))
        sequence_learners.append(
            SequenceLearner(num_c=512, num_spc=10, num_dps=10, num_rpd=12,
                            d_thresh=6))
        pattern_poolers[-1].input.add_child(transformers[-1].output, 0)
        sequence_learners[-1].input.add_child(pattern_poolers[-1].output, 0)

    # Get initialization time and memory usage
    t0 = time.time()
    for d in range(num_detectors):
        pattern_poolers[d].init()
        sequence_learners[d].init()
    t1 = time.time()
    init_time = t1 - t0
    num_bytes = process.memory_info().rss

    # Get compute time
    t0 = time.time()
    for d in range(num_detectors):
        for i in range(len(data)):
            transformers[d].set_value(data[i])
            transformers[d].feedforward()
            pattern_poolers[d].feedforward(learn=True)
            sequence_learners[d].feedforward(learn=True)
            if d == 0:
                score = sequence_learners[d].get_anomaly_score()
                scores.append(score)
    t1 = time.time()
    comp_time = t1 - t0

    # Test Results
    np.testing.assert_array_equal(np.array(scores), expected_scores)

    return [num_detectors, num_bytes, init_time, comp_time]
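# Hypothetical usage sketch (not part of the original source): sweep the
# detector count and tabulate the metrics returned by get_scalability().
# Assumes `data` and `expected_scores` are defined at module scope, as the
# function above requires.
if __name__ == '__main__':
    for n in (1, 2, 4, 8):
        num_detectors, num_bytes, init_time, comp_time = get_scalability(n)
        print('detectors=%d mem=%dB init=%.3fs compute=%.3fs'
              % (num_detectors, num_bytes, init_time, comp_time))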
def get_scalability(num_s):
    process = psutil.Process(os.getpid())
    scores = []

    # Setup blocks
    encoder = ScalarEncoder(min_val=0.0, max_val=1.0, num_s=1024, num_as=128)
    pattern_pooler = PatternPooler(num_s=num_s, num_as=8, pct_pool=0.8,
                                   pct_conn=0.8, pct_learn=0.25)
    sequence_learner = SequenceLearner(num_spc=10, num_dps=10, num_rpd=12,
                                       d_thresh=6)

    pattern_pooler.input.add_child(encoder.output)
    sequence_learner.input.add_child(pattern_pooler.output)

    # pattern_pooler + sequence_learner
    num_coincidence_detectors = num_s + (num_s * 10 * 10)

    # Get initialization time and memory usage
    t0 = time.time()
    encoder.compute(data[0])
    pattern_pooler.compute(learn=True)
    sequence_learner.compute(learn=True)
    score = sequence_learner.get_score()
    scores.append(score)
    t1 = time.time()
    init_time = t1 - t0
    num_bytes = process.memory_info().rss

    # Get compute time
    t0 = time.time()
    for i in range(1, len(data)):
        encoder.compute(data[i])
        pattern_pooler.compute(learn=True)
        sequence_learner.compute(learn=True)
        score = sequence_learner.get_score()
        scores.append(score)
    t1 = time.time()
    comp_time = t1 - t0

    # Test Results
    np.testing.assert_array_equal(np.array(scores), expected_scores)

    return [num_coincidence_detectors, num_bytes, init_time, comp_time]
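# Hypothetical usage sketch (not part of the original source): sweep the
# PatternPooler size to see how memory and timing scale with the total
# coincidence detector count. Assumes `data` and `expected_scores` exist at
# module scope, as the function above requires.
if __name__ == '__main__':
    for n in (128, 256, 512, 1024):
        num_cds, num_bytes, init_time, comp_time = get_scalability(n)
        print('cds=%d mem=%dB init=%.3fs compute=%.3fs'
              % (num_cds, num_bytes, init_time, comp_time))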
def test_sequence_learner_sine():
    e = ScalarEncoder(
        min_val=0.0,  # minimum input value
        max_val=1.0,  # maximum input value
        num_s=64,     # number of statelets
        num_as=8)     # number of active statelets

    sl = SequenceLearner(
        num_spc=10,  # number of statelets per column
        num_dps=10,  # number of coincidence detectors per statelet
        num_rpd=24,  # number of receptors per coincidence detector
        d_thresh=6,  # coincidence detector threshold
        perm_thr=1,  # receptor permanence threshold
        perm_inc=1,  # receptor permanence increment
        perm_dec=0)  # receptor permanence decrement

    sl.input.add_child(e.output)

    values = [
        0.50, 0.79, 0.98, 0.98, 0.79, 0.50, 0.21, 0.02, 0.02, 0.21,
        0.50, 0.79, 0.98, 0.98, 0.79, 0.50, 0.21, 0.02, 0.02, 0.21,
        0.50, 0.79, 0.98, 0.98, 0.79, 0.50, 0.21, 0.02, 0.02, 0.21]

    actual_scores = np.array([0.0 for i in range(len(values))])

    expect_scores = np.array([
        1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
        1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
        0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0])

    for i in range(len(values)):
        e.compute(value=values[i])
        sl.compute(learn=True)
        actual_scores[i] = sl.get_score()

    np.testing.assert_array_equal(actual_scores, expect_scores)
    0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
    10, 11, 12, 13, 14, 15, 16, 17, 18, 19]

new_label = 0
scores = [0.0 for i in range(len(values))]

e = ScalarEncoder(
    min_val=0.0,  # minimum input value
    max_val=1.0,  # maximum input value
    num_s=64,     # number of statelets
    num_as=8)     # number of active statelets

sl = SequenceLearner(
    num_spc=10,  # number of statelets per column
    num_dps=10,  # number of coincidence detectors per statelet
    num_rpd=12,  # number of receptors per coincidence detector
    d_thresh=6,  # coincidence detector threshold
    perm_thr=1,  # receptor permanence threshold
    perm_inc=1,  # receptor permanence increment
    perm_dec=0)  # receptor permanence decrement

pc = PatternClassifier(
    labels=labels,   # user-defined labels
    num_s=640,       # number of statelets
    num_as=8,        # number of active statelets
    perm_thr=20,     # receptor permanence threshold
    perm_inc=2,      # receptor permanence increment
    perm_dec=1,      # receptor permanence decrement
    pct_pool=0.8,    # pooling percentage
    pct_conn=0.8,    # initially connected percentage
    pct_learn=0.25)  # learn percentage
class AnomalyDetectorPersist():
    def __init__(
            self,
            min_val=0.0,     # minimum input value
            max_val=1.0,     # maximum input value
            max_step=8,      # maximum persistence step
            num_i=1024,      # number of input statelets
            num_ai=128,      # number of active input statelets
            num_s=512,       # number of statelets
            num_as=8,        # number of active statelets
            num_spc=10,      # number of statelets per column
            num_dps=10,      # number of dendrites per statelet
            num_rpd=12,      # number of receptors per dendrite
            d_thresh=6,      # dendrite threshold
            pct_pool=0.8,    # pooling percentage
            pct_conn=0.5,    # initially connected percentage
            pct_learn=0.3):  # learn percentage

        PERM_THR = 20
        PERM_INC = 2
        PERM_DEC = 1

        num_i_half = int(num_i / 2)
        num_ai_half = int(num_ai / 2)

        self.min_val = min_val
        self.max_val = max_val

        # seed the random number generator
        #bb.seed(0)  # TODO: fix seeding

        self.st = ScalarTransformer(min_val, max_val, num_i_half, num_ai_half)
        self.pt = PersistenceTransformer(min_val, max_val, num_i_half,
                                         num_ai_half, max_step)
        self.pp = PatternPooler(num_s, num_as, PERM_THR, PERM_INC, PERM_DEC,
                                pct_pool, pct_conn, pct_learn)
        self.sl = SequenceLearner(num_s, num_spc, num_dps, num_rpd, d_thresh,
                                  PERM_THR, PERM_INC, PERM_DEC)

        self.pp.input.add_child(self.st.output, 0)
        self.pp.input.add_child(self.pt.output, 0)
        self.sl.input.add_child(self.pp.output, 0)

        self.pp.init()
        self.sl.init()

    #def save(self, path='./', name='detector'):
    #    self.pp.save(path + name + "_pp.bin")
    #    self.sl.save(path + name + "_sl.bin")

    #def load(self, path='./', name='detector'):
    #    self.pp.load(path + name + "_pp.bin")
    #    self.sl.load(path + name + "_sl.bin")

    def feedforward(self, value=0.0, learn=True):
        in_bounds = True
        if value < self.min_val or value > self.max_val:
            in_bounds = False

        self.st.set_value(value)
        self.pt.set_value(value)
        self.st.feedforward()
        self.pt.feedforward()
        self.pp.feedforward(learn)
        self.sl.feedforward(learn)

        # out-of-bounds values are flagged as maximally anomalous
        if in_bounds:
            anom = self.sl.get_anomaly_score()
        else:
            anom = 1.0

        return anom
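# Hypothetical usage sketch (not part of the original source): stream scalar
# values through the detector and collect per-step anomaly scores. The final
# value 1.5 lies outside [min_val, max_val], so feedforward() returns 1.0
# for it by construction.
detector = AnomalyDetectorPersist(min_val=0.0, max_val=1.0)
stream = [0.1, 0.2, 0.3, 0.2, 0.1, 1.5]
anoms = [detector.feedforward(v, learn=True) for v in stream]
print(anoms)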
# define data and scores
num_inputs = 20
inputs_0 = [0.0, 0.2, 0.4, 0.6, 0.8, 0.0, 0.2, 0.4, 0.6, 0.8,
            0.0, 0.2, 0.4, 0.6, 0.8, 0.0, 0.2, 0.4, 0.6, 0.8]
inputs_1 = [0.0, 0.2, 0.4, 0.6, 0.8, 0.8, 0.6, 0.4, 0.2, 0.0,
            0.0, 0.2, 0.4, 0.6, 0.8, 0.8, 0.6, 0.4, 0.2, 0.0]
scores_0 = [0 for i in range(num_inputs)]
scores_1 = [0 for i in range(num_inputs)]
scores_2 = [0 for i in range(num_inputs)]

# define blocks
se_0 = ScalarEncoder(num_s=500, num_as=50)
se_1 = ScalarEncoder(num_s=500, num_as=50)
pp_0 = PatternPooler(num_s=250, num_as=8)
pp_1 = PatternPooler(num_s=250, num_as=8)
pp_2 = PatternPooler(num_s=250, num_as=8)
sl_0 = SequenceLearner()
sl_1 = SequenceLearner()
sl_2 = SequenceLearner()

# connect blocks
pp_0.input.add_child(se_0.output)
pp_1.input.add_child(se_1.output)
pp_2.input.add_child(pp_0.output)
pp_2.input.add_child(pp_1.output)
sl_0.input.add_child(pp_0.output)
sl_1.input.add_child(pp_1.output)
sl_2.input.add_child(pp_2.output)

# loop through data
for i in range(num_inputs):
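    # Plausible loop body (a sketch, not from the original source): the
    # snippet is truncated here. Assuming the compute()/get_score() API used
    # by the other ScalarEncoder examples in this collection:
    se_0.compute(inputs_0[i])
    se_1.compute(inputs_1[i])
    pp_0.compute(learn=True)
    pp_1.compute(learn=True)
    pp_2.compute(learn=True)
    sl_0.compute(learn=True)
    sl_1.compute(learn=True)
    sl_2.compute(learn=True)
    scores_0[i] = sl_0.get_score()
    scores_1[i] = sl_1.get_score()
    scores_2[i] = sl_2.get_score()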
scores = [0.0 for _ in range(len(values))]

# Convert values to integers
le = preprocessing.LabelEncoder()
le.fit(values)
integers = le.transform(values)

# Setup blocks
lt = DiscreteTransformer(
    num_v=26,     # number of discrete values
    num_s=208)    # number of statelets

sl = SequenceLearner(
    num_spc=10,   # number of statelets per column
    num_dps=10,   # number of dendrites per statelet
    num_rpd=12,   # number of receptors per dendrite
    d_thresh=6,   # dendrite threshold
    perm_thr=20,  # receptor permanence threshold
    perm_inc=2,   # receptor permanence increment
    perm_dec=1)   # receptor permanence decrement

# Connect blocks
sl.input.add_child(lt.output, 0)

# Loop through the values
for i in range(len(integers)):

    # Set the discrete transformer value
    lt.set_value(integers[i])

    # Compute the discrete transformer
    lt.feedforward()
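    # Plausible continuation (a sketch, not from the original source): the
    # snippet is truncated here. Following the feedforward() pattern used by
    # the scalability example above:
    sl.feedforward(learn=True)
    scores[i] = sl.get_anomaly_score()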
def multiple_prior_contexts():
    experiment_name = 'multiple_prior_contexts'
    directory = './' + experiment_name
    mkdir_p(directory)

    print()
    print('experiment=\'%s\'' % (experiment_name))

    e = SymbolsEncoder(
        max_symbols=26,  # maximum number of symbols
        num_s=208)       # number of statelets

    sl = SequenceLearner(
        num_spc=10,  # number of statelets per column
        num_dps=50,  # number of coincidence detectors per statelet
        num_rpd=12,  # number of receptors per coincidence detector
        d_thresh=6,  # coincidence detector threshold
        perm_thr=1,  # receptor permanence threshold
        perm_inc=1,  # receptor permanence increment
        perm_dec=0)  # receptor permanence decrement

    sl.input.add_child(e.output)

    values = [
        'a', 'z', 'a', 'z', 'a', 'z', 'b', 'z', 'b', 'z', 'b', 'z',
        'c', 'z', 'c', 'z', 'c', 'z', 'd', 'z', 'd', 'z', 'd', 'z',
        'e', 'z', 'e', 'z', 'e', 'z', 'f', 'z', 'f', 'z', 'f', 'z',
        'g', 'z', 'g', 'z', 'g', 'z', 'h', 'z', 'h', 'z', 'h', 'z',
        'i', 'z', 'i', 'z', 'i', 'z', 'j', 'z', 'j', 'z', 'j', 'z',
        'k', 'z', 'k', 'z', 'k', 'z', 'l', 'z', 'l', 'z', 'l', 'z',
        'm', 'z', 'm', 'z', 'm', 'z', 'n', 'z', 'n', 'z', 'n', 'z',
        'o', 'z', 'o', 'z', 'o', 'z', 'p', 'z', 'p', 'z', 'p', 'z',
        'q', 'z', 'q', 'z', 'q', 'z', 'r', 'z', 'r', 'z', 'r', 'z',
        's', 'z', 's', 'z', 's', 'z', 't', 'z', 't', 'z', 't', 'z',
        'u', 'z', 'u', 'z', 'u', 'z', 'v', 'z', 'v', 'z', 'v', 'z',
        'w', 'z', 'w', 'z', 'w', 'z', 'x', 'z', 'x', 'z', 'x', 'z',
        'y', 'z', 'y', 'z', 'y', 'z']

    le = preprocessing.LabelEncoder()
    le.fit(values)
    int_values = le.transform(values)

    scores = [0.0 for _ in range(len(values))]
    count_s_acts = [0 for _ in range(len(values))]
    count_s_hist = [0 for _ in range(len(values))]
    count_cs = [0 for _ in range(len(values))]
    hidden_s_usage = [0 for _ in range(2240)]
    output_s_usage = [0 for _ in range(2240)]

    print('val scr s_act s_his cs active output statelets')

    for i in range(len(int_values)):
        e.compute(value=int_values[i])
        sl.compute(learn=True)

        # update information
        hidden_s_bits = sl.hidden.bits
        hidden_s_acts = sl.hidden.acts
        output_s_bits = sl.output.bits
        output_s_acts = sl.output.acts
        scores[i] = sl.get_score()
        count_s_acts[i] = len(output_s_acts)
        count_s_hist[i] = sl.get_historical_count()
        count_cs[i] = sl.get_coincidence_set_count()

        # update statelet usage
        for s in range(len(output_s_usage)):
            hidden_s_usage[s] += hidden_s_bits[s]
            output_s_usage[s] += output_s_bits[s]

        # plot statelets
        if (i + 1) % 6 == 0:
            title = values[i] + '_' + values[i - 1]
            plot_statelets(directory, 'hidden_' + title, hidden_s_bits)
            plot_statelets(directory, 'output_' + title, output_s_bits)

        # print information
        output_s_acts_str = '[' + ', '.join(
            str(act).rjust(4) for act in output_s_acts) + ']'
        print('{0:>3} {1:0.1f} {2:5d} {3:5d} {4:4d} {5:>4}'.format(
            values[i], scores[i], count_s_acts[i], count_s_hist[i],
            count_cs[i], output_s_acts_str))

    # plot information
    plot_results(directory, 'results', values, scores, count_s_acts,
                 count_s_hist, count_cs, 600, 2200)
    plot_statelet_usage(directory, 'hidden', hidden_s_usage, 75)
    plot_statelet_usage(directory, 'output', output_s_usage, 75)
class AbnormalityDetector():
    def __init__(
            self,
            configs=(),      # block configuration
            min_val=-1.0,    # minimum value
            max_val=1.0,     # maximum value
            num_i=1024,      # ScalarEncoder number of statelets
            num_ai=128,      # ScalarEncoder number of active statelets
            num_s=512,       # PatternPooler number of statelets
            num_as=8,        # PatternPooler number of active statelets
            num_spc=10,      # SequenceLearner number of statelets per column
            num_dps=10,      # SequenceLearner number of coincidence detectors per statelet
            num_rpd=12,      # SequenceLearner number of receptors per coincidence detector
            d_thresh=6,      # SequenceLearner coincidence detector threshold
            pct_pool=0.8,    # PatternPooler pool percentage
            pct_conn=0.5,    # PatternPooler initial connection percentage
            pct_learn=0.25): # PatternPooler learn percentage

        self.min_val = min_val
        self.max_val = max_val

        # seed the random number generator
        bb.seed(0)

        # build blocks from config descriptions if given
        blocks = get_blocks(configs)
        self.encoders = blocks["encoders"]
        self.pp = blocks["pattern_pooler"]
        self.sl = blocks["sequence_learner"]

        if len(self.encoders) == 0:
            self.encoders.append(ScalarEncoder(min_val, max_val, num_i,
                                               num_ai))
        if self.pp is None:
            self.pp = PatternPooler(num_s, num_as, 20, 2, 1, pct_pool,
                                    pct_conn, pct_learn)
        if self.sl is None:
            self.sl = SequenceLearner(num_spc, num_dps, num_rpd, d_thresh,
                                      1, 1, 0)

        for encoder in self.encoders:
            self.pp.input.add_child(encoder.output)
        self.sl.input.add_child(self.pp.output)

        self.initialized = False

    def print_parameters(self):
        for encoder in self.encoders:
            encoder.print_parameters()
        self.pp.print_parameters()
        self.sl.print_parameters()

    def save_memories(self, path='./', name='detector'):
        self.pp.save_memories(path + name + "_pp.bin")
        self.sl.save_memories(path + name + "_sl.bin")

    def load_memories(self, path='./', name='detector'):
        self.pp.load_memories(path + name + "_pp.bin")
        self.sl.load_memories(path + name + "_sl.bin")

    def compute(self, vectors=(), learn=True):
        anoms = []
        num_steps = 0xFFFFFFFF  # reduced to the shortest vector length below
        num_measurands = len(vectors)
        num_encoders = len(self.encoders)

        if num_measurands != num_encoders:
            print("Warning: compute() num_measurands != num_encoders")
            return anoms

        for vector in vectors:
            len_vector = len(vector)
            if len_vector < num_steps:
                num_steps = len_vector

        for e in range(num_encoders):
            if isinstance(self.encoders[e], PersistenceEncoder):
                self.encoders[e].reset()

        limit_flag = 0
        for s in range(num_steps):
            for e in range(num_encoders):
                value = vectors[e][s]
                if value < self.min_val or value > self.max_val:
                    limit_flag = 1
                self.encoders[e].compute(value)
            self.pp.compute(learn)
            self.sl.compute(learn)
            # out-of-bounds values are flagged as maximally anomalous
            if limit_flag == 1:
                anoms.append(1.0)
            else:
                anoms.append(self.sl.get_score())

        self.initialized = True
        return anoms
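# Hypothetical usage sketch (not part of the original source): with the
# default configuration the detector builds a single ScalarEncoder, so
# compute() expects exactly one measurand vector and returns one abnormality
# score per time step.
detector = AbnormalityDetector(min_val=-1.0, max_val=1.0)
values = [0.0, 0.2, 0.4, 0.6, 0.8, 0.8, 0.6, 0.4, 0.2, 0.0]
anoms = detector.compute(vectors=(values,), learn=True)
print(anoms)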
def three_events(statelet_snapshots_on=False):
    experiment_name = 'three_events'
    directory = './' + experiment_name
    mkdir_p(directory)

    print()
    print('experiment=\'%s\'' % (experiment_name))

    NUM_S = 208
    NUM_SPC = 10
    TOTAL_NUM_S = NUM_S * NUM_SPC

    e = SymbolsEncoder(
        max_symbols=26,  # maximum number of symbols
        num_s=NUM_S)     # number of statelets

    sl = SequenceLearner(
        num_spc=NUM_SPC,  # number of statelets per column
        num_dps=50,       # number of coincidence detectors per statelet
        num_rpd=12,       # number of receptors per coincidence detector
        d_thresh=6,       # coincidence detector threshold
        perm_thr=1,       # receptor permanence threshold
        perm_inc=1,       # receptor permanence increment
        perm_dec=0)       # receptor permanence decrement

    sl.input.add_child(e.output)

    values = [
        'a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', 'a',
        'b', 'c', 'd', 'e', 'f',
        'a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', 'a',
        'f', 'e', 'd', 'c', 'b',
        'a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', 'a',
        'b', 'c', 'd', 'c', 'b',
        'a', 'a', 'a', 'a', 'a']

    le = preprocessing.LabelEncoder()
    le.fit(values)
    int_values = le.transform(values)

    scores = [0.0 for _ in range(len(values))]
    count_s_output_acts = [0 for _ in range(len(values))]
    count_s_hidden_acts = [0 for _ in range(len(values))]
    count_s_hist = [0 for _ in range(len(values))]
    count_cs = [0 for _ in range(len(values))]
    hidden_s_usage = [0 for _ in range(TOTAL_NUM_S)]
    output_s_usage = [0 for _ in range(TOTAL_NUM_S)]

    print('val scr s_act s_his cs active output statelets')

    for i in range(len(int_values)):
        e.compute(value=int_values[i])
        sl.compute(learn=True)

        # update information
        hidden_s_bits = sl.hidden.bits
        hidden_s_acts = sl.hidden.acts
        output_s_bits = sl.output.bits
        output_s_acts = sl.output.acts
        scores[i] = sl.get_score()
        count_s_output_acts[i] = len(output_s_acts)
        count_s_hidden_acts[i] = len(hidden_s_acts)
        count_s_hist[i] = sl.get_historical_count()
        count_cs[i] = sl.get_coincidence_set_count()

        # update statelet usage
        for s in range(len(output_s_usage)):
            hidden_s_usage[s] += hidden_s_bits[s]
            output_s_usage[s] += output_s_bits[s]

        # plot statelets
        if statelet_snapshots_on and ((i + 1) % 5 == 0 or i == 43):
            title = 'step_' + str(i) + '_' + values[i] + '_' + values[i - 1]
            plot_statelets(directory, 'hidden_' + title, hidden_s_bits)
            plot_statelets(directory, 'output_' + title, output_s_bits)

        # print information
        output_s_acts_str = '[' + ', '.join(
            str(act).rjust(4) for act in output_s_acts) + ']'
        print('{0:>3} {1:0.1f} {2:5d} {3:5d} {4:4d} {5:>4}'.format(
            values[i], scores[i], count_s_output_acts[i], count_s_hist[i],
            count_cs[i], output_s_acts_str))

    # plot information
    plot_results(directory, 'results', values, scores, count_s_output_acts,
                 count_s_hidden_acts, count_s_hist, count_cs, 400, 400)
    plot_statelet_usage(directory, 'hidden', hidden_s_usage, 75)
    plot_statelet_usage(directory, 'output', output_s_usage, 75)
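# Hypothetical runner (not part of the original source): execute the
# experiment with statelet snapshot plotting enabled.
if __name__ == '__main__':
    three_events(statelet_snapshots_on=True)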
values1 = [
    0.0, 0.2, 0.4, 0.6, 0.8, 0.8, 0.6, 0.4, 0.2, 0.0,
    0.0, 0.2, 0.4, 0.6, 0.8, 0.8, 0.6, 0.4, 0.2, 0.0]

scores0 = [0 for i in range(num_values)]
scores1 = [0 for i in range(num_values)]
scores2 = [0 for i in range(num_values)]

# Setup blocks
st0 = ScalarTransformer(num_s=500, num_as=50)
st1 = ScalarTransformer(num_s=500, num_as=50)
pp0 = PatternPooler(num_s=250, num_as=8)
pp1 = PatternPooler(num_s=250, num_as=8)
pp2 = PatternPooler(num_s=250, num_as=8)
sl0 = SequenceLearner()
sl1 = SequenceLearner()
sl2 = SequenceLearner()

# Connect blocks
pp0.input.add_child(st0.output, 0)
pp1.input.add_child(st1.output, 0)
pp2.input.add_child(pp0.output, 0)
pp2.input.add_child(pp1.output, 0)
sl0.input.add_child(pp0.output, 0)
sl1.input.add_child(pp1.output, 0)
sl2.input.add_child(pp2.output, 0)

# Loop through the values
for i in range(num_values):
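    # Plausible loop body (a sketch, not from the original source): the
    # snippet is truncated here. Assumes a `values0` list defined before this
    # excerpt, mirroring `values1`, and the set_value()/feedforward() API
    # used by the scalability example above:
    st0.set_value(values0[i])
    st1.set_value(values1[i])
    st0.feedforward()
    st1.feedforward()
    pp0.feedforward(learn=True)
    pp1.feedforward(learn=True)
    pp2.feedforward(learn=True)
    sl0.feedforward(learn=True)
    sl1.feedforward(learn=True)
    sl2.feedforward(learn=True)
    scores0[i] = sl0.get_anomaly_score()
    scores1[i] = sl1.get_anomaly_score()
    scores2[i] = sl2.get_anomaly_score()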
hgt = HyperGridTransform(num_grids=8, num_bins=8, num_subspace_dims=1)

# fit the data
hgt.fit(X)

# transform scalar feature vectors to distributed binary representation
X_bits = hgt.transform(X)

# BLOCKS
# NOTE: num_bits of BlankBlock and num_c of SequenceLearner must be equal!!

# Blank Block to hold the hypergrid output
b0 = BlankBlock(num_s=hgt.num_bits)

# Sequence learner of distributed binary representations
sl = SequenceLearner(num_spc=10, num_dps=10, num_rpd=12, d_thresh=6)

# connect blank block containing hypergrid data to sequence learner
sl.input.add_child(b0.output)

scores = []
for k in range(len(X_bits)):

    # already converted data, flatten row to 1D array
    X_array = X_bits[k, :].flatten()

    # put it into a blank block
    b0.output.bits = X_array

    # learn the sequence
    sl.compute(learn=True)
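    # Plausible continuation (a sketch, not from the original source): record
    # the per-step anomaly score into the `scores` list the snippet declares
    # but never fills.
    scores.append(sl.get_score())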