class Classifier:
    """Scalar classifier: a ScalarTransformer encoder feeding a PatternClassifier.

    A scalar input is encoded into a distributed binary pattern and then
    classified into one of ``num_l`` labels.
    """

    def __init__(
            self,
            configs=(),      # block configuration
            num_l=2,         # number of labels
            min_val=0.0,     # minimum input value
            max_val=1.0,     # maximum input value
            num_i=1024,      # number of input statelets
            num_ai=128,      # number of active input statelets
            num_s=512,       # number of statelets
            num_as=8,        # number of active statelets
            pct_pool=0.8,    # pooling percentage
            pct_conn=0.5,    # initially connected percentage
            pct_learn=0.3):  # learn percentage
        """Construct and wire the encoder/classifier pipeline."""
        # Fixed permanence parameters for the classifier's receptors.
        PERM_THR = 20
        PERM_INC = 2
        PERM_DEC = 1

        # seed the random number generator
        # bb.seed(0) # TODO: fix seeding

        # Encoder: scalar value -> distributed binary pattern.
        self.st = ScalarTransformer(min_val, max_val, num_i, num_ai)

        # Classifier: binary pattern -> label probabilities.
        self.pc = PatternClassifier(
            num_l, num_s, num_as,
            PERM_THR, PERM_INC, PERM_DEC,
            pct_pool, pct_conn, pct_learn)

        # Wire encoder output into classifier input, then initialize.
        self.pc.input.add_child(self.st.output, 0)
        self.pc.init()

    # def save(self, path='./', name='classifier'):
    #     self.pc.save(path + name + "_pc.bin")

    # def load(self, path='./', name='classifier'):
    #     self.pc.load(path + name + "_pc.bin")

    def fit(self, value=0.0, label=0):
        """Learn one (value, label) pair; return the updated label probabilities."""
        self.st.set_value(value)
        self.pc.set_label(label)
        self.st.feedforward()
        self.pc.feedforward(learn=True)
        return self.pc.get_probabilities()

    def predict(self, value=0.0):
        """Return label probabilities for ``value`` without learning."""
        self.st.set_value(value)
        self.st.feedforward()
        self.pc.feedforward(learn=False)
        return self.pc.get_probabilities()
def get_scalability(num_s):
    """Measure the scalability of a transformer -> pooler -> sequence-learner
    pipeline sized by ``num_s`` statelets.

    Relies on module-level ``data`` (input scalars) and ``expected_scores``
    (reference anomaly scores used to validate the run).

    Returns:
        list: [num_dendrites, num_bytes, init_time, comp_time] where
        num_bytes is the process RSS after block initialization and the
        times are wall-clock seconds.
    """
    process = psutil.Process(os.getpid())

    # Sequence-learner topology (must match the SequenceLearner arguments
    # below); named so the dendrite count can't silently drift from them.
    NUM_SPC = 10   # statelets per column
    NUM_DPS = 10   # dendrites per statelet

    # Setup blocks
    transformer = ScalarTransformer(min_val=0.0, max_val=1.0,
                                    num_s=1024, num_as=128)
    pattern_pooler = PatternPooler(num_s=num_s, num_as=8, pct_pool=0.8,
                                   pct_conn=0.5, pct_learn=0.3)
    sequence_learner = SequenceLearner(num_c=num_s, num_spc=NUM_SPC,
                                       num_dps=NUM_DPS, num_rpd=12,
                                       d_thresh=6)
    pattern_pooler.input.add_child(transformer.output, 0)
    sequence_learner.input.add_child(pattern_pooler.output, 0)

    # pattern_pooler dendrites + sequence_learner dendrites
    num_dendrites = num_s + (num_s * NUM_SPC * NUM_DPS)

    # Get initialization time and memory usage
    t0 = time.time()
    pattern_pooler.init()
    sequence_learner.init()
    init_time = time.time() - t0
    num_bytes = process.memory_info().rss

    # Get compute time over the shared test data
    scores = []
    t0 = time.time()
    for value in data:
        transformer.set_value(value)
        transformer.feedforward()
        pattern_pooler.feedforward(learn=True)
        sequence_learner.feedforward(learn=True)
        scores.append(sequence_learner.get_anomaly_score())
    comp_time = time.time() - t0

    # Test results against the reference scores
    np.testing.assert_array_equal(np.array(scores), expected_scores)

    return [num_dendrites, num_bytes, init_time, comp_time]
formatter={"bool": lambda bin_val: "X" if bin_val else "-"}) values = [ 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0 ] st = ScalarTransformer( min_val=0.0, # minimum input value max_val=1.0, # maximum input value num_s=1024, # number of statelets num_as=128) # number of active statelets # Convert scalars to distributed binary representation for i in range(len(values)): # Set scalar transformer value st.set_value(values[i]) # Compute scalar transformer st.feedforward() # List of 0s and 1s representing the distributed binary representation intarray = st.output.bits # Converted to numpy array for visualization binary_array = np.array(intarray, dtype=np.bool) print(values[i]) print(binary_array)
class AnomalyDetectorPersist():
    """Anomaly detector that combines a scalar encoding with a persistence
    encoding, pools them together, and feeds the result to a sequence
    learner for anomaly scoring.
    """

    def __init__(
            self,
            min_val=0.0,     # minimum input value
            max_val=1.0,     # maximum input value
            max_step=8,      # maximum persistence step
            num_i=1024,      # number of input statelets
            num_ai=128,      # number of active input statelets
            num_s=512,       # number of statelets
            num_as=8,        # number of active statelets
            num_spc=10,      # number of statelets per column
            num_dps=10,      # number of dendrites per statelet
            num_rpd=12,      # number of receptors per dendrite
            d_thresh=6,      # dendrite threshold
            pct_pool=0.8,    # pooling percentage
            pct_conn=0.5,    # initially connected percentage
            pct_learn=0.3):  # learn percentage
        """Construct and wire the two-encoder detection pipeline."""
        # Fixed permanence parameters for pooler and learner receptors.
        PERM_THR = 20
        PERM_INC = 2
        PERM_DEC = 1

        # The input space is split evenly between the two encoders.
        half_i = int(num_i / 2)
        half_ai = int(num_ai / 2)

        # Retained so feedforward() can detect out-of-range values.
        self.min_val = min_val
        self.max_val = max_val

        # seed the random number generator
        # bb.seed(0) # TODO: fix seeding

        self.st = ScalarTransformer(min_val, max_val, half_i, half_ai)
        self.pt = PersistenceTransformer(min_val, max_val, half_i, half_ai,
                                         max_step)
        self.pp = PatternPooler(num_s, num_as,
                                PERM_THR, PERM_INC, PERM_DEC,
                                pct_pool, pct_conn, pct_learn)
        self.sl = SequenceLearner(num_s, num_spc, num_dps, num_rpd, d_thresh,
                                  PERM_THR, PERM_INC, PERM_DEC)

        # Both encoders feed the pooler; the pooler feeds the learner.
        self.pp.input.add_child(self.st.output, 0)
        self.pp.input.add_child(self.pt.output, 0)
        self.sl.input.add_child(self.pp.output, 0)

        self.pp.init()
        self.sl.init()

    # def save(self, path='./', name='detector'):
    #     self.pp.save(path + name + "_pp.bin")
    #     self.sl.save(path + name + "_sl.bin")

    # def load(self, path='./', name='detector'):
    #     self.pp.load(path + name + "_pp.bin")
    #     self.sl.load(path + name + "_sl.bin")

    def feedforward(self, value=0.0, learn=True):
        """Feed one value through the pipeline and return its anomaly score.

        Values outside [min_val, max_val] are always reported as maximally
        anomalous (1.0) regardless of the learner's score.
        """
        in_bounds = not (value < self.min_val or value > self.max_val)

        self.st.set_value(value)
        self.pt.set_value(value)
        self.st.feedforward()
        self.pt.feedforward()
        self.pp.feedforward(learn)
        self.sl.feedforward(learn)

        # print(self.pp.input.acts)
        # print(self.pt.output.acts)
        # print()

        if in_bounds:
            return self.sl.get_anomaly_score()
        return 1.0
# Connect blocks: each encoder feeds a first-level pooler; both first-level
# poolers feed the second-level pooler; every pooler has its own learner.
pp0.input.add_child(st0.output, 0)
pp1.input.add_child(st1.output, 0)
pp2.input.add_child(pp0.output, 0)
pp2.input.add_child(pp1.output, 0)
sl0.input.add_child(pp0.output, 0)
sl1.input.add_child(pp1.output, 0)
sl2.input.add_child(pp2.output, 0)

# Run every value pair through the hierarchy
for t in range(num_values):
    # Encode the current pair of inputs
    st0.set_value(values0[t])
    st1.set_value(values1[t])
    st0.feedforward()
    st1.feedforward()

    # Pool and learn at both levels
    pp0.feedforward(learn=True)
    pp1.feedforward(learn=True)
    pp2.feedforward(learn=True)
    sl0.feedforward(learn=True)
    sl1.feedforward(learn=True)
    sl2.feedforward(learn=True)

    # Record anomaly scores from each learner
    scores0[t] = sl0.get_anomaly_score()
    scores1[t] = sl1.get_anomaly_score()
    scores2[t] = sl2.get_anomaly_score()

# Print output
print("in0, in1, sl0, sl1, sl2")