def get_scalability(num_s):
    """Benchmark a PatternPooler + SequenceLearner pipeline at a given size.

    Builds a ScalarTransformer -> PatternPooler -> SequenceLearner chain in
    which the pooler has ``num_s`` statelets, then measures initialization
    time, resident memory after init, and total feedforward compute time over
    the module-level ``data`` sequence.  The resulting anomaly scores are
    verified against the module-level ``expected_scores``.

    Args:
        num_s: number of statelets in the PatternPooler, which is also the
            number of columns in the SequenceLearner.

    Returns:
        ``[num_dendrites, num_bytes, init_time, comp_time]`` where
        ``num_bytes`` is the process RSS in bytes and times are in seconds.
    """
    process = psutil.Process(os.getpid())
    scores = []

    # Setup blocks.
    # NOTE: the three accumulator lists (transformers, pattern_poolers,
    # sequence_learners) in the previous version were never used — removed.
    transformer = ScalarTransformer(min_val=0.0, max_val=1.0, num_s=1024, num_as=128)
    pattern_pooler = PatternPooler(num_s=num_s, num_as=8, pct_pool=0.8, pct_conn=0.5, pct_learn=0.3)
    sequence_learner = SequenceLearner(num_c=num_s, num_spc=10, num_dps=10, num_rpd=12, d_thresh=6)

    pattern_pooler.input.add_child(transformer.output, 0)
    sequence_learner.input.add_child(pattern_pooler.output, 0)

    # pattern_pooler dendrites (one per statelet) plus sequence_learner
    # dendrites (num_spc * num_dps per column)
    num_dendrites = num_s + (num_s * 10 * 10)

    # Get initialization time and memory usage
    t0 = time.time()
    pattern_pooler.init()
    sequence_learner.init()
    init_time = time.time() - t0
    num_bytes = process.memory_info().rss

    # Get compute time over the whole data sequence
    t0 = time.time()
    for value in data:
        transformer.set_value(value)
        transformer.feedforward()
        pattern_pooler.feedforward(learn=True)
        sequence_learner.feedforward(learn=True)
        scores.append(sequence_learner.get_anomaly_score())
    comp_time = time.time() - t0

    # Test results
    np.testing.assert_array_equal(np.array(scores), expected_scores)

    return [num_dendrites, num_bytes, init_time, comp_time]
class AnomalyDetectorPersist():
    """Anomaly detector that encodes both a value and its persistence.

    Half of the input statelets encode the raw scalar value
    (ScalarTransformer) and the other half encode how long the value has
    persisted (PersistenceTransformer).  Both feed a PatternPooler whose
    pooled representation drives a SequenceLearner, which produces the
    anomaly score.
    """

    def __init__(
            self,
            min_val=0.0,     # minimum input value
            max_val=1.0,     # maximum input value
            max_step=8,      # maximum persistence step
            num_i=1024,      # number of input statelets
            num_ai=128,      # number of active input statelets
            num_s=512,       # number of statelets
            num_as=8,        # number of active statelets
            num_spc=10,      # number of statelets per column
            num_dps=10,      # number of dendrites per statelet
            num_rpd=12,      # number of receptors per dendrite
            d_thresh=6,      # dendrite threshold
            pct_pool=0.8,    # pooling percentage
            pct_conn=0.5,    # initially connected percentage
            pct_learn=0.3):  # learn percentage

        PERM_THR = 20
        PERM_INC = 2
        PERM_DEC = 1

        # The scalar and persistence encoders each get half the input space.
        half_i = int(num_i / 2)
        half_ai = int(num_ai / 2)

        self.min_val = min_val
        self.max_val = max_val

        # seed the random number generator
        #bb.seed(0) # TODO: fix seeding

        self.st = ScalarTransformer(min_val, max_val, half_i, half_ai)
        self.pt = PersistenceTransformer(min_val, max_val, half_i, half_ai, max_step)
        self.pp = PatternPooler(num_s, num_as, PERM_THR, PERM_INC, PERM_DEC,
                                pct_pool, pct_conn, pct_learn)
        self.sl = SequenceLearner(num_s, num_spc, num_dps, num_rpd, d_thresh,
                                  PERM_THR, PERM_INC, PERM_DEC)

        # Both encoders concatenate into the pooler's input.
        self.pp.input.add_child(self.st.output, 0)
        self.pp.input.add_child(self.pt.output, 0)
        self.sl.input.add_child(self.pp.output, 0)

        self.pp.init()
        self.sl.init()

    #def save(self, path='./', name='detector'):
    #    self.pp.save(path + name + "_pp.bin")
    #    self.sl.save(path + name + "_sl.bin")

    #def load(self, path='./', name='detector'):
    #    self.pp.load(path + name + "_pp.bin")
    #    self.sl.load(path + name + "_sl.bin")

    def feedforward(self, value=0.0, learn=True):
        """Process one value through the pipeline and return its anomaly score.

        Values outside [min_val, max_val] are reported as maximally
        anomalous (1.0) regardless of the learner's output.
        """
        in_bounds = self.min_val <= value <= self.max_val

        self.st.set_value(value)
        self.pt.set_value(value)
        self.st.feedforward()
        self.pt.feedforward()
        self.pp.feedforward(learn)
        self.sl.feedforward(learn)

        return self.sl.get_anomaly_score() if in_bounds else 1.0
num_spc=10, # number of statelets per column num_dps=10, # number of dendrites per statelet num_rpd=12, # number of receptors per dendrite d_thresh=6, # dendrite threshold perm_thr=20, # receptor permanence threshold perm_inc=2, # receptor permanence increment perm_dec=1) # receptor permanence decrement # Connect blocks sl.input.add_child(lt.output, 0) # Loop through the values for i in range(len(integers)): # Set scalar transformer value lt.set_value(integers[i]) # Compute the scalar transformer lt.feedforward() # Compute the sequence learner sl.feedforward(learn=True) # Get anomaly score scores[i] = sl.get_anomaly_score() # Print output print("val, scr") for i in range(len(values)): print("%3s, %0.1f" % (values[i], scores[i]))
# Score every row of the pre-binarized input through the sequence learner.
scores = []
for row_idx in range(len(X_bits)):
    # The data is already converted to bits; flatten the row to a 1-D
    # array and inject it directly into the blank input block.
    b0.output.bits = X_bits[row_idx, :].flatten()
    b0.feedforward()

    # Learn the sequence, then record its abnormality score.
    sl.feedforward(learn=True)
    scores.append(sl.get_anomaly_score())

sigDF["score"] = scores

# Save the scored signal to a CSV file
print("Saving to " + output_name + ".csv")
sigDF.to_csv(output_name + ".csv", index=True)

# Generate a plot of the data, one subplot per column
print("Saving to " + output_name + ".png")
axes = sigDF.plot(subplots=True, legend=False)
for axis, column in zip(axes, sigDF.columns):
    axis.set_ylabel(column)
plt.savefig(output_name + ".png")
# Wire the hierarchy: pp2 also pools pp1's output, and each pooler feeds
# its own sequence learner.
pp2.input.add_child(pp1.output, 0)
sl0.input.add_child(pp0.output, 0)
sl1.input.add_child(pp1.output, 0)
sl2.input.add_child(pp2.output, 0)

# Step the whole hierarchy over every pair of input values
for step in range(num_values):
    # Encode both inputs
    st0.set_value(values0[step])
    st1.set_value(values1[step])
    st0.feedforward()
    st1.feedforward()

    # Pool, then learn sequences, with learning enabled throughout
    pp0.feedforward(learn=True)
    pp1.feedforward(learn=True)
    pp2.feedforward(learn=True)
    sl0.feedforward(learn=True)
    sl1.feedforward(learn=True)
    sl2.feedforward(learn=True)

    # Record the anomaly score at each level of the hierarchy
    scores0[step] = sl0.get_anomaly_score()
    scores1[step] = sl1.get_anomaly_score()
    scores2[step] = sl2.get_anomaly_score()

# Print output: inputs alongside per-level scores
print("in0, in1, sl0, sl1, sl2")
for idx in range(num_values):
    row = (values0[idx], values1[idx], scores0[idx], scores1[idx], scores2[idx])
    print("%0.1f, %0.1f, %0.1f, %0.1f, %0.1f" % row)