def test_sequence_learner():
    """Anomaly scores on a repeating square wave: surprise on the first
    few transitions, then zero once the sequence has been learned."""
    enc = ScalarEncoder(min_val=0.0, max_val=1.0, num_s=64, num_as=8)
    learner = SequenceLearner(
        num_spc=10, num_dps=10, num_rpd=12, d_thresh=6,
        perm_thr=1, perm_inc=1, perm_dec=0)
    learner.input.add_child(enc.output)

    # Three periods of a 0/1 square wave (5 low, 5 high).
    data = [
        0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0,
        0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0,
        0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0]

    # Novel transitions score 1.0; learned ones score 0.0.
    expect_scores = np.array([
        1.0, 1.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 0.0, 0.0,
        1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
        0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0])

    actual_scores = np.zeros(len(data))
    for step, value in enumerate(data):
        enc.compute(value)
        learner.compute(True)
        actual_scores[step] = learner.get_score()

    np.testing.assert_array_equal(actual_scores, expect_scores)
def get_scalability(num_s):
    """Benchmark an encoder -> pattern pooler -> sequence learner pipeline.

    Relies on module-level ``data`` (input values) and ``expected_scores``
    (baseline anomaly scores) — NOTE(review): both must be defined by the
    enclosing module; confirm against the caller.

    Args:
        num_s: number of statelets in the PatternPooler.

    Returns:
        list: [num_coincidence_detectors, num_bytes, init_time, comp_time]
    """
    process = psutil.Process(os.getpid())
    scores = []

    # Setup Block
    # (removed unused encoders/pattern_poolers/sequence_learners lists)
    encoder = ScalarEncoder(min_val=0.0, max_val=1.0, num_s=1024, num_as=128)
    pattern_pooler = PatternPooler(num_s=num_s, num_as=8, pct_pool=0.8,
                                   pct_conn=0.8, pct_learn=0.25)
    sequence_learner = SequenceLearner(num_spc=10, num_dps=10, num_rpd=12,
                                       d_thresh=6)
    pattern_pooler.input.add_child(encoder.output)
    sequence_learner.input.add_child(pattern_pooler.output)

    # pattern_pooler statelets + sequence_learner detectors
    # (num_s columns * 10 statelets/column * 10 detectors/statelet)
    num_coincidence_detectors = num_s + (num_s * 10 * 10)

    # Get initialization time and memory usage: the first compute builds the
    # internal structures, so time it separately and sample RSS right after.
    t0 = time.time()
    encoder.compute(data[0])
    pattern_pooler.compute(learn=True)
    sequence_learner.compute(learn=True)
    scores.append(sequence_learner.get_score())
    t1 = time.time()
    init_time = t1 - t0
    num_bytes = process.memory_info().rss

    # Get steady-state compute time over the remaining inputs.
    t0 = time.time()
    for i in range(1, len(data)):
        encoder.compute(data[i])
        pattern_pooler.compute(learn=True)
        sequence_learner.compute(learn=True)
        scores.append(sequence_learner.get_score())
    t1 = time.time()
    comp_time = t1 - t0

    # Sanity-check that the pipeline still produces the expected scores.
    np.testing.assert_array_equal(np.array(scores), expected_scores)

    return [num_coincidence_detectors, num_bytes, init_time, comp_time]
def test_sequence_learner_sine():
    """Anomaly scores on a sampled sine wave converge to zero after the
    learner has seen one full period plus one extra step."""
    enc = ScalarEncoder(
        min_val=0.0,   # minimum input value
        max_val=1.0,   # maximum input value
        num_s=64,      # number of statelets
        num_as=8)      # number of active statelets

    learner = SequenceLearner(
        num_spc=10,    # number of statelets per column
        num_dps=10,    # number of coincidence detectors per statelet
        num_rpd=24,    # number of receptors per coincidence detector
        d_thresh=6,    # coincidence detector threshold
        perm_thr=1,    # receptor permanence threshold
        perm_inc=1,    # receptor permanence increment
        perm_dec=0)    # receptor permanence decrement
    learner.input.add_child(enc.output)

    # Three periods of a sine wave sampled at 10 points per period.
    values = [
        0.50, 0.79, 0.98, 0.98, 0.79, 0.50, 0.21, 0.02, 0.02, 0.21,
        0.50, 0.79, 0.98, 0.98, 0.79, 0.50, 0.21, 0.02, 0.02, 0.21,
        0.50, 0.79, 0.98, 0.98, 0.79, 0.50, 0.21, 0.02, 0.02, 0.21]

    # Every step of the first period is novel; afterwards nothing is.
    expect_scores = np.array([
        1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
        1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
        0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0])

    actual_scores = np.zeros(len(values))
    for step, value in enumerate(values):
        enc.compute(value=value)
        learner.compute(learn=True)
        actual_scores[step] = learner.get_score()

    np.testing.assert_array_equal(actual_scores, expect_scores)
perm_thr=20, # receptor permanence threshold perm_inc=2, # receptor permanence increment perm_dec=1, # receptor permanence decrement pct_pool=0.8, # pooling percentage pct_conn=0.8, # initially connected percentage pct_learn=0.25) # learn percentage 0.25 sl.input.add_child(e.output) pc.input.add_child(sl.output) aed = AbnormalEventDetector(5, 5) print('val scr lbl prob ae output_active_statelets') for i in range(len(values)): e.compute(value=values[i]) sl.compute(learn=True) pc.compute(learn=False) score = sl.get_score() probs = pc.get_probabilities() abnormal_event = aed.compute(score) if abnormal_event: for _ in range(50): pc.compute(label=new_label, learn=True) new_label += 1 winner = np.argmax(probs) winner_str = '-'
sl_2 = SequenceLearner() # connect blocks pp_0.input.add_child(se_0.output) pp_1.input.add_child(se_1.output) pp_2.input.add_child(pp_0.output) pp_2.input.add_child(pp_1.output) sl_0.input.add_child(pp_0.output) sl_1.input.add_child(pp_1.output) sl_2.input.add_child(pp_2.output) # loop through data for i in range(num_inputs): # compute se_0.compute(inputs_0[i]) se_1.compute(inputs_1[i]) pp_0.compute() pp_1.compute() pp_2.compute() sl_0.compute() sl_1.compute() sl_2.compute() # get scores scores_0[i] = sl_0.get_score() scores_1[i] = sl_1.get_score() scores_2[i] = sl_2.get_score() # print output print("in0, in1, sc0, sc1, sc2")
# Demo: encode scalars and pretty-print their binary representations,
# rendering active bits as "X" and inactive bits as "-".
np.set_printoptions(
    precision=3,
    suppress=True,
    threshold=1000000,
    linewidth=100,
    formatter={"bool": lambda bin_val: "X" if bin_val else "-"})

# Alternating 0/1 input signal.
values = [
    0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0,
    0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0,
    0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0,
    0.0, 1.0, 0.0, 1.0, 0.0]

se_0 = ScalarEncoder(num_s=1024, num_as=128)

# convert scalars to distributed binary representation
for i in range(len(values)):
    # encode scalars
    se_0.compute(values[i])

    # list of 0s and 1s representing the distributed binary representation
    intarray = se_0.output.bits

    # BUG FIX: np.bool was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin bool is the documented replacement.
    binary_array = np.array(intarray, dtype=bool)

    print(values[i])
    print(binary_array)
def test_scalar_encoder():
    """ScalarEncoder activates a contiguous 128-bit window per value and
    clamps out-of-range inputs to the edge windows."""
    e = ScalarEncoder(
        min_val=-1.0,   # minimum input value
        max_val=1.0,    # maximum input value
        num_s=1024,     # number of statelets
        num_as=128)     # number of active statelets

    # (input value, first active bit); values beyond [min, max] clamp.
    cases = [
        (-1.5, 0),
        (-1.0, 0),
        (-0.5, 224),
        (0.0, 448),
        (0.5, 672),
        (1.0, 896),
        (1.5, 896)]

    for value, start in cases:
        e.compute(value=value)
        actual_out = np.array(e.output.bits)
        expect_out = np.array([0] * 1024)
        expect_out[start:start + 128] = 1
        np.testing.assert_array_equal(actual_out, expect_out)
# Fit a PatternClassifier on labeled scalars, then report class probabilities
# for the test inputs. Relies on se, int_classes, x_trains, y_trains_ints and
# x_tests defined earlier in the module.
pp = PatternClassifier(
    labels=int_classes,   # user-defined labels
    num_s=512,            # number of statelets
    num_as=8,             # number of active statelets
    perm_thr=20,          # receptor permanence threshold
    perm_inc=2,           # receptor permanence increment
    perm_dec=1,           # receptor permanence decrement
    pct_pool=0.8,         # pooling percentage
    pct_conn=0.5,         # initially connected percentage
    pct_learn=0.25)       # learn percentage

# connect blocks
pp.input.add_child(se.output)

# fit
for i in range(len(x_trains)):
    se.compute(x_trains[i])
    pp.compute(y_trains_ints[i], learn=True)

# predict
probs = []
for i in range(len(x_tests)):
    se.compute(x_tests[i])
    # BUG FIX: was learn=True, which kept training the classifier on the
    # dummy label 0 during prediction and corrupted what was learned in fit;
    # inference must not learn.
    pp.compute(0, learn=False)
    probs.append(pp.get_probabilities())

# print output
print("x, p_a, p_b")
for i in range(len(x_tests)):
    print("%0.1f, %0.1f, %0.1f" % (x_tests[i], probs[i][0], probs[i][1]))
def test_scalar_encoder():
    """ScalarEncoder activates a contiguous 128-bit window per value and
    clamps out-of-range inputs to the edge windows.

    Refactored: the encode/compare pattern was copy-pasted seven times;
    it is now a single helper driven by (value, window start) pairs.
    """
    e = ScalarEncoder(min_val=-1.0, max_val=1.0, num_s=1024, num_as=128)

    def check(value, start):
        # Encode `value` and assert exactly bits [start, start+128) are set.
        e.compute(value)
        actual_out = np.array(e.output.get_bits(0))
        expect_out = np.zeros(1024, dtype=int)
        expect_out[start:start + 128] = 1
        np.testing.assert_array_equal(actual_out, expect_out)

    check(-1.5, 0)     # below min_val clamps to the lowest window
    check(-1.0, 0)
    check(-0.5, 224)
    check(0.0, 448)
    check(0.5, 672)
    check(1.0, 896)
    check(1.5, 896)    # above max_val clamps to the highest window
# Interop demo: encode a scalar with BrainBlocks and move the resulting
# bit array into an Etaler tensor, then round-trip it back.
from etaler import et
from brainblocks.blocks import ScalarEncoder

se_0 = ScalarEncoder(num_s=1024, num_as=128)
se_0.compute(0)

# NOTE(review): output.bits is passed straight to from_numpy — assumes it is
# already a numpy array (or something from_numpy accepts); confirm against
# the BrainBlocks output/Page API.
a = et.Tensor.from_numpy(se_0.output.bits)
print(a.tolist())
print(a.to_brainblocks())