Example #1
0
def test_sequence_learner():
    """SequenceLearner anomaly scores on a repeating square wave.

    Novel values/transitions score 1.0; once the repeating pattern has
    been learned every subsequent step scores 0.0.
    """
    encoder = ScalarEncoder(min_val=0.0, max_val=1.0, num_s=64, num_as=8)

    learner = SequenceLearner(num_spc=10,
                              num_dps=10,
                              num_rpd=12,
                              d_thresh=6,
                              perm_thr=1,
                              perm_inc=1,
                              perm_dec=0)

    learner.input.add_child(encoder.output)

    data = [
        0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0,
        0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0,
        1.0, 1.0
    ]

    expect_scores = np.array([
        1.0, 1.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0,
        0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
        0.0, 0.0
    ])

    actual_scores = np.zeros(len(data))

    # feed each value through encoder -> learner and record the score
    for step, value in enumerate(data):
        encoder.compute(value)
        learner.compute(True)
        actual_scores[step] = learner.get_score()

    np.testing.assert_array_equal(actual_scores, expect_scores)
Example #2
0
    def __init__(
        self,
        configs=(),  # block configuration
        labels=(0, 1),  # labels
        min_val=-1.0,  # ScalarEncoder minimum input value
        max_val=1.0,  # ScalarEncoder maximum input value
        num_i=1024,  # ScalarEncoder number of statelets
        num_ai=128,  # ScalarEncoder number of active statelets
        num_s=32,  # PatternClassifier number of statelets
        num_as=8,  # PatternClassifier number of active statelets
        pct_pool=0.8,  # PatternClassifier pool percentage
        pct_conn=0.5,  # PatternClassifier initial connection percentage
        pct_learn=0.25):  # PatternClassifier learn percentage
        """Build an encoder -> PatternClassifier pipeline.

        Blocks may be supplied via ``configs``; any block not provided
        there is created from the keyword defaults. Every encoder output
        is connected as a child of the classifier input.
        """

        # seed the random number generator for reproducible wiring
        bb.seed(0)

        # build blocks from config descriptions if given
        blocks = get_blocks(configs)
        self.encoders = blocks["encoders"]
        self.pc = blocks["pattern_classifier"]

        # fall back to a default ScalarEncoder when none was configured
        if not self.encoders:
            self.encoders.append(ScalarEncoder(min_val, max_val, num_i,
                                               num_ai))

        # fall back to a default PatternClassifier when none was configured
        # (positional 20, 2, 1 are perm_thr, perm_inc, perm_dec)
        if self.pc is None:
            self.pc = PatternClassifier(labels, num_s, num_as, 20, 2, 1,
                                        pct_pool, pct_conn, pct_learn)

        # connect every encoder output into the classifier input
        for encoder in self.encoders:
            self.pc.input.add_child(encoder.output)
Example #3
0
def get_scalability(num_detectors):
    """Measure init/compute time and memory for detector chains.

    Builds ``num_detectors`` independent ScalarEncoder -> PatternPooler ->
    SequenceLearner chains, times one initialization pass and one full
    compute pass over the module-level ``data``, and checks the first
    chain's scores against the module-level ``expected_scores``.

    Returns [num_detectors, resident bytes, init seconds, compute seconds].
    """
    process = psutil.Process(os.getpid())
    scores = []

    # Setup Blocks: one encoder -> pooler -> learner chain per detector
    encoders = []
    pattern_poolers = []
    sequence_learners = []
    for _ in range(num_detectors):
        enc = ScalarEncoder(min_val=0.0, max_val=1.0, num_s=1024, num_as=128)
        pool = PatternPooler(num_s=512,
                             num_as=8,
                             pct_pool=0.8,
                             pct_conn=0.8,
                             pct_learn=0.25)
        learner = SequenceLearner(num_spc=10, num_dps=10, num_rpd=12,
                                  d_thresh=6)
        pool.input.add_child(enc.output)
        learner.input.add_child(pool.output)
        encoders.append(enc)
        pattern_poolers.append(pool)
        sequence_learners.append(learner)

    # Get initialization time and memory usage (first data point only)
    t_start = time.time()
    chains = zip(encoders, pattern_poolers, sequence_learners)
    for idx, (enc, pool, learner) in enumerate(chains):
        enc.compute(data[0])
        pool.compute(learn=True)
        learner.compute(learn=True)
        # only the first chain's scores are recorded for verification
        if idx == 0:
            scores.append(learner.get_score())
    init_time = time.time() - t_start
    num_bytes = process.memory_info().rss

    # Get compute time over the remaining data points
    t_start = time.time()
    chains = zip(encoders, pattern_poolers, sequence_learners)
    for idx, (enc, pool, learner) in enumerate(chains):
        for i in range(1, len(data)):
            enc.compute(data[i])
            pool.compute(learn=True)
            learner.compute(learn=True)
            if idx == 0:
                scores.append(learner.get_score())
    comp_time = time.time() - t_start

    # Test Results: first detector must reproduce the expected score trace
    np.testing.assert_array_equal(np.array(scores), expected_scores)

    return [num_detectors, num_bytes, init_time, comp_time]
Example #4
0
def test_sequence_learner_sine():
    """SequenceLearner anomaly scores on a repeating sine-like sequence.

    The whole first period is novel (score 1.0); afterwards the learned
    sequence scores 0.0 on every step.
    """
    encoder = ScalarEncoder(
        min_val=0.0,  # minimum input value
        max_val=1.0,  # maximum input value
        num_s=64,  # number of statelets
        num_as=8)  # number of active statelets

    learner = SequenceLearner(
        num_spc=10,  # number of statelets per column
        num_dps=10,  # number of coincidence detectors per statelet
        num_rpd=24,  # number of receptors per coincidence detector
        d_thresh=6,  # coincidence detector threshold
        perm_thr=1,  # receptor permanence threshold
        perm_inc=1,  # receptor permanence increment
        perm_dec=0)  # receptor permanence decrement

    learner.input.add_child(encoder.output)

    values = [
        0.50, 0.79, 0.98, 0.98, 0.79, 0.50, 0.21, 0.02, 0.02, 0.21, 0.50, 0.79,
        0.98, 0.98, 0.79, 0.50, 0.21, 0.02, 0.02, 0.21, 0.50, 0.79, 0.98, 0.98,
        0.79, 0.50, 0.21, 0.02, 0.02, 0.21
    ]

    expect_scores = np.array([
        1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0,
        0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
        0.0, 0.0
    ])

    actual_scores = np.zeros(len(values))

    # feed each value through encoder -> learner and record the score
    for step, value in enumerate(values):
        encoder.compute(value=value)
        learner.compute(learn=True)
        actual_scores[step] = learner.get_score()

    np.testing.assert_array_equal(actual_scores, expect_scores)
Example #5
0
def test_read_coincidence_set():
    """Bulk coincidence-set getters must agree with the per-index getters."""
    encoder = ScalarEncoder()
    pooler = PatternPooler()

    pooler.input.add_child(encoder.output)
    pooler.initialize()

    coincidence_set = pooler.coincidence_sets(0)

    # the full address list and the single-address getter must match
    np.testing.assert_equal(coincidence_set.get_addrs()[0],
                            coincidence_set.get_addr(0))

    # same consistency check for the permanence values
    np.testing.assert_equal(coincidence_set.get_perms()[0],
                            coincidence_set.get_perm(0))
Example #6
0
def test_read_indicator():
    """Getter consistency with non-default encoder/pooler parameters."""
    encoder = ScalarEncoder(min_val=-1.0, max_val=1.0, num_s=128, num_as=16)
    pooler = PatternPooler(num_s=128, num_as=2)

    pooler.input.add_child(encoder.output)
    pooler.initialize()

    coincidence_set = pooler.coincidence_sets(0)

    # the full address list and the single-address getter must match
    np.testing.assert_equal(coincidence_set.get_addrs()[0],
                            coincidence_set.get_addr(0))

    # same consistency check for the permanence values
    np.testing.assert_equal(coincidence_set.get_perms()[0],
                            coincidence_set.get_perm(0))
Example #7
0
    def __init__(
        self,
        configs=(),  # block configuration
        min_val=-1.0,  # minimum value
        max_val=1.0,  # maximum value
        num_i=1024,  # ScalarEncoder number of statelets
        num_ai=128,  # ScalarEncoder number of active statelets
        num_s=512,  # PatternPooler number of statelets
        num_as=8,  # PatternPooler number of active statelets
        num_spc=10,  # SequenceLearner number of statelets per column
        num_dps=10,  # SequenceLearner number of coincidence detectors per statelet
        num_rpd=12,  # SequenceLearner number of receptors per coincidence detector
        d_thresh=6,  # SequenceLearner coincidence detector threshold
        pct_pool=0.8,  # PatternPooler pool percentage
        pct_conn=0.5,  # PatternPooler initial connection percentage
        pct_learn=0.25):  # PatternPooler learn percentage
        """Build an encoder -> PatternPooler -> SequenceLearner pipeline.

        Blocks may be supplied via ``configs``; any block not provided
        there is created from the keyword defaults and wired together.
        """

        self.min_val = min_val
        self.max_val = max_val

        # seed the random number generator for reproducible wiring
        bb.seed(0)

        # build blocks from config descriptions if given
        blocks = get_blocks(configs)
        self.encoders = blocks["encoders"]
        self.pp = blocks["pattern_pooler"]
        self.sl = blocks["sequence_learner"]

        # fall back to a default ScalarEncoder when none was configured
        if not self.encoders:
            self.encoders.append(ScalarEncoder(min_val, max_val, num_i,
                                               num_ai))

        # fall back to a default PatternPooler when none was configured
        # (positional 20, 2, 1 are perm_thr, perm_inc, perm_dec)
        if self.pp is None:
            self.pp = PatternPooler(num_s, num_as, 20, 2, 1, pct_pool,
                                    pct_conn, pct_learn)

        # fall back to a default SequenceLearner when none was configured
        # (positional 1, 1, 0 are perm_thr, perm_inc, perm_dec)
        if self.sl is None:
            self.sl = SequenceLearner(num_spc, num_dps, num_rpd, d_thresh, 1,
                                      1, 0)

        # connect every encoder output into the pooler input, then chain
        # the pooler output into the sequence learner
        for encoder in self.encoders:
            self.pp.input.add_child(encoder.output)

        self.sl.input.add_child(self.pp.output)

        # blocks are wired but not yet initialized
        self.initialized = False
Example #8
0
        0.0,
        0.0,
        0.0,
        0.0,
    ]

    labels = [
        0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19
    ]
    new_label = 0

    scores = [0.0 for i in range(len(values))]

    e = ScalarEncoder(
        min_val=0.0,  # minimum input value
        max_val=1.0,  # maximum input value
        num_s=64,  # number of statelets
        num_as=8)  # number of active statelets

    sl = SequenceLearner(
        num_spc=10,  # number of statelets per column
        num_dps=10,  # number of coincidence detectors per statelet
        num_rpd=12,  # number of receptors per coincidence detector
        d_thresh=6,  # coincidence detector threshold
        perm_thr=1,  # receptor permanence threshold
        perm_inc=1,  # receptor permanence increment
        perm_dec=0)  # receptor permanence decrement

    pc = PatternClassifier(
        labels=labels,  # user-defined labels
        num_s=640,  # number of statelets
from brainblocks.blocks import ScalarEncoder, PatternPooler, SequenceLearner

# define data and scores
# two parallel 20-step input streams: inputs_0 repeats a rising ramp,
# inputs_1 alternates rising and falling ramps over the same period
num_inputs = 20
inputs_0 = [0.0, 0.2, 0.4, 0.6, 0.8, 0.0, 0.2, 0.4, 0.6, 0.8, 0.0, 0.2, 0.4, 0.6, 0.8, 0.0, 0.2, 0.4, 0.6, 0.8]
inputs_1 = [0.0, 0.2, 0.4, 0.6, 0.8, 0.8, 0.6, 0.4, 0.2, 0.0, 0.0, 0.2, 0.4, 0.6, 0.8, 0.8, 0.6, 0.4, 0.2, 0.0]
# per-step anomaly scores for each of the three sequence learners
scores_0 = [0 for i in range(num_inputs)]
scores_1 = [0 for i in range(num_inputs)]
scores_2 = [0 for i in range(num_inputs)]

# define blocks
# one encoder + pooler + learner per input stream, plus a third pooler
# (pp_2) that merges both streams into a combined representation
se_0 = ScalarEncoder(num_s=500, num_as=50)
se_1 = ScalarEncoder(num_s=500, num_as=50)
pp_0 = PatternPooler(num_s=250, num_as=8)
pp_1 = PatternPooler(num_s=250, num_as=8)
pp_2 = PatternPooler(num_s=250, num_as=8)
sl_0 = SequenceLearner()
sl_1 = SequenceLearner()
sl_2 = SequenceLearner()

# connect blocks
# pp_2 receives both stream poolers as children, forming a hierarchy
pp_0.input.add_child(se_0.output)
pp_1.input.add_child(se_1.output)
pp_2.input.add_child(pp_0.output)
pp_2.input.add_child(pp_1.output)
# each sequence learner watches one pooler's output
sl_0.input.add_child(pp_0.output)
sl_1.input.add_child(pp_1.output)
sl_2.input.add_child(pp_2.output)
for i in range(num_inputs):
Example #10
0
# printing boolean arrays neatly:
# render active bits as "X" and inactive bits as "-"
np.set_printoptions(
    precision=3,
    suppress=True,
    threshold=1000000,
    linewidth=100,
    formatter={"bool": lambda bin_val: "X" if bin_val else "-"})

# alternating square wave of scalar inputs
values = [
    0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0,
    1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0,
    0.0, 1.0, 0.0, 1.0, 0.0
]

se_0 = ScalarEncoder(num_s=1024, num_as=128)

# convert scalars to distributed binary representation
for i in range(len(values)):

    # encode scalars
    se_0.compute(values[i])

    # list of 0s and 1s representing the distributed binary representation
    intarray = se_0.output.bits

    # converted to numpy array for visualization
    # (np.bool was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin bool is the correct dtype here)
    binary_array = np.array(intarray, dtype=bool)

    print(values[i])
    print(binary_array)
Example #11
0
def test_scalar_encoder():
    """ScalarEncoder must activate the expected 128-statelet window.

    Inputs outside [min_val, max_val] clamp to the first/last window.
    """
    e = ScalarEncoder(
        min_val=-1.0,  # minimum input value
        max_val=1.0,  # maximum input value
        num_s=1024,  # number of statelets
        num_as=128)  # number of active statelets

    # (input value, first active statelet, one past the last active statelet)
    cases = [
        (-1.5, 0, 128),  # below min_val clamps to the lowest window
        (-1.0, 0, 128),
        (-0.5, 224, 352),
        (0.0, 448, 576),
        (0.5, 672, 800),
        (1.0, 896, 1024),
        (1.5, 896, 1024),  # above max_val clamps to the highest window
    ]

    for value, start, stop in cases:
        e.compute(value=value)
        actual_out = np.array(e.output.bits)
        expect_out = np.zeros(1024, dtype=int)
        expect_out[start:stop] = 1
        np.testing.assert_array_equal(actual_out, expect_out)
Example #12
0
]

x_tests = [0.0, 1.0]

# string symbols converted to integers
le = preprocessing.LabelEncoder()
le.fit(y_trains)
y_trains_ints = le.transform(y_trains)

# retrieve the integer classes from above
int_classes = [k for k in range(len(le.classes_))]

# define blocks
se = ScalarEncoder(
    min_val=-1.0,  # minimum input value
    max_val=1.0,  # maximum input value
    num_s=1024,  # number of statelets
    num_as=128)  # number of active statelets

pp = PatternClassifier(
    labels=int_classes,  # user-defined labels
    num_s=512,  # number of statelets
    num_as=8,  # number of active statelets
    perm_thr=20,  # receptor permanence threshold
    perm_inc=2,  # receptor permanence increment
    perm_dec=1,  # receptor permanence decrement
    pct_pool=0.8,  # pooling percentage
    pct_conn=0.5,  # initially connected percentage
    pct_learn=0.25)  # learn percentage

# connect blocks
Example #13
0
def test_scalar_encoder():
    """ScalarEncoder must activate the expected 128-statelet window.

    Inputs outside [min_val, max_val] clamp to the first/last window.
    """
    e = ScalarEncoder(min_val=-1.0, max_val=1.0, num_s=1024, num_as=128)

    # (input value, first active statelet, one past the last active statelet)
    cases = [
        (-1.5, 0, 128),  # below min_val clamps to the lowest window
        (-1.0, 0, 128),
        (-0.5, 224, 352),
        (0.0, 448, 576),
        (0.5, 672, 800),
        (1.0, 896, 1024),
        (1.5, 896, 1024),  # above max_val clamps to the highest window
    ]

    for value, start, stop in cases:
        e.compute(value)
        actual_out = np.array(e.output.get_bits(0))
        expect_out = np.zeros(1024, dtype=int)
        expect_out[start:stop] = 1
        np.testing.assert_array_equal(actual_out, expect_out)
Example #14
0
# demo: convert a BrainBlocks ScalarEncoder output into an Etaler tensor
from etaler import et
from brainblocks.blocks import ScalarEncoder
se_0 = ScalarEncoder(num_s=1024, num_as=128)
# encode the scalar value 0
se_0.compute(0)
# output.bits holds the encoder's binary statelet pattern
# (presumably numpy-compatible, since from_numpy accepts it — confirm)
a = et.Tensor.from_numpy(se_0.output.bits)
print(a.tolist())
# convert back to a BrainBlocks-compatible representation
print(a.to_brainblocks())