Ejemplo n.º 1
0
def test_read_indicator():
    """Bulk getters of a coincidence set must agree with element getters."""
    enc = ScalarEncoder(min_val=-1.0, max_val=1.0, num_s=128, num_as=16)
    pooler = PatternPooler(num_s=128, num_as=2)

    # Wire the encoder into the pooler and build its memories.
    pooler.input.add_child(enc.output)
    pooler.initialize()

    cset = pooler.coincidence_sets(0)

    # addr/perm read back the same whether fetched in bulk or one at a time
    np.testing.assert_equal(cset.get_addrs()[0], cset.get_addr(0))
    np.testing.assert_equal(cset.get_perms()[0], cset.get_perm(0))
Ejemplo n.º 2
0
def test_read_coincidence_set():
    """Reading a coincidence set element-wise matches the bulk arrays."""
    enc = ScalarEncoder()
    pooler = PatternPooler()

    # Default-constructed blocks, wired and initialized.
    pooler.input.add_child(enc.output)
    pooler.initialize()

    cset = pooler.coincidence_sets(0)

    # Compare first entries of the bulk address/permanence arrays against
    # the single-element accessors.
    np.testing.assert_equal(cset.get_addrs()[0], cset.get_addr(0))
    np.testing.assert_equal(cset.get_perms()[0], cset.get_perm(0))
Ejemplo n.º 3
0
    def __init__(
            self,
            min_val=0.0,  # minimum input value
            max_val=1.0,  # maximum input value
            max_step=8,  # maximum persistence step
            num_i=1024,  # number of input statelets
            num_ai=128,  # number of active input statelets
            num_s=512,  # number of statelets
            num_as=8,  # number of active statelets
            num_spc=10,  # number of statelets per column
            num_dps=10,  # number of dendrites per statelet
            num_rpd=12,  # number of receptors per dendrite
            d_thresh=6,  # dendrite threshold
            pct_pool=0.8,  # pooling percentage
            pct_conn=0.5,  # initially connected percentage
            pct_learn=0.3):  # learn percentage
        """Build a scalar+persistence transformer pair feeding a pattern
        pooler and a sequence learner."""

        # Receptor permanence settings shared by the pooler and the learner.
        PERM_THR = 20
        PERM_INC = 2
        PERM_DEC = 1

        self.min_val = min_val
        self.max_val = max_val

        # Input statelets are split evenly between the two transformers.
        half_i = int(num_i / 2)
        half_ai = int(num_ai / 2)

        # seed the random number generator
        #bb.seed(0) # TODO: fix seeding

        self.st = ScalarTransformer(min_val, max_val, half_i, half_ai)
        self.pt = PersistenceTransformer(min_val, max_val, half_i, half_ai,
                                         max_step)
        self.pp = PatternPooler(num_s, num_as, PERM_THR, PERM_INC, PERM_DEC,
                                pct_pool, pct_conn, pct_learn)
        self.sl = SequenceLearner(num_s, num_spc, num_dps, num_rpd, d_thresh,
                                  PERM_THR, PERM_INC, PERM_DEC)

        # Both transformers feed the pooler; the pooler feeds the learner.
        self.pp.input.add_child(self.st.output, 0)
        self.pp.input.add_child(self.pt.output, 0)
        self.sl.input.add_child(self.pp.output, 0)

        self.pp.init()
        self.sl.init()
Ejemplo n.º 4
0
    def __init__(
        self,
        configs=(),  # block configuration
        min_val=-1.0,  # minimum value
        max_val=1.0,  # maximum value
        num_i=1024,  # ScalarEncoder number of statelets
        num_ai=128,  # ScalarEncoder number of active statelets
        num_s=512,  # PatternPooler number of statelets
        num_as=8,  # PatternPooler number of active statelets
        num_spc=10,  # SequenceLearner number of statelets per column
        num_dps=10,  # SequenceLearner number of coincidence detectors per statelet
        num_rpd=12,  # SequenceLearner number of receptors per coincidence detector
        d_thresh=6,  # SequenceLearner coincidence detector threshold
        pct_pool=0.8,  # PatternPooler pool percentage
        pct_conn=0.5,  # PatternPooler initial connection percentage
        pct_learn=0.25):  # PatternPooler learn percentage
        """Assemble encoder(s) -> PatternPooler -> SequenceLearner, filling
        in default blocks for anything the configs did not supply."""

        self.min_val = min_val
        self.max_val = max_val

        # seed the random number generator
        bb.seed(0)

        # build blocks from config descriptions if given
        blocks = get_blocks(configs)
        self.encoders = blocks["encoders"]
        self.pp = blocks["pattern_pooler"]
        self.sl = blocks["sequence_learner"]

        # fall back to a single default scalar encoder
        if not self.encoders:
            self.encoders.append(ScalarEncoder(min_val, max_val, num_i,
                                               num_ai))

        # use `is None` for singleton comparison (PEP 8), not `== None`
        if self.pp is None:
            self.pp = PatternPooler(num_s, num_as, 20, 2, 1, pct_pool,
                                    pct_conn, pct_learn)

        if self.sl is None:
            self.sl = SequenceLearner(num_spc, num_dps, num_rpd, d_thresh, 1,
                                      1, 0)

        # every encoder feeds the pooler; the pooler feeds the learner
        for encoder in self.encoders:
            self.pp.input.add_child(encoder.output)

        self.sl.input.add_child(self.pp.output)

        self.initialized = False
def get_scalability(num_detectors):
    """Build `num_detectors` independent transformer->pooler->learner
    pipelines and return [num_detectors, num_bytes, init_time, comp_time]."""
    process = psutil.Process(os.getpid())
    scores = []

    # Setup Blocks: construct and wire one pipeline per detector.
    transformers = []
    pattern_poolers = []
    sequence_learners = []
    for _ in range(num_detectors):
        st = ScalarTransformer(min_val=0.0, max_val=1.0, num_s=1024,
                               num_as=128)
        pp = PatternPooler(num_s=512,
                           num_as=8,
                           pct_pool=0.8,
                           pct_conn=0.5,
                           pct_learn=0.3)
        sl = SequenceLearner(num_c=512,
                             num_spc=10,
                             num_dps=10,
                             num_rpd=12,
                             d_thresh=6)
        pp.input.add_child(st.output, 0)
        sl.input.add_child(pp.output, 0)
        transformers.append(st)
        pattern_poolers.append(pp)
        sequence_learners.append(sl)

    # Get initialization time and memory usage
    t0 = time.time()
    for pp, sl in zip(pattern_poolers, sequence_learners):
        pp.init()
        sl.init()
    init_time = time.time() - t0
    num_bytes = process.memory_info().rss

    # Get compute time; only the first detector's anomaly scores are kept
    # for the correctness check below.
    t0 = time.time()
    for d in range(num_detectors):
        st = transformers[d]
        pp = pattern_poolers[d]
        sl = sequence_learners[d]
        for value in data:
            st.set_value(value)
            st.feedforward()
            pp.feedforward(learn=True)
            sl.feedforward(learn=True)
            if d == 0:
                scores.append(sl.get_anomaly_score())
    comp_time = time.time() - t0

    # Test Results against the known-good score sequence
    np.testing.assert_array_equal(np.array(scores), expected_scores)

    return [num_detectors, num_bytes, init_time, comp_time]
Ejemplo n.º 6
0
def get_scalability(num_s):
    """Measure memory and timing for one encoder->pooler->learner pipeline
    whose PatternPooler has `num_s` statelets.

    Returns [num_coincidence_detectors, num_bytes, init_time, comp_time].
    """
    process = psutil.Process(os.getpid())
    scores = []

    # Setup Block (removed the unused encoders/pattern_poolers/
    # sequence_learners list variables left over from the multi-detector
    # variant of this benchmark)
    encoder = ScalarEncoder(min_val=0.0, max_val=1.0, num_s=1024, num_as=128)
    pattern_pooler = PatternPooler(num_s=num_s,
                                   num_as=8,
                                   pct_pool=0.8,
                                   pct_conn=0.8,
                                   pct_learn=0.25)
    sequence_learner = SequenceLearner(num_spc=10,
                                       num_dps=10,
                                       num_rpd=12,
                                       d_thresh=6)
    pattern_pooler.input.add_child(encoder.output)
    sequence_learner.input.add_child(pattern_pooler.output)
    # pattern_pooler detectors + sequence_learner detectors (10 dps * 10 spc)
    num_coincidence_detectors = num_s + (num_s * 10 * 10)

    # Get initialization time (measured around the first compute) and
    # resident memory usage
    t0 = time.time()
    encoder.compute(data[0])
    pattern_pooler.compute(learn=True)
    sequence_learner.compute(learn=True)
    scores.append(sequence_learner.get_score())
    t1 = time.time()
    init_time = t1 - t0
    num_bytes = process.memory_info().rss

    # Get compute time over the remaining data
    t0 = time.time()
    for i in range(1, len(data)):
        encoder.compute(data[i])
        pattern_pooler.compute(learn=True)
        sequence_learner.compute(learn=True)
        scores.append(sequence_learner.get_score())
    t1 = time.time()
    comp_time = t1 - t0

    # Test Results against the known-good score sequence
    np.testing.assert_array_equal(np.array(scores), expected_scores)

    return [num_coincidence_detectors, num_bytes, init_time, comp_time]
Ejemplo n.º 7
0
class AnomalyDetectorPersist():
    """Anomaly detector combining a scalar and a persistence transformer
    in front of a pattern pooler and a sequence learner."""

    def __init__(
            self,
            min_val=0.0,  # minimum input value
            max_val=1.0,  # maximum input value
            max_step=8,  # maximum persistence step
            num_i=1024,  # number of input statelets
            num_ai=128,  # number of active input statelets
            num_s=512,  # number of statelets
            num_as=8,  # number of active statelets
            num_spc=10,  # number of statelets per column
            num_dps=10,  # number of dendrites per statelet
            num_rpd=12,  # number of receptors per dendrite
            d_thresh=6,  # dendrite threshold
            pct_pool=0.8,  # pooling percentage
            pct_conn=0.5,  # initially connected percentage
            pct_learn=0.3):  # learn percentage

        # Receptor permanence settings shared by the pooler and the learner.
        PERM_THR = 20
        PERM_INC = 2
        PERM_DEC = 1

        # Input statelets are split evenly between the two transformers.
        half_i = int(num_i / 2)
        half_ai = int(num_ai / 2)

        self.min_val = min_val
        self.max_val = max_val

        # seed the random number generator
        #bb.seed(0) # TODO: fix seeding

        self.st = ScalarTransformer(min_val, max_val, half_i, half_ai)
        self.pt = PersistenceTransformer(min_val, max_val, half_i, half_ai,
                                         max_step)
        self.pp = PatternPooler(num_s, num_as, PERM_THR, PERM_INC, PERM_DEC,
                                pct_pool, pct_conn, pct_learn)
        self.sl = SequenceLearner(num_s, num_spc, num_dps, num_rpd, d_thresh,
                                  PERM_THR, PERM_INC, PERM_DEC)

        # Both transformers feed the pooler; the pooler feeds the learner.
        self.pp.input.add_child(self.st.output, 0)
        self.pp.input.add_child(self.pt.output, 0)
        self.sl.input.add_child(self.pp.output, 0)

        self.pp.init()
        self.sl.init()

    # Persistence of learned memories is not implemented yet:
    #def save(self, path='./', name='detector'):
    #    self.pp.save(path + name + "_pp.bin")
    #    self.sl.save(path + name + "_sl.bin")

    #def load(self, path='./', name='detector'):
    #    self.pp.load(path + name + "_pp.bin")
    #    self.sl.load(path + name + "_sl.bin")

    def feedforward(self, value=0.0, learn=True):
        """Process one value through the pipeline and return its anomaly
        score; out-of-range values always score 1.0."""

        in_bounds = self.min_val <= value <= self.max_val

        self.st.set_value(value)
        self.pt.set_value(value)
        self.st.feedforward()
        self.pt.feedforward()
        self.pp.feedforward(learn)
        self.sl.feedforward(learn)

        #print(self.pp.input.acts)
        #print(self.pt.output.acts)
        #print()

        return self.sl.get_anomaly_score() if in_bounds else 1.0
from brainblocks.blocks import ScalarEncoder, PatternPooler, SequenceLearner

# define data and scores
# inputs_0 cycles one rising ramp; inputs_1 alternates rising and falling
# ramps.  One score slot is preallocated per time step for each learner.
num_inputs = 20
inputs_0 = [0.0, 0.2, 0.4, 0.6, 0.8, 0.0, 0.2, 0.4, 0.6, 0.8, 0.0, 0.2, 0.4, 0.6, 0.8, 0.0, 0.2, 0.4, 0.6, 0.8]
inputs_1 = [0.0, 0.2, 0.4, 0.6, 0.8, 0.8, 0.6, 0.4, 0.2, 0.0, 0.0, 0.2, 0.4, 0.6, 0.8, 0.8, 0.6, 0.4, 0.2, 0.0]
scores_0 = [0 for i in range(num_inputs)]
scores_1 = [0 for i in range(num_inputs)]
scores_2 = [0 for i in range(num_inputs)]

# define blocks: two encoder->pooler branches, plus a third pooler (pp_2)
# that pools over both branches; each pooler feeds its own sequence learner
se_0 = ScalarEncoder(num_s=500, num_as=50)
se_1 = ScalarEncoder(num_s=500, num_as=50)
pp_0 = PatternPooler(num_s=250, num_as=8)
pp_1 = PatternPooler(num_s=250, num_as=8)
pp_2 = PatternPooler(num_s=250, num_as=8)
sl_0 = SequenceLearner()
sl_1 = SequenceLearner()
sl_2 = SequenceLearner()

# connect blocks
pp_0.input.add_child(se_0.output)
pp_1.input.add_child(se_1.output)
pp_2.input.add_child(pp_0.output)  # pp_2 receives both lower pooler outputs
pp_2.input.add_child(pp_1.output)
sl_0.input.add_child(pp_0.output)
sl_1.input.add_child(pp_1.output)
sl_2.input.add_child(pp_2.output)

# loop through data
for i in range(num_inputs):
Ejemplo n.º 9
0
    1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0,
    0.0, 0.0, 0.2, 0.0, 0.0
]  # <-- abnormality is 0.2

# define blocks: scalar encoder -> pattern pooler -> sequence learner
e = ScalarEncoder(
    min_val=0.0,  # minimum input value
    max_val=1.0,  # maximum input value
    num_s=1000,  # number of statelets
    num_as=100)  # number of active statelets

pp = PatternPooler(
    num_s=500,  # number of statelets
    num_as=8,  # number of active statelets
    perm_thr=20,  # receptor permanence threshold
    perm_inc=2,  # receptor permanence increment
    perm_dec=1,  # receptor permanence decrement
    pct_pool=0.8,  # pooling percentage
    pct_conn=0.5,  # initially connected percentage
    pct_learn=0.25)  # learn percentage

# NOTE(review): perm_thr=1, perm_inc=1, perm_dec=0 appears to connect a
# receptor permanently after a single learning step — confirm intended
sl = SequenceLearner(
    num_spc=10,  # number of statelets per column
    num_dps=10,  # number of coincidence detectors per statelet
    num_rpd=12,  # number of receptors per coincidence detector
    d_thresh=6,  # coincidence detector threshold
    perm_thr=1,  # receptor permanence threshold
    perm_inc=1,  # receptor permanence increment
    perm_dec=0)  # receptor permanence decrement

# connect blocks
Ejemplo n.º 10
0
class AbnormalityDetector():
    """Multivariate abnormality detector: one encoder per measurand feeding
    a shared PatternPooler and SequenceLearner."""

    def __init__(
            self,
            configs=(),      # block configuration
            min_val=-1.0,    # minimum value
            max_val=1.0,     # maximum value
            num_i=1024,      # ScalarEncoder number of statelets
            num_ai=128,      # ScalarEncoder number of active statelets
            num_s=512,       # PatternPooler number of statelets
            num_as=8,        # PatternPooler number of active statelets
            num_spc=10,      # SequenceLearner number of statelets per column
            num_dps=10,      # SequenceLearner number of coincidence detectors per statelet
            num_rpd=12,      # SequenceLearner number of receptors per coincidence detector
            d_thresh=6,      # SequenceLearner coincidence detector threshold
            pct_pool=0.8,    # PatternPooler pool percentage
            pct_conn=0.5,    # PatternPooler initial connection percentage
            pct_learn=0.25): # PatternPooler learn percentage

        self.min_val = min_val
        self.max_val = max_val

        # seed the random number generator
        bb.seed(0)

        # build blocks from config descriptions if given
        blocks = get_blocks(configs)
        self.encoders = blocks["encoders"]
        self.pp = blocks["pattern_pooler"]
        self.sl = blocks["sequence_learner"]

        # fall back to a single default scalar encoder
        if not self.encoders:
            self.encoders.append(ScalarEncoder(min_val, max_val, num_i, num_ai))

        # use `is None` for singleton comparison (PEP 8), not `== None`
        if self.pp is None:
            self.pp = PatternPooler(num_s, num_as, 20, 2, 1, pct_pool, pct_conn, pct_learn)

        if self.sl is None:
            self.sl = SequenceLearner(num_spc, num_dps, num_rpd, d_thresh, 1, 1, 0)

        # every encoder feeds the pooler; the pooler feeds the learner
        for encoder in self.encoders:
            self.pp.input.add_child(encoder.output)

        self.sl.input.add_child(self.pp.output)

        self.initialized = False

    def print_parameters(self):
        """Print the parameters of every block in the pipeline."""
        for encoder in self.encoders:
            encoder.print_parameters()
        self.pp.print_parameters()
        self.sl.print_parameters()

    def save_memories(self, path='./', name='detector'):
        """Save pooler and learner memories to <path><name>_pp.bin / _sl.bin."""
        self.pp.save_memories(path + name + "_pp.bin")
        self.sl.save_memories(path + name + "_sl.bin")

    def load_memories(self, path='./', name='detector'):
        """Load pooler and learner memories written by save_memories()."""
        self.pp.load_memories(path + name + "_pp.bin")
        self.sl.load_memories(path + name + "_sl.bin")

    def compute(self, vectors=(), learn=True):
        """Feed one sequence of values per encoder through the pipeline.

        Processing stops at the length of the shortest sequence.  Returns a
        list of abnormality scores, one per processed step; a step scores
        1.0 once any value has fallen outside [min_val, max_val].
        """
        anoms = []
        num_measurands = len(vectors)
        num_encoders = len(self.encoders)

        if num_measurands != num_encoders:
            print("Warning: compute() num_measurands != num_encoders")
            return anoms

        # process only as many steps as the shortest measurand provides
        # (replaces the 0xFFFFFFFF sentinel + manual min loop)
        num_steps = min(len(vector) for vector in vectors)

        # persistence encoders carry state between calls; reset them
        for encoder in self.encoders:
            if isinstance(encoder, PersistenceEncoder):
                encoder.reset()

        # NOTE(review): limit_flag is sticky — once any value is out of
        # bounds, every later step also scores 1.0.  Confirm intended
        # (AnomalyDetectorPersist.feedforward checks bounds per value).
        limit_flag = False
        for s in range(num_steps):
            for e in range(num_encoders):
                value = vectors[e][s]
                if value < self.min_val or value > self.max_val:
                    limit_flag = True
                self.encoders[e].compute(value)
            self.pp.compute(learn)
            self.sl.compute(learn)

            anoms.append(1.0 if limit_flag else self.sl.get_score())

        self.initialized = True

        return anoms
Ejemplo n.º 11
0
def test_pattern_pooler():
    """Learning two symbols must not change their pooled representations."""
    enc = SymbolsEncoder(
        max_symbols=8,  # maximum number of symbols
        num_s=1024)  # number of statelets

    pooler = PatternPooler(
        num_s=512,  # number of statelets
        num_as=8,  # number of active statelets
        perm_thr=20,  # receptor permanence threshold
        perm_inc=2,  # receptor permanence increment
        perm_dec=1,  # receptor permanence decrement
        pct_pool=0.8,  # pooling percentage
        pct_conn=0.5,  # initially connected percentage
        pct_learn=0.25)  # learn percentage

    pooler.input.add_child(enc.output)

    def snapshot(symbol):
        # Encode `symbol` and pool it without learning; return output bits.
        enc.compute(value=symbol)
        pooler.compute(learn=False)
        return pooler.output.bits

    before_a = snapshot(0)
    before_b = snapshot(1)

    # Train on both symbols, alternating, for ten rounds.
    for _ in range(10):
        enc.compute(value=0)
        pooler.compute(learn=True)
        enc.compute(value=1)
        pooler.compute(learn=True)

    after_a = snapshot(0)
    after_b = snapshot(1)

    np.testing.assert_array_equal(before_a, after_a)
    np.testing.assert_array_equal(before_b, after_b)
Ejemplo n.º 12
0
    0.0, 0.2, 0.4, 0.6, 0.8
]

values1 = [
    0.0, 0.2, 0.4, 0.6, 0.8, 0.8, 0.6, 0.4, 0.2, 0.0, 0.0, 0.2, 0.4, 0.6, 0.8,
    0.8, 0.6, 0.4, 0.2, 0.0
]

# One score slot per time step for each of the three sequence learners.
scores0 = [0 for i in range(num_values)]
scores1 = [0 for i in range(num_values)]
scores2 = [0 for i in range(num_values)]

# Setup blocks: two transformer->pooler branches, plus a third pooler (pp2)
# that pools over both branches; each pooler feeds its own sequence learner
st0 = ScalarTransformer(num_s=500, num_as=50)
st1 = ScalarTransformer(num_s=500, num_as=50)
pp0 = PatternPooler(num_s=250, num_as=8)
pp1 = PatternPooler(num_s=250, num_as=8)
pp2 = PatternPooler(num_s=250, num_as=8)
sl0 = SequenceLearner()
sl1 = SequenceLearner()
sl2 = SequenceLearner()

# Connect blocks
pp0.input.add_child(st0.output, 0)
pp1.input.add_child(st1.output, 0)
pp2.input.add_child(pp0.output, 0)  # pp2 receives both lower pooler outputs
pp2.input.add_child(pp1.output, 0)
sl0.input.add_child(pp0.output, 0)
sl1.input.add_child(pp1.output, 0)
sl2.input.add_child(pp2.output, 0)
Ejemplo n.º 13
0
def test_pattern_pooler():
    """Pooled outputs for two symbols stay stable across learning."""
    enc = SymbolsEncoder(max_symbols=8, num_s=1024)

    pooler = PatternPooler(num_s=512,
                           num_as=8,
                           perm_thr=20,
                           perm_inc=2,
                           perm_dec=1,
                           pct_pool=0.8,
                           pct_conn=0.5,
                           pct_learn=0.25)

    pooler.input.add_child(enc.output)

    def pooled(symbol):
        # Encode and pool `symbol` without learning; return the output bits.
        enc.compute(symbol)
        pooler.compute(False)
        return pooler.output.get_bits()

    before_a = pooled(0)
    before_b = pooled(1)

    # Ten alternating learning passes over both symbols.
    for _ in range(10):
        enc.compute(0)
        pooler.compute(True)
        enc.compute(1)
        pooler.compute(True)

    after_a = pooled(0)
    after_b = pooled(1)

    np.testing.assert_array_equal(before_a, after_a)
    np.testing.assert_array_equal(before_b, after_b)