Example #1
0
def get_scalability(num_s):
    """Benchmark an encoder -> pooler -> sequence-learner pipeline sized by
    ``num_s`` PatternPooler statelets.

    Measures the first-compute (initialization) time, the resident memory
    after initialization, and the time to process the remaining inputs.
    Relies on module-level ``data`` and ``expected_scores``.

    Returns [num_coincidence_detectors, num_bytes, init_time, comp_time].
    """
    process = psutil.Process(os.getpid())
    scores = []

    # Setup Block
    encoder = ScalarEncoder(min_val=0.0, max_val=1.0, num_s=1024, num_as=128)
    pattern_pooler = PatternPooler(num_s=num_s,
                                   num_as=8,
                                   pct_pool=0.8,
                                   pct_conn=0.8,
                                   pct_learn=0.25)
    sequence_learner = SequenceLearner(num_spc=10,
                                       num_dps=10,
                                       num_rpd=12,
                                       d_thresh=6)
    pattern_pooler.input.add_child(encoder.output)
    sequence_learner.input.add_child(pattern_pooler.output)
    # pattern_pooler statelets plus sequence_learner detectors
    # (num_s columns * 10 statelets/column * 10 detectors/statelet)
    num_coincidence_detectors = num_s + (num_s * 10 * 10)

    # Get initialization time and memory usage: the first compute call
    # triggers lazy block initialization, so time it separately
    t0 = time.time()
    encoder.compute(data[0])
    pattern_pooler.compute(learn=True)
    sequence_learner.compute(learn=True)
    score = sequence_learner.get_score()
    scores.append(score)
    t1 = time.time()
    init_time = t1 - t0
    num_bytes = process.memory_info().rss

    # Get steady-state compute time over the remaining inputs
    t0 = time.time()
    for i in range(1, len(data)):
        encoder.compute(data[i])
        pattern_pooler.compute(learn=True)
        sequence_learner.compute(learn=True)
        score = sequence_learner.get_score()
        scores.append(score)
    t1 = time.time()
    comp_time = t1 - t0

    # Test Results
    np.testing.assert_array_equal(np.array(scores), expected_scores)

    return [num_coincidence_detectors, num_bytes, init_time, comp_time]
Example #2
0
def test_sequence_learner():
    """Anomaly scores fall to zero once the repeating binary pattern is learned."""
    enc = ScalarEncoder(min_val=0.0, max_val=1.0, num_s=64, num_as=8)

    learner = SequenceLearner(num_spc=10,
                              num_dps=10,
                              num_rpd=12,
                              d_thresh=6,
                              perm_thr=1,
                              perm_inc=1,
                              perm_dec=0)

    learner.input.add_child(enc.output)

    data = [
        0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0,
        0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0,
        1.0, 1.0
    ]

    expect_scores = np.array([
        1.0, 1.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0,
        0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
        0.0, 0.0
    ])

    # collect one score per input value
    results = []
    for value in data:
        enc.compute(value)
        learner.compute(True)
        results.append(learner.get_score())
    actual_scores = np.array(results)

    np.testing.assert_array_equal(actual_scores, expect_scores)
Example #3
0
def test_sequence_learner_sine():
    """A repeating sine-like sequence is fully predicted after one period."""
    enc = ScalarEncoder(min_val=0.0,  # minimum input value
                        max_val=1.0,  # maximum input value
                        num_s=64,  # number of statelets
                        num_as=8)  # number of active statelets

    learner = SequenceLearner(num_spc=10,  # number of statelets per column
                              num_dps=10,  # number of coincidence detectors per statelet
                              num_rpd=24,  # number of receptors per coincidence detector
                              d_thresh=6,  # coincidence detector threshold
                              perm_thr=1,  # receptor permanence threshold
                              perm_inc=1,  # receptor permanence increment
                              perm_dec=0)  # receptor permanence decrement

    learner.input.add_child(enc.output)

    values = [
        0.50, 0.79, 0.98, 0.98, 0.79, 0.50, 0.21, 0.02, 0.02, 0.21, 0.50, 0.79,
        0.98, 0.98, 0.79, 0.50, 0.21, 0.02, 0.02, 0.21, 0.50, 0.79, 0.98, 0.98,
        0.79, 0.50, 0.21, 0.02, 0.02, 0.21
    ]

    expect_scores = np.array([
        1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0,
        0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
        0.0, 0.0
    ])

    # collect one score per input value
    scores = []
    for v in values:
        enc.compute(value=v)
        learner.compute(learn=True)
        scores.append(learner.get_score())

    np.testing.assert_array_equal(np.array(scores), expect_scores)
Example #4
0
        pct_conn=0.8,  # initially connected percentage
        pct_learn=0.25)  # learn percentage 0.25

    sl.input.add_child(e.output)
    pc.input.add_child(sl.output)

    aed = AbnormalEventDetector(5, 5)

    print('val  scr  lbl  prob  ae  output_active_statelets')

    for i in range(len(values)):
        e.compute(value=values[i])
        sl.compute(learn=True)
        pc.compute(learn=False)

        score = sl.get_score()
        probs = pc.get_probabilities()

        abnormal_event = aed.compute(score)

        if abnormal_event:
            for _ in range(50):
                pc.compute(label=new_label, learn=True)
            new_label += 1

        winner = np.argmax(probs)
        winner_str = '-'
        #winner_str = str(winner)
        if probs[winner] >= 0.75:
            winner_str = str(winner)
# Wire a two-level hierarchy: two encoder->pooler->learner columns, with a
# third pooler/learner pooling over both lower-level outputs.
# NOTE(review): se_*, pp_*, sl_*, num_inputs, inputs_* and scores_* are
# defined earlier in the file (outside this view) — presumably
# ScalarEncoder / PatternPooler / SequenceLearner blocks; confirm there.
pp_0.input.add_child(se_0.output)
pp_1.input.add_child(se_1.output)
pp_2.input.add_child(pp_0.output)  # top pooler sees both column outputs
pp_2.input.add_child(pp_1.output)
sl_0.input.add_child(pp_0.output)
sl_1.input.add_child(pp_1.output)
sl_2.input.add_child(pp_2.output)

# loop through data
for i in range(num_inputs):

    # compute: encoders first, then poolers bottom-up, then learners
    se_0.compute(inputs_0[i])
    se_1.compute(inputs_1[i])
    pp_0.compute()
    pp_1.compute()
    pp_2.compute()
    sl_0.compute()
    sl_1.compute()
    sl_2.compute()

    # get scores (one anomaly score per learner per step)
    scores_0[i] = sl_0.get_score()
    scores_1[i] = sl_1.get_score()
    scores_2[i] = sl_2.get_score()

# print output as a CSV-style table
print("in0, in1, sc0, sc1, sc2")
for i in range(num_inputs):
    print("%0.1f, %0.1f, %0.1f, %0.1f, %0.1f" % (inputs_0[i], inputs_1[i], scores_0[i], scores_1[i], scores_2[i]))
Example #6
0
def multiple_prior_contexts():
    """Experiment: feed symbol pairs ('a','z'), ('b','z'), ... so that 'z' is
    seen after many different prior contexts; record scores, statelet counts,
    and statelet usage, then plot the results.

    Writes plots into ./multiple_prior_contexts/ and prints a table of
    per-step information.
    """
    experiment_name = 'multiple_prior_contexts'
    directory = './' + experiment_name
    mkdir_p(directory)
    print()
    print('experiment=\'%s\'' % (experiment_name))

    e = SymbolsEncoder(
        max_symbols=26,  # maximum number of symbols
        num_s=208)  # number of statelets

    sl = SequenceLearner(
        num_spc=10,  # number of statelets per column
        num_dps=50,  # number of coincidence detectors per statelet
        num_rpd=12,  # number of receptors per coincidence detector
        d_thresh=6,  # coincidence detector threshold
        perm_thr=1,  # receptor permanence threshold
        perm_inc=1,  # receptor permanence increment
        perm_dec=0)  # receptor permanence decrement

    sl.input.add_child(e.output)

    # each letter a..y appears three times, always followed by 'z'
    values = [
        'a', 'z', 'a', 'z', 'a', 'z', 'b', 'z', 'b', 'z', 'b', 'z', 'c', 'z',
        'c', 'z', 'c', 'z', 'd', 'z', 'd', 'z', 'd', 'z', 'e', 'z', 'e', 'z',
        'e', 'z', 'f', 'z', 'f', 'z', 'f', 'z', 'g', 'z', 'g', 'z', 'g', 'z',
        'h', 'z', 'h', 'z', 'h', 'z', 'i', 'z', 'i', 'z', 'i', 'z', 'j', 'z',
        'j', 'z', 'j', 'z', 'k', 'z', 'k', 'z', 'k', 'z', 'l', 'z', 'l', 'z',
        'l', 'z', 'm', 'z', 'm', 'z', 'm', 'z', 'n', 'z', 'n', 'z', 'n', 'z',
        'o', 'z', 'o', 'z', 'o', 'z', 'p', 'z', 'p', 'z', 'p', 'z', 'q', 'z',
        'q', 'z', 'q', 'z', 'r', 'z', 'r', 'z', 'r', 'z', 's', 'z', 's', 'z',
        's', 'z', 't', 'z', 't', 'z', 't', 'z', 'u', 'z', 'u', 'z', 'u', 'z',
        'v', 'z', 'v', 'z', 'v', 'z', 'w', 'z', 'w', 'z', 'w', 'z', 'x', 'z',
        'x', 'z', 'x', 'z', 'y', 'z', 'y', 'z', 'y', 'z'
    ]

    # the encoder takes integer symbol ids, so map letters -> ints
    le = preprocessing.LabelEncoder()
    le.fit(values)
    int_values = le.transform(values)

    scores = [0.0 for _ in range(len(values))]
    count_s_acts = [0 for _ in range(len(values))]
    count_s_hist = [0 for _ in range(len(values))]
    count_cs = [0 for _ in range(len(values))]
    # NOTE(review): 2240 is hard-coded, but num_s=208 * num_spc=10 = 2080 —
    # confirm the intended total statelet count against the library
    hidden_s_usage = [0 for _ in range(2240)]
    output_s_usage = [0 for _ in range(2240)]

    print('val  scr  s_act  s_his    cs  active output statelets')

    for i in range(len(int_values)):
        e.compute(value=int_values[i])
        sl.compute(learn=True)

        # update information
        hidden_s_bits = sl.hidden.bits
        hidden_s_acts = sl.hidden.acts
        output_s_bits = sl.output.bits
        output_s_acts = sl.output.acts
        scores[i] = sl.get_score()
        count_s_acts[i] = len(output_s_acts)
        count_s_hist[i] = sl.get_historical_count()
        count_cs[i] = sl.get_coincidence_set_count()

        # update statelet usage (accumulate per-step activation bits)
        for s in range(len(output_s_usage)):
            hidden_s_usage[s] += hidden_s_bits[s]
            output_s_usage[s] += output_s_bits[s]

        # plot statelets after every third letter/z pair
        if (i + 1) % 6 == 0:
            title = values[i] + '_' + values[i - 1]
            plot_statelets(directory, 'hidden_' + title, hidden_s_bits)
            plot_statelets(directory, 'output_' + title, output_s_bits)

        # print information
        output_s_acts_str = '[' + ', '.join(
            str(act).rjust(4) for act in output_s_acts) + ']'
        print('{0:>3}  {1:0.1f}  {2:5d}  {3:5d}  {4:4d}  {5:>4}'.format(
            values[i], scores[i], count_s_acts[i], count_s_hist[i],
            count_cs[i], output_s_acts_str))

    # plot information
    plot_results(directory, 'results', values, scores, count_s_acts,
                 count_s_hist, count_cs, 600, 2200)
    plot_statelet_usage(directory, 'hidden', hidden_s_usage, 75)
    plot_statelet_usage(directory, 'output', output_s_usage, 75)
    '''
class AbnormalityDetector():
    """Anomaly-detection pipeline: ScalarEncoder(s) -> PatternPooler ->
    SequenceLearner.

    ``compute()`` scores each time step of the input vectors in [0.0, 1.0];
    any value outside [min_val, max_val] forces a score of 1.0.
    """

    def __init__(
            self,
            configs=(),      # block configuration
            min_val=-1.0,    # minimum value
            max_val=1.0,     # maximum value
            num_i=1024,      # ScalarEncoder number of statelets
            num_ai=128,      # ScalarEncoder number of active statelets
            num_s=512,       # PatternPooler number of statelets
            num_as=8,        # PatternPooler number of active statelets
            num_spc=10,      # SequenceLearner number of statelets per column
            num_dps=10,      # SequenceLearner number of coincidence detectors per statelet
            num_rpd=12,      # SequenceLearner number of receptors per coincidence detector
            d_thresh=6,      # SequenceLearner coincidence detector threshold
            pct_pool=0.8,    # PatternPooler pool percentage
            pct_conn=0.5,    # PatternPooler initial connection percentage
            pct_learn=0.25): # PatternPooler learn percentage

        self.min_val = min_val
        self.max_val = max_val

        # seed the random number generator for reproducible block wiring
        bb.seed(0)

        # build blocks from config descriptions if given
        blocks = get_blocks(configs)
        self.encoders = blocks["encoders"]
        self.pp = blocks["pattern_pooler"]
        self.sl = blocks["sequence_learner"]

        # fall back to default blocks for anything the configs did not supply
        if len(self.encoders) == 0:
            self.encoders.append(ScalarEncoder(min_val, max_val, num_i, num_ai))

        if self.pp is None:  # 'is None' rather than '== None' (PEP 8)
            self.pp = PatternPooler(num_s, num_as, 20, 2, 1, pct_pool, pct_conn, pct_learn)

        if self.sl is None:
            self.sl = SequenceLearner(num_spc, num_dps, num_rpd, d_thresh, 1, 1, 0)

        # wire the pipeline: every encoder feeds the pooler, which feeds the learner
        for encoder in self.encoders:
            self.pp.input.add_child(encoder.output)

        self.sl.input.add_child(self.pp.output)

        self.initialized = False

    def print_parameters(self):
        """Print the parameters of every block in the pipeline."""
        for encoder in self.encoders:
            encoder.print_parameters()
        self.pp.print_parameters()
        self.sl.print_parameters()

    def save_memories(self, path='./', name='detector'):
        """Persist pooler/learner state to '<path><name>_pp.bin' and '_sl.bin'."""
        self.pp.save_memories(path + name + "_pp.bin")
        self.sl.save_memories(path + name + "_sl.bin")

    def load_memories(self, path='./', name='detector'):
        """Restore pooler/learner state written by save_memories()."""
        self.pp.load_memories(path + name + "_pp.bin")
        self.sl.load_memories(path + name + "_sl.bin")

    def compute(self, vectors=(), learn=True):
        """Run the pipeline over parallel measurand vectors.

        vectors: one sequence of values per encoder; processing stops at the
        length of the shortest vector. Returns a list with one anomaly score
        per processed step (empty list on an encoder-count mismatch).
        """
        anoms = []
        num_measurands = len(vectors)
        num_encoders = len(self.encoders)

        if num_measurands != num_encoders:
            print("Warning: compute() num_measurands != num_encoders")
            return anoms

        # only iterate as far as the shortest vector allows
        num_steps = min((len(vector) for vector in vectors), default=0)

        # persistence encoders carry state between calls; start each call fresh
        for e in range(num_encoders):
            if isinstance(self.encoders[e], PersistenceEncoder):
                self.encoders[e].reset()

        limit_flag = 0
        for s in range(num_steps):
            for e in range(num_encoders):
                value = vectors[e][s]
                if value < self.min_val or value > self.max_val:
                    limit_flag = 1
                self.encoders[e].compute(value)
            self.pp.compute(learn)
            self.sl.compute(learn)

            # NOTE(review): limit_flag is never cleared, so one out-of-range
            # value makes every later step in this call score 1.0 — behavior
            # preserved as-is; confirm whether a per-step reset was intended
            if limit_flag == 1:
                anoms.append(1.0)
            else:
                anoms.append(self.sl.get_score())

        self.initialized = True

        return anoms
Example #8
0
def three_events(statelet_snapshots_on=False):
    """Experiment: runs of 'a' interrupted by three distinct symbol events
    ('bcdef', 'fedcb', 'bcdcb'); record scores, statelet counts, and statelet
    usage, then plot the results.

    statelet_snapshots_on: when True, also plot statelet snapshots every 5
    steps. Writes plots into ./three_events/ and prints a per-step table.
    """
    experiment_name = 'three_events'
    directory = './' + experiment_name
    mkdir_p(directory)
    print()
    print('experiment=\'%s\'' % (experiment_name))

    NUM_S = 208
    NUM_SPC = 10
    TOTAL_NUM_S = NUM_S * NUM_SPC  # total statelets across all columns

    e = SymbolsEncoder(
        max_symbols=26,  # maximum number of symbols
        num_s=NUM_S)  # number of statelets

    sl = SequenceLearner(
        num_spc=NUM_SPC,  # number of statelets per column
        num_dps=50,  # number of coincidence detectors per statelet
        num_rpd=12,  # number of receptors per coincidence detector
        d_thresh=6,  # coincidence detector threshold
        perm_thr=1,  # receptor permanence threshold
        perm_inc=1,  # receptor permanence increment
        perm_dec=0)  # receptor permanence decrement

    sl.input.add_child(e.output)

    values = [
        'a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', 'b', 'c', 'd', 'e',
        'f', 'a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', 'f', 'e', 'd',
        'c', 'b', 'a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', 'b', 'c',
        'd', 'c', 'b', 'a', 'a', 'a', 'a', 'a'
    ]

    # the encoder takes integer symbol ids, so map letters -> ints
    le = preprocessing.LabelEncoder()
    le.fit(values)
    int_values = le.transform(values)

    scores = [0.0 for _ in range(len(values))]
    count_s_output_acts = [0 for _ in range(len(values))]
    count_s_hidden_acts = [0 for _ in range(len(values))]
    count_s_hist = [0 for _ in range(len(values))]
    count_cs = [0 for _ in range(len(values))]
    hidden_s_usage = [0 for _ in range(TOTAL_NUM_S)]
    output_s_usage = [0 for _ in range(TOTAL_NUM_S)]

    print('val  scr  s_act  s_his    cs  active output statelets')

    for i in range(len(int_values)):
        e.compute(value=int_values[i])
        sl.compute(learn=True)

        # update information
        hidden_s_bits = sl.hidden.bits
        hidden_s_acts = sl.hidden.acts
        output_s_bits = sl.output.bits
        output_s_acts = sl.output.acts
        scores[i] = sl.get_score()
        count_s_output_acts[i] = len(output_s_acts)
        count_s_hidden_acts[i] = len(hidden_s_acts)
        count_s_hist[i] = sl.get_historical_count()
        count_cs[i] = sl.get_coincidence_set_count()

        # update statelet usage (accumulate per-step activation bits)
        for s in range(len(output_s_usage)):
            hidden_s_usage[s] += hidden_s_bits[s]
            output_s_usage[s] += output_s_bits[s]

        # plot statelets
        # NOTE(review): 'and' binds tighter than 'or', so this parses as
        # (statelet_snapshots_on and (i + 1) % 5 == 0) or i == 43 — step 43
        # is always plotted even with snapshots off; confirm intended
        if statelet_snapshots_on and (i + 1) % 5 == 0 or i == 43:
            title = 'step_' + str(i) + '_' + values[i] + '_' + values[i - 1]
            plot_statelets(directory, 'hidden_' + title, hidden_s_bits)
            plot_statelets(directory, 'output_' + title, output_s_bits)

        # print information
        output_s_acts_str = '[' + ', '.join(
            str(act).rjust(4) for act in output_s_acts) + ']'
        print('{0:>3}  {1:0.1f}  {2:5d}  {3:5d}  {4:4d}  {5:>4}'.format(
            values[i], scores[i], count_s_output_acts[i], count_s_hist[i],
            count_cs[i], output_s_acts_str))

    # plot information
    plot_results(directory, 'results', values, scores, count_s_output_acts,
                 count_s_hidden_acts, count_s_hist, count_cs, 400, 400)
    plot_statelet_usage(directory, 'hidden', hidden_s_usage, 75)
    plot_statelet_usage(directory, 'output', output_s_usage, 75)