Example #1
def test_subset(rng):
    v1 = Vocabulary(32, rng=rng)
    v1.parse('A+B+C+D+E+F+G')

    # Test creating a vocabulary subset
    v2 = v1.create_subset(['A', 'C', 'E'])
    assert v2.keys == ['A', 'C', 'E']
    assert v2['A'] == v1['A']
    assert v2['C'] == v1['C']
    assert v2['E'] == v1['E']
    assert v2.parent is v1

    # Test creating a subset from a subset (it should create off the parent)
    v3 = v2.create_subset(['C', 'E'])
    assert v3.parent is v2.parent and v2.parent is v1

    v3.include_pairs = True
    assert v3.key_pairs == ['C*E']
    assert not v1.include_pairs
    assert not v2.include_pairs

    # Test transform_to between subsets (should be identity transform)
    t = v1.transform_to(v2)

    assert v2.parse('A').compare(np.dot(t, v1.parse('A').v)) >= 0.99999999
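
Below is a minimal standalone sketch of the create_subset/transform_to pattern exercised by this test, assuming the legacy nengo.spa Vocabulary API used throughout these examples; the 16-dimensional size and the 'A+B+C' keys are illustrative only.

import numpy as np
from nengo.spa import Vocabulary

rng = np.random.RandomState(0)
vocab = Vocabulary(16, rng=rng)
vocab.parse('A+B+C')                      # populate the parent vocabulary

sub = vocab.create_subset(['A', 'C'])     # subset shares vectors with its parent
t = vocab.transform_to(sub)               # mapping matrix between the two spaces

# Mapping the parent's 'A' into the subset space should match the subset's 'A'.
mapped = np.dot(t, vocab.parse('A').v)
print(sub.parse('A').compare(mapped))     # expected to be ~1.0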
Example #2
def test_subset(rng):
    v1 = Vocabulary(32, rng=rng)
    v1.parse("A+B+C+D+E+F+G")

    # Test creating a vocabulary subset
    v2 = v1.create_subset(["A", "C", "E"])
    assert v2.keys == ["A", "C", "E"]
    assert v2["A"] == v1["A"]
    assert v2["C"] == v1["C"]
    assert v2["E"] == v1["E"]
    assert v2.parent is v1

    # Test creating a subset from a subset (it should create off the parent)
    v3 = v2.create_subset(["C", "E"])
    assert v3.parent is v2.parent and v2.parent is v1

    v3.include_pairs = True
    assert v3.key_pairs == ["C*E"]
    assert not v1.include_pairs
    assert not v2.include_pairs

    # Test transform_to between subsets (should be identity transform)
    t = v1.transform_to(v2)

    assert v2.parse("A").compare(np.dot(t, v1.parse("A").v)) >= 0.99999999
Example #3
class SpaunVocabulary(object):
    def __init__(self):
        self.main = None

        self.sp_dim = 512
        self.mtr_dim = 50
        self.vis_dim = 200

        # ############ Semantic pointer (strings) definitions #################
        # --- Numerical semantic pointers ---
        self.num_sp_strs = ['ZER', 'ONE', 'TWO', 'THR', 'FOR',
                            'FIV', 'SIX', 'SEV', 'EIG', 'NIN']
        self.n_num_sp = len(self.num_sp_strs)

        # --- Task semantic pointer list ---
        # W - Drawing (Copying visual input)
        # R - Recognition
        # L - Learning (Bandit Task)
        # M - Memory (forward serial recall)
        # C - Counting
        # A - Answering
        # V - Rapid Variable Creation
        # F - Fluid Induction (Ravens)
        # X - Task precursor
        # DEC - Decoding task (output to motor system)
        self.ps_task_sp_strs = ['W', 'R', 'L', 'M', 'C', 'A', 'V', 'F', 'X',
                                'DEC']
        self.ps_task_vis_sp_strs = ['A', 'C', 'F', 'K', 'L', 'M', 'P', 'R',
                                    'V', 'W']
        # --- Task visual semantic pointer usage ---
        # A - Task initialization
        # F - Forward recall
        # R - Reverse recall
        # K - Q&A 'kind' probe
        # P - Q&A 'position' probe

        # --- Production system semantic pointers ---
        # DECW - Decoding state (output to motor system, but for drawing task)
        # DECI - Decoding state (output to motor system, but for inductn tasks)
        self.ps_state_sp_strs = ['QAP', 'QAK', 'TRANS0', 'TRANS1', 'TRANS2',
                                 'CNT0', 'CNT1', 'LEARN']
        self.ps_dec_sp_strs = ['FWD', 'REV', 'CNT', 'DECW', 'DECI', 'NONE']

        # --- Misc actions semantic pointers
        self.ps_action_sp_strs = None
        self.min_num_ps_actions = 3

        # --- Misc visual semantic pointers ---
        self.misc_vis_sp_strs = ['OPEN', 'CLOSE', 'SPACE', 'QM']

        # --- Misc state semantic pointers ---
        self.misc_ps_sp_strs = ['MATCH', 'NO_MATCH']

        # --- 'I don't know' motor response vector
        self.mtr_sp_strs = ['UNK']

        # --- List of all visual semantic pointers ---
        self.vis_sp_strs = list(self.num_sp_strs)
        self.vis_sp_strs.extend(self.misc_vis_sp_strs)
        self.vis_sp_strs.extend(self.ps_task_vis_sp_strs)

        # --- Position (enumerated) semantic pointers ---
        self.pos_sp_strs = None
        self.max_enum_list_pos = 8

        # --- Operations semantic pointers
        self.ops_sp_strs = ['ADD', 'INC']

        # --- Reward semantic pointers
        self.reward_n_sp_str = self.num_sp_strs[0]
        self.reward_y_sp_str = self.num_sp_strs[1]
        self.reward_sp_strs = [self.reward_n_sp_str, self.reward_y_sp_str]

    def write_header(self):
        logger.write('# Spaun Vocabulary Options:\n')
        logger.write('# -------------------------\n')
        for param_name in sorted(self.__dict__.keys()):
            param_value = getattr(self, param_name)
            if not callable(param_value) and not isinstance(param_value, list)\
               and not isinstance(param_value, Vocabulary) \
               and not isinstance(param_value, SemanticPointer) \
               and not isinstance(param_value, np.ndarray):
                logger.write('# - %s = %s\n' % (param_name, param_value))
        logger.write('\n')

    def initialize(self, num_learn_actions=3, rng=0):
        if rng == 0:
            rng = np.random.RandomState(int(time.time()))

        # ############### Semantic pointer list definitions ###################
        # --- Position (enumerated) semantic pointers ---
        self.pos_sp_strs = ['POS%i' % (i + 1)
                            for i in range(self.max_enum_list_pos)]

        # --- Unitary semantic pointers
        self.unitary_sp_strs = [self.num_sp_strs[0], self.pos_sp_strs[0]]
        self.unitary_sp_strs.extend(self.ops_sp_strs)

        # --- Production system (action) semantic pointers ---
        self.ps_action_learn_sp_strs = ['A%d' % (i + 1) for i in
                                        range(num_learn_actions)]
        self.ps_action_misc_sp_strs = []
        self.ps_action_sp_strs = (self.ps_action_learn_sp_strs +
                                  self.ps_action_misc_sp_strs)
        # #################### Vocabulary definitions #########################
        # --- Primary vocabulary ---
        self.main = Vocabulary(self.sp_dim, unitary=self.unitary_sp_strs,
                               rng=rng)

        # --- Add numerical sp's ---
        self.main.parse('%s+%s' % (self.ops_sp_strs[0], self.num_sp_strs[0]))
        add_sp = self.main[self.ops_sp_strs[0]]
        num_sp = self.main[self.num_sp_strs[0]].copy()
        for i in range(len(self.num_sp_strs) - 1):
            num_sp = num_sp.copy() * add_sp
            self.main.add(self.num_sp_strs[i + 1], num_sp)

        self.add_sp = add_sp

        # --- Add positional sp's ---
        self.main.parse('%s+%s' % (self.ops_sp_strs[1], self.pos_sp_strs[0]))
        inc_sp = self.main[self.ops_sp_strs[1]]
        pos_sp = self.main[self.pos_sp_strs[0]].copy()
        for i in range(len(self.pos_sp_strs) - 1):
            pos_sp = pos_sp.copy() * inc_sp
            self.main.add(self.pos_sp_strs[i + 1], pos_sp)

        self.inc_sp = inc_sp

        # --- Add other visual sp's ---
        self.main.parse('+'.join(self.misc_vis_sp_strs))
        self.main.parse('+'.join(self.ps_task_vis_sp_strs))

        # --- Add production system sp's ---
        self.main.parse('+'.join(self.ps_task_sp_strs))
        self.main.parse('+'.join(self.ps_state_sp_strs))
        self.main.parse('+'.join(self.ps_dec_sp_strs))
        if len(self.ps_action_sp_strs) > 0:
            self.main.parse('+'.join(self.ps_action_sp_strs))
        self.main.parse('+'.join(self.misc_ps_sp_strs))

        # ################# Sub-vocabulary definitions ########################
        self.vis_main = self.main.create_subset(self.vis_sp_strs)

        self.pos = self.main.create_subset(self.pos_sp_strs)

        self.item = self.main.create_subset(self.num_sp_strs)

        self.ps_task = self.main.create_subset(self.ps_task_sp_strs)
        self.ps_state = self.main.create_subset(self.ps_state_sp_strs)
        self.ps_dec = self.main.create_subset(self.ps_dec_sp_strs)
        self.ps_cmp = self.main.create_subset(self.misc_ps_sp_strs)
        self.ps_action = self.main.create_subset(self.ps_action_sp_strs)
        self.ps_action_learn = \
            self.main.create_subset(self.ps_action_learn_sp_strs)

        self.reward = self.main.create_subset(self.reward_sp_strs)

        # ############ Enumerated vocabulary definitions ######################
        # --- Enumerated vocabulary, enumerates all possible combinations of
        #     position and item vectors (for debug purposes)
        self.enum = Vocabulary(self.sp_dim, rng=rng)
        for pos in self.pos_sp_strs:
            for num in self.num_sp_strs:
                sp_str = '%s*%s' % (pos, num)
                self.enum.add(sp_str, self.main.parse(sp_str))

        self.pos1 = Vocabulary(self.sp_dim, rng=rng)
        for num in self.num_sp_strs:
            sp_str = '%s*%s' % (self.pos_sp_strs[0], num)
            self.pos1.add(sp_str, self.main.parse(sp_str))

    def initialize_mtr_vocab(self, mtr_dim, mtr_sps):
        self.mtr_dim = mtr_dim

        self.mtr = Vocabulary(self.mtr_dim)
        for i, sp_str in enumerate(self.num_sp_strs):
            self.mtr.add(sp_str, mtr_sps[i, :])

        self.mtr_unk = Vocabulary(self.mtr_dim)
        self.mtr_unk.add(self.mtr_sp_strs[0], mtr_sps[-1, :])

        self.mtr_disp = self.mtr.create_subset(self.num_sp_strs)
        self.mtr_disp.readonly = False
        # Disable read-only flag for display vocab so that things can be added
        self.mtr_disp.add(self.mtr_sp_strs[0],
                          self.mtr_unk[self.mtr_sp_strs[0]].v)

    def initialize_vis_vocab(self, vis_dim, vis_sps):
        self.vis_dim = vis_dim

        self.vis = Vocabulary(self.vis_dim)
        for i, sp_str in enumerate(self.vis_sp_strs):
            self.vis.add(sp_str, vis_sps[i, :])
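
A rough usage sketch for the class above: construct it, call initialize, and note that the number pointers are chained by circular convolution with the unitary ADD pointer. This assumes SpaunVocabulary and its dependencies (numpy, time, nengo.spa's Vocabulary and SemanticPointer, and the logger used by write_header) are already in scope, as in the original module.

import numpy as np

vocab = SpaunVocabulary()
vocab.initialize(num_learn_actions=3, rng=np.random.RandomState(1))

# 'TWO' was built as 'ONE' circularly convolved with the unitary ADD pointer,
# so rebinding 'ONE' with ADD should recover 'TWO' almost exactly.
two_rebuilt = vocab.main['ONE'] * vocab.add_sp
print(vocab.main['TWO'].compare(two_rebuilt))        # expected to be ~1.0

# Sub-vocabularies built by initialize() share their vectors with vocab.main.
print(vocab.item['ZER'].compare(vocab.main['ZER']))  # expected to be ~1.0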
Example #4
def test_am_complex(Simulator, plt, seed, rng):
    """Complex auto-associative memory test.

    Has a default output vector, outputs utilities, and becomes inhibited.
    """
    D = 64
    vocab = Vocabulary(D, rng=rng)
    vocab.parse('A+B+C+D+E+F')

    vocab2 = vocab.create_subset(["A", "B", "C", "D"])

    def input_func(t):
        if t < 0.25:
            return vocab.parse('A+0.8*B').v
        elif t < 0.5:
            return vocab.parse('0.8*A+B').v
        else:
            return vocab.parse('E').v

    def inhib_func(t):
        return int(t > 0.75)

    with nengo.Network('model', seed=seed) as m:
        am = AssociativeMemory(vocab2,
                               default_output_vector=vocab.parse("F").v,
                               inhibitable=True,
                               output_utilities=True,
                               output_thresholded_utilities=True)
        in_node = nengo.Node(output=input_func, label='input')
        inhib_node = nengo.Node(output=inhib_func, label='inhib')
        nengo.Connection(in_node, am.input)
        nengo.Connection(inhib_node, am.inhibit)

        in_p = nengo.Probe(in_node)
        out_p = nengo.Probe(am.output, synapse=0.03)
        utils_p = nengo.Probe(am.utilities, synapse=0.05)
        utils_th_p = nengo.Probe(am.thresholded_utilities, synapse=0.05)

    sim = Simulator(m)
    sim.run(1.0)
    t = sim.trange()
    # Input: A+0.8B
    more_a = (t >= 0.2) & (t < 0.25)
    # Input: 0.8*A+B
    more_b = (t >= 0.45) & (t < 0.5)
    # Input: E (but E isn't in the memory vocabulary, so should output F)
    all_e = (t >= 0.7) & (t < 0.75)
    # Input: E (but inhibited, so should output nothing)
    inhib = (t >= 0.95)

    def plot(i, y, ylabel):
        plt.subplot(4, 1, i)
        plt.plot(t, y)
        plt.axvline(0.25, c='k')
        plt.axvline(0.5, c='k')
        plt.axvline(0.75, c='k')
        plt.ylabel(ylabel)
        plt.legend(vocab.keys[:y.shape[1]], loc='best', fontsize='xx-small')

    plot(1, nengo.spa.similarity(sim.data[in_p], vocab), "Input")
    plot(2, sim.data[utils_p], "Utilities")
    plot(3, sim.data[utils_th_p], "Thresholded utilities")
    plot(4, nengo.spa.similarity(sim.data[out_p], vocab), "Output")

    assert all(np.mean(sim.data[utils_p][more_a], axis=0)[:2] > [0.8, 0.5])
    assert all(np.mean(sim.data[utils_p][more_a], axis=0)[2:] < [0.01, 0.01])
    assert all(np.mean(sim.data[utils_p][more_b], axis=0)[:2] > [0.5, 0.8])
    assert all(np.mean(sim.data[utils_p][more_b], axis=0)[2:] < [0.01, 0.01])
    assert similarity(sim.data[utils_p][all_e], np.ones((1, 4))) < 0.05
    assert similarity(sim.data[utils_p][inhib], np.ones((1, 4))) < 0.05
    assert all(np.mean(sim.data[utils_th_p][more_a], axis=0)[:2] > [0.8, 0.8])
    assert all(
        np.mean(sim.data[utils_th_p][more_a], axis=0)[2:] < [0.01, 0.01])
    assert all(np.mean(sim.data[utils_th_p][more_b], axis=0)[:2] > [0.8, 0.8])
    assert all(
        np.mean(sim.data[utils_th_p][more_b], axis=0)[2:] < [0.01, 0.01])
    assert similarity(sim.data[utils_th_p][all_e], np.ones((1, 4))) < 0.05
    assert similarity(sim.data[utils_th_p][inhib], np.ones((1, 4))) < 0.05
    assert similarity(sim.data[out_p][more_a], vocab.parse("A").v) > 0.8
    assert similarity(sim.data[out_p][more_a], vocab.parse("B").v) > 0.8
    assert similarity(sim.data[out_p][more_b], vocab.parse("A").v) > 0.8
    assert similarity(sim.data[out_p][more_b], vocab.parse("B").v) > 0.8
    assert similarity(sim.data[out_p][all_e], vocab.parse("F").v) > 0.8
    assert similarity(sim.data[out_p][inhib], np.ones((1, D))) < 0.05
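
The similarity readout used in the plots above is, at its core, a dot product of the decoded data against the vocabulary's stacked vectors. A small sketch of the equivalent computation, assuming the legacy API where vocab.vectors holds one pointer per row:

import numpy as np
from nengo.spa import Vocabulary

vocab = Vocabulary(64, rng=np.random.RandomState(0))
vocab.parse('A+B+C+D+E+F')

data = vocab.parse('A+0.8*B').v[None, :]             # one sample of decoded output

# nengo.spa.similarity(data, vocab) is effectively this matrix product:
sims = np.dot(data, vocab.vectors.T)                 # shape (n_samples, n_keys)
print(dict(zip(vocab.keys, np.round(sims[0], 2))))   # 'A' and 'B' should dominate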
Example #5
def make_mtr_sp(path_x, path_y):
    path_x = convert_func_2_diff_func(path_x)
    path_y = convert_func_2_diff_func(path_y)
    return np.concatenate((path_x, path_y))

mtr_vocab = Vocabulary(cfg.mtr_dim, rng=cfg.rng)
for i, sp_str in enumerate(num_sp_strs):
    mtr_sp_vec = make_mtr_sp(mtr_canon_paths_x[i, :], mtr_canon_paths_y[i, :])
    mtr_vocab.add(sp_str, mtr_sp_vec)

mtr_unk_vocab = Vocabulary(cfg.mtr_dim, rng=cfg.rng)
mtr_unk_vocab.add(mtr_sp_strs[0], make_mtr_sp(mtr_canon_paths_x[-1, :],
                                              mtr_canon_paths_y[-1, :]))

mtr_disp_vocab = mtr_vocab.create_subset(num_sp_strs)
mtr_disp_vocab.readonly = False  # Disable read-only flag for display vocab
mtr_disp_vocab.add(mtr_sp_strs[0], mtr_unk_vocab[mtr_sp_strs[0]].v)

mtr_sp_scale_factor = float(mtr_canon_paths['size_scaling_factor'])

# ##################### Sub-vocabulary definitions ############################
vis_vocab = vocab.create_subset(vis_sp_strs)
vis_vocab_nums_inds = range(len(num_sp_strs))
vis_vocab_syms_inds = range(len(num_sp_strs), len(vis_sp_strs))

pos_vocab = vocab.create_subset(pos_sp_strs)

item_vocab = vocab.create_subset(num_sp_strs)

ps_task_vocab = vocab.create_subset(ps_task_sp_strs)
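
The display-vocabulary trick above (take a subset of a vocabulary, clear its readonly flag, then add one extra pointer) can be sketched in isolation; the dimension and key names below are illustrative, and the readonly attribute is assumed to behave as in these snippets.

import numpy as np
from nengo.spa import Vocabulary

rng = np.random.RandomState(0)
base = Vocabulary(32, rng=rng)
base.parse('ZER+ONE+TWO')

unk = Vocabulary(32, rng=rng)
unk.parse('UNK')

# Subsets are read-only by default; clearing the flag lets a single display
# vocabulary cover both the digit pointers and the unknown-response pointer.
disp = base.create_subset(['ZER', 'ONE', 'TWO'])
disp.readonly = False
disp.add('UNK', unk['UNK'].v)
print(disp.keys)   # expected: ['ZER', 'ONE', 'TWO', 'UNK']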
Example #6
def test_am_complex(Simulator, plt, seed, rng):
    """Complex auto-associative memory test.

    Has a default output vector, outputs utilities, and becomes inhibited.
    """
    D = 64
    vocab = Vocabulary(D, rng=rng)
    vocab.parse('A+B+C+D+E+F')

    vocab2 = vocab.create_subset(["A", "B", "C", "D"])

    def input_func(t):
        if t < 0.25:
            return vocab.parse('A+0.8*B').v
        elif t < 0.5:
            return vocab.parse('0.8*A+B').v
        else:
            return vocab.parse('E').v

    def inhib_func(t):
        return int(t > 0.75)

    with nengo.Network('model', seed=seed) as m:
        am = AssociativeMemory(vocab2,
                               default_output_vector=vocab.parse("F").v,
                               inhibitable=True,
                               output_utilities=True,
                               output_thresholded_utilities=True)
        in_node = nengo.Node(output=input_func, label='input')
        inhib_node = nengo.Node(output=inhib_func, label='inhib')
        nengo.Connection(in_node, am.input)
        nengo.Connection(inhib_node, am.inhibit)

        in_p = nengo.Probe(in_node)
        out_p = nengo.Probe(am.output, synapse=0.03)
        utils_p = nengo.Probe(am.utilities, synapse=0.05)
        utils_th_p = nengo.Probe(am.thresholded_utilities, synapse=0.05)

    sim = Simulator(m)
    sim.run(1.0)
    t = sim.trange()
    # Input: A+0.8B
    more_a = (t >= 0.2) & (t < 0.25)
    # Input: 0.8*A+B
    more_b = (t >= 0.45) & (t < 0.5)
    # Input: E (but E isn't in the memory vocabulary, so should output F)
    all_e = (t >= 0.7) & (t < 0.75)
    # Input: E (but inhibited, so should output nothing)
    inhib = (t >= 0.95)

    def plot(i, y, ylabel):
        plt.subplot(4, 1, i)
        plt.plot(t, y)
        plt.axvline(0.25, c='k')
        plt.axvline(0.5, c='k')
        plt.axvline(0.75, c='k')
        plt.ylabel(ylabel)
        plt.legend(vocab.keys[:y.shape[1]], loc='best', fontsize='xx-small')
    plot(1, nengo.spa.similarity(sim.data[in_p], vocab), "Input")
    plot(2, sim.data[utils_p], "Utilities")
    plot(3, sim.data[utils_th_p], "Thresholded utilities")
    plot(4, nengo.spa.similarity(sim.data[out_p], vocab), "Output")

    assert all(np.mean(sim.data[utils_p][more_a], axis=0)[:2] > [0.8, 0.5])
    assert all(np.mean(sim.data[utils_p][more_a], axis=0)[2:] < [0.01, 0.01])
    assert all(np.mean(sim.data[utils_p][more_b], axis=0)[:2] > [0.5, 0.8])
    assert all(np.mean(sim.data[utils_p][more_b], axis=0)[2:] < [0.01, 0.01])
    assert similarity(sim.data[utils_p][all_e], np.ones((1, 4))) < 0.05
    assert similarity(sim.data[utils_p][inhib], np.ones((1, 4))) < 0.05
    assert all(np.mean(sim.data[utils_th_p][more_a], axis=0)[:2] > [0.8, 0.8])
    assert all(
        np.mean(sim.data[utils_th_p][more_a], axis=0)[2:] < [0.01, 0.01])
    assert all(np.mean(sim.data[utils_th_p][more_b], axis=0)[:2] > [0.8, 0.8])
    assert all(
        np.mean(sim.data[utils_th_p][more_b], axis=0)[2:] < [0.01, 0.01])
    assert similarity(sim.data[utils_th_p][all_e], np.ones((1, 4))) < 0.05
    assert similarity(sim.data[utils_th_p][inhib], np.ones((1, 4))) < 0.05
    assert similarity(sim.data[out_p][more_a], vocab.parse("A").v) > 0.8
    assert similarity(sim.data[out_p][more_a], vocab.parse("B").v) > 0.8
    assert similarity(sim.data[out_p][more_b], vocab.parse("A").v) > 0.8
    assert similarity(sim.data[out_p][more_b], vocab.parse("B").v) > 0.8
    assert similarity(sim.data[out_p][all_e], vocab.parse("F").v) > 0.8
    assert similarity(sim.data[out_p][inhib], np.ones((1, D))) < 0.05
Example #7
class SpaunVocabulary(object):
    def __init__(self):
        self.main = None

        self.sp_dim = 512
        self.mtr_dim = 50
        self.vis_dim = 200

        # ############ Semantic pointer (strings) definitions #################
        # --- Numerical semantic pointers ---
        self.num_sp_strs = ['ZER', 'ONE', 'TWO', 'THR', 'FOR',
                            'FIV', 'SIX', 'SEV', 'EIG', 'NIN']
        self.n_num_sp = len(self.num_sp_strs)

        # --- Task semantic pointer list ---
        # W - Drawing (Copying visual input)
        # R - Recognition
        # L - Learning (Bandit Task)
        # M - Memory (forward serial recall)
        # C - Counting
        # A - Answering
        # V - Rapid Variable Creation
        # F - Fluid Induction (Ravens)
        # X - Task precursor
        # DEC - Decoding task (output to motor system)
        self.ps_task_sp_strs = ['W', 'R', 'L', 'M', 'C', 'A', 'V', 'F', 'X',
                                'DEC', 'REACT', 'INSTR', 'CMP']
        self.ps_task_vis_sp_strs = ['A', 'C', 'F', 'K', 'L', 'M', 'P', 'R',
                                    'V', 'W']
        # --- Task visual semantic pointer usage ---
        # A - Task initialization
        # F - Forward recall
        # R - Reverse recall
        # K - Q&A 'kind' probe
        # P - Q&A 'position' probe

        # --- Production system semantic pointers ---
        # DECW - Decoding state (output to motor system, but for drawing task)
        # DECI - Decoding state (output to motor system, but for inductn tasks)
        self.ps_state_sp_strs = ['QAP', 'QAK', 'TRANS0', 'TRANS1', 'TRANS2',
                                 'CNT0', 'CNT1', 'LEARN', 'DIRECT', 'INSTRP',
                                 'INSTRV', 'TRANSC']
        self.ps_dec_sp_strs = ['FWD', 'REV', 'CNT', 'DECW', 'DECI', 'NONE']

        # --- Misc actions semantic pointers
        self.ps_action_sp_strs = None
        self.min_num_ps_actions = 3

        # --- Misc visual semantic pointers ---
        self.misc_vis_sp_strs = ['OPEN', 'CLOSE', 'SPACE', 'QM']

        # --- Misc state semantic pointers ---
        self.misc_ps_sp_strs = ['NO_MATCH', 'MATCH']

        # --- 'I don't know' motor response vector
        self.mtr_sp_strs = ['UNK']

        # --- List of all visual semantic pointers ---
        # self.vis_sp_strs = list(self.num_sp_strs)
        # self.vis_sp_strs.extend(self.misc_vis_sp_strs)
        # self.vis_sp_strs.extend(self.ps_task_vis_sp_strs)

        # --- Position (enumerated) semantic pointers ---
        self.pos_sp_strs = None
        self.max_enum_list_pos = 8

        # --- Operations semantic pointers
        self.ops_sp_strs = ['ADD', 'INC']

        # --- Reward semantic pointers
        self.reward_n_sp_str = self.num_sp_strs[0]
        self.reward_y_sp_str = self.num_sp_strs[1]
        self.reward_sp_strs = [self.reward_n_sp_str, self.reward_y_sp_str]

        # --- Instruction processing input and output tags
        self.instr_tag_strs = ['VIS', 'TASK', 'STATE', 'DEC', 'DATA', 'ENABLE']

    def write_header(self):
        logger.write('# Spaun Vocabulary Options:\n')
        logger.write('# -------------------------\n')
        for param_name in sorted(self.__dict__.keys()):
            param_value = getattr(self, param_name)
            if not callable(param_value) and not isinstance(param_value, list)\
               and not isinstance(param_value, Vocabulary) \
               and not isinstance(param_value, SemanticPointer) \
               and not isinstance(param_value, np.ndarray):
                logger.write('# - %s = %s\n' % (param_name, param_value))
        logger.write('\n')

    def initialize(self, stim_SP_labels, num_learn_actions=3, rng=0):
        if rng == 0:
            rng = np.random.RandomState(int(time.time()))

        # ############### Semantic pointer list definitions ###################
        # --- Position (enumerated) semantic pointers ---
        self.pos_sp_strs = ['POS%i' % (i + 1)
                            for i in range(self.max_enum_list_pos)]

        # --- Unitary semantic pointers
        self.unitary_sp_strs = [self.num_sp_strs[0], self.pos_sp_strs[0]]
        self.unitary_sp_strs.extend(self.ops_sp_strs)

        # --- Production system (action) semantic pointers ---
        self.ps_action_learn_sp_strs = ['A%d' % (i + 1) for i in
                                        range(num_learn_actions)]
        self.ps_action_misc_sp_strs = []
        self.ps_action_sp_strs = (self.ps_action_learn_sp_strs +
                                  self.ps_action_misc_sp_strs)

        # #################### Vocabulary definitions #########################
        # --- Primary vocabulary ---
        self.main = Vocabulary(self.sp_dim, unitary=self.unitary_sp_strs,
                               max_similarity=0.2, rng=rng)

        # --- Add in visual sp's ---
        self.main.parse('+'.join(self.misc_vis_sp_strs))
        self.main.parse('+'.join(self.ps_task_vis_sp_strs))
        for sp_str in list(stim_SP_labels):
            if sp_str not in self.num_sp_strs and \
               sp_str not in self.pos_sp_strs:
                self.main.parse(sp_str)

        # --- Add numerical sp's ---
        self.main.parse('%s+%s' % (self.ops_sp_strs[0], self.num_sp_strs[0]))
        add_sp = self.main[self.ops_sp_strs[0]]
        num_sp = self.main[self.num_sp_strs[0]].copy()
        for i in range(len(self.num_sp_strs) - 1):
            num_sp = num_sp.copy() * add_sp
            self.main.add(self.num_sp_strs[i + 1], num_sp)

        self.add_sp = add_sp

        # --- Add positional sp's ---
        self.main.parse('%s+%s' % (self.ops_sp_strs[1], self.pos_sp_strs[0]))
        inc_sp = self.main[self.ops_sp_strs[1]]
        pos_sp = self.main[self.pos_sp_strs[0]].copy()
        for i in range(len(self.pos_sp_strs) - 1):
            pos_sp = pos_sp.copy() * inc_sp
            self.main.add(self.pos_sp_strs[i + 1], pos_sp)

        self.inc_sp = inc_sp

        # --- Add production system sp's ---
        self.main.parse('+'.join(self.ps_task_sp_strs))
        self.main.parse('+'.join(self.ps_state_sp_strs))
        self.main.parse('+'.join(self.ps_dec_sp_strs))
        if len(self.ps_action_sp_strs) > 0:
            self.main.parse('+'.join(self.ps_action_sp_strs))
        self.main.parse('+'.join(self.misc_ps_sp_strs))

        # --- Add instruction processing system sp's ---
        self.main.parse('+'.join(self.instr_tag_strs))

        # ################### Visual Vocabulary definitions ###################
        self.vis_sp_strs = list(stim_SP_labels)

        # Visual sp str vocab check
        if (not all(x in self.vis_sp_strs for x in self.num_sp_strs)):
            raise RuntimeError("Vocabulator - Stimulus vocabulary does not " +
                               "contain necessary Spaun NUM semantic pointer" +
                               " definitions.")
        if (not all(x in self.vis_sp_strs for x in self.misc_vis_sp_strs)):
            raise RuntimeError("Vocabulator - Stimulus vocabulary does not " +
                               "contain necessary Spaun MISC semantic " +
                               "pointer definitions.")
        if (not all(x in self.vis_sp_strs for x in self.ps_task_vis_sp_strs)):
            raise RuntimeError("Vocabulator - Stimulus vocabulary does not " +
                               "contain necessary Spaun PS semantic " +
                               "pointer definitions.")

        # ################# Sub-vocabulary definitions ########################
        self.vis_main = self.main.create_subset(self.vis_sp_strs)

        self.pos = self.main.create_subset(self.pos_sp_strs)

        self.item = self.main.create_subset(self.num_sp_strs)
        self.item_1_index = self.main.create_subset(self.num_sp_strs[1:])

        self.ps_task = self.main.create_subset(self.ps_task_sp_strs)
        self.ps_state = self.main.create_subset(self.ps_state_sp_strs)
        self.ps_dec = self.main.create_subset(self.ps_dec_sp_strs)
        self.ps_cmp = self.main.create_subset(self.misc_ps_sp_strs)
        self.ps_action = self.main.create_subset(self.ps_action_sp_strs)
        self.ps_action_learn = \
            self.main.create_subset(self.ps_action_learn_sp_strs)

        self.reward = self.main.create_subset(self.reward_sp_strs)

        self.instr = self.main.create_subset(self.instr_tag_strs)

        # ############ Enumerated vocabulary definitions ######################
        # --- Enumerated vocabulary, enumerates all possible combinations of
        #     position and item vectors (for debug purposes)
        self.enum = Vocabulary(self.sp_dim, rng=rng)
        for pos in self.pos_sp_strs:
            for num in self.num_sp_strs:
                sp_str = '%s*%s' % (pos, num)
                self.enum.add(sp_str, self.main.parse(sp_str))

        self.pos1 = Vocabulary(self.sp_dim, rng=rng)
        for num in self.num_sp_strs:
            sp_str = '%s*%s' % (self.pos_sp_strs[0], num)
            self.pos1.add(sp_str, self.main.parse(sp_str))

        # ############ Instruction vocabulary definitions #####################
        # --- ANTECEDENT and CONSEQUENCE permutation transforms
        self.perm_ant = np.arange(self.sp_dim)
        self.perm_con = np.arange(self.sp_dim)
        np.random.shuffle(self.perm_ant)
        np.random.shuffle(self.perm_con)

        self.perm_ant_inv = np.argsort(self.perm_ant)
        self.perm_con_inv = np.argsort(self.perm_con)

    def initialize_mtr_vocab(self, mtr_dim, mtr_sps):
        self.mtr_dim = mtr_dim

        self.mtr = Vocabulary(self.mtr_dim)
        for i, sp_str in enumerate(self.num_sp_strs):
            self.mtr.add(sp_str, mtr_sps[i, :])

        self.mtr_unk = Vocabulary(self.mtr_dim)
        self.mtr_unk.add(self.mtr_sp_strs[0], mtr_sps[-1, :])

        self.mtr_disp = self.mtr.create_subset(self.num_sp_strs)
        self.mtr_disp.readonly = False
        # Disable read-only flag for display vocab so that things can be added
        self.mtr_disp.add(self.mtr_sp_strs[0],
                          self.mtr_unk[self.mtr_sp_strs[0]].v)

    def initialize_vis_vocab(self, vis_dim, vis_sps):
        if vis_sps.shape[0] != len(self.vis_sp_strs):
            raise RuntimeError('Vocabulator.initialize_vis_vocab: ' +
                               'Mismatch in shape of raw vision SPs and ' +
                               'number of vision SP labels.')

        self.vis_dim = vis_dim

        self.vis = Vocabulary(self.vis_dim)
        for i, sp_str in enumerate(self.vis_sp_strs):
            self.vis.add(sp_str, vis_sps[i, :])

    def parse_instr_sps_list(self, instr_sps_list):
        instr_sps_list_sp = self.main.parse('0')

        if len(instr_sps_list) > self.max_enum_list_pos:
            raise ValueError('Vocabulator: Too many sequential ' +
                             'instructions. Max: %d, Got: %d' %
                             (self.max_enum_list_pos, len(instr_sps_list)))

        for i, instr_sps in enumerate(instr_sps_list):
            instr_sps_list_sp += (self.main.parse('POS%i' % (i + 1)) *
                                  self.parse_instr_sps(*instr_sps))
        return instr_sps_list_sp

    def parse_instr_sps(self, ant_sp='0', cons_sp='0'):
        # Note: The ant and con permutations are used here to separate the
        #       possible POS tags in the ant/con from the instruction POS tag.
        #       This permutation is not necessary if the instruction POS tags
        #       differ from the number-representation POS tags.
        return SemanticPointer(self.main.parse(ant_sp).v[self.perm_ant] +
                               self.main.parse(cons_sp).v[self.perm_con])
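
The antecedent/consequent transforms built in initialize are plain index permutations whose inverses come from np.argsort, and parse_instr_sps relies on the fact that applying a permutation and then its inverse recovers the original vector. A numpy-only illustration of that property (the dimension is arbitrary):

import numpy as np

dim = 16
rng = np.random.RandomState(0)

perm_ant = np.arange(dim)
rng.shuffle(perm_ant)                 # forward permutation, as set up in initialize()
perm_ant_inv = np.argsort(perm_ant)   # its inverse permutation

v = rng.randn(dim)
permuted = v[perm_ant]                # what parse_instr_sps applies to a vector
recovered = permuted[perm_ant_inv]    # undoing the shuffle elsewhere in the model

print(np.allclose(recovered, v))      # True: the permutation is exactly invertible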
Example #8
def test_am_default_output_inhibit_utilities(Simulator):
    """Auto-associative memory (non-wta) complex test.

    Options: defaults to predefined vector if no match is found,
    threshold = 0.3, inhibitable, non-wta, outputs utilities and thresholded
    utilities.
    """
    rng = np.random.RandomState(1)

    D = 64
    vocab = Vocabulary(D, rng=rng)
    vocab.parse('A+B+C+D+E+F')

    vocab2 = vocab.create_subset(["A", "B", "C", "D"])

    def input_func(t):
        if t < 0.25:
            return vocab.parse('A+0.8*B').v
        elif t < 0.5:
            return vocab.parse('0.8*A+B').v
        else:
            return vocab.parse('E').v

    def inhib_func(t):
        return int(t > 0.75)

    m = nengo.Network('model', seed=123)
    with m:
        am = AssociativeMemory(vocab2,
                               default_output_vector=vocab.parse("F").v,
                               inhibitable=True, output_utilities=True,
                               output_thresholded_utilities=True)
        in_node = nengo.Node(output=input_func, label='input')
        inhib_node = nengo.Node(output=inhib_func, label='inhib')
        out_node = nengo.Node(size_in=D, label='output')
        utils_node = nengo.Node(size_in=4, label='utils')
        utils_th_node = nengo.Node(size_in=4, label='utils_th')
        nengo.Connection(in_node, am.input)
        nengo.Connection(inhib_node, am.inhibit)
        nengo.Connection(am.output, out_node, synapse=0.03)
        nengo.Connection(am.utilities, utils_node, synapse=0.05)
        nengo.Connection(am.thresholded_utilities, utils_th_node, synapse=0.05)

        in_p = nengo.Probe(in_node)
        out_p = nengo.Probe(out_node)
        utils_p = nengo.Probe(utils_node)
        utils_th_p = nengo.Probe(utils_th_node)

    sim = Simulator(m)
    sim.run(1.0)

    assert np.allclose(sim.data[in_p][240:250], vocab.parse("A+0.8*B").v,
                       atol=.1, rtol=.01)
    assert np.allclose(sim.data[in_p][490:500], vocab.parse("0.8*A+B").v,
                       atol=.1, rtol=.01)
    assert np.allclose(sim.data[in_p][-10:], vocab.parse("E").v,
                       atol=.1, rtol=.01)
    assert np.allclose(sim.data[out_p][240:250], vocab.parse("A+B").v,
                       atol=.2, rtol=.05)
    assert np.allclose(sim.data[out_p][490:500], vocab.parse("A+B").v,
                       atol=.1, rtol=.01)
    assert np.allclose(sim.data[out_p][740:750], vocab.parse("F").v,
                       atol=.1, rtol=.01)
    assert np.allclose(sim.data[out_p][-10:], vocab.parse("0").v,
                       atol=.1, rtol=.01)
    assert np.allclose(sim.data[utils_p][240:250], [1, 0.75, 0, 0],
                       atol=.1, rtol=.01)
    assert np.allclose(sim.data[utils_p][490:500], [0.75, 1, 0, 0],
                       atol=.1, rtol=.01)
    assert np.allclose(sim.data[utils_p][740:750], [0, 0, 0, 0],
                       atol=.1, rtol=.01)
    assert np.allclose(sim.data[utils_p][-10:], [0, 0, 0, 0],
                       atol=.1, rtol=.01)
    assert np.allclose(sim.data[utils_th_p][240:250], [1.05, 1.05, 0, 0],
                       atol=.2, rtol=.05)
    assert np.allclose(sim.data[utils_th_p][490:500], [1.05, 1.05, 0, 0],
                       atol=.1, rtol=.05)
    assert np.allclose(sim.data[utils_th_p][740:750], [0, 0, 0, 0],
                       atol=.1, rtol=.01)
    assert np.allclose(sim.data[utils_th_p][-10:], [0, 0, 0, 0],
                       atol=.1, rtol=.01)
Example #9
def test_am_default_output_inhibit_utilities(Simulator):
    """Auto-associative memory (non-wta) complex test.

    Options: defaults to predefined vector if no match is found,
    threshold = 0.3, inhibitable, non-wta, outputs utilities and thresholded
    utilities.
    """
    rng = np.random.RandomState(1)

    D = 64
    vocab = Vocabulary(D, rng=rng)
    vocab.parse('A+B+C+D+E+F')

    vocab2 = vocab.create_subset(["A", "B", "C", "D"])

    def input_func(t):
        if t < 0.25:
            return vocab.parse('A+0.8*B').v
        elif t < 0.5:
            return vocab.parse('0.8*A+B').v
        else:
            return vocab.parse('E').v

    def inhib_func(t):
        return int(t > 0.75)

    m = nengo.Network('model', seed=123)
    with m:
        am = AssociativeMemory(vocab2,
                               default_output_vector=vocab.parse("F").v,
                               inhibitable=True,
                               output_utilities=True,
                               output_thresholded_utilities=True)
        in_node = nengo.Node(output=input_func, label='input')
        inhib_node = nengo.Node(output=inhib_func, label='inhib')
        out_node = nengo.Node(size_in=D, label='output')
        utils_node = nengo.Node(size_in=4, label='utils')
        utils_th_node = nengo.Node(size_in=4, label='utils_th')
        nengo.Connection(in_node, am.input)
        nengo.Connection(inhib_node, am.inhibit)
        nengo.Connection(am.output, out_node, synapse=0.03)
        nengo.Connection(am.utilities, utils_node, synapse=0.05)
        nengo.Connection(am.thresholded_utilities, utils_th_node, synapse=0.05)

        in_p = nengo.Probe(in_node)
        out_p = nengo.Probe(out_node)
        utils_p = nengo.Probe(utils_node)
        utils_th_p = nengo.Probe(utils_th_node)

    sim = Simulator(m)
    sim.run(1.0)

    assert np.allclose(sim.data[in_p][240:250],
                       vocab.parse("A+0.8*B").v,
                       atol=.1,
                       rtol=.01)
    assert np.allclose(sim.data[in_p][490:500],
                       vocab.parse("0.8*A+B").v,
                       atol=.1,
                       rtol=.01)
    assert np.allclose(sim.data[in_p][-10:],
                       vocab.parse("E").v,
                       atol=.1,
                       rtol=.01)
    assert np.allclose(sim.data[out_p][240:250],
                       vocab.parse("A+B").v,
                       atol=.2,
                       rtol=.05)
    assert np.allclose(sim.data[out_p][490:500],
                       vocab.parse("A+B").v,
                       atol=.1,
                       rtol=.01)
    assert np.allclose(sim.data[out_p][740:750],
                       vocab.parse("F").v,
                       atol=.1,
                       rtol=.01)
    assert np.allclose(sim.data[out_p][-10:],
                       vocab.parse("0").v,
                       atol=.1,
                       rtol=.01)
    assert np.allclose(sim.data[utils_p][240:250], [1, 0.75, 0, 0],
                       atol=.1,
                       rtol=.01)
    assert np.allclose(sim.data[utils_p][490:500], [0.75, 1, 0, 0],
                       atol=.1,
                       rtol=.01)
    assert np.allclose(sim.data[utils_p][740:750], [0, 0, 0, 0],
                       atol=.1,
                       rtol=.01)
    assert np.allclose(sim.data[utils_p][-10:], [0, 0, 0, 0],
                       atol=.1,
                       rtol=.01)
    assert np.allclose(sim.data[utils_th_p][240:250], [1.05, 1.05, 0, 0],
                       atol=.2,
                       rtol=.05)
    assert np.allclose(sim.data[utils_th_p][490:500], [1.05, 1.05, 0, 0],
                       atol=.1,
                       rtol=.05)
    assert np.allclose(sim.data[utils_th_p][740:750], [0, 0, 0, 0],
                       atol=.1,
                       rtol=.01)
    assert np.allclose(sim.data[utils_th_p][-10:], [0, 0, 0, 0],
                       atol=.1,
                       rtol=.01)