Example No. 1
def test_extend(rng):
    v = Vocabulary(16, rng=rng)
    v.parse("A+B")
    assert v.keys == ["A", "B"]
    assert not v.unitary

    # Test extending the vocabulary
    v.extend(["C", "D"])
    assert v.keys == ["A", "B", "C", "D"]

    # Test extending the vocabulary with various unitary options
    v.extend(["E", "F"], unitary=["E"])
    assert v.keys == ["A", "B", "C", "D", "E", "F"]
    assert v.unitary == ["E"]

    # Check if 'E' is unitary
    fft_val = np.fft.fft(v["E"].v)
    fft_imag = fft_val.imag
    fft_real = fft_val.real
    fft_norms = np.sqrt(fft_imag**2 + fft_real**2)
    assert np.allclose(fft_norms, np.ones(16))

    v.extend(["G", "H"], unitary=True)
    assert v.keys == ["A", "B", "C", "D", "E", "F", "G", "H"]
    assert v.unitary == ["E", "G", "H"]
Example No. 2
def test_extend(rng):
    v = Vocabulary(16, rng=rng)
    v.parse('A+B')
    assert v.keys == ['A', 'B']
    assert not v.unitary

    # Test extending the vocabulary
    v.extend(['C', 'D'])
    assert v.keys == ['A', 'B', 'C', 'D']

    # Test extending the vocabulary with various unitary options
    v.extend(['E', 'F'], unitary=['E'])
    assert v.keys == ['A', 'B', 'C', 'D', 'E', 'F']
    assert v.unitary == ['E']

    # Check if 'E' is unitary
    fft_val = np.fft.fft(v['E'].v)
    fft_imag = fft_val.imag
    fft_real = fft_val.real
    fft_norms = np.sqrt(fft_imag ** 2 + fft_real ** 2)
    assert np.allclose(fft_norms, np.ones(16))

    v.extend(['G', 'H'], unitary=True)
    assert v.keys == ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H']
    assert v.unitary == ['E', 'G', 'H']
Example No. 3
def test_invalid_dimensions():
    with pytest.raises(ValidationError):
        Vocabulary(1.5)
    with pytest.raises(ValidationError):
        Vocabulary(0)
    with pytest.raises(ValidationError):
        Vocabulary(-1)
Example No. 4
def test_prob_cleanup(rng):
    v = Vocabulary(64, rng=rng)
    assert 1.0 > v.prob_cleanup(0.7, 10000) > 0.9999
    assert 0.9999 > v.prob_cleanup(0.6, 10000) > 0.999
    assert 0.99 > v.prob_cleanup(0.5, 1000) > 0.9

    v = Vocabulary(128, rng=rng)
    assert 0.999 > v.prob_cleanup(0.4, 1000) > 0.997
    assert 0.99 > v.prob_cleanup(0.4, 10000) > 0.97
    assert 0.9 > v.prob_cleanup(0.4, 100000) > 0.8
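For reference, prob_cleanup(compare, vocab_size) estimates the probability that a noisy pointer with similarity compare to its target is still closer to that target than to any of vocab_size random distractors, which is why the bounds above tighten as the similarity rises and loosen as the vocabulary grows. A hedged usage sketch (the numbers are illustrative, not taken from the test):

v = Vocabulary(64)
p = v.prob_cleanup(0.7, 10000)  # chance that cleanup picks the right pointer
assert 0.0 < p <= 1.0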
Example No. 5
def test_am_spa_interaction(Simulator, seed, rng):
    """Make sure associative memory interacts with other SPA modules."""
    D = 16
    vocab = Vocabulary(D, rng=rng)
    vocab.parse("A+B+C+D")

    D2 = int(D / 2)
    vocab2 = Vocabulary(D2, rng=rng)
    vocab2.parse("A+B+C+D")

    def input_func(t):
        return "0.49*A" if t < 0.5 else "0.79*A"

    with nengo.spa.SPA(seed=seed) as m:
        m.buf = nengo.spa.Buffer(D)
        m.input = nengo.spa.Input(buf=input_func)

        m.am = AssociativeMemory(
            vocab,
            vocab2,
            input_keys=["A", "B", "C"],
            output_keys=["B", "C", "D"],
            default_output_key="A",
            threshold=0.5,
            inhibitable=True,
            wta_output=True,
            threshold_output=True,
        )

        cortical_actions = nengo.spa.Actions("am = buf")
        m.c_act = nengo.spa.Cortical(cortical_actions)

    # Check to see if model builds properly. No functionality test needed
    with Simulator(m):
        pass
Example No. 6
def test_am_spa_interaction(Simulator):
    """Standard associative memory interacting with other SPA modules.

    Options: threshold = 0.5, non-inhibitable, non-wta, does not output
    utilities or thresholded utilities.
    """
    rng = np.random.RandomState(1)

    D = 16
    vocab = Vocabulary(D, rng=rng)
    vocab.parse('A+B+C+D')

    D2 = int(D / 2)
    vocab2 = Vocabulary(D2, rng=rng)
    vocab2.parse('A+B+C+D')

    def input_func(t):
        if t < 0.5:
            return '0.49*A'
        else:
            return '0.79*A'

    m = nengo.spa.SPA('model', seed=123)
    with m:
        m.buf = nengo.spa.Buffer(D)
        m.input = nengo.spa.Input(buf=input_func)

        m.am = AssociativeMemory(vocab, vocab2, threshold=0.5)

        cortical_actions = nengo.spa.Actions('am = buf')
        m.c_act = nengo.spa.Cortical(cortical_actions)

    # Check to see if model builds properly. No functionality test needed
    Simulator(m)
Example No. 7
    def initialize_mtr_vocab(self, mtr_dim, mtr_sps):
        self.mtr_dim = mtr_dim

        self.mtr = Vocabulary(self.mtr_dim)
        for i, sp_str in enumerate(self.num_sp_strs):
            self.mtr.add(sp_str, mtr_sps[i, :])

        self.mtr_unk = Vocabulary(self.mtr_dim)
        self.mtr_unk.add(self.mtr_sp_strs[0], mtr_sps[-1, :])

        self.mtr_disp = self.mtr.create_subset(self.num_sp_strs)
        self.mtr_disp.readonly = False
        # Disable read-only flag for display vocab so that things can be added
        self.mtr_disp.add(self.mtr_sp_strs[0],
                          self.mtr_unk[self.mtr_sp_strs[0]].v)
Example No. 8
def test_transform():
    v1 = Vocabulary(32, rng=np.random.RandomState(7))
    v2 = Vocabulary(64, rng=np.random.RandomState(8))
    A = v1.parse('A')
    B = v1.parse('B')
    C = v1.parse('C')
    t = v1.transform_to(v2)

    assert v2.parse('A').compare(np.dot(t, A.v)) > 0.95
    assert v2.parse('C+B').compare(np.dot(t, C.v + B.v)) > 0.95

    t = v1.transform_to(v2, keys=['A', 'B'])

    assert v2.parse('A').compare(np.dot(t, A.v)) > 0.95
    assert v2.parse('B').compare(np.dot(t, B.v)) > 0.95
Example No. 9
def test_create_pointer_warning(rng):
    v = Vocabulary(2, rng=rng)

    # five pointers shouldn't fit
    with pytest.warns(UserWarning):
        v.parse('A')
        v.parse('B')
        v.parse('C')
        v.parse('D')
        v.parse('E')
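The warning is forced by geometry: five unit vectors cannot all be near-orthogonal in two dimensions, so the vocabulary cannot keep the pairwise similarities below its tolerance no matter how many candidates it draws. A self-contained check of that claim in plain numpy (independent of nengo):

import numpy as np

rng = np.random.RandomState(0)
vs = rng.randn(5, 2)
vs /= np.linalg.norm(vs, axis=1, keepdims=True)
# Five directions within a 180-degree range of the plane put some pair
# within 36 degrees, so its |cosine| is at least cos(36°) ≈ 0.81.
sims = np.abs(vs @ vs.T) - np.eye(5)
assert sims.max() > 0.5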
Example No. 10
def test_create_pointer_warning(rng):
    v = Vocabulary(2, rng=rng)

    # five pointers shouldn't fit
    with pytest.warns(UserWarning):
        v.parse("A")
        v.parse("B")
        v.parse("C")
        v.parse("D")
        v.parse("E")
Example No. 11
def test_am_basic(Simulator, plt, seed, rng):
    """Basic associative memory test."""

    D = 64
    vocab = Vocabulary(D, rng=rng)
    vocab.parse('A+B+C+D')

    with nengo.Network('model', seed=seed) as m:
        am = AssociativeMemory(vocab)
        in_node = nengo.Node(output=vocab.parse("A").v, label='input')
        nengo.Connection(in_node, am.input)

        in_p = nengo.Probe(in_node)
        out_p = nengo.Probe(am.output, synapse=0.03)

    sim = Simulator(m)
    sim.run(0.2)
    t = sim.trange()

    plt.subplot(2, 1, 1)
    plt.plot(t, nengo.spa.similarity(sim.data[in_p], vocab))
    plt.ylabel("Input")
    plt.ylim(top=1.1)
    plt.legend(vocab.keys, loc='best')
    plt.subplot(2, 1, 2)
    plt.plot(t, nengo.spa.similarity(sim.data[out_p], vocab))
    plt.plot(t[t > 0.15], np.ones(t.shape)[t > 0.15] * 0.8, c='g', lw=2)
    plt.ylabel("Output")
    plt.legend(vocab.keys, loc='best')

    assert similarity(sim.data[in_p][t > 0.15], vocab.parse("A").v) > 0.99
    assert similarity(sim.data[out_p][t > 0.15], vocab.parse("A").v) > 0.8
Example No. 12
def test_am_defaults(Simulator):
    """Default assoc memory.

    Options: auto-associative, threshold = 0.3, non-inhibitable, non-wta,
    does not output utilities or thresholded utilities.
    """

    rng = np.random.RandomState(1)

    D = 64
    vocab = Vocabulary(D, rng=rng)
    vocab.parse('A+B+C+D')

    m = nengo.Network('model', seed=123)
    with m:
        am = AssociativeMemory(vocab)
        in_node = nengo.Node(output=vocab.parse("A").v, label='input')
        out_node = nengo.Node(size_in=D, label='output')
        nengo.Connection(in_node, am.input)
        nengo.Connection(am.output, out_node, synapse=0.03)

        in_p = nengo.Probe(in_node)
        out_p = nengo.Probe(out_node)

    sim = Simulator(m)
    sim.run(1.0)

    assert np.allclose(sim.data[in_p][-10:],
                       vocab.parse("A").v,
                       atol=.1,
                       rtol=.01)
    assert np.allclose(sim.data[out_p][-10:],
                       vocab.parse("A").v,
                       atol=.1,
                       rtol=.01)
Example No. 13
def test_subset(rng):
    v1 = Vocabulary(32, rng=rng)
    v1.parse("A+B+C+D+E+F+G")

    # Test creating a vocabulary subset
    v2 = v1.create_subset(["A", "C", "E"])
    assert v2.keys == ["A", "C", "E"]
    assert v2["A"] == v1["A"]
    assert v2["C"] == v1["C"]
    assert v2["E"] == v1["E"]
    assert v2.parent is v1

    # Test creating a subset from a subset (it should create off the parent)
    v3 = v2.create_subset(["C", "E"])
    assert v3.parent is v2.parent and v2.parent is v1

    v3.include_pairs = True
    assert v3.key_pairs == ["C*E"]
    assert not v1.include_pairs
    assert not v2.include_pairs

    # Test transform_to between subsets (should be identity transform)
    t = v1.transform_to(v2)

    assert v2.parse("A").compare(np.dot(t, v1.parse("A").v)) >= 0.99999999
Example No. 14
def test_am_wta(Simulator, plt, seed, rng):
    """Test the winner-take-all ability of the associative memory."""

    D = 64
    vocab = Vocabulary(D, rng=rng)
    vocab.parse('A+B+C+D')

    def input_func(t):
        if t < 0.2:
            return vocab.parse('A+0.8*B').v
        elif t < 0.3:
            return np.zeros(D)
        else:
            return vocab.parse('0.8*A+B').v

    with nengo.Network('model', seed=seed) as m:
        am = AssociativeMemory(vocab, wta_output=True)
        in_node = nengo.Node(output=input_func, label='input')
        nengo.Connection(in_node, am.input)

        in_p = nengo.Probe(in_node)
        out_p = nengo.Probe(am.output, synapse=0.03)

    sim = Simulator(m)
    sim.run(0.5)
    t = sim.trange()
    more_a = (t > 0.15) & (t < 0.2)
    more_b = t > 0.45

    plt.subplot(2, 1, 1)
    plt.plot(t, nengo.spa.similarity(sim.data[in_p], vocab))
    plt.ylabel("Input")
    plt.ylim(top=1.1)
    plt.legend(vocab.keys, loc='best')
    plt.subplot(2, 1, 2)
    plt.plot(t, nengo.spa.similarity(sim.data[out_p], vocab))
    plt.plot(t[more_a], np.ones(t.shape)[more_a] * 0.8, c='g', lw=2)
    plt.plot(t[more_b], np.ones(t.shape)[more_b] * 0.8, c='g', lw=2)
    plt.ylabel("Output")
    plt.legend(vocab.keys, loc='best')

    assert similarity(sim.data[out_p][more_a], vocab.parse("A").v) > 0.8
    assert similarity(sim.data[out_p][more_a], vocab.parse("B").v) < 0.2
    assert similarity(sim.data[out_p][more_b], vocab.parse("B").v) > 0.8
    assert similarity(sim.data[out_p][more_b], vocab.parse("A").v) < 0.2
Example No. 15
def test_am_assoc_mem_threshold(Simulator):
    """Standard associative memory (differing input and output vocabularies).

    Options: threshold = 0.5, non-inhibitable, non-wta, does not output
    utilities or thresholded utilities.
    """
    rng = np.random.RandomState(1)

    D = 64
    vocab = Vocabulary(D, rng=rng)
    vocab.parse('A+B+C+D')

    D2 = int(D / 2)
    vocab2 = Vocabulary(D2, rng=rng)
    vocab2.parse('A+B+C+D')

    def input_func(t):
        if t < 0.5:
            return vocab.parse('0.49*A').v
        else:
            return vocab.parse('0.79*A').v

    m = nengo.Network('model', seed=123)
    with m:
        am = AssociativeMemory(vocab, vocab2, threshold=0.5)
        in_node = nengo.Node(output=input_func, label='input')
        out_node = nengo.Node(size_in=D2, label='output')
        nengo.Connection(in_node, am.input)
        nengo.Connection(am.output, out_node, synapse=0.03)

        in_p = nengo.Probe(in_node)
        out_p = nengo.Probe(out_node)

    sim = Simulator(m)
    sim.run(1.0)

    assert np.allclose(sim.data[in_p][490:500], vocab.parse("0.49*A").v,
                       atol=.15, rtol=.01)
    assert np.allclose(sim.data[in_p][-10:], vocab.parse("0.79*A").v,
                       atol=.15, rtol=.01)
    assert np.allclose(sim.data[out_p][490:500], vocab2.parse("0").v,
                       atol=.15, rtol=.01)
    assert np.allclose(sim.data[out_p][-10:], vocab2.parse("A").v,
                       atol=.15, rtol=.01)
Example No. 16
def test_transform(rng):
    v1 = Vocabulary(32, rng=rng)
    v2 = Vocabulary(64, rng=rng)
    A = v1.parse('A')
    B = v1.parse('B')
    C = v1.parse('C')
    t = v1.transform_to(v2)

    assert v2.parse('A').compare(np.dot(t, A.v)) > 0.95
    assert v2.parse('C+B').compare(np.dot(t, C.v + B.v)) > 0.9

    t = v1.transform_to(v2, keys=['A', 'B'])

    assert v2.parse('A').compare(np.dot(t, A.v)) > 0.95
    assert v2.parse('B').compare(np.dot(t, B.v)) > 0.95
Example No. 17
def test_readonly(rng):
    v1 = Vocabulary(32, rng=rng)
    v1.parse("A+B+C")

    v1.readonly = True

    with pytest.raises(ValueError):
        v1.parse("D")
Example No. 18
def test_transform(rng):
    v1 = Vocabulary(32, rng=rng)
    v2 = Vocabulary(64, rng=rng)
    A = v1.parse("A")
    B = v1.parse("B")
    C = v1.parse("C")
    t = v1.transform_to(v2)

    assert v2.parse("A").compare(np.dot(t, A.v)) > 0.95
    assert v2.parse("C+B").compare(np.dot(t, C.v + B.v)) > 0.9

    t = v1.transform_to(v2, keys=["A", "B"])

    assert v2.parse("A").compare(np.dot(t, A.v)) > 0.95
    assert v2.parse("B").compare(np.dot(t, B.v)) > 0.95
Example No. 19
def test_include_pairs(rng):
    v = Vocabulary(10, rng=rng)
    v['A']
    v['B']
    v['C']
    assert v.key_pairs is None
    v.include_pairs = True
    assert v.key_pairs == ['A*B', 'A*C', 'B*C']
    v.include_pairs = False
    assert v.key_pairs is None
    v.include_pairs = True
    v['D']
    assert v.key_pairs == ['A*B', 'A*C', 'B*C', 'A*D', 'B*D', 'C*D']

    v = Vocabulary(12, include_pairs=True)
    v['A']
    v['B']
    v['C']
    assert v.key_pairs == ['A*B', 'A*C', 'B*C']
Example No. 20
def test_include_pairs():
    v = Vocabulary(10)
    v["A"]
    v["B"]
    v["C"]
    assert v.key_pairs is None
    v.include_pairs = True
    assert v.key_pairs == ["A*B", "A*C", "B*C"]
    v.include_pairs = False
    assert v.key_pairs is None
    v.include_pairs = True
    v["D"]
    assert v.key_pairs == ["A*B", "A*C", "B*C", "A*D", "B*D", "C*D"]

    v = Vocabulary(12, include_pairs=True)
    v["A"]
    v["B"]
    v["C"]
    assert v.key_pairs == ["A*B", "A*C", "B*C"]
Example No. 21
    def initialize_vis_vocab(self, vis_dim, vis_sps):
        if vis_sps.shape[0] != len(self.vis_sp_strs):
            raise RuntimeError('Vocabulary.initialize_vis_vocab: ' +
                               'Mismatch in shape of raw vision SPs and ' +
                               'number of vision SP labels.')

        self.vis_dim = vis_dim

        self.vis = Vocabulary(self.vis_dim)
        for i, sp_str in enumerate(self.vis_sp_strs):
            self.vis.add(sp_str, vis_sps[i, :])
Example No. 22
def test_am_threshold(Simulator, plt, seed, rng):
    """Associative memory thresholding with differing input/output vocabs."""
    D = 64
    vocab = Vocabulary(D, rng=rng)
    vocab.parse('A+B+C+D')

    D2 = int(D / 2)
    vocab2 = Vocabulary(D2, rng=rng)
    vocab2.parse('A+B+C+D')

    def input_func(t):
        return vocab.parse('0.49*A').v if t < 0.1 else vocab.parse('0.79*A').v

    with nengo.Network('model', seed=seed) as m:
        am = AssociativeMemory(vocab, vocab2, threshold=0.5)
        in_node = nengo.Node(output=input_func, label='input')
        nengo.Connection(in_node, am.input)

        in_p = nengo.Probe(in_node)
        out_p = nengo.Probe(am.output, synapse=0.03)

    sim = Simulator(m)
    sim.run(0.3)
    t = sim.trange()
    below_th = t < 0.1
    above_th = t > 0.25

    plt.subplot(2, 1, 1)
    plt.plot(t, nengo.spa.similarity(sim.data[in_p], vocab))
    plt.ylabel("Input")
    plt.legend(vocab.keys, loc='best')
    plt.subplot(2, 1, 2)
    plt.plot(t, nengo.spa.similarity(sim.data[out_p], vocab2))
    plt.plot(t[above_th], np.ones(t.shape)[above_th] * 0.8, c='g', lw=2)
    plt.ylabel("Output")
    plt.legend(vocab.keys, loc='best')

    assert similarity(sim.data[in_p][below_th], vocab.parse("A").v) > 0.48
    assert similarity(sim.data[in_p][above_th], vocab.parse("A").v) > 0.78
    assert similarity(sim.data[out_p][below_th], vocab2.parse("0").v) < 0.01
    assert similarity(sim.data[out_p][above_th], vocab2.parse("A").v) > 0.8
Example No. 23
def test_create_pointer_warning(rng):
    v = Vocabulary(2, rng=rng)

    # five pointers shouldn't fit
    with warns(UserWarning):
        v.parse('A')
        v.parse('B')
        v.parse('C')
        v.parse('D')
        v.parse('E')
Example No. 24
def test_am_spa_interaction(Simulator, seed, rng):
    """Make sure associative memory interacts with other SPA modules."""
    D = 16
    vocab = Vocabulary(D, rng=rng)
    vocab.parse('A+B+C+D')

    D2 = int(D / 2)
    vocab2 = Vocabulary(D2, rng=rng)
    vocab2.parse('A+B+C+D')

    def input_func(t):
        return '0.49*A' if t < 0.5 else '0.79*A'

    with nengo.spa.SPA(seed=seed) as m:
        m.buf = nengo.spa.Buffer(D)
        m.input = nengo.spa.Input(buf=input_func)

        m.am = AssociativeMemory(vocab, vocab2,
                                 input_keys=['A', 'B', 'C'],
                                 output_keys=['B', 'C', 'D'],
                                 default_output_key='A',
                                 threshold=0.5,
                                 inhibitable=True,
                                 wta_output=True,
                                 threshold_output=True)

        cortical_actions = nengo.spa.Actions('am = buf')
        m.c_act = nengo.spa.Cortical(cortical_actions)

    # Check to see if model builds properly. No functionality test needed
    Simulator(m)
Example No. 25
def test_am_defaults(Simulator):
    """Default assoc memory.

    Options: auto-associative, threshold = 0.3, non-inhibitable, non-wta,
    does not output utilities or thresholded utilities.
    """

    rng = np.random.RandomState(1)

    D = 64
    vocab = Vocabulary(D, rng=rng)
    vocab.parse('A+B+C+D')

    m = nengo.Network('model', seed=123)
    with m:
        am = AssociativeMemory(vocab)
        in_node = nengo.Node(output=vocab.parse("A").v,
                             label='input')
        out_node = nengo.Node(size_in=D, label='output')
        nengo.Connection(in_node, am.input)
        nengo.Connection(am.output, out_node, synapse=0.03)

        in_p = nengo.Probe(in_node)
        out_p = nengo.Probe(out_node)

    sim = Simulator(m)
    sim.run(1.0)

    assert np.allclose(sim.data[in_p][-10:], vocab.parse("A").v,
                       atol=.1, rtol=.01)
    assert np.allclose(sim.data[out_p][-10:], vocab.parse("A").v,
                       atol=.1, rtol=.01)
Example No. 26
def test_subset(rng):
    v1 = Vocabulary(32, rng=rng)
    v1.parse('A+B+C+D+E+F+G')

    # Test creating a vocabulary subset
    v2 = v1.create_subset(['A', 'C', 'E'])
    assert v2.keys == ['A', 'C', 'E']
    assert v2['A'] == v1['A']
    assert v2['C'] == v1['C']
    assert v2['E'] == v1['E']
    assert v2.parent is v1

    # Test creating a subset from a subset (it should create off the parent)
    v3 = v2.create_subset(['C', 'E'])
    assert v3.parent is v2.parent and v2.parent is v1

    v3.include_pairs = True
    assert v3.key_pairs == ['C*E']
    assert not v1.include_pairs
    assert not v2.include_pairs

    # Test transform_to between subsets (should be identity transform)
    t = v1.transform_to(v2)

    assert v2.parse('A').compare(np.dot(t, v1.parse('A').v)) >= 0.99999999
Example No. 27
def test_readonly(rng):
    v1 = Vocabulary(32, rng=rng)
    v1.parse('A+B+C')

    v1.readonly = True

    with pytest.raises(ValueError):
        v1.parse('D')
Example No. 28
def test_include_pairs(rng):
    v = Vocabulary(10, rng=rng)
    v["A"]
    v["B"]
    v["C"]
    assert v.key_pairs is None
    v.include_pairs = True
    assert v.key_pairs == ["A*B", "A*C", "B*C"]
    v.include_pairs = False
    assert v.key_pairs is None
    v.include_pairs = True
    v["D"]
    assert v.key_pairs == ["A*B", "A*C", "B*C", "A*D", "B*D", "C*D"]

    v = Vocabulary(12, include_pairs=True, rng=rng)
    v["A"]
    v["B"]
    v["C"]
    assert v.key_pairs == ["A*B", "A*C", "B*C"]
Example No. 29
def test_include_pairs(rng):
    v = Vocabulary(10, rng=rng)
    v['A']
    v['B']
    v['C']
    assert v.key_pairs is None
    v.include_pairs = True
    assert v.key_pairs == ['A*B', 'A*C', 'B*C']
    v.include_pairs = False
    assert v.key_pairs is None
    v.include_pairs = True
    v['D']
    assert v.key_pairs == ['A*B', 'A*C', 'B*C', 'A*D', 'B*D', 'C*D']

    v = Vocabulary(12, include_pairs=True, rng=rng)
    v['A']
    v['B']
    v['C']
    assert v.key_pairs == ['A*B', 'A*C', 'B*C']
Example No. 30
def test_am_spa_interaction(Simulator, seed, rng):
    """Make sure associative memory interacts with other SPA modules."""
    D = 16
    vocab = Vocabulary(D, rng=rng)
    vocab.parse('A+B+C+D')

    D2 = int(D / 2)
    vocab2 = Vocabulary(D2, rng=rng)
    vocab2.parse('A+B+C+D')

    def input_func(t):
        return '0.49*A' if t < 0.5 else '0.79*A'

    with nengo.spa.SPA(seed=seed) as m:
        m.buf = nengo.spa.Buffer(D)
        m.input = nengo.spa.Input(buf=input_func)

        m.am = AssociativeMemory(vocab, vocab2, threshold=0.5)

        cortical_actions = nengo.spa.Actions('am = buf')
        m.c_act = nengo.spa.Cortical(cortical_actions)

    # Check to see if model builds properly. No functionality test needed
    Simulator(m)
Example No. 31
def main():
    
    model = spa.SPA(label="Vector Storage")
    with model:
        
        # Dimensionality of each representation
        num_dimensions = 2
        sub_dimensions = 2
        
        # Create the vocabulary
        vocab = Vocabulary(num_dimensions, randomize = False)
        
        # Form the inputs
        stored_value_1 = [1] * num_dimensions
        stored_value_1 = [s/np.linalg.norm(stored_value_1) for s in stored_value_1]
        vocab.add("Stored_value_1", stored_value_1)
        
        stored_value_2 = [(-1)**i for i in range(num_dimensions)]
        stored_value_2 = [s/np.linalg.norm(stored_value_2) for s in stored_value_2]
        vocab.add("Stored_value_2", stored_value_2)
                
        def first_input(t):
            if t < 10:
                return "Stored_value_2"
            else:
                return "Stored_value_1"
        
        def second_input(t):
            if t < 5:
                return "Stored_value_1"
            else:
                return "Stored_value_2"
                
        # Buffers to store the input
        model.buffer1 = spa.Buffer(dimensions = num_dimensions, subdimensions = sub_dimensions, neurons_per_dimension = 200, direct = True)
        model.buffer2 = spa.Buffer(dimensions = num_dimensions, subdimensions = sub_dimensions, neurons_per_dimension = 200, direct = True)
        
        # Probe to visualize the values stored in the buffers
        buffer_1_probe = nengo.Probe(model.buffer1.state.output)
        buffer_2_probe = nengo.Probe(model.buffer2.state.output)
        
        # Connect up the inputs. A single spa.Input must drive both buffers;
        # assigning model.input twice would overwrite the first mapping.
        model.input = spa.Input(buffer1=first_input, buffer2=second_input)
        
        # Buffer to store the output
        model.buffer3 = spa.Buffer(dimensions = num_dimensions, subdimensions = sub_dimensions, neurons_per_dimension = 200, direct = True)
        buffer_3_probe = nengo.Probe(model.buffer3.state.output)
        
        # Control system
        actions = spa.Actions('dot(buffer1, Stored_value_2) --> buffer3=Stored_value_2', 'dot(buffer1, Stored_value_1) --> buffer3=Stored_value_1+Stored_value_2')
        model.bg = spa.BasalGanglia(actions)
        model.thalamus = spa.Thalamus(model.bg)

        
    # Start the simulator
    sim = nengo.Simulator(model)

    # Dynamic plotting
    plt.ion() # Dynamic updating of plots
    fig = plt.figure(figsize=(15,8))
    plt.show()
    ax = fig.gca()
    ax.set_title("Vector Storage")

    while True:
        sim.run(1) # Run for an additional 1 second
        plt.clf() # Clear the figure
        plt.plot(sim.trange(), similarity(sim.data, buffer_1_probe, vocab), label = "Buffer 1 Value") # Plot the entire dataset so far
        plt.plot(sim.trange(), similarity(sim.data, buffer_2_probe, vocab), label = "Buffer 2 Value")
        plt.plot(sim.trange(), similarity(sim.data, buffer_3_probe, vocab), label = "Buffer 3 Value")
        print(sim.data[buffer_1_probe][-1])
        print(sim.data[buffer_2_probe][-1])
        print(sim.data[buffer_3_probe][-1])
        plt.legend(vocab.keys * 3, loc = 2)
        plt.draw() # Re-draw
Example No. 32
vis_sp_strs.extend(ps_task_vis_sp_strs)

# --- Position (enumerated) semantic pointers ---
pos_sp_strs = ['POS%i' % (i + 1) for i in range(cfg.max_enum_list_pos)]

# --- Operations semantic pointers
ops_sp_strs = ['ADD', 'INC']

# --- Unitary semantic pointers
unitary_sp_strs = [num_sp_strs[0], pos_sp_strs[0]]
unitary_sp_strs.extend(ops_sp_strs)


# ####################### Vocabulary definitions ##############################
# --- Primary vocabulary ---
vocab = Vocabulary(cfg.sp_dim, unitary=unitary_sp_strs, rng=cfg.rng)

# --- Add numerical sp's ---
vocab.parse('%s+%s' % (ops_sp_strs[0], num_sp_strs[0]))
add_sp = vocab[ops_sp_strs[0]]
num_sp = vocab[num_sp_strs[0]].copy()
for i in range(len(num_sp_strs) - 1):
    num_sp = num_sp.copy() * add_sp
    vocab.add(num_sp_strs[i + 1], num_sp)

# --- Add positional sp's ---
vocab.parse('%s+%s' % (ops_sp_strs[1], pos_sp_strs[0]))
inc_sp = vocab[ops_sp_strs[1]]
pos_sp = vocab[pos_sp_strs[0]].copy()
for i in range(len(pos_sp_strs) - 1):
    pos_sp = pos_sp.copy() * inc_sp
    vocab.add(pos_sp_strs[i + 1], pos_sp)
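ZER, POS1, ADD, and INC were all declared unitary when the vocabulary was created, and that is what keeps this enumeration stable: each number or position is the previous pointer circularly convolved with a unitary operator, so every pointer in the chain stays at unit length rather than growing or shrinking. A small sanity check of that property (assuming the vocab built above):

import numpy as np

for key in pos_sp_strs:
    assert np.allclose(np.linalg.norm(vocab[key].v), 1.0, atol=1e-6)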
Example No. 33
def test_transform(rng):
    v1 = Vocabulary(32, rng=rng)
    v2 = Vocabulary(64, rng=rng)
    A = v1.parse("A")
    B = v1.parse("B")
    C = v1.parse("C")

    # Test transform from v1 to v2 (full vocabulary)
    # Expected: np.dot(t, A.v) ~= v2.parse('A')
    # Expected: np.dot(t, B.v) ~= v2.parse('B')
    # Expected: np.dot(t, C.v) ~= v2.parse('C')
    t = v1.transform_to(v2)

    assert v2.parse("A").compare(np.dot(t, A.v)) > 0.95
    assert v2.parse("C+B").compare(np.dot(t, C.v + B.v)) > 0.9

    # Test transform from v1 to v2 (only 'A' and 'B')
    t = v1.transform_to(v2, keys=["A", "B"])

    assert v2.parse("A").compare(np.dot(t, A.v)) > 0.95
    assert v2.parse("B").compare(np.dot(t, C.v + B.v)) > 0.95

    # Test transform_to when either vocabulary is read-only
    v1.parse("D")
    v2.parse("E")

    # When both are read-only, transform_to shouldn't add any new items to
    # either and the transform should be using keys that are the intersection
    # of both vocabularies
    v1.readonly = True
    v2.readonly = True

    t = v1.transform_to(v2)

    assert v1.keys == ["A", "B", "C", "D"]
    assert v2.keys == ["A", "B", "C", "E"]

    # When one is read-only, transform_to should add any new items to the non
    # read-only vocabulary
    v1.readonly = False
    v2.readonly = True

    t = v1.transform_to(v2)

    assert v1.keys == ["A", "B", "C", "D", "E"]
    assert v2.keys == ["A", "B", "C", "E"]

    # When one is read-only, transform_to should add any new items to the non
    # read-only vocabulary
    v1.readonly = True
    v2.readonly = False

    t = v1.transform_to(v2)

    assert v1.keys == ["A", "B", "C", "D", "E"]
    assert v2.keys == ["A", "B", "C", "E", "D"]
Example No. 34
def test_text(rng):
    v = Vocabulary(64, rng=rng)
    x = v.parse('A+B+C')
    y = v.parse('-D-E-F')
    ptr = r'-?[01]\.[0-9]{2}[A-F]'
    assert re.match(';'.join([ptr] * 3), v.text(x))
    assert re.match(';'.join([ptr] * 2), v.text(x, maximum_count=2))
    assert re.match(ptr, v.text(x, maximum_count=1))
    assert len(v.text(x, maximum_count=10).split(';')) <= 10
    assert re.match(';'.join([ptr] * 4), v.text(x, minimum_count=4))
    assert re.match(';'.join([ptr.replace('F', 'C')] * 3),
                    v.text(x, minimum_count=4, terms=['A', 'B', 'C']))

    assert re.match(ptr, v.text(y, threshold=0.6))
    assert v.text(y, minimum_count=None, threshold=0.6) == ''

    assert v.text(x, join=',') == v.text(x).replace(';', ',')
    assert re.match(';'.join([ptr] * 2), v.text(x, normalize=True))

    assert v.text([0]*64) == '0.00F'
    assert v.text(v['D'].v) == '1.00D'
Example No. 35
def test_parse(rng):
    v = Vocabulary(64, rng=rng)
    A = v.parse('A')
    B = v.parse('B')
    C = v.parse('C')
    assert np.allclose((A * B).v, v.parse('A * B').v)
    assert np.allclose((A * ~B).v, v.parse('A * ~B').v)
    assert np.allclose((A + B).v, v.parse('A + B').v)
    assert np.allclose((A - (B*C)*3 + ~C).v, v.parse('A-(B*C)*3+~C').v)

    assert np.allclose(v.parse('0').v, np.zeros(64))
    assert np.allclose(v.parse('1').v, np.eye(64)[0])
    assert np.allclose(v.parse('1.7').v, np.eye(64)[0] * 1.7)

    with pytest.raises(SyntaxError):
        v.parse('A((')
    with pytest.raises(SpaParseError):
        v.parse('"hello"')
Example No. 36
def test_parse(rng):
    v = Vocabulary(64, rng=rng)
    A = v.parse('A')
    B = v.parse('B')
    C = v.parse('C')
    assert np.allclose((A * B).v, v.parse('A * B').v)
    assert np.allclose((A * ~B).v, v.parse('A * ~B').v)
    assert np.allclose((A + B).v, v.parse('A + B').v)
    assert np.allclose((A - (B*C)*3 + ~C).v, v.parse('A-(B*C)*3+~C').v)

    assert np.allclose(v.parse('0').v, np.zeros(64))
    assert np.allclose(v.parse('1').v, np.eye(64)[0])
    assert np.allclose(v.parse('1.7').v, np.eye(64)[0] * 1.7)

    with pytest.raises(SyntaxError):
        v.parse('A((')
    with pytest.raises(TypeError):
        v.parse('"hello"')
Example No. 37
def test_parse(rng):
    v = Vocabulary(64, rng=rng)
    A = v.parse("A")
    B = v.parse("B")
    C = v.parse("C")
    assert np.allclose((A * B).v, v.parse("A * B").v)
    assert np.allclose((A * ~B).v, v.parse("A * ~B").v)
    assert np.allclose((A + B).v, v.parse("A + B").v)
    assert np.allclose((A - (B * C) * 3 + ~C).v, v.parse("A-(B*C)*3+~C").v)

    assert np.allclose(v.parse("0").v, np.zeros(64))
    assert np.allclose(v.parse("1").v, np.eye(64)[0])
    assert np.allclose(v.parse("1.7").v, np.eye(64)[0] * 1.7)

    with pytest.raises(SyntaxError):
        v.parse("A((")
    with pytest.raises(SpaParseError):
        v.parse('"hello"')
Example No. 38
def test_identity(rng):
    v = Vocabulary(64, rng=rng)
    assert np.allclose(v.identity.v, np.eye(64)[0])
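np.eye(64)[0] is the identity because the impulse vector e0 = [1, 0, ..., 0] has an all-ones Fourier transform, so circular convolution with it returns the other operand unchanged. A quick verification in plain numpy:

import numpy as np

e0 = np.eye(64)[0]
x = np.random.RandomState(3).randn(64)
conv = np.real(np.fft.ifft(np.fft.fft(x) * np.fft.fft(e0)))
assert np.allclose(conv, x)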
Example No. 39
def test_am_complex(Simulator, plt, seed, rng):
    """Complex auto-associative memory test.

    Has a default output vector, outputs utilities, and becomes inhibited.
    """
    D = 64
    vocab = Vocabulary(D, rng=rng)
    vocab.parse('A+B+C+D+E+F')

    vocab2 = vocab.create_subset(["A", "B", "C", "D"])

    def input_func(t):
        if t < 0.25:
            return vocab.parse('A+0.8*B').v
        elif t < 0.5:
            return vocab.parse('0.8*A+B').v
        else:
            return vocab.parse('E').v

    def inhib_func(t):
        return int(t > 0.75)

    with nengo.Network('model', seed=seed) as m:
        am = AssociativeMemory(vocab2,
                               default_output_vector=vocab.parse("F").v,
                               inhibitable=True,
                               output_utilities=True,
                               output_thresholded_utilities=True)
        in_node = nengo.Node(output=input_func, label='input')
        inhib_node = nengo.Node(output=inhib_func, label='inhib')
        nengo.Connection(in_node, am.input)
        nengo.Connection(inhib_node, am.inhibit)

        in_p = nengo.Probe(in_node)
        out_p = nengo.Probe(am.output, synapse=0.03)
        utils_p = nengo.Probe(am.utilities, synapse=0.05)
        utils_th_p = nengo.Probe(am.thresholded_utilities, synapse=0.05)

    sim = Simulator(m)
    sim.run(1.0)
    t = sim.trange()
    # Input: A+0.8B
    more_a = (t >= 0.2) & (t < 0.25)
    # Input: 0.8B+A
    more_b = (t >= 0.45) & (t < 0.5)
    # Input: E (but E isn't in the memory vocabulary, so should output F)
    all_e = (t >= 0.7) & (t < 0.75)
    # Input: E (but inhibited, so should output nothing)
    inhib = (t >= 0.95)

    def plot(i, y, ylabel):
        plt.subplot(4, 1, i)
        plt.plot(t, y)
        plt.axvline(0.25, c='k')
        plt.axvline(0.5, c='k')
        plt.axvline(0.75, c='k')
        plt.ylabel(ylabel)
        plt.legend(vocab.keys[:y.shape[1]], loc='best', fontsize='xx-small')
    plot(1, nengo.spa.similarity(sim.data[in_p], vocab), "Input")
    plot(2, sim.data[utils_p], "Utilities")
    plot(3, sim.data[utils_th_p], "Thresholded utilities")
    plot(4, nengo.spa.similarity(sim.data[out_p], vocab), "Output")

    assert all(np.mean(sim.data[utils_p][more_a], axis=0)[:2] > [0.8, 0.5])
    assert all(np.mean(sim.data[utils_p][more_a], axis=0)[2:] < [0.01, 0.01])
    assert all(np.mean(sim.data[utils_p][more_b], axis=0)[:2] > [0.5, 0.8])
    assert all(np.mean(sim.data[utils_p][more_b], axis=0)[2:] < [0.01, 0.01])
    assert similarity(sim.data[utils_p][all_e], np.ones((1, 4))) < 0.05
    assert similarity(sim.data[utils_p][inhib], np.ones((1, 4))) < 0.05
    assert all(np.mean(sim.data[utils_th_p][more_a], axis=0)[:2] > [0.8, 0.8])
    assert all(
        np.mean(sim.data[utils_th_p][more_a], axis=0)[2:] < [0.01, 0.01])
    assert all(np.mean(sim.data[utils_th_p][more_b], axis=0)[:2] > [0.8, 0.8])
    assert all(
        np.mean(sim.data[utils_th_p][more_b], axis=0)[2:] < [0.01, 0.01])
    assert similarity(sim.data[utils_th_p][all_e], np.ones((1, 4))) < 0.05
    assert similarity(sim.data[utils_th_p][inhib], np.ones((1, 4))) < 0.05
    assert similarity(sim.data[out_p][more_a], vocab.parse("A").v) > 0.8
    assert similarity(sim.data[out_p][more_a], vocab.parse("B").v) > 0.8
    assert similarity(sim.data[out_p][more_b], vocab.parse("A").v) > 0.8
    assert similarity(sim.data[out_p][more_b], vocab.parse("B").v) > 0.8
    assert similarity(sim.data[out_p][all_e], vocab.parse("F").v) > 0.8
    assert similarity(sim.data[out_p][inhib], np.ones((1, D))) < 0.05
Example No. 40
def test_add(rng):
    v = Vocabulary(3, rng=rng)
    v.add("A", [1, 2, 3])
    v.add("B", [4, 5, 6])
    v.add("C", [7, 8, 9])
    assert np.allclose(v.vectors, [[1, 2, 3], [4, 5, 6], [7, 8, 9]])
Example No. 41
def test_capital(rng):
    v = Vocabulary(16, rng=rng)
    with pytest.raises(KeyError):
        v.parse('a')
    with pytest.raises(KeyError):
        v.parse('A+B+C+a')
Example No. 42
    def init_module(self, vis_net, detect_net, vis_sps, vis_sps_scale,
                    vis_net_neuron_type):
        # Make LIF vision network
        if vis_net is None:
            vis_net = LIFVisionNet(net_neuron_type=vis_net_neuron_type)
        self.vis_net = vis_net

        # Make network to detect changes in visual input stream
        if detect_net is None:
            detect_net = \
                DetectChange(dimensions=vis_data.images_data_dimensions,
                             n_neurons=cfg.n_neurons_ens)
        self.detect_change_net = detect_net
        nengo.Connection(self.vis_net.raw_output,
                         self.detect_change_net.input,
                         synapse=None)

        # Make associative memory to map visual image semantic pointers to
        # visual conceptual semantic pointers
        self.am = cfg.make_assoc_mem(vis_sps,
                                     vocab.vis_main.vectors,
                                     threshold=vis_data.am_threshold,
                                     inhibitable=True)
        nengo.Connection(self.vis_net.output, self.am.input, synapse=0.005)
        nengo.Connection(self.detect_change_net.output,
                         self.am.inhibit,
                         transform=3,
                         synapse=0.005)

        # Visual memory block (for the visual semantic pointers - top layer of
        #                      vis_net)
        vocab.vis_dim = vis_sps.shape[1]
        vis_sp_vocab = Vocabulary(vocab.vis_dim)  # TODO: FIX THIS?

        self.vis_mb = MB(cfg.n_neurons_mb * 2,
                         vocab.vis_dim,
                         gate_mode=2,
                         vocab=vis_sp_vocab,
                         radius=vis_sps_scale,
                         **cfg.mb_config)

        vis_mb_gate_sp_vecs = vocab.main.parse('+'.join(vocab.num_sp_strs)).v
        nengo.Connection(self.am.cleaned_output,
                         self.vis_mb.gate,
                         transform=[cfg.mb_gate_scale * vis_mb_gate_sp_vecs])
        nengo.Connection(self.vis_net.output,
                         self.vis_mb.input,
                         transform=vis_data.amp,
                         synapse=0.03)
        nengo.Connection(self.detect_change_net.output,
                         self.vis_mb.gate,
                         transform=-1,
                         synapse=0.01)

        # Define network input and outputs
        self.input = self.vis_net.input
        self.output = self.am.cleaned_output
        self.mb_output = self.vis_mb.output
        self.neg_attention = self.detect_change_net.output

        # Define module inputs and outputs
        self.outputs = dict(default=(self.output, vocab.vis_main))

        # ######################## DEBUG PROBES ###############################
        self.vis_out = self.vis_net.output
        self.am_utilities = self.am.cleaned_output_utilities
Example No. 43
def main():

    model = spa.SPA(label="Vector Storage")
    with model:

        # Dimensionality of each representation
        num_dimensions = 30
        sub_dimensions = 1

        # Create the vocabulary
        vocab = Vocabulary(num_dimensions, randomize=False)

        # Form the inputs manually by directly defining their vectors
        stored_value_1 = np.random.rand(num_dimensions) - 0.5
        stored_value_1 = stored_value_1 / np.linalg.norm(stored_value_1)
        vocab.add("Stored_value_1", stored_value_1)

        stored_value_2 = np.random.rand(num_dimensions) - 0.5
        stored_value_2 = stored_value_2 / np.linalg.norm(stored_value_2)
        vocab.add("Stored_value_2", stored_value_2)

        stored_value_3 = np.random.rand(num_dimensions) - 0.5
        stored_value_3 = stored_value_3 / np.linalg.norm(stored_value_3)
        vocab.add("Stored_value_3", stored_value_3)

        # Create a semantic pointer corresponding to the "correct" answer for the operation
        sum_vector = np.subtract(np.add(stored_value_1, stored_value_2),
                                 stored_value_3)
        sum_vector = sum_vector / np.linalg.norm(sum_vector)
        vocab.add("Sum", sum_vector)

        # Define the control signal inputs as random vectors
        r1 = [1] * num_dimensions
        r1 = r1 / np.linalg.norm(r1)
        r2 = [(-1)**k for k in range(num_dimensions)]
        r2 = r2 / np.linalg.norm(r2)
        vocab.add("Hold_signal", r1)
        vocab.add("Start_signal", r2)

        # Control when the vector operation takes place
        def control_input(t):
            if t < 1:
                return "Hold_signal"
            else:
                return "Start_signal"

        # inputs to the input word buffers
        def first_input(t):
            return "Stored_value_1"

        def second_input(t):
            return "Stored_value_2"

        def third_input(t):
            return "Stored_value_3"

        # Control buffer
        model.control = spa.Buffer(dimensions=num_dimensions,
                                   subdimensions=sub_dimensions,
                                   neurons_per_dimension=200,
                                   direct=True,
                                   vocab=vocab)
        control_probe = nengo.Probe(model.control.state.output)

        # Buffers to store the inputs: e.g., King, Man, Woman
        model.word_buffer1 = spa.Buffer(dimensions=num_dimensions,
                                        subdimensions=sub_dimensions,
                                        neurons_per_dimension=200,
                                        direct=True,
                                        vocab=vocab)
        model.word_buffer2 = spa.Buffer(dimensions=num_dimensions,
                                        subdimensions=sub_dimensions,
                                        neurons_per_dimension=200,
                                        direct=True,
                                        vocab=vocab)
        model.word_buffer3 = spa.Buffer(dimensions=num_dimensions,
                                        subdimensions=sub_dimensions,
                                        neurons_per_dimension=200,
                                        direct=True,
                                        vocab=vocab)

        # Probe to visualize the values stored in the buffers
        buffer_1_probe = nengo.Probe(model.word_buffer1.state.output)
        buffer_2_probe = nengo.Probe(model.word_buffer2.state.output)
        buffer_3_probe = nengo.Probe(model.word_buffer3.state.output)

        # Buffer to hold the result: e.g. Queen
        model.result = spa.Buffer(dimensions=num_dimensions,
                                  subdimensions=sub_dimensions,
                                  neurons_per_dimension=200,
                                  direct=True,
                                  vocab=vocab)
        result_probe = nengo.Probe(model.result.state.output)

        # Control system
        actions = spa.Actions(
            'dot(control, Start_signal) --> result = word_buffer1 + word_buffer2 - word_buffer3'
        )
        model.bg = spa.BasalGanglia(actions)
        model.thalamus = spa.Thalamus(model.bg, subdim_channel=sub_dimensions)

        # Connect up the inputs
        model.input = spa.Input(control=control_input,
                                word_buffer1=first_input,
                                word_buffer2=second_input,
                                word_buffer3=third_input)

    # Start the simulator
    sim = nengo.Simulator(model)

    # Dynamic plotting
    plt.ion()  # Dynamic updating of plots
    fig = plt.figure(figsize=(15, 8))
    plt.show()
    ax = fig.gca()
    ax.set_title("Vector Storage")

    while True:
        sim.run(1)  # Run for an additional 1 second
        plt.clf()  # Clear the figure
        plt.plot(sim.trange(),
                 similarity(sim.data, result_probe, vocab),
                 label="Buffer 3 Value")
        plt.legend(vocab.keys * 3, loc=2)
        plt.draw()  # Re-draw

        print(sim.data[buffer_1_probe][-1])
        print(sim.data[buffer_2_probe][-1])
        print(sim.data[buffer_3_probe][-1])
        print(sim.data[result_probe][-1])
        print("\n")

Example No. 44
def test_add(rng):
    v = Vocabulary(3, rng=rng)
    v.add('A', [1, 2, 3])
    v.add('B', [4, 5, 6])
    v.add('C', [7, 8, 9])
    assert np.allclose(v.vectors, [[1, 2, 3], [4, 5, 6], [7, 8, 9]])
Example No. 45
    def initialize_vis_vocab(self, vis_dim, vis_sps):
        self.vis_dim = vis_dim

        self.vis = Vocabulary(self.vis_dim)
        for i, sp_str in enumerate(self.vis_sp_strs):
            self.vis.add(sp_str, vis_sps[i, :])
Example No. 46
class SpaunVocabulary(object):
    def __init__(self):
        self.main = None

        self.sp_dim = 512
        self.mtr_dim = 50
        self.vis_dim = 200

        # ############ Semantic pointer (strings) definitions #################
        # --- Numerical semantic pointers ---
        self.num_sp_strs = ['ZER', 'ONE', 'TWO', 'THR', 'FOR',
                            'FIV', 'SIX', 'SEV', 'EIG', 'NIN']
        self.n_num_sp = len(self.num_sp_strs)

        # --- Task semantic pointer list ---
        # W - Drawing (Copying visual input)
        # R - Recognition
        # L - Learning (Bandit Task)
        # M - Memory (forward serial recall)
        # C - Counting
        # A - Answering
        # V - Rapid Variable Creation
        # F - Fluid Induction (Ravens)
        # X - Task precursor
        # DEC - Decoding task (output to motor system)
        self.ps_task_sp_strs = ['W', 'R', 'L', 'M', 'C', 'A', 'V', 'F', 'X',
                                'DEC']
        self.ps_task_vis_sp_strs = ['A', 'C', 'F', 'K', 'L', 'M', 'P', 'R',
                                    'V', 'W']
        # --- Task visual semantic pointer usage ---
        # A - Task initialization
        # F - Forward recall
        # R - Reverse recall
        # K - Q&A 'kind' probe
        # P - Q&A 'position' probe

        # --- Production system semantic pointers ---
        # DECW - Decoding state (output to motor system, but for drawing task)
        # DECI - Decoding state (output to motor system, but for inductn tasks)
        self.ps_state_sp_strs = ['QAP', 'QAK', 'TRANS0', 'TRANS1', 'TRANS2',
                                 'CNT0', 'CNT1', 'LEARN']
        self.ps_dec_sp_strs = ['FWD', 'REV', 'CNT', 'DECW', 'DECI', 'NONE']

        # --- Misc actions semantic pointers
        self.ps_action_sp_strs = None
        self.min_num_ps_actions = 3

        # --- Misc visual semantic pointers ---
        self.misc_vis_sp_strs = ['OPEN', 'CLOSE', 'SPACE', 'QM']

        # --- Misc state semantic pointers ---
        self.misc_ps_sp_strs = ['MATCH', 'NO_MATCH']

        # --- 'I don't know' motor response vector
        self.mtr_sp_strs = ['UNK']

        # --- List of all visual semantic pointers ---
        self.vis_sp_strs = list(self.num_sp_strs)
        self.vis_sp_strs.extend(self.misc_vis_sp_strs)
        self.vis_sp_strs.extend(self.ps_task_vis_sp_strs)

        # --- Position (enumerated) semantic pointers ---
        self.pos_sp_strs = None
        self.max_enum_list_pos = 8

        # --- Operations semantic pointers
        self.ops_sp_strs = ['ADD', 'INC']

        # --- Reward semantic pointers
        self.reward_n_sp_str = self.num_sp_strs[0]
        self.reward_y_sp_str = self.num_sp_strs[1]
        self.reward_sp_strs = [self.reward_n_sp_str, self.reward_y_sp_str]

    def write_header(self):
        logger.write('# Spaun Vocabulary Options:\n')
        logger.write('# -------------------------\n')
        for param_name in sorted(self.__dict__.keys()):
            param_value = getattr(self, param_name)
            if not callable(param_value) and not isinstance(param_value, list)\
               and not isinstance(param_value, Vocabulary) \
               and not isinstance(param_value, SemanticPointer) \
               and not isinstance(param_value, np.ndarray):
                logger.write('# - %s = %s\n' % (param_name, param_value))
        logger.write('\n')

    def initialize(self, num_learn_actions=3, rng=0):
        if rng == 0:
            rng = np.random.RandomState(int(time.time()))

        # ############### Semantic pointer list definitions ###################
        # --- Position (enumerated) semantic pointers ---
        self.pos_sp_strs = ['POS%i' % (i + 1)
                            for i in range(self.max_enum_list_pos)]

        # --- Unitary semantic pointers
        self.unitary_sp_strs = [self.num_sp_strs[0], self.pos_sp_strs[0]]
        self.unitary_sp_strs.extend(self.ops_sp_strs)

        # --- Production system (action) semantic pointers ---
        self.ps_action_learn_sp_strs = ['A%d' % (i + 1) for i in
                                        range(num_learn_actions)]
        self.ps_action_misc_sp_strs = []
        self.ps_action_sp_strs = (self.ps_action_learn_sp_strs +
                                  self.ps_action_misc_sp_strs)
        # #################### Vocabulary definitions #########################
        # --- Primary vocabulary ---
        self.main = Vocabulary(self.sp_dim, unitary=self.unitary_sp_strs,
                               rng=rng)

        # --- Add numerical sp's ---
        self.main.parse('%s+%s' % (self.ops_sp_strs[0], self.num_sp_strs[0]))
        add_sp = self.main[self.ops_sp_strs[0]]
        num_sp = self.main[self.num_sp_strs[0]].copy()
        for i in range(len(self.num_sp_strs) - 1):
            num_sp = num_sp.copy() * add_sp
            self.main.add(self.num_sp_strs[i + 1], num_sp)

        self.add_sp = add_sp

        # --- Add positional sp's ---
        self.main.parse('%s+%s' % (self.ops_sp_strs[1], self.pos_sp_strs[0]))
        inc_sp = self.main[self.ops_sp_strs[1]]
        pos_sp = self.main[self.pos_sp_strs[0]].copy()
        for i in range(len(self.pos_sp_strs) - 1):
            pos_sp = pos_sp.copy() * inc_sp
            self.main.add(self.pos_sp_strs[i + 1], pos_sp)

        self.inc_sp = inc_sp

        # --- Add other visual sp's ---
        self.main.parse('+'.join(self.misc_vis_sp_strs))
        self.main.parse('+'.join(self.ps_task_vis_sp_strs))

        # --- Add production system sp's ---
        self.main.parse('+'.join(self.ps_task_sp_strs))
        self.main.parse('+'.join(self.ps_state_sp_strs))
        self.main.parse('+'.join(self.ps_dec_sp_strs))
        if len(self.ps_action_sp_strs) > 0:
            self.main.parse('+'.join(self.ps_action_sp_strs))
        self.main.parse('+'.join(self.misc_ps_sp_strs))

        # ################# Sub-vocabulary definitions ########################
        self.vis_main = self.main.create_subset(self.vis_sp_strs)

        self.pos = self.main.create_subset(self.pos_sp_strs)

        self.item = self.main.create_subset(self.num_sp_strs)

        self.ps_task = self.main.create_subset(self.ps_task_sp_strs)
        self.ps_state = self.main.create_subset(self.ps_state_sp_strs)
        self.ps_dec = self.main.create_subset(self.ps_dec_sp_strs)
        self.ps_cmp = self.main.create_subset(self.misc_ps_sp_strs)
        self.ps_action = self.main.create_subset(self.ps_action_sp_strs)
        self.ps_action_learn = \
            self.main.create_subset(self.ps_action_learn_sp_strs)

        self.reward = self.main.create_subset(self.reward_sp_strs)

        # ############ Enumerated vocabulary definitions ######################
        # --- Enumerated vocabulary, enumerates all possible combinations of
        #     position and item vectors (for debug purposes)
        self.enum = Vocabulary(self.sp_dim, rng=rng)
        for pos in self.pos_sp_strs:
            for num in self.num_sp_strs:
                sp_str = '%s*%s' % (pos, num)
                self.enum.add(sp_str, self.main.parse(sp_str))

        self.pos1 = Vocabulary(self.sp_dim, rng=rng)
        for num in self.num_sp_strs:
            sp_str = '%s*%s' % (self.pos_sp_strs[0], num)
            self.pos1.add(sp_str, self.main.parse(sp_str))

    def initialize_mtr_vocab(self, mtr_dim, mtr_sps):
        self.mtr_dim = mtr_dim

        self.mtr = Vocabulary(self.mtr_dim)
        for i, sp_str in enumerate(self.num_sp_strs):
            self.mtr.add(sp_str, mtr_sps[i, :])

        self.mtr_unk = Vocabulary(self.mtr_dim)
        self.mtr_unk.add(self.mtr_sp_strs[0], mtr_sps[-1, :])

        self.mtr_disp = self.mtr.create_subset(self.num_sp_strs)
        # Disable the read-only flag on the display vocab so that the
        # 'unknown' motor SP can be added to it
        self.mtr_disp.readonly = False
        self.mtr_disp.add(self.mtr_sp_strs[0],
                          self.mtr_unk[self.mtr_sp_strs[0]].v)

    def initialize_vis_vocab(self, vis_dim, vis_sps):
        self.vis_dim = vis_dim

        self.vis = Vocabulary(self.vis_dim)
        for i, sp_str in enumerate(self.vis_sp_strs):
            self.vis.add(sp_str, vis_sps[i, :])
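
The binding chains above are the core construction: each POS/NUM pointer is the previous one circularly convolved with a unitary operator (INC or ADD), and because the operator is unitary the chain never loses norm. A minimal standalone sketch of the idea, using the nengo.spa Vocabulary API from the tests in this collection (the dimension 64 and the INC/POS names are illustrative only):

import numpy as np
from nengo.spa import Vocabulary

rng = np.random.RandomState(1)
v = Vocabulary(64, unitary=['INC'], rng=rng)
v.parse('INC+POS1')

inc = v['INC']
pos = v['POS1'].copy()
for i in range(2, 5):
    pos = pos.copy() * inc  # POSi = POS1 (*) INC^(i-1), circular convolution
    v.add('POS%d' % i, pos)

# Binding with a unitary pointer preserves length, so every POSi is unit-norm
for key in ['POS1', 'POS2', 'POS3', 'POS4']:
    assert np.allclose(np.linalg.norm(v[key].v), 1.0)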
Exemplo n.º 54
def test_text(rng):
    v = Vocabulary(64, rng=rng)
    x = v.parse("A+B+C")
    y = v.parse("-D-E-F")
    ptr = r"-?[01]\.[0-9]{2}[A-F]"
    assert re.match(";".join([ptr] * 3), v.text(x))
    assert re.match(";".join([ptr] * 2), v.text(x, maximum_count=2))
    assert re.match(ptr, v.text(x, maximum_count=1))
    assert len(v.text(x, maximum_count=10).split(";")) <= 10
    assert re.match(";".join([ptr] * 4), v.text(x, minimum_count=4))
    assert re.match(
        ";".join([ptr.replace("F", "C")] * 3),
        v.text(x, minimum_count=4, terms=["A", "B", "C"]),
    )

    assert re.match(ptr, v.text(y, threshold=0.6))
    assert v.text(y, minimum_count=None, threshold=0.6) == ""

    assert v.text(x, join=",") == v.text(x).replace(";", ",")
    assert re.match(";".join([ptr] * 2), v.text(x, normalize=True))

    assert v.text([0] * 64) == "0.00F"
    assert v.text(v["D"].v) == "1.00D"
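
For reference, text formats the strongest vocabulary matches as coefficient+key pairs joined by ";", which is exactly what the ptr regex above matches. A quick sketch (the coefficients vary with the rng):

import numpy as np
from nengo.spa import Vocabulary

v = Vocabulary(64, rng=np.random.RandomState(1))
x = v.parse("A+B+C")
print(v.text(x))  # e.g. "0.64A;0.61C;0.58B" -- sorted by similarity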
Exemplo n.º 56
def test_capital(rng):
    v = Vocabulary(16, rng=rng)
    with pytest.raises(SpaParseError):
        v.parse("a")
    with pytest.raises(SpaParseError):
        v.parse("A+B+C+a")
Exemplo n.º 57
    def initialize(self, stim_SP_labels, num_learn_actions=3, rng=0):
        if rng == 0:
            rng = np.random.RandomState(int(time.time()))

        # ############### Semantic pointer list definitions ###################
        # --- Position (enumerated) semantic pointers ---
        self.pos_sp_strs = ['POS%i' % (i + 1)
                            for i in range(self.max_enum_list_pos)]

        # --- Unitary semantic pointers
        self.unitary_sp_strs = [self.num_sp_strs[0], self.pos_sp_strs[0]]
        self.unitary_sp_strs.extend(self.ops_sp_strs)

        # --- Production system (action) semantic pointers ---
        self.ps_action_learn_sp_strs = ['A%d' % (i + 1) for i in
                                        range(num_learn_actions)]
        self.ps_action_misc_sp_strs = []
        self.ps_action_sp_strs = (self.ps_action_learn_sp_strs +
                                  self.ps_action_misc_sp_strs)

        # #################### Vocabulary definitions #########################
        # --- Primary vocabulary ---
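        # max_similarity=0.2: randomly generated SPs that are too similar to
        # existing vocab vectors are redrawn (nengo warns if the bound cannot
        # be met)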
        self.main = Vocabulary(self.sp_dim, unitary=self.unitary_sp_strs,
                               max_similarity=0.2, rng=rng)

        # --- Add in visual sp's ---
        self.main.parse('+'.join(self.misc_vis_sp_strs))
        self.main.parse('+'.join(self.ps_task_vis_sp_strs))
        for sp_str in list(stim_SP_labels):
            if sp_str not in self.num_sp_strs and \
               sp_str not in self.pos_sp_strs:
                self.main.parse(sp_str)

        # --- Add numerical sp's ---
        self.main.parse('%s+%s' % (self.ops_sp_strs[0], self.num_sp_strs[0]))
        add_sp = self.main[self.ops_sp_strs[0]]
        num_sp = self.main[self.num_sp_strs[0]].copy()
        for i in range(len(self.num_sp_strs) - 1):
            num_sp = num_sp.copy() * add_sp
            self.main.add(self.num_sp_strs[i + 1], num_sp)

        self.add_sp = add_sp

        # --- Add positional sp's ---
        self.main.parse('%s+%s' % (self.ops_sp_strs[1], self.pos_sp_strs[0]))
        inc_sp = self.main[self.ops_sp_strs[1]]
        pos_sp = self.main[self.pos_sp_strs[0]].copy()
        for i in range(len(self.pos_sp_strs) - 1):
            pos_sp = pos_sp.copy() * inc_sp
            self.main.add(self.pos_sp_strs[i + 1], pos_sp)

        self.inc_sp = inc_sp

        # --- Add production system sp's ---
        self.main.parse('+'.join(self.ps_task_sp_strs))
        self.main.parse('+'.join(self.ps_state_sp_strs))
        self.main.parse('+'.join(self.ps_dec_sp_strs))
        if len(self.ps_action_sp_strs) > 0:
            self.main.parse('+'.join(self.ps_action_sp_strs))
        self.main.parse('+'.join(self.misc_ps_sp_strs))

        # --- Add instruction processing system sp's ---
        self.main.parse('+'.join(self.instr_tag_strs))

        # ################### Visual Vocabulary definitions ###################
        self.vis_sp_strs = list(stim_SP_labels)

        # Check that the stimulus vocabulary provides all of the SP strings
        # that Spaun requires
        if not all(x in self.vis_sp_strs for x in self.num_sp_strs):
            raise RuntimeError('Vocabulator - Stimulus vocabulary does not '
                               'contain necessary Spaun NUM semantic pointer '
                               'definitions.')
        if not all(x in self.vis_sp_strs for x in self.misc_vis_sp_strs):
            raise RuntimeError('Vocabulator - Stimulus vocabulary does not '
                               'contain necessary Spaun MISC semantic '
                               'pointer definitions.')
        if not all(x in self.vis_sp_strs for x in self.ps_task_vis_sp_strs):
            raise RuntimeError('Vocabulator - Stimulus vocabulary does not '
                               'contain necessary Spaun PS semantic '
                               'pointer definitions.')

        # ################# Sub-vocabulary definitions ########################
        self.vis_main = self.main.create_subset(self.vis_sp_strs)

        self.pos = self.main.create_subset(self.pos_sp_strs)

        self.item = self.main.create_subset(self.num_sp_strs)
        self.item_1_index = self.main.create_subset(self.num_sp_strs[1:])

        self.ps_task = self.main.create_subset(self.ps_task_sp_strs)
        self.ps_state = self.main.create_subset(self.ps_state_sp_strs)
        self.ps_dec = self.main.create_subset(self.ps_dec_sp_strs)
        self.ps_cmp = self.main.create_subset(self.misc_ps_sp_strs)
        self.ps_action = self.main.create_subset(self.ps_action_sp_strs)
        self.ps_action_learn = \
            self.main.create_subset(self.ps_action_learn_sp_strs)

        self.reward = self.main.create_subset(self.reward_sp_strs)

        self.instr = self.main.create_subset(self.instr_tag_strs)

        # ############ Enumerated vocabulary definitions ######################
        # --- Enumerated vocabulary, enumerates all possible combinations of
        #     position and item vectors (for debug purposes)
        self.enum = Vocabulary(self.sp_dim, rng=rng)
        for pos in self.pos_sp_strs:
            for num in self.num_sp_strs:
                sp_str = '%s*%s' % (pos, num)
                self.enum.add(sp_str, self.main.parse(sp_str))

        self.pos1 = Vocabulary(self.sp_dim, rng=rng)
        for num in self.num_sp_strs:
            sp_str = '%s*%s' % (self.pos_sp_strs[0], num)
            self.pos1.add(sp_str, self.main.parse(sp_str))

        # ############ Instruction vocabulary definitions #####################
        # --- ANTECEDENT and CONSEQUENCE permutation transforms
        self.perm_ant = np.arange(self.sp_dim)
        self.perm_con = np.arange(self.sp_dim)
        # Shuffle with the seeded rng (not np.random) so that the instruction
        # permutations are reproducible for a given seed
        rng.shuffle(self.perm_ant)
        rng.shuffle(self.perm_con)

        self.perm_ant_inv = np.argsort(self.perm_ant)
        self.perm_con_inv = np.argsort(self.perm_con)
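
The inverse transforms rely on a numpy identity: for a permutation p of 0..n-1, argsort(p) is the permutation that undoes it, since p[argsort(p)] is sorted and therefore equal to arange(n). A standalone check (the dimension 8 is arbitrary):

import numpy as np

rng = np.random.RandomState(0)
perm = np.arange(8)
rng.shuffle(perm)
inv = np.argsort(perm)  # argsort of a permutation is its inverse

assert np.array_equal(perm[inv], np.arange(8))

x = rng.randn(8)
assert np.allclose(x[perm][inv], x)  # permute, then un-permute: round trip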