Example #1
def test_extend(rng):
    v = Vocabulary(16, rng=rng)
    v.parse("A+B")
    assert v.keys == ["A", "B"]
    assert not v.unitary

    # Test extending the vocabulary
    v.extend(["C", "D"])
    assert v.keys == ["A", "B", "C", "D"]

    # Test extending the vocabulary with various unitary options
    v.extend(["E", "F"], unitary=["E"])
    assert v.keys == ["A", "B", "C", "D", "E", "F"]
    assert v.unitary == ["E"]

    # Check if 'E' is unitary
    fft_val = np.fft.fft(v["E"].v)
    fft_imag = fft_val.imag
    fft_real = fft_val.real
    fft_norms = np.sqrt(fft_imag**2 + fft_real**2)
    assert np.allclose(fft_norms, np.ones(16))

    v.extend(["G", "H"], unitary=True)
    assert v.keys == ["A", "B", "C", "D", "E", "F", "G", "H"]
    assert v.unitary == ["E", "G", "H"]
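These snippets are taken from pytest suites, so they rely on shared fixtures (rng, seed, plus the Simulator and plt fixtures from nengo's test plugin) and module-level imports that the excerpts omit. A minimal conftest-style sketch of the simplest of these (illustrative assumptions, not the original fixtures):

import numpy as np
import pytest

import nengo
import nengo.spa
from nengo.spa import Vocabulary


@pytest.fixture
def rng():
    # Seeded NumPy RNG handed to Vocabulary for reproducible pointers.
    return np.random.RandomState(1)


@pytest.fixture
def seed():
    # Seed used when constructing nengo.Network / nengo.spa.SPA.
    return 1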
Example #2
def test_am_defaults(Simulator):
    """Default assoc memory.

    Options: auto-associative, threshold = 0.3, non-inhibitable, non-wta,
    does not output utilities or thresholded utilities.
    """

    rng = np.random.RandomState(1)

    D = 64
    vocab = Vocabulary(D, rng=rng)
    vocab.parse('A+B+C+D')

    m = nengo.Network('model', seed=123)
    with m:
        am = AssociativeMemory(vocab)
        in_node = nengo.Node(output=vocab.parse("A").v, label='input')
        out_node = nengo.Node(size_in=D, label='output')
        nengo.Connection(in_node, am.input)
        nengo.Connection(am.output, out_node, synapse=0.03)

        in_p = nengo.Probe(in_node)
        out_p = nengo.Probe(out_node)

    sim = Simulator(m)
    sim.run(1.0)

    assert np.allclose(sim.data[in_p][-10:],
                       vocab.parse("A").v,
                       atol=.1,
                       rtol=.01)
    assert np.allclose(sim.data[out_p][-10:],
                       vocab.parse("A").v,
                       atol=.1,
                       rtol=.01)
Example #3
def test_am_spa_interaction(Simulator):
    """Standard associative memory interacting with other SPA modules.

    Options: threshold = 0.5, non-inhibitable, non-wta, does not output
    utilities or thresholded utilities.
    """
    rng = np.random.RandomState(1)

    D = 16
    vocab = Vocabulary(D, rng=rng)
    vocab.parse('A+B+C+D')

    D2 = int(D / 2)
    vocab2 = Vocabulary(D2, rng=rng)
    vocab2.parse('A+B+C+D')

    def input_func(t):
        if t < 0.5:
            return '0.49*A'
        else:
            return '0.79*A'

    m = nengo.spa.SPA('model', seed=123)
    with m:
        m.buf = nengo.spa.Buffer(D)
        m.input = nengo.spa.Input(buf=input_func)

        m.am = AssociativeMemory(vocab, vocab2, threshold=0.5)

        cortical_actions = nengo.spa.Actions('am = buf')
        m.c_act = nengo.spa.Cortical(cortical_actions)

    # Check to see if model builds properly. No functionality test needed
    Simulator(m)
Example #4
def test_am_basic(Simulator, plt, seed, rng):
    """Basic associative memory test."""

    D = 64
    vocab = Vocabulary(D, rng=rng)
    vocab.parse('A+B+C+D')

    with nengo.Network('model', seed=seed) as m:
        am = AssociativeMemory(vocab)
        in_node = nengo.Node(output=vocab.parse("A").v, label='input')
        nengo.Connection(in_node, am.input)

        in_p = nengo.Probe(in_node)
        out_p = nengo.Probe(am.output, synapse=0.03)

    sim = Simulator(m)
    sim.run(0.2)
    t = sim.trange()

    plt.subplot(2, 1, 1)
    plt.plot(t, nengo.spa.similarity(sim.data[in_p], vocab))
    plt.ylabel("Input")
    plt.ylim(top=1.1)
    plt.legend(vocab.keys, loc='best')
    plt.subplot(2, 1, 2)
    plt.plot(t, nengo.spa.similarity(sim.data[out_p], vocab))
    plt.plot(t[t > 0.15], np.ones(t.shape)[t > 0.15] * 0.8, c='g', lw=2)
    plt.ylabel("Output")
    plt.legend(vocab.keys, loc='best')

    assert similarity(sim.data[in_p][t > 0.15], vocab.parse("A").v) > 0.99
    assert similarity(sim.data[out_p][t > 0.15], vocab.parse("A").v) > 0.8
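Note that the bare similarity used in the asserts is not nengo.spa.similarity (which returns a time-by-keys matrix for plotting); it behaves like a local test helper that averages the dot product of the probed data with a single target. A minimal sketch consistent with its usage here (an assumption, not the original helper):

def similarity(data, target):
    # Mean dot product of each probed timestep with the target vector(s).
    return np.mean(np.dot(data, np.asarray(target).T))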
Example #5
def test_am_spa_interaction(Simulator, seed, rng):
    """Make sure associative memory interacts with other SPA modules."""
    D = 16
    vocab = Vocabulary(D, rng=rng)
    vocab.parse('A+B+C+D')

    D2 = int(D / 2)
    vocab2 = Vocabulary(D2, rng=rng)
    vocab2.parse('A+B+C+D')

    def input_func(t):
        return '0.49*A' if t < 0.5 else '0.79*A'

    with nengo.spa.SPA(seed=seed) as m:
        m.buf = nengo.spa.Buffer(D)
        m.input = nengo.spa.Input(buf=input_func)

        m.am = AssociativeMemory(vocab, vocab2,
                                 input_keys=['A', 'B', 'C'],
                                 output_keys=['B', 'C', 'D'],
                                 default_output_key='A',
                                 threshold=0.5,
                                 inhibitable=True,
                                 wta_output=True,
                                 threshold_output=True)

        cortical_actions = nengo.spa.Actions('am = buf')
        m.c_act = nengo.spa.Cortical(cortical_actions)

    # Check to see if model builds properly. No functionality test needed
    Simulator(m)
Example #6
def test_am_spa_interaction(Simulator, seed, rng):
    """Make sure associative memory interacts with other SPA modules."""
    D = 16
    vocab = Vocabulary(D, rng=rng)
    vocab.parse("A+B+C+D")

    D2 = int(D / 2)
    vocab2 = Vocabulary(D2, rng=rng)
    vocab2.parse("A+B+C+D")

    def input_func(t):
        return "0.49*A" if t < 0.5 else "0.79*A"

    with nengo.spa.SPA(seed=seed) as m:
        m.buf = nengo.spa.Buffer(D)
        m.input = nengo.spa.Input(buf=input_func)

        m.am = AssociativeMemory(
            vocab,
            vocab2,
            input_keys=["A", "B", "C"],
            output_keys=["B", "C", "D"],
            default_output_key="A",
            threshold=0.5,
            inhibitable=True,
            wta_output=True,
            threshold_output=True,
        )

        cortical_actions = nengo.spa.Actions("am = buf")
        m.c_act = nengo.spa.Cortical(cortical_actions)

    # Check to see if model builds properly. No functionality test needed
    with Simulator(m):
        pass
Example #7
def test_subset(rng):
    v1 = Vocabulary(32, rng=rng)
    v1.parse('A+B+C+D+E+F+G')

    # Test creating a vocabulary subset
    v2 = v1.create_subset(['A', 'C', 'E'])
    assert v2.keys == ['A', 'C', 'E']
    assert v2['A'] == v1['A']
    assert v2['C'] == v1['C']
    assert v2['E'] == v1['E']
    assert v2.parent is v1

    # Test creating a subset from a subset (it should create off the parent)
    v3 = v2.create_subset(['C', 'E'])
    assert v3.parent is v2.parent and v2.parent is v1

    v3.include_pairs = True
    assert v3.key_pairs == ['C*E']
    assert not v1.include_pairs
    assert not v2.include_pairs

    # Test transform_to between subsets (should be identity transform)
    t = v1.transform_to(v2)

    assert v2.parse('A').compare(np.dot(t, v1.parse('A').v)) >= 0.99999999
Example #8
def test_readonly(rng):
    v1 = Vocabulary(32, rng=rng)
    v1.parse('A+B+C')

    v1.readonly = True

    with pytest.raises(ValueError):
        v1.parse('D')
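Read-only blocks only the creation of new pointers; expressions over keys that already exist should still parse. A quick illustrative check (my reading of the behaviour, not part of the original test):

v1.parse('A+B')  # no new pointer is required, so readonly is not violated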
Example #9
def test_transform(rng):
    v1 = Vocabulary(32, rng=rng)
    v2 = Vocabulary(64, rng=rng)
    A = v1.parse("A")
    B = v1.parse("B")
    C = v1.parse("C")

    # Test transform from v1 to v2 (full vocabulary)
    # Expected: np.dot(t, A.v) ~= v2.parse('A')
    # Expected: np.dot(t, B.v) ~= v2.parse('B')
    # Expected: np.dot(t, C.v) ~= v2.parse('C')
    t = v1.transform_to(v2)

    assert v2.parse("A").compare(np.dot(t, A.v)) > 0.95
    assert v2.parse("C+B").compare(np.dot(t, C.v + B.v)) > 0.9

    # Test transform from v1 to v2 (only 'A' and 'B')
    t = v1.transform_to(v2, keys=["A", "B"])

    assert v2.parse("A").compare(np.dot(t, A.v)) > 0.95
    assert v2.parse("B").compare(np.dot(t, C.v + B.v)) > 0.95

    # Test transform_to when either vocabulary is read-only
    v1.parse("D")
    v2.parse("E")

    # When both are read-only, transform_to shouldn't add any new items to
    # either and the transform should be using keys that are the intersection
    # of both vocabularies
    v1.readonly = True
    v2.readonly = True

    t = v1.transform_to(v2)

    assert v1.keys == ["A", "B", "C", "D"]
    assert v2.keys == ["A", "B", "C", "E"]

    # When one is read-only, transform_to should add any new items to the non
    # read-only vocabulary
    v1.readonly = False
    v2.readonly = True

    t = v1.transform_to(v2)

    assert v1.keys == ["A", "B", "C", "D", "E"]
    assert v2.keys == ["A", "B", "C", "E"]

    # When one is read-only, transform_to should add any new items to the non
    # read-only vocabulary
    v1.readonly = True
    v2.readonly = False

    t = v1.transform_to(v2)

    assert v1.keys == ["A", "B", "C", "D", "E"]
    assert v2.keys == ["A", "B", "C", "E", "D"]
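The matrix returned by transform_to behaves like a sum of outer products mapping each shared key of the source vocabulary onto the matching key of the target. A rough sketch of that construction, for intuition only (the real method additionally handles key creation and the read-only rules exercised above):

def naive_transform(v1, v2, keys):
    # Illustrative: a (v2.dimensions x v1.dimensions) matrix sending v1[k] to v2[k].
    return sum(np.outer(v2[k].v, v1[k].v) for k in keys)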
Example #10
def test_am_spa_keys_as_expressions(Simulator, plt, seed, rng):
    """Provide semantic pointer expressions as input and output keys."""
    D = 64

    vocab_in = Vocabulary(D, rng=rng)
    vocab_out = Vocabulary(D, rng=rng)

    vocab_in.parse("A+B")
    vocab_out.parse("C+D")

    in_keys = ["A", "A*B"]
    out_keys = ["C*D", "C+D"]

    with nengo.spa.SPA(seed=seed) as model:
        model.am = AssociativeMemory(
            input_vocab=vocab_in,
            output_vocab=vocab_out,
            input_keys=in_keys,
            output_keys=out_keys,
        )

        model.inp = Input(am=lambda t: "A" if t < 0.1 else "A*B")

        in_p = nengo.Probe(model.am.input)
        out_p = nengo.Probe(model.am.output, synapse=0.03)

    with Simulator(model) as sim:
        sim.run(0.2)

    # Specify t ranges
    t = sim.trange()
    t_item1 = (t > 0.075) & (t < 0.1)
    t_item2 = (t > 0.175) & (t < 0.2)

    # Modify vocabularies (for plotting purposes)
    vocab_in.add(in_keys[1], vocab_in.parse(in_keys[1]).v)
    vocab_out.add(out_keys[0], vocab_out.parse(out_keys[0]).v)

    plt.subplot(2, 1, 1)
    plt.plot(t, similarity(sim.data[in_p], vocab_in))
    plt.ylabel("Input: " + ", ".join(in_keys))
    plt.legend(vocab_in.keys, loc="best")
    plt.ylim(top=1.1)
    plt.subplot(2, 1, 2)
    plt.plot(t, similarity(sim.data[out_p], vocab_out))
    plt.plot(t[t_item1], np.ones(t.shape)[t_item1] * 0.9, c="r", lw=2)
    plt.plot(t[t_item2], np.ones(t.shape)[t_item2] * 0.91, c="g", lw=2)
    plt.plot(t[t_item2], np.ones(t.shape)[t_item2] * 0.89, c="b", lw=2)
    plt.ylabel("Output: " + ", ".join(out_keys))
    plt.legend(vocab_out.keys, loc="best")

    assert (np.mean(
        similarity(sim.data[out_p][t_item1],
                   vocab_out.parse(out_keys[0]).v,
                   normalize=True)) > 0.9)
    assert (np.mean(
        similarity(sim.data[out_p][t_item2],
                   vocab_out.parse(out_keys[1]).v,
                   normalize=True)) > 0.9)
Example #11
def test_transform(rng):
    v1 = Vocabulary(32, rng=rng)
    v2 = Vocabulary(64, rng=rng)
    A = v1.parse("A")
    B = v1.parse("B")
    C = v1.parse("C")
    t = v1.transform_to(v2)

    assert v2.parse("A").compare(np.dot(t, A.v)) > 0.95
    assert v2.parse("C+B").compare(np.dot(t, C.v + B.v)) > 0.9

    t = v1.transform_to(v2, keys=["A", "B"])

    assert v2.parse("A").compare(np.dot(t, A.v)) > 0.95
    assert v2.parse("B").compare(np.dot(t, B.v)) > 0.95
Example #12
def test_am_wta(Simulator, plt, seed, rng):
    """Test the winner-take-all ability of the associative memory."""

    D = 64
    vocab = Vocabulary(D, rng=rng)
    vocab.parse('A+B+C+D')

    def input_func(t):
        if t < 0.2:
            return vocab.parse('A+0.8*B').v
        elif t < 0.3:
            return np.zeros(D)
        else:
            return vocab.parse('0.8*A+B').v

    with nengo.Network('model', seed=seed) as m:
        am = AssociativeMemory(vocab, wta_output=True)
        in_node = nengo.Node(output=input_func, label='input')
        nengo.Connection(in_node, am.input)

        in_p = nengo.Probe(in_node)
        out_p = nengo.Probe(am.output, synapse=0.03)

    sim = Simulator(m)
    sim.run(0.5)
    t = sim.trange()
    more_a = (t > 0.15) & (t < 0.2)
    more_b = t > 0.45

    plt.subplot(2, 1, 1)
    plt.plot(t, nengo.spa.similarity(sim.data[in_p], vocab))
    plt.ylabel("Input")
    plt.ylim(top=1.1)
    plt.legend(vocab.keys, loc='best')
    plt.subplot(2, 1, 2)
    plt.plot(t, nengo.spa.similarity(sim.data[out_p], vocab))
    plt.plot(t[more_a], np.ones(t.shape)[more_a] * 0.8, c='g', lw=2)
    plt.plot(t[more_b], np.ones(t.shape)[more_b] * 0.8, c='g', lw=2)
    plt.ylabel("Output")
    plt.legend(vocab.keys, loc='best')

    assert similarity(sim.data[out_p][more_a], vocab.parse("A").v) > 0.8
    assert similarity(sim.data[out_p][more_a], vocab.parse("B").v) < 0.2
    assert similarity(sim.data[out_p][more_b], vocab.parse("B").v) > 0.8
    assert similarity(sim.data[out_p][more_b], vocab.parse("A").v) < 0.2
Example #13
def test_am_assoc_mem_threshold(Simulator):
    """Standard associative memory (differing input and output vocabularies).

    Options: threshold = 0.5, non-inhibitable, non-wta, does not output
    utilities or thresholded utilities.
    """
    rng = np.random.RandomState(1)

    D = 64
    vocab = Vocabulary(D, rng=rng)
    vocab.parse('A+B+C+D')

    D2 = int(D / 2)
    vocab2 = Vocabulary(D2, rng=rng)
    vocab2.parse('A+B+C+D')

    def input_func(t):
        if t < 0.5:
            return vocab.parse('0.49*A').v
        else:
            return vocab.parse('0.79*A').v

    m = nengo.Network('model', seed=123)
    with m:
        am = AssociativeMemory(vocab, vocab2, threshold=0.5)
        in_node = nengo.Node(output=input_func, label='input')
        out_node = nengo.Node(size_in=D2, label='output')
        nengo.Connection(in_node, am.input)
        nengo.Connection(am.output, out_node, synapse=0.03)

        in_p = nengo.Probe(in_node)
        out_p = nengo.Probe(out_node)

    sim = Simulator(m)
    sim.run(1.0)

    assert np.allclose(sim.data[in_p][490:500],
                       vocab.parse("0.49*A").v,
                       atol=.15,
                       rtol=.01)
    assert np.allclose(sim.data[in_p][-10:],
                       vocab.parse("0.79*A").v,
                       atol=.15,
                       rtol=.01)
    assert np.allclose(sim.data[out_p][490:500],
                       vocab2.parse("0").v,
                       atol=.15,
                       rtol=.01)
    assert np.allclose(sim.data[out_p][-10:],
                       vocab2.parse("A").v,
                       atol=.15,
                       rtol=.01)
Example #14
def test_text(rng):
    v = Vocabulary(64, rng=rng)
    x = v.parse("A+B+C")
    y = v.parse("-D-E-F")
    ptr = r"-?[01]\.[0-9]{2}[A-F]"
    assert re.match(";".join([ptr] * 3), v.text(x))
    assert re.match(";".join([ptr] * 2), v.text(x, maximum_count=2))
    assert re.match(ptr, v.text(x, maximum_count=1))
    assert len(v.text(x, maximum_count=10).split(";")) <= 10
    assert re.match(";".join([ptr] * 4), v.text(x, minimum_count=4))
    assert re.match(";".join([ptr.replace("F", "C")] * 3), v.text(x, minimum_count=4, terms=["A", "B", "C"]))

    assert re.match(ptr, v.text(y, threshold=0.6))
    assert v.text(y, minimum_count=None, threshold=0.6) == ""

    assert v.text(x, join=",") == v.text(x).replace(";", ",")
    assert re.match(";".join([ptr] * 2), v.text(x, normalize=True))

    assert v.text([0] * 64) == "0.00F"
    assert v.text(v["D"].v) == "1.00D"
Example #15
def test_text():
    rng = np.random.RandomState(1)
    v = Vocabulary(64, rng=rng)
    x = v.parse('A+B+C')
    y = v.parse('-D-E-F')
    assert v.text(x) == '0.99A;0.96C;0.90B'
    assert v.text(x, maximum_count=2) == '0.99A;0.96C'
    assert v.text(x, maximum_count=1) == '0.99A'
    assert v.text(x, maximum_count=10) == '0.99A;0.96C;0.90B'
    assert v.text(x, minimum_count=4) == '0.99A;0.96C;0.90B;-0.02D'
    assert v.text(y) == '0.50C;0.15B'
    assert v.text(y, threshold=0.6) == '0.50C'
    assert v.text(y, minimum_count=None, threshold=0.6) == ''
    assert (v.text(x, minimum_count=4, terms=['A', 'B', 'C']) ==
            '0.99A;0.96C;0.90B')

    assert v.text(x, join=',') == '0.99A,0.96C,0.90B'
    assert v.text(x, normalize=True) == '0.59A;0.57C;0.53B'

    assert v.text([0]*64) == '0.00F'
    assert v.text(v['D'].v) == '1.00D'
Example #16
def test_am_spa_interaction(Simulator, seed, rng):
    """Make sure associative memory interacts with other SPA modules."""
    D = 16
    vocab = Vocabulary(D, rng=rng)
    vocab.parse('A+B+C+D')

    D2 = int(D / 2)
    vocab2 = Vocabulary(D2, rng=rng)
    vocab2.parse('A+B+C+D')

    def input_func(t):
        return '0.49*A' if t < 0.5 else '0.79*A'

    with nengo.spa.SPA(seed=seed) as m:
        m.buf = nengo.spa.Buffer(D)
        m.input = nengo.spa.Input(buf=input_func)

        m.am = AssociativeMemory(vocab, vocab2, threshold=0.5)

        cortical_actions = nengo.spa.Actions('am = buf')
        m.c_act = nengo.spa.Cortical(cortical_actions)

    # Check to see if model builds properly. No functionality test needed
    Simulator(m)
Example #17
def test_create_pointer_warning(rng):
    v = Vocabulary(2, rng=rng)

    # five pointers shouldn't fit
    with pytest.warns(UserWarning):
        v.parse('A')
        v.parse('B')
        v.parse('C')
        v.parse('D')
        v.parse('E')
Example #18
def test_transform():
    v1 = Vocabulary(32, rng=np.random.RandomState(7))
    v2 = Vocabulary(64, rng=np.random.RandomState(8))
    A = v1.parse('A')
    B = v1.parse('B')
    C = v1.parse('C')
    t = v1.transform_to(v2)

    assert v2.parse('A').compare(np.dot(t, A.v)) > 0.95
    assert v2.parse('C+B').compare(np.dot(t, C.v + B.v)) > 0.95

    t = v1.transform_to(v2, keys=['A', 'B'])

    assert v2.parse('A').compare(np.dot(t, A.v)) > 0.95
    assert v2.parse('B').compare(np.dot(t, B.v)) > 0.95
Example #19
def test_am_threshold(Simulator, plt, seed, rng):
    """Associative memory thresholding with differing input/output vocabs."""
    D = 64
    vocab = Vocabulary(D, rng=rng)
    vocab.parse('A+B+C+D')

    D2 = int(D / 2)
    vocab2 = Vocabulary(D2, rng=rng)
    vocab2.parse('A+B+C+D')

    def input_func(t):
        return vocab.parse('0.49*A').v if t < 0.1 else vocab.parse('0.79*A').v

    with nengo.Network('model', seed=seed) as m:
        am = AssociativeMemory(vocab, vocab2, threshold=0.5)
        in_node = nengo.Node(output=input_func, label='input')
        nengo.Connection(in_node, am.input)

        in_p = nengo.Probe(in_node)
        out_p = nengo.Probe(am.output, synapse=0.03)

    sim = Simulator(m)
    sim.run(0.3)
    t = sim.trange()
    below_th = t < 0.1
    above_th = t > 0.25

    plt.subplot(2, 1, 1)
    plt.plot(t, nengo.spa.similarity(sim.data[in_p], vocab))
    plt.ylabel("Input")
    plt.legend(vocab.keys, loc='best')
    plt.subplot(2, 1, 2)
    plt.plot(t, nengo.spa.similarity(sim.data[out_p], vocab2))
    plt.plot(t[above_th], np.ones(t.shape)[above_th] * 0.8, c='g', lw=2)
    plt.ylabel("Output")
    plt.legend(vocab.keys, loc='best')

    assert similarity(sim.data[in_p][below_th], vocab.parse("A").v) > 0.48
    assert similarity(sim.data[in_p][above_th], vocab.parse("A").v) > 0.78
    assert similarity(sim.data[out_p][below_th], vocab2.parse("0").v) < 0.01
    assert similarity(sim.data[out_p][above_th], vocab2.parse("A").v) > 0.8
Example #20
def test_parse():
    v = Vocabulary(64)
    A = v.parse("A")
    B = v.parse("B")
    C = v.parse("C")
    assert np.allclose((A * B).v, v.parse("A * B").v)
    assert np.allclose((A * ~B).v, v.parse("A * ~B").v)
    assert np.allclose((A + B).v, v.parse("A + B").v)
    assert np.allclose((A - (B * C) * 3 + ~C).v, v.parse("A-(B*C)*3+~C").v)

    assert np.allclose(v.parse("0").v, np.zeros(64))
    assert np.allclose(v.parse("1").v, np.eye(64)[0])
    assert np.allclose(v.parse("1.7").v, np.eye(64)[0] * 1.7)

    with pytest.raises(SyntaxError):
        v.parse("A((")
    with pytest.raises(TypeError):
        v.parse('"hello"')
Example #21
class SpaunVocabulary(object):
    def __init__(self):
        self.main = None

        self.sp_dim = 512
        self.mtr_dim = 50
        self.vis_dim = 200

        # ############ Semantic pointer (strings) definitions #################
        # --- Numerical semantic pointers ---
        self.num_sp_strs = ['ZER', 'ONE', 'TWO', 'THR', 'FOR',
                            'FIV', 'SIX', 'SEV', 'EIG', 'NIN']
        self.n_num_sp = len(self.num_sp_strs)

        # --- Task semantic pointer list ---
        # W - Drawing (Copying visual input)
        # R - Recognition
        # L - Learning (Bandit Task)
        # M - Memory (forward serial recall)
        # C - Counting
        # A - Answering
        # V - Rapid Variable Creation
        # F - Fluid Induction (Ravens)
        # X - Task precursor
        # DEC - Decoding task (output to motor system)
        self.ps_task_sp_strs = ['W', 'R', 'L', 'M', 'C', 'A', 'V', 'F', 'X',
                                'DEC', 'REACT', 'INSTR', 'CMP']
        self.ps_task_vis_sp_strs = ['A', 'C', 'F', 'K', 'L', 'M', 'P', 'R',
                                    'V', 'W']
        # --- Task visual semantic pointer usage ---
        # A - Task initialization
        # F - Forward recall
        # R - Reverse recall
        # K - Q&A 'kind' probe
        # P - Q&A 'position' probe

        # --- Production system semantic pointers ---
        # DECW - Decoding state (output to motor system, but for drawing task)
        # DECI - Decoding state (output to motor system, but for induction tasks)
        self.ps_state_sp_strs = ['QAP', 'QAK', 'TRANS0', 'TRANS1', 'TRANS2',
                                 'CNT0', 'CNT1', 'LEARN', 'DIRECT', 'INSTRP',
                                 'INSTRV', 'TRANSC']
        self.ps_dec_sp_strs = ['FWD', 'REV', 'CNT', 'DECW', 'DECI', 'NONE']

        # --- Misc actions semantic pointers
        self.ps_action_sp_strs = None
        self.min_num_ps_actions = 3

        # --- Misc visual semantic pointers ---
        self.misc_vis_sp_strs = ['OPEN', 'CLOSE', 'SPACE', 'QM']

        # --- Misc state semantic pointers ---
        self.misc_ps_sp_strs = ['NO_MATCH', 'MATCH']

        # --- 'I don't know' motor response vector
        self.mtr_sp_strs = ['UNK']

        # --- List of all visual semantic pointers ---
        # self.vis_sp_strs = list(self.num_sp_strs)
        # self.vis_sp_strs.extend(self.misc_vis_sp_strs)
        # self.vis_sp_strs.extend(self.ps_task_vis_sp_strs)

        # --- Position (enumerated) semantic pointers ---
        self.pos_sp_strs = None
        self.max_enum_list_pos = 8

        # --- Operations semantic pointers
        self.ops_sp_strs = ['ADD', 'INC']

        # --- Reward semantic pointers
        self.reward_n_sp_str = self.num_sp_strs[0]
        self.reward_y_sp_str = self.num_sp_strs[1]
        self.reward_sp_strs = [self.reward_n_sp_str, self.reward_y_sp_str]

        # --- Instruction processing input and output tags
        self.instr_tag_strs = ['VIS', 'TASK', 'STATE', 'DEC', 'DATA', 'ENABLE']

    def write_header(self):
        logger.write('# Spaun Vocabulary Options:\n')
        logger.write('# -------------------------\n')
        for param_name in sorted(self.__dict__.keys()):
            param_value = getattr(self, param_name)
            if not callable(param_value) and not isinstance(param_value, list)\
               and not isinstance(param_value, Vocabulary) \
               and not isinstance(param_value, SemanticPointer) \
               and not isinstance(param_value, np.ndarray):
                logger.write('# - %s = %s\n' % (param_name, param_value))
        logger.write('\n')

    def initialize(self, stim_SP_labels, num_learn_actions=3, rng=0):
        if rng == 0:
            rng = np.random.RandomState(int(time.time()))

        # ############### Semantic pointer list definitions ###################
        # --- Position (enumerated) semantic pointers ---
        self.pos_sp_strs = ['POS%i' % (i + 1)
                            for i in range(self.max_enum_list_pos)]

        # --- Unitary semantic pointers
        self.unitary_sp_strs = [self.num_sp_strs[0], self.pos_sp_strs[0]]
        self.unitary_sp_strs.extend(self.ops_sp_strs)

        # --- Production system (action) semantic pointers ---
        self.ps_action_learn_sp_strs = ['A%d' % (i + 1) for i in
                                        range(num_learn_actions)]
        self.ps_action_misc_sp_strs = []
        self.ps_action_sp_strs = (self.ps_action_learn_sp_strs +
                                  self.ps_action_misc_sp_strs)

        # #################### Vocabulary definitions #########################
        # --- Primary vocabulary ---
        self.main = Vocabulary(self.sp_dim, unitary=self.unitary_sp_strs,
                               max_similarity=0.2, rng=rng)

        # --- Add in visual sp's ---
        self.main.parse('+'.join(self.misc_vis_sp_strs))
        self.main.parse('+'.join(self.ps_task_vis_sp_strs))
        for sp_str in list(stim_SP_labels):
            if sp_str not in self.num_sp_strs and \
               sp_str not in self.pos_sp_strs:
                self.main.parse(sp_str)

        # --- Add numerical sp's ---
        self.main.parse('%s+%s' % (self.ops_sp_strs[0], self.num_sp_strs[0]))
        add_sp = self.main[self.ops_sp_strs[0]]
        num_sp = self.main[self.num_sp_strs[0]].copy()
        for i in range(len(self.num_sp_strs) - 1):
            num_sp = num_sp.copy() * add_sp
            self.main.add(self.num_sp_strs[i + 1], num_sp)

        self.add_sp = add_sp

        # --- Add positional sp's ---
        self.main.parse('%s+%s' % (self.ops_sp_strs[1], self.pos_sp_strs[0]))
        inc_sp = self.main[self.ops_sp_strs[1]]
        pos_sp = self.main[self.pos_sp_strs[0]].copy()
        for i in range(len(self.pos_sp_strs) - 1):
            pos_sp = pos_sp.copy() * inc_sp
            self.main.add(self.pos_sp_strs[i + 1], pos_sp)

        self.inc_sp = inc_sp

        # --- Add production system sp's ---
        self.main.parse('+'.join(self.ps_task_sp_strs))
        self.main.parse('+'.join(self.ps_state_sp_strs))
        self.main.parse('+'.join(self.ps_dec_sp_strs))
        if len(self.ps_action_sp_strs) > 0:
            self.main.parse('+'.join(self.ps_action_sp_strs))
        self.main.parse('+'.join(self.misc_ps_sp_strs))

        # --- Add instruction processing system sp's ---
        self.main.parse('+'.join(self.instr_tag_strs))

        # ################### Visual Vocabulary definitions ###################
        self.vis_sp_strs = list(stim_SP_labels)

        # Visual sp str vocab check
        if (not all(x in self.vis_sp_strs for x in self.num_sp_strs)):
            raise RuntimeError("Vocabulator - Stimulus vocabulary does not " +
                               "contain necessary Spaun NUM semantic pointer" +
                               " definitions.")
        if (not all(x in self.vis_sp_strs for x in self.misc_vis_sp_strs)):
            raise RuntimeError("Vocabulator - Stimulus vocabulary does not " +
                               "contain necessary Spaun MISC semantic " +
                               "pointer definitions.")
        if (not all(x in self.vis_sp_strs for x in self.ps_task_vis_sp_strs)):
            raise RuntimeError("Vocabulator - Stimulus vocabulary does not " +
                               "contain necessary Spaun PS semantic " +
                               "pointer definitions.")

        # ################# Sub-vocabulary definitions ########################
        self.vis_main = self.main.create_subset(self.vis_sp_strs)

        self.pos = self.main.create_subset(self.pos_sp_strs)

        self.item = self.main.create_subset(self.num_sp_strs)
        self.item_1_index = self.main.create_subset(self.num_sp_strs[1:])

        self.ps_task = self.main.create_subset(self.ps_task_sp_strs)
        self.ps_state = self.main.create_subset(self.ps_state_sp_strs)
        self.ps_dec = self.main.create_subset(self.ps_dec_sp_strs)
        self.ps_cmp = self.main.create_subset(self.misc_ps_sp_strs)
        self.ps_action = self.main.create_subset(self.ps_action_sp_strs)
        self.ps_action_learn = \
            self.main.create_subset(self.ps_action_learn_sp_strs)

        self.reward = self.main.create_subset(self.reward_sp_strs)

        self.instr = self.main.create_subset(self.instr_tag_strs)

        # ############ Enumerated vocabulary definitions ######################
        # --- Enumerated vocabulary, enumerates all possible combinations of
        #     position and item vectors (for debug purposes)
        self.enum = Vocabulary(self.sp_dim, rng=rng)
        for pos in self.pos_sp_strs:
            for num in self.num_sp_strs:
                sp_str = '%s*%s' % (pos, num)
                self.enum.add(sp_str, self.main.parse(sp_str))

        self.pos1 = Vocabulary(self.sp_dim, rng=rng)
        for num in self.num_sp_strs:
            sp_str = '%s*%s' % (self.pos_sp_strs[0], num)
            self.pos1.add(sp_str, self.main.parse(sp_str))

        # ############ Instruction vocabulary definitions #####################
        # --- ANTECEDENT and CONSEQUENCE permutation transforms
        self.perm_ant = np.arange(self.sp_dim)
        self.perm_con = np.arange(self.sp_dim)
        rng.shuffle(self.perm_ant)
        rng.shuffle(self.perm_con)

        self.perm_ant_inv = np.argsort(self.perm_ant)
        self.perm_con_inv = np.argsort(self.perm_con)

    def initialize_mtr_vocab(self, mtr_dim, mtr_sps):
        self.mtr_dim = mtr_dim

        self.mtr = Vocabulary(self.mtr_dim)
        for i, sp_str in enumerate(self.num_sp_strs):
            self.mtr.add(sp_str, mtr_sps[i, :])

        self.mtr_unk = Vocabulary(self.mtr_dim)
        self.mtr_unk.add(self.mtr_sp_strs[0], mtr_sps[-1, :])

        self.mtr_disp = self.mtr.create_subset(self.num_sp_strs)
        self.mtr_disp.readonly = False
        # Disable read-only flag for display vocab so that things can be added
        self.mtr_disp.add(self.mtr_sp_strs[0],
                          self.mtr_unk[self.mtr_sp_strs[0]].v)

    def initialize_vis_vocab(self, vis_dim, vis_sps):
        if vis_sps.shape[0] != len(self.vis_sp_strs):
            raise RuntimeError('Vocabulator.initialize_vis_vocab: ' +
                               'Mismatch in shape of raw vision SPs and ' +
                               'number of vision SP labels.')

        self.vis_dim = vis_dim

        self.vis = Vocabulary(self.vis_dim)
        for i, sp_str in enumerate(self.vis_sp_strs):
            self.vis.add(sp_str, vis_sps[i, :])

    def parse_instr_sps_list(self, instr_sps_list):
        instr_sps_list_sp = self.main.parse('0')

        if len(instr_sps_list) > self.max_enum_list_pos:
            raise ValueError('Vocabulator: Too many sequential '
                             'instructions. Max: %d, Got: %d' %
                             (self.max_enum_list_pos,
                              len(instr_sps_list)))

        for i, instr_sps in enumerate(instr_sps_list):
            instr_sps_list_sp += (self.main.parse('POS%i' % (i + 1)) *
                                  self.parse_instr_sps(*instr_sps))
        return instr_sps_list_sp

    def parse_instr_sps(self, ant_sp='0', cons_sp='0'):
        # Note: The ant and con permutations are used here to separate the
        #       possible POS tags in the ant/con from the instruction POS tag.
        #       This permutation is not necessary if the instruction POS tags
        #       differ from the number-representation POS tags.
        return SemanticPointer(self.main.parse(ant_sp).v[self.perm_ant] +
                               self.main.parse(cons_sp).v[self.perm_con])
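A minimal instantiation sketch, assuming a hypothetical stimulus label set that is just large enough to satisfy the vocabulary checks in initialize (the real Spaun stimulus set is larger):

# Hypothetical usage; the label list is only enough to pass the checks.
voc = SpaunVocabulary()
stim_SP_labels = (voc.num_sp_strs + voc.misc_vis_sp_strs +
                  voc.ps_task_vis_sp_strs)
voc.initialize(stim_SP_labels, num_learn_actions=3,
               rng=np.random.RandomState(1))
print(voc.main.keys[:5])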
Example #22
def test_capital():
    v = Vocabulary(16)
    with pytest.raises(KeyError):
        v.parse("a")
    with pytest.raises(KeyError):
        v.parse("A+B+C+a")
Example #23
def test_am_complex(Simulator, plt, seed, rng):
    """Complex auto-associative memory test.

    Has a default output vector, outputs utilities, and becomes inhibited.
    """
    D = 64
    vocab = Vocabulary(D, rng=rng)
    vocab.parse('A+B+C+D+E+F')

    vocab2 = vocab.create_subset(["A", "B", "C", "D"])

    def input_func(t):
        if t < 0.25:
            return vocab.parse('A+0.8*B').v
        elif t < 0.5:
            return vocab.parse('0.8*A+B').v
        else:
            return vocab.parse('E').v

    def inhib_func(t):
        return int(t > 0.75)

    with nengo.Network('model', seed=seed) as m:
        am = AssociativeMemory(vocab2,
                               default_output_vector=vocab.parse("F").v,
                               inhibitable=True,
                               output_utilities=True,
                               output_thresholded_utilities=True)
        in_node = nengo.Node(output=input_func, label='input')
        inhib_node = nengo.Node(output=inhib_func, label='inhib')
        nengo.Connection(in_node, am.input)
        nengo.Connection(inhib_node, am.inhibit)

        in_p = nengo.Probe(in_node)
        out_p = nengo.Probe(am.output, synapse=0.03)
        utils_p = nengo.Probe(am.utilities, synapse=0.05)
        utils_th_p = nengo.Probe(am.thresholded_utilities, synapse=0.05)

    sim = Simulator(m)
    sim.run(1.0)
    t = sim.trange()
    # Input: A+0.8*B
    more_a = (t >= 0.2) & (t < 0.25)
    # Input: 0.8*A+B
    more_b = (t >= 0.45) & (t < 0.5)
    # Input: E (but E isn't in the memory vocabulary, so should output F)
    all_e = (t >= 0.7) & (t < 0.75)
    # Input: E (but inhibited, so should output nothing)
    inhib = (t >= 0.95)

    def plot(i, y, ylabel):
        plt.subplot(4, 1, i)
        plt.plot(t, y)
        plt.axvline(0.25, c='k')
        plt.axvline(0.5, c='k')
        plt.axvline(0.75, c='k')
        plt.ylabel(ylabel)
        plt.legend(vocab.keys[:y.shape[1]], loc='best', fontsize='xx-small')
    plot(1, nengo.spa.similarity(sim.data[in_p], vocab), "Input")
    plot(2, sim.data[utils_p], "Utilities")
    plot(3, sim.data[utils_th_p], "Thresholded utilities")
    plot(4, nengo.spa.similarity(sim.data[out_p], vocab), "Output")

    assert all(np.mean(sim.data[utils_p][more_a], axis=0)[:2] > [0.8, 0.5])
    assert all(np.mean(sim.data[utils_p][more_a], axis=0)[2:] < [0.01, 0.01])
    assert all(np.mean(sim.data[utils_p][more_b], axis=0)[:2] > [0.5, 0.8])
    assert all(np.mean(sim.data[utils_p][more_b], axis=0)[2:] < [0.01, 0.01])
    assert similarity(sim.data[utils_p][all_e], np.ones((1, 4))) < 0.05
    assert similarity(sim.data[utils_p][inhib], np.ones((1, 4))) < 0.05
    assert all(np.mean(sim.data[utils_th_p][more_a], axis=0)[:2] > [0.8, 0.8])
    assert all(
        np.mean(sim.data[utils_th_p][more_a], axis=0)[2:] < [0.01, 0.01])
    assert all(np.mean(sim.data[utils_th_p][more_b], axis=0)[:2] > [0.8, 0.8])
    assert all(
        np.mean(sim.data[utils_th_p][more_b], axis=0)[2:] < [0.01, 0.01])
    assert similarity(sim.data[utils_th_p][all_e], np.ones((1, 4))) < 0.05
    assert similarity(sim.data[utils_th_p][inhib], np.ones((1, 4))) < 0.05
    assert similarity(sim.data[out_p][more_a], vocab.parse("A").v) > 0.8
    assert similarity(sim.data[out_p][more_a], vocab.parse("B").v) > 0.8
    assert similarity(sim.data[out_p][more_b], vocab.parse("A").v) > 0.8
    assert similarity(sim.data[out_p][more_b], vocab.parse("B").v) > 0.8
    assert similarity(sim.data[out_p][all_e], vocab.parse("F").v) > 0.8
    assert similarity(sim.data[out_p][inhib], np.ones((1, D))) < 0.05
Example #24
# Show the sum of A and B as represented by the object Sum (left - shows the
# semantic pointer representation in Sum, right - shows high similarity with
# vectors A and B).

# Set up the environment
import nengo
import nengo.spa as spa
from nengo.spa import Vocabulary
import numpy as np

D = 32  # the dimensionality of the vectors

# Create a vocabulary
rng = np.random.RandomState(0)
vocab = Vocabulary(dimensions=D, rng=rng)
vocab.add('C', vocab.parse('A * B'))

model = spa.SPA(label="structure", vocabs=[vocab])
with model:
    model.A = spa.State(D)
    model.B = spa.State(D)
    model.C = spa.State(D, feedback=1)
    model.Sum = spa.State(D)

    actions = spa.Actions(
        'C = A * B',
        'Sum = A',
        'Sum = B'
        )

    model.cortical = spa.Cortical(actions)
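The script above only wires up the network; to actually see Sum representing A and B you still need inputs and a simulation run. A possible continuation (the input and probe choices here are illustrative):

with model:
    # Drive the A and B states with their named pointers.
    model.input = spa.Input(A='A', B='B')
    sum_probe = nengo.Probe(model.Sum.output, synapse=0.03)

with nengo.Simulator(model) as sim:
    sim.run(0.5)

# Similarity of Sum's contents with each vocabulary item at the end of the run.
print(spa.similarity(sim.data[sum_probe], vocab)[-1])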
Example #25
def test_parse(rng):
    v = Vocabulary(64, rng=rng)
    A = v.parse('A')
    B = v.parse('B')
    C = v.parse('C')
    assert np.allclose((A * B).v, v.parse('A * B').v)
    assert np.allclose((A * ~B).v, v.parse('A * ~B').v)
    assert np.allclose((A + B).v, v.parse('A + B').v)
    assert np.allclose((A - (B*C)*3 + ~C).v, v.parse('A-(B*C)*3+~C').v)

    assert np.allclose(v.parse('0').v, np.zeros(64))
    assert np.allclose(v.parse('1').v, np.eye(64)[0])
    assert np.allclose(v.parse('1.7').v, np.eye(64)[0] * 1.7)

    with pytest.raises(SyntaxError):
        v.parse('A((')
    with pytest.raises(SpaParseError):
        v.parse('"hello"')
Example #26
class SpaunVocabulary(object):
    def __init__(self):
        self.main = None

        self.sp_dim = 512
        self.mtr_dim = 50
        self.vis_dim = 200

        # ############ Semantic pointer (strings) definitions #################
        # --- Numerical semantic pointers ---
        self.num_sp_strs = ['ZER', 'ONE', 'TWO', 'THR', 'FOR',
                            'FIV', 'SIX', 'SEV', 'EIG', 'NIN']
        self.n_num_sp = len(self.num_sp_strs)

        # --- Task semantic pointer list ---
        # W - Drawing (Copying visual input)
        # R - Recognition
        # L - Learning (Bandit Task)
        # M - Memory (forward serial recall)
        # C - Counting
        # A - Answering
        # V - Rapid Variable Creation
        # F - Fluid Induction (Ravens)
        # X - Task precursor
        # DEC - Decoding task (output to motor system)
        self.ps_task_sp_strs = ['W', 'R', 'L', 'M', 'C', 'A', 'V', 'F', 'X',
                                'DEC']
        self.ps_task_vis_sp_strs = ['A', 'C', 'F', 'K', 'L', 'M', 'P', 'R',
                                    'V', 'W']
        # --- Task visual semantic pointer usage ---
        # A - Task initialization
        # F - Forward recall
        # R - Reverse recall
        # K - Q&A 'kind' probe
        # P - Q&A 'position' probe

        # --- Production system semantic pointers ---
        # DECW - Decoding state (output to motor system, but for drawing task)
        # DECI - Decoding state (output to motor system, but for induction
        #        tasks)
        self.ps_state_sp_strs = ['QAP', 'QAK', 'TRANS0', 'TRANS1', 'TRANS2',
                                 'CNT0', 'CNT1', 'LEARN']
        self.ps_dec_sp_strs = ['FWD', 'REV', 'CNT', 'DECW', 'DECI', 'NONE']

        # --- Misc actions semantic pointers
        self.ps_action_sp_strs = None
        self.min_num_ps_actions = 3

        # --- Misc visual semantic pointers ---
        self.misc_vis_sp_strs = ['OPEN', 'CLOSE', 'SPACE', 'QM']

        # --- Misc state semantic pointers ---
        self.misc_ps_sp_strs = ['MATCH', 'NO_MATCH']

        # --- 'I don't know' motor response vector ---
        self.mtr_sp_strs = ['UNK']

        # --- List of all visual semantic pointers ---
        self.vis_sp_strs = list(self.num_sp_strs)
        self.vis_sp_strs.extend(self.misc_vis_sp_strs)
        self.vis_sp_strs.extend(self.ps_task_vis_sp_strs)

        # --- Position (enumerated) semantic pointers ---
        self.pos_sp_strs = None
        self.max_enum_list_pos = 8

        # --- Operations semantic pointers
        self.ops_sp_strs = ['ADD', 'INC']

        # --- Reward semantic pointers
        self.reward_n_sp_str = self.num_sp_strs[0]
        self.reward_y_sp_str = self.num_sp_strs[1]
        self.reward_sp_strs = [self.reward_n_sp_str, self.reward_y_sp_str]

    def write_header(self):
        logger.write('# Spaun Vocabulary Options:\n')
        logger.write('# -------------------------\n')
        for param_name in sorted(self.__dict__.keys()):
            param_value = getattr(self, param_name)
            if not callable(param_value) and not isinstance(param_value, list)\
               and not isinstance(param_value, Vocabulary) \
               and not isinstance(param_value, SemanticPointer) \
               and not isinstance(param_value, np.ndarray):
                logger.write('# - %s = %s\n' % (param_name, param_value))
        logger.write('\n')

    def initialize(self, num_learn_actions=3, rng=0):
        if rng == 0:
            rng = np.random.RandomState(int(time.time()))

        # ############### Semantic pointer list definitions ###################
        # --- Position (enumerated) semantic pointers ---
        self.pos_sp_strs = ['POS%i' % (i + 1)
                            for i in range(self.max_enum_list_pos)]

        # --- Unitary semantic pointers
        self.unitary_sp_strs = [self.num_sp_strs[0], self.pos_sp_strs[0]]
        self.unitary_sp_strs.extend(self.ops_sp_strs)

        # --- Production system (action) semantic pointers ---
        self.ps_action_learn_sp_strs = ['A%d' % (i + 1) for i in
                                        range(num_learn_actions)]
        self.ps_action_misc_sp_strs = []
        self.ps_action_sp_strs = (self.ps_action_learn_sp_strs +
                                  self.ps_action_misc_sp_strs)
        # #################### Vocabulary definitions #########################
        # --- Primary vocabulary ---
        self.main = Vocabulary(self.sp_dim, unitary=self.unitary_sp_strs,
                               rng=rng)

        # --- Add numerical sp's ---
        self.main.parse('%s+%s' % (self.ops_sp_strs[0], self.num_sp_strs[0]))
        add_sp = self.main[self.ops_sp_strs[0]]
        num_sp = self.main[self.num_sp_strs[0]].copy()
        for i in range(len(self.num_sp_strs) - 1):
            num_sp = num_sp.copy() * add_sp
            self.main.add(self.num_sp_strs[i + 1], num_sp)

        self.add_sp = add_sp

        # --- Add positional sp's ---
        self.main.parse('%s+%s' % (self.ops_sp_strs[1], self.pos_sp_strs[0]))
        inc_sp = self.main[self.ops_sp_strs[1]]
        pos_sp = self.main[self.pos_sp_strs[0]].copy()
        for i in range(len(self.pos_sp_strs) - 1):
            pos_sp = pos_sp.copy() * inc_sp
            self.main.add(self.pos_sp_strs[i + 1], pos_sp)

        self.inc_sp = inc_sp
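
        # Because ADD and INC are unitary, repeated binding preserves vector
        # length, so the chains ZER, ZER*ADD, ZER*ADD*ADD, ... and POS1,
        # POS1*INC, ... stay well-normalized and can be stepped in either
        # direction by binding with the operator or its inverse.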

        # --- Add other visual sp's ---
        self.main.parse('+'.join(self.misc_vis_sp_strs))
        self.main.parse('+'.join(self.ps_task_vis_sp_strs))

        # --- Add production system sp's ---
        self.main.parse('+'.join(self.ps_task_sp_strs))
        self.main.parse('+'.join(self.ps_state_sp_strs))
        self.main.parse('+'.join(self.ps_dec_sp_strs))
        if len(self.ps_action_sp_strs) > 0:
            self.main.parse('+'.join(self.ps_action_sp_strs))
        self.main.parse('+'.join(self.misc_ps_sp_strs))

        # ################# Sub-vocabulary definitions ########################
        self.vis_main = self.main.create_subset(self.vis_sp_strs)

        self.pos = self.main.create_subset(self.pos_sp_strs)

        self.item = self.main.create_subset(self.num_sp_strs)

        self.ps_task = self.main.create_subset(self.ps_task_sp_strs)
        self.ps_state = self.main.create_subset(self.ps_state_sp_strs)
        self.ps_dec = self.main.create_subset(self.ps_dec_sp_strs)
        self.ps_cmp = self.main.create_subset(self.misc_ps_sp_strs)
        self.ps_action = self.main.create_subset(self.ps_action_sp_strs)
        self.ps_action_learn = \
            self.main.create_subset(self.ps_action_learn_sp_strs)

        self.reward = self.main.create_subset(self.reward_sp_strs)

        # ############ Enumerated vocabulary definitions ######################
        # --- Enumerated vocabulary, enumerates all possible combinations of
        #     position and item vectors (for debug purposes)
        self.enum = Vocabulary(self.sp_dim, rng=rng)
        for pos in self.pos_sp_strs:
            for num in self.num_sp_strs:
                sp_str = '%s*%s' % (pos, num)
                self.enum.add(sp_str, self.main.parse(sp_str))

        self.pos1 = Vocabulary(self.sp_dim, rng=rng)
        for num in self.num_sp_strs:
            sp_str = '%s*%s' % (self.pos_sp_strs[0], num)
            self.pos1.add(sp_str, self.main.parse(sp_str))

    def initialize_mtr_vocab(self, mtr_dim, mtr_sps):
        self.mtr_dim = mtr_dim

        self.mtr = Vocabulary(self.mtr_dim)
        for i, sp_str in enumerate(self.num_sp_strs):
            self.mtr.add(sp_str, mtr_sps[i, :])

        self.mtr_unk = Vocabulary(self.mtr_dim)
        self.mtr_unk.add(self.mtr_sp_strs[0], mtr_sps[-1, :])

        self.mtr_disp = self.mtr.create_subset(self.num_sp_strs)
        self.mtr_disp.readonly = False
        # Disable read-only flag for display vocab so that things can be added
        self.mtr_disp.add(self.mtr_sp_strs[0],
                          self.mtr_unk[self.mtr_sp_strs[0]].v)

    def initialize_vis_vocab(self, vis_dim, vis_sps):
        self.vis_dim = vis_dim

        self.vis = Vocabulary(self.vis_dim)
        for i, sp_str in enumerate(self.vis_sp_strs):
            self.vis.add(sp_str, vis_sps[i, :])
Example No. 47
import numpy as np
from nengo.spa import Vocabulary

dim = 64
rng = np.random.RandomState(0)
vocab = Vocabulary(dimensions=dim, rng=rng)

mysent = "Boys chase dogs.".upper()


def custom_parser(sent):
    # Assume S V O
    S, V, O = sent[:-1].split()
    return (S, V, O)


for w in custom_parser(mysent):
    w_up = w.upper()
    w_sp = vocab.parse(w_up)  # w_up is already upper-cased above
    exec("{} = w_sp".format(w_up))  # binds BOYS, CHASE and DOGS as globals
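
# A dict keeps the name-to-pointer mapping explicit and avoids exec
# (an added sketch, not part of the original snippet):
word_sps = {w.upper(): vocab.parse(w.upper()) for w in custom_parser(mysent)}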

# BOY = vocab.parse('BOY')
# DOG = vocab.parse('DOG')
# CHASE = vocab.parse('CHASE')
# HUG = vocab.parse('HUG')

AGENT = vocab.parse('AGENT')
VERB = vocab.parse('VERB')
THEME = vocab.parse('THEME')


def conv_expression(parsed):
    S, V, O = parsed
    return "p = VERB * {} + AGENT * {} + THEME * {}".format(V, S, O)
Example No. 48
    
    # Creating the bind network
    bind = nengo.networks.CircularConvolution(n_neurons=N_conv, dimensions=dim)
    nengo.Connection(A, bind.A)
    nengo.Connection(B, bind.B)
    nengo.Connection(bind.output, D) 
    
    # Creating the unbind network
    unbind = nengo.networks.CircularConvolution(n_neurons=N_conv, 
                                    dimensions=dim, invert_a=True)
    nengo.Connection(C, unbind.A)
    nengo.Connection(D, unbind.B)
    nengo.Connection(unbind.output, E)
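    # With invert_a=True the network convolves ~C with D, i.e. it unbinds the
    # cue C from the bound pair D, leaving approximately the pointer that was
    # bound with C.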

    # Getting semantic pointer values
    CIRCLE = vocab.parse('CIRCLE').v
    BLUE = vocab.parse('BLUE').v
    RED = vocab.parse('RED').v
    SQUARE = vocab.parse('SQUARE').v
    ZERO = [0] * dim
    
    # Function for providing color input
    def color_input(t):
        if (t // 0.5) % 2 == 0:
            return RED
        else:
            return BLUE
        
    # Function for providing shape input; the original snippet is truncated
    # here, so the body below mirrors color_input (shape order assumed).
    def shape_input(t):
        if (t // 0.5) % 2 == 0:
            return CIRCLE
        else:
            return SQUARE
Example No. 49
pos_sp_strs = ['POS%i' % (i + 1) for i in range(cfg.max_enum_list_pos)]

# --- Operations semantic pointers
ops_sp_strs = ['ADD', 'INC']

# --- Unitary semantic pointers
unitary_sp_strs = [num_sp_strs[0], pos_sp_strs[0]]
unitary_sp_strs.extend(ops_sp_strs)


# ####################### Vocabulary definitions ##############################
# --- Primary vocabulary ---
vocab = Vocabulary(cfg.sp_dim, unitary=unitary_sp_strs, rng=cfg.rng)

# --- Add numerical sp's ---
vocab.parse('%s+%s' % (ops_sp_strs[0], num_sp_strs[0]))
add_sp = vocab[ops_sp_strs[0]]
num_sp = vocab[num_sp_strs[0]].copy()
for i in range(len(num_sp_strs) - 1):
    num_sp = num_sp.copy() * add_sp
    vocab.add(num_sp_strs[i + 1], num_sp)

# --- Add positional sp's ---
vocab.parse('%s+%s' % (ops_sp_strs[1], pos_sp_strs[0]))
inc_sp = vocab[ops_sp_strs[1]]
pos_sp = vocab[pos_sp_strs[0]].copy()
for i in range(len(pos_sp_strs) - 1):
    pos_sp = pos_sp.copy() * inc_sp
    vocab.add(pos_sp_strs[i + 1], pos_sp)

# --- Add other visual sp's ---
Example No. 50
# Graphs show the colour, shape and cue inputs. The last graph
# shows that the output is most similar to the semantic pointer which was
# initially bound to the given cue. For example, when SQUARE is
# provided as a cue, the output is most similar to BLUE.

import nengo
import nengo.spa as spa
from nengo.spa import Vocabulary
import numpy as np

D = 32  # the dimensionality of the vectors
rng = np.random.RandomState(7)
vocab = Vocabulary(dimensions=D, rng=rng, max_similarity=0.1)

# Adding semantic pointers to the vocabulary
CIRCLE = vocab.parse('CIRCLE')
BLUE = vocab.parse('BLUE')
RED = vocab.parse('RED')
SQUARE = vocab.parse('SQUARE')
vocab.add('ZERO', [0] * D)
ZERO = vocab['ZERO']  # Vocabulary.add returns None, so fetch the pointer

model = spa.SPA(label="Question Answering with Memory", vocabs=[vocab])
with model:

    model.A = spa.State(D, label="color")
    model.B = spa.State(D, label="shape")
    model.C = spa.State(D, label="cue")
    model.D = spa.State(D, label="bound")
    model.E = spa.State(D, label="output")
    model.memory = spa.State(D, feedback=1, label="memory")
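
    # The snippet is truncated here; a minimal sketch of the wiring the
    # description above implies (assumed, not from the source):
    model.cortical = spa.Cortical(spa.Actions(
        'D = A * B',        # bind colour and shape
        'memory = D',       # accumulate the binding in memory
        'E = memory * ~C',  # unbind with the cue to answer
    ))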
Example No. 51
def test_am_complex(Simulator, plt, seed, rng):
    """Complex auto-associative memory test.

    Has a default output vector, outputs utilities, and becomes inhibited.
    """
    D = 64
    vocab = Vocabulary(D, rng=rng)
    vocab.parse('A+B+C+D+E+F')

    vocab2 = vocab.create_subset(["A", "B", "C", "D"])

    def input_func(t):
        if t < 0.25:
            return vocab.parse('A+0.8*B').v
        elif t < 0.5:
            return vocab.parse('0.8*A+B').v
        else:
            return vocab.parse('E').v

    def inhib_func(t):
        return int(t > 0.75)
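
    # inhib_func steps from 0 to 1 at t = 0.75 s, shutting the memory off
    # for the final quarter of the run.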

    with nengo.Network('model', seed=seed) as m:
        am = AssociativeMemory(vocab2,
                               default_output_vector=vocab.parse("F").v,
                               inhibitable=True,
                               output_utilities=True,
                               output_thresholded_utilities=True)
        in_node = nengo.Node(output=input_func, label='input')
        inhib_node = nengo.Node(output=inhib_func, label='inhib')
        nengo.Connection(in_node, am.input)
        nengo.Connection(inhib_node, am.inhibit)

        in_p = nengo.Probe(in_node)
        out_p = nengo.Probe(am.output, synapse=0.03)
        utils_p = nengo.Probe(am.utilities, synapse=0.05)
        utils_th_p = nengo.Probe(am.thresholded_utilities, synapse=0.05)

    sim = Simulator(m)
    sim.run(1.0)
    t = sim.trange()
    # Input: A+0.8B
    more_a = (t >= 0.2) & (t < 0.25)
    # Input: 0.8B+A
    more_b = (t >= 0.45) & (t < 0.5)
    # Input: E (but E isn't in the memory vocabulary, so should output F)
    all_e = (t >= 0.7) & (t < 0.75)
    # Input: E (but inhibited, so should output nothing)
    inhib = (t >= 0.95)

    def plot(i, y, ylabel):
        plt.subplot(4, 1, i)
        plt.plot(t, y)
        plt.axvline(0.25, c='k')
        plt.axvline(0.5, c='k')
        plt.axvline(0.75, c='k')
        plt.ylabel(ylabel)
        plt.legend(vocab.keys[:y.shape[1]], loc='best', fontsize='xx-small')

    plot(1, nengo.spa.similarity(sim.data[in_p], vocab), "Input")
    plot(2, sim.data[utils_p], "Utilities")
    plot(3, sim.data[utils_th_p], "Thresholded utilities")
    plot(4, nengo.spa.similarity(sim.data[out_p], vocab), "Output")

    assert all(np.mean(sim.data[utils_p][more_a], axis=0)[:2] > [0.8, 0.5])
    assert all(np.mean(sim.data[utils_p][more_a], axis=0)[2:] < [0.01, 0.01])
    assert all(np.mean(sim.data[utils_p][more_b], axis=0)[:2] > [0.5, 0.8])
    assert all(np.mean(sim.data[utils_p][more_b], axis=0)[2:] < [0.01, 0.01])
    assert similarity(sim.data[utils_p][all_e], np.ones((1, 4))) < 0.05
    assert similarity(sim.data[utils_p][inhib], np.ones((1, 4))) < 0.05
    assert all(np.mean(sim.data[utils_th_p][more_a], axis=0)[:2] > [0.8, 0.8])
    assert all(
        np.mean(sim.data[utils_th_p][more_a], axis=0)[2:] < [0.01, 0.01])
    assert all(np.mean(sim.data[utils_th_p][more_b], axis=0)[:2] > [0.8, 0.8])
    assert all(
        np.mean(sim.data[utils_th_p][more_b], axis=0)[2:] < [0.01, 0.01])
    assert similarity(sim.data[utils_th_p][all_e], np.ones((1, 4))) < 0.05
    assert similarity(sim.data[utils_th_p][inhib], np.ones((1, 4))) < 0.05
    assert similarity(sim.data[out_p][more_a], vocab.parse("A").v) > 0.8
    assert similarity(sim.data[out_p][more_a], vocab.parse("B").v) > 0.8
    assert similarity(sim.data[out_p][more_b], vocab.parse("A").v) > 0.8
    assert similarity(sim.data[out_p][more_b], vocab.parse("B").v) > 0.8
    assert similarity(sim.data[out_p][all_e], vocab.parse("F").v) > 0.8
    assert similarity(sim.data[out_p][inhib], np.ones((1, D))) < 0.05
Example No. 52
def test_am_default_output_inhibit_utilities(Simulator):
    """Auto-associative memory (non-wta) complex test.

    Options: defaults to predefined vector if no match is found,
    threshold = 0.3, inhibitable, non-wta, outputs utilities and thresholded
    utilities.
    """
    rng = np.random.RandomState(1)

    D = 64
    vocab = Vocabulary(D, rng=rng)
    vocab.parse('A+B+C+D+E+F')

    vocab2 = vocab.create_subset(["A", "B", "C", "D"])

    def input_func(t):
        if t < 0.25:
            return vocab.parse('A+0.8*B').v
        elif t < 0.5:
            return vocab.parse('0.8*A+B').v
        else:
            return vocab.parse('E').v

    def inhib_func(t):
        return int(t > 0.75)

    m = nengo.Network('model', seed=123)
    with m:
        am = AssociativeMemory(vocab2,
                               default_output_vector=vocab.parse("F").v,
                               inhibitable=True, output_utilities=True,
                               output_thresholded_utilities=True)
        in_node = nengo.Node(output=input_func, label='input')
        inhib_node = nengo.Node(output=inhib_func, label='inhib')
        out_node = nengo.Node(size_in=D, label='output')
        utils_node = nengo.Node(size_in=4, label='utils')
        utils_th_node = nengo.Node(size_in=4, label='utils_th')
        nengo.Connection(in_node, am.input)
        nengo.Connection(inhib_node, am.inhibit)
        nengo.Connection(am.output, out_node, synapse=0.03)
        nengo.Connection(am.utilities, utils_node, synapse=0.05)
        nengo.Connection(am.thresholded_utilities, utils_th_node, synapse=0.05)

        in_p = nengo.Probe(in_node)
        out_p = nengo.Probe(out_node)
        utils_p = nengo.Probe(utils_node)
        utils_th_p = nengo.Probe(utils_th_node)

    sim = Simulator(m)
    sim.run(1.0)

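    # With the default dt of 1 ms, a slice like [240:250] covers
    # t = 0.24-0.25 s, i.e. the settled response just before each input or
    # inhibition change.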
    assert np.allclose(sim.data[in_p][240:250], vocab.parse("A+0.8*B").v,
                       atol=.1, rtol=.01)
    assert np.allclose(sim.data[in_p][490:500], vocab.parse("0.8*A+B").v,
                       atol=.1, rtol=.01)
    assert np.allclose(sim.data[in_p][-10:], vocab.parse("E").v,
                       atol=.1, rtol=.01)
    assert np.allclose(sim.data[out_p][240:250], vocab.parse("A+B").v,
                       atol=.2, rtol=.05)
    assert np.allclose(sim.data[out_p][490:500], vocab.parse("A+B").v,
                       atol=.1, rtol=.01)
    assert np.allclose(sim.data[out_p][740:750], vocab.parse("F").v,
                       atol=.1, rtol=.01)
    assert np.allclose(sim.data[out_p][-10:], vocab.parse("0").v,
                       atol=.1, rtol=.01)
    assert np.allclose(sim.data[utils_p][240:250], [1, 0.75, 0, 0],
                       atol=.1, rtol=.01)
    assert np.allclose(sim.data[utils_p][490:500], [0.75, 1, 0, 0],
                       atol=.1, rtol=.01)
    assert np.allclose(sim.data[utils_p][740:750], [0, 0, 0, 0],
                       atol=.1, rtol=.01)
    assert np.allclose(sim.data[utils_p][-10:], [0, 0, 0, 0],
                       atol=.1, rtol=.01)
    assert np.allclose(sim.data[utils_th_p][240:250], [1.05, 1.05, 0, 0],
                       atol=.2, rtol=.05)
    assert np.allclose(sim.data[utils_th_p][490:500], [1.05, 1.05, 0, 0],
                       atol=.1, rtol=.05)
    assert np.allclose(sim.data[utils_th_p][740:750], [0, 0, 0, 0],
                       atol=.1, rtol=.01)
    assert np.allclose(sim.data[utils_th_p][-10:], [0, 0, 0, 0],
                       atol=.1, rtol=.01)
Example No. 53
def test_capital(rng):
    v = Vocabulary(16, rng=rng)
    with pytest.raises(KeyError):
        v.parse('a')
    with pytest.raises(KeyError):
        v.parse('A+B+C+a')
Example No. 54
    nengo.Connection(channel.output, memory.input)
    
    # Creating the unbind network
    unbind = nengo.networks.CircularConvolution(n_neurons=N_conv, 
                    dimensions=dim, invert_a=True)
    nengo.Connection(visual.output, unbind.A)
    nengo.Connection(memory.output, unbind.B)
    nengo.Connection(unbind.output, motor.input)
    
    # Creating the basal ganglia and the thalamus network
    BG = nengo.networks.BasalGanglia(dimensions=2)  
    thal = nengo.networks.Thalamus(dimensions=2)
    nengo.Connection(BG.output, thal.input, synapse=0.01)
    
    # Defining the transforms for connecting the visual input to the BG
    trans0 = np.array([vocab.parse('STATEMENT').v])
    trans1 = np.array([vocab.parse('QUESTION').v])
    nengo.Connection(visual.output, BG.input[0], transform=trans0)
    nengo.Connection(visual.output, BG.input[1], transform=trans1) 
    
    # Connecting the thalamus output to the two gates gating the channel
    # and the motor populations
    passthrough = nengo.Ensemble(n_neurons=N, dimensions=2)
    nengo.Connection(thal.output, passthrough)
 
    gate0 = nengo.Ensemble(N, 1, label='Gate0')
    nengo.Connection(passthrough[0], gate0, function=xBiased, synapse=0.01)    
    gate1 = nengo.Ensemble(N, 1, label='Gate1')
    nengo.Connection(passthrough[1], gate1, function=xBiased, synapse=0.01)
    
    for ensemble in channel.ea_ensembles:
Example No. 55
def test_parse(rng):
    v = Vocabulary(64, rng=rng)
    A = v.parse("A")
    B = v.parse("B")
    C = v.parse("C")
    assert np.allclose((A * B).v, v.parse("A * B").v)
    assert np.allclose((A * ~B).v, v.parse("A * ~B").v)
    assert np.allclose((A + B).v, v.parse("A + B").v)
    assert np.allclose((A - (B * C) * 3 + ~C).v, v.parse("A-(B*C)*3+~C").v)

    assert np.allclose(v.parse("0").v, np.zeros(64))
    assert np.allclose(v.parse("1").v, np.eye(64)[0])
    assert np.allclose(v.parse("1.7").v, np.eye(64)[0] * 1.7)

    with pytest.raises(SyntaxError):
        v.parse("A((")
    with pytest.raises(SpaParseError):
        v.parse('"hello"')
Example No. 57
def test_capital(rng):
    v = Vocabulary(16, rng=rng)
    with pytest.raises(SpaParseError):
        v.parse("a")
    with pytest.raises(SpaParseError):
        v.parse("A+B+C+a")
Example No. 58
def test_parse(rng):
    v = Vocabulary(64, rng=rng)
    A = v.parse('A')
    B = v.parse('B')
    C = v.parse('C')
    assert np.allclose((A * B).v, v.parse('A * B').v)
    assert np.allclose((A * ~B).v, v.parse('A * ~B').v)
    assert np.allclose((A + B).v, v.parse('A + B').v)
    assert np.allclose((A - (B*C)*3 + ~C).v, v.parse('A-(B*C)*3+~C').v)

    assert np.allclose(v.parse('0').v, np.zeros(64))
    assert np.allclose(v.parse('1').v, np.eye(64)[0])
    assert np.allclose(v.parse('1.7').v, np.eye(64)[0] * 1.7)

    with pytest.raises(SyntaxError):
        v.parse('A((')
    with pytest.raises(TypeError):
        v.parse('"hello"')