# Example 1 (0)
def test_add_raises_exception_for_algebra_mismatch():
    """Adding a pointer with a foreign algebra fails unless reinterpreted.

    A ``SemanticPointer`` built on ``VtbAlgebra`` cannot be added to an
    ``HrrAlgebra`` vocabulary directly; ``reinterpret`` makes it acceptable.
    """
    vocab = Vocabulary(4, algebra=HrrAlgebra())
    mismatched = SemanticPointer(np.ones(4), algebra=VtbAlgebra())
    # Direct add must be rejected because the algebras differ.
    with pytest.raises(ValidationError,
                       match="different vocabulary or algebra"):
        vocab.add("V", mismatched)
    # After reinterpreting under the vocabulary's algebra, the add succeeds.
    vocab.add("V", mismatched.reinterpret(vocab))
# Example 2 (0)
def test_added_algebra_match(rng):
    """A pointer created by a vocabulary carries that vocabulary's algebra.

    Checks that both the freshly created pointer and the stored copy report
    the ``VtbAlgebra`` singleton, and that the stored copy records its
    owning vocabulary and name.
    """
    vocab = Vocabulary(4, algebra=VtbAlgebra())
    pointer = vocab.create_pointer()
    # Algebra instances are singletons, so identity comparison is valid.
    assert pointer.algebra is VtbAlgebra()
    vocab.add("V", pointer)
    stored = vocab["V"]
    assert stored.vocab is vocab
    assert stored.algebra is VtbAlgebra()
    assert stored.name == "V"
def test_am_spa_keys_as_expressions(Simulator, plt, seed, rng):
    """Provide semantic pointer expressions as input and output keys.

    Builds a thresholding associative memory that maps the expressions
    ``A -> C*D`` and ``A*B -> C+D``, drives it with ``A`` for the first
    0.1 s and ``A*B`` afterwards, and asserts that the output matches the
    corresponding target expression at the end of each input phase.
    """
    d = 64

    vocab_in = Vocabulary(d, pointer_gen=rng)
    vocab_out = Vocabulary(d, pointer_gen=rng)

    vocab_in.populate("A; B")
    vocab_out.populate("C; D")

    in_keys = ["A", "A*B"]
    out_keys = ["C*D", "C+D"]
    mapping = dict(zip(in_keys, out_keys))

    with spa.Network(seed=seed) as m:
        m.am = ThresholdingAssocMem(threshold=0.3,
                                    input_vocab=vocab_in,
                                    output_vocab=vocab_out,
                                    mapping=mapping)

        # Switch the input expression from "A" to "A*B" at t = 0.1 s.
        m.inp = spa.Transcode(lambda t: "A" if t < 0.1 else "A*B",
                              output_vocab=vocab_in)
        m.inp >> m.am

        in_p = nengo.Probe(m.am.input)
        out_p = nengo.Probe(m.am.output, synapse=0.03)

    # Use the Simulator fixture (not nengo.Simulator directly) so alternate
    # backends can run this test.
    with Simulator(m) as sim:
        sim.run(0.2)

    # Evaluation windows: the settled tail of each input phase.
    t = sim.trange()
    t_item1 = (t > 0.075) & (t < 0.1)
    t_item2 = (t > 0.175) & (t < 0.2)

    # Add the bound expressions under plain names so they show up with
    # readable labels in the similarity plots below.
    vocab_in.add("AxB", vocab_in.parse(in_keys[1]).v)
    vocab_out.add("CxD", vocab_out.parse(out_keys[0]).v)

    plt.subplot(2, 1, 1)
    plt.plot(t, similarity(sim.data[in_p], vocab_in))
    plt.ylabel("Input: " + ", ".join(in_keys))
    plt.legend(vocab_in.keys(), loc="best")
    plt.ylim(top=1.1)
    plt.subplot(2, 1, 2)
    for t_item, c, k in zip([t_item1, t_item2], ["b", "g"], out_keys):
        plt.plot(
            t,
            similarity(sim.data[out_p], [vocab_out.parse(k).v],
                       normalize=True),
            label=k,
            c=c,
        )
        # Horizontal marker at 0.9 over the window where this key should win.
        plt.plot(t[t_item], np.ones(t.shape)[t_item] * 0.9, c=c, lw=2)
    plt.ylabel("Output: " + ", ".join(out_keys))
    plt.legend(loc="best")

    # Each phase's output must closely match its mapped target expression.
    assert (np.mean(
        similarity(sim.data[out_p][t_item1],
                   vocab_out.parse(out_keys[0]).v,
                   normalize=True)) > 0.9)
    assert (np.mean(
        similarity(sim.data[out_p][t_item2],
                   vocab_out.parse(out_keys[1]).v,
                   normalize=True)) > 0.9)
# Example 4 (0)
def test_add(rng):
    """Vectors passed to ``Vocabulary.add`` are stacked in insertion order."""
    vocab = Vocabulary(3, pointer_gen=rng)
    # Add three named vectors and check they appear row-by-row in .vectors.
    for name, vec in (('A', [1, 2, 3]), ('B', [4, 5, 6]), ('C', [7, 8, 9])):
        vocab.add(name, vec)
    expected = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
    assert np.allclose(vocab.vectors, expected)