Code Example #1
File: test_assoc_mem.py  Project: nagyistoce/nengo
def test_am_wta(Simulator, plt, seed, rng):
    """Test the winner-take-all ability of the associative memory."""

    D = 64
    vocab = make_vocab(4, D, rng)

    def input_func(t):
        if t < 0.2:
            return vocab[0, :] + 0.8 * vocab[1, :]
        elif t < 0.3:
            return np.zeros(D)
        else:
            return 0.8 * vocab[0, :] + vocab[1, :]

    with nengo.Network('model', seed=seed) as m:
        am = AssociativeMemory(vocab)
        am.add_wta_network()

        in_node = nengo.Node(output=input_func, label='input')
        nengo.Connection(in_node, am.input)

        in_p = nengo.Probe(in_node)
        out_p = nengo.Probe(am.output, synapse=0.03)
        utils_p = nengo.Probe(am.utilities, synapse=0.03)

    sim = Simulator(m)
    sim.run(0.5)
    t = sim.trange()
    more_a = (t > 0.15) & (t < 0.2)
    more_b = t > 0.45

    plt.subplot(2, 1, 1)
    plt.plot(t, np.dot(sim.data[in_p], vocab.T))
    plt.ylabel("Input")
    plt.ylim(top=1.1)
    plt.subplot(2, 1, 2)
    plt.plot(t, np.dot(sim.data[out_p], vocab.T))
    plt.plot(t[more_a], np.ones(t.shape)[more_a] * 0.9, c='g', lw=2)
    plt.plot(t[more_b], np.ones(t.shape)[more_b] * 0.9, c='g', lw=2)
    plt.ylabel("Output")

    assert similarity(sim.data[out_p][more_a], vocab[0, :]) > 0.79
    assert similarity(sim.data[out_p][more_a], vocab[1, :]) < 0.1
    assert similarity(sim.data[out_p][more_b], vocab[1, :]) > 0.79
    assert similarity(sim.data[out_p][more_b], vocab[0, :]) < 0.1
    assert similarity(sim.data[utils_p][more_a],
                      np.array([1, 0, 0, 0])) > 0.95
    assert similarity(sim.data[utils_p][more_a],
                      np.array([0, 1, 1, 1])) < 0.001
    assert similarity(sim.data[utils_p][more_b],
                      np.array([0, 1, 0, 0])) > 0.95
    assert similarity(sim.data[utils_p][more_b],
                      np.array([1, 0, 1, 1])) < 0.001
Code Example #2
def test_repeat_config_warning():
    """tests a warning is run on repeat config"""
    with nengo.Network():
        test_am = AssociativeMemory([0])

    test_am.add_threshold_to_outputs()
    with pytest.warns(UserWarning, match="already configured with thresholded outputs"):
        test_am.add_threshold_to_outputs()

    test_am.add_wta_network()
    with pytest.warns(UserWarning, match="already configured with a WTA network"):
        test_am.add_wta_network()
Code Example #3
def test_am_wta(Simulator, plt, seed, rng):
    """Test the winner-take-all ability of the associative memory."""

    D = 64
    vocab = make_vocab(4, D, rng)

    def input_func(t):
        if t < 0.2:
            return vocab[0, :] + 0.8 * vocab[1, :]
        elif t < 0.3:
            return np.zeros(D)
        else:
            return 0.8 * vocab[0, :] + vocab[1, :]

    with nengo.Network("model", seed=seed) as m:
        am = AssociativeMemory(vocab)
        am.add_wta_network()

        in_node = nengo.Node(output=input_func, label="input")
        nengo.Connection(in_node, am.input)

        in_p = nengo.Probe(in_node)
        out_p = nengo.Probe(am.output, synapse=0.03)
        utils_p = nengo.Probe(am.utilities, synapse=0.03)

    with Simulator(m) as sim:
        sim.run(0.5)
    t = sim.trange()
    more_a = (t > 0.15) & (t < 0.2)
    more_b = t > 0.45

    plt.subplot(2, 1, 1)
    plt.plot(t, np.dot(sim.data[in_p], vocab.T))
    plt.ylabel("Input")
    plt.ylim(top=1.1)
    plt.subplot(2, 1, 2)
    plt.plot(t, np.dot(sim.data[out_p], vocab.T))
    plt.plot(t[more_a], np.ones(t.shape)[more_a] * 0.9, c="g", lw=2)
    plt.plot(t[more_b], np.ones(t.shape)[more_b] * 0.9, c="g", lw=2)
    plt.ylabel("Output")

    assert similarity(sim.data[out_p][more_a], vocab[0, :]) > 0.79
    assert similarity(sim.data[out_p][more_a], vocab[1, :]) < 0.1
    assert similarity(sim.data[out_p][more_b], vocab[1, :]) > 0.79
    assert similarity(sim.data[out_p][more_b], vocab[0, :]) < 0.1
    assert similarity(sim.data[utils_p][more_a], np.array([1, 0, 0, 0])) > 0.95
    assert similarity(sim.data[utils_p][more_a], np.array([0, 1, 1, 1])) < 0.001
    assert similarity(sim.data[utils_p][more_b], np.array([0, 1, 0, 0])) > 0.95
    assert similarity(sim.data[utils_p][more_b], np.array([1, 0, 1, 1])) < 0.001
Code Example #4
def test_associativememory_edge_cases(seed, rng):
    """Tests that edge case code runs without error

    TODO: In the future, these features should be tested in an integration test.
    """
    vocab = make_vocab(4, 64, rng)
    out_vectors = rng.uniform(-1, 1, size=(4, 3))

    with nengo.Network(seed=seed):
        # test that an iterable threshold works
        am = AssociativeMemory(vocab, threshold=[0.1, 0.2, 0.3, 0.4])

        am.add_threshold_to_outputs()

        # test add_output_mapping works when `thresh_ens is not None`
        am.add_output_mapping("test", out_vectors)
        inp, out = am.thresh_ens.output, am.test
        conn = [c for c in am.out_conns if c.pre is inp and c.post is out][0]
        assert np.allclose(conn.transform.init, out_vectors.T)

        # test add_default_output_vector works when `thresh_ens is not None`
        am.add_default_output_vector(np.ones(64))
        assert len(am.default_vector_inhibit_conns) == 1
        conn = am.default_vector_inhibit_conns[0]
        assert conn.pre is am.thresh_ens.output
Code Example #5
File: assoc_mem.py  Project: 4n6strider/nengo
    def __init__(self, input_vocab, output_vocab=None,  # noqa: C901
                 input_keys=None, output_keys=None,
                 default_output_key=None, threshold=0.3,
                 inhibitable=False, wta_output=False,
                 wta_inhibit_scale=3.0, wta_synapse=0.005,
                 threshold_output=False, label=None, seed=None,
                 add_to_container=None):
        super(AssociativeMemory, self).__init__(label, seed, add_to_container)

        if input_keys is None:
            input_keys = input_vocab.keys
            input_vectors = input_vocab.vectors
        else:
            input_vectors = input_vocab.create_subset(input_keys).vectors

        # If output vocabulary is not specified, use input vocabulary
        # (i.e autoassociative memory)
        if output_vocab is None:
            output_vocab = input_vocab
            output_vectors = input_vectors
        else:
            if output_keys is None:
                output_keys = input_keys
            output_vectors = output_vocab.create_subset(output_keys).vectors

        if default_output_key is None:
            default_output_vector = None
        else:
            default_output_vector = output_vocab.parse(default_output_key).v

        # Create nengo network
        with self:
            self.am = AssocMem(input_vectors=input_vectors,
                               output_vectors=output_vectors,
                               threshold=threshold,
                               inhibitable=inhibitable,
                               label=label, seed=seed,
                               add_to_container=add_to_container)

            if default_output_vector is not None:
                self.am.add_default_output_vector(default_output_vector)

            if wta_output:
                self.am.add_wta_network(wta_inhibit_scale, wta_synapse)

            if threshold_output:
                self.am.add_threshold_to_outputs()

            self.input = self.am.input
            self.output = self.am.output

            if inhibitable:
                self.inhibit = self.am.inhibit

            self.utilities = self.am.utilities
            if threshold_output:
                self.thresholded_utilities = self.am.thresholded_utilities

        self.inputs = dict(default=(self.input, input_vocab))
        self.outputs = dict(default=(self.output, output_vocab))
Code Example #6
File: assoc_mem.py  Project: mahmoodalmansooei/nengo
    def __init__(self, input_vocab, output_vocab=None,  # noqa: C901
                 input_keys=None, output_keys=None,
                 default_output_key=None, threshold=0.3,
                 inhibitable=False, wta_output=False,
                 wta_inhibit_scale=3.0, wta_synapse=0.005,
                 threshold_output=False, label=None, seed=None,
                 add_to_container=None):
        super(AssociativeMemory, self).__init__(label, seed, add_to_container)

        if input_keys is None:
            input_keys = input_vocab.keys
            input_vectors = input_vocab.vectors
        else:
            input_vectors = input_vocab.create_subset(input_keys).vectors

        # If output vocabulary is not specified, use input vocabulary
        # (i.e autoassociative memory)
        if output_vocab is None:
            output_vocab = input_vocab
            output_vectors = input_vectors
        else:
            if output_keys is None:
                output_keys = input_keys
            output_vectors = output_vocab.create_subset(output_keys).vectors

        if default_output_key is None:
            default_output_vector = None
        else:
            default_output_vector = output_vocab.parse(default_output_key).v

        # Create nengo network
        with self:
            self.am = AssocMem(input_vectors=input_vectors,
                               output_vectors=output_vectors,
                               threshold=threshold,
                               inhibitable=inhibitable,
                               label=label, seed=seed,
                               add_to_container=add_to_container)

            if default_output_vector is not None:
                self.am.add_default_output_vector(default_output_vector)

            if wta_output:
                self.am.add_wta_network(wta_inhibit_scale, wta_synapse)

            if threshold_output:
                self.am.add_threshold_to_outputs()

            self.input = self.am.input
            self.output = self.am.output

            if inhibitable:
                self.inhibit = self.am.inhibit

            self.utilities = self.am.utilities
            if threshold_output:
                self.thresholded_utilities = self.am.thresholded_utilities

        self.inputs = dict(default=(self.input, input_vocab))
        self.outputs = dict(default=(self.output, output_vocab))
Code Example #7
def test_am_basic(Simulator, plt, seed, rng):
    """Basic associative memory test."""

    D = 64
    vocab = np.array([])

    with pytest.raises(ValueError):
        with nengo.Network():
            am = AssociativeMemory(vocab)

    vocab = make_vocab(4, D, rng)

    with nengo.Network("model", seed=seed) as m:
        am = AssociativeMemory(vocab)
        in_node = nengo.Node(output=vocab[0, :], label="input")
        nengo.Connection(in_node, am.input)

        in_p = nengo.Probe(in_node)
        out_p = nengo.Probe(am.output, synapse=0.03)
        utils_p = nengo.Probe(am.utilities, synapse=0.03)

    with Simulator(m) as sim:
        sim.run(0.2)
    t = sim.trange()

    plt.subplot(3, 1, 1)
    plt.plot(t, np.dot(sim.data[in_p], vocab.T))
    plt.ylabel("Input")
    plt.ylim(top=1.1)
    plt.subplot(3, 1, 2)
    plt.plot(t, np.dot(sim.data[out_p], vocab.T))
    plt.plot(t[t > 0.15], np.ones(t.shape)[t > 0.15] * 0.9, c="g", lw=2)
    plt.ylabel("Output")
    plt.subplot(3, 1, 3)
    plt.plot(t, sim.data[utils_p])
    plt.plot(t[t > 0.15], np.ones(t.shape)[t > 0.15] * 0.9, c="g", lw=2)
    plt.ylabel("Utilities")

    assert similarity(sim.data[in_p][t > 0.15], vocab[0, :]) > 0.99
    assert similarity(sim.data[out_p][t > 0.15], vocab[0, :]) > 0.95
    assert similarity(sim.data[utils_p][t > 0.15], np.array([1, 0, 0, 0])) > 0.95
    assert similarity(sim.data[utils_p][t > 0.15], np.array([0, 1, 1, 1])) < 0.001
Code Example #8
def test_associativememory_errors(rng):
    """tests multiple errors in AssociativeMemory"""
    vocab = make_vocab(4, 64, rng)

    with nengo.Network():
        with pytest.raises(
            ValidationError, match="Number of input vectors.*cannot be 0"
        ):
            AssociativeMemory(np.zeros((0, 1)))

        with pytest.raises(
            ValidationError,
            match="Number of input vectors.*does not match number of output vectors",
        ):
            AssociativeMemory(vocab, output_vectors=np.zeros((1, 64)))

        with pytest.raises(
            ValidationError,
            match="Number of threshold values.*does not match number of input vectors",
        ):
            AssociativeMemory(vocab, threshold=[1])
Code Example #9
def test_add_input_mapping(rng):
    """tests add_input_mapping edge cases and errors"""
    vocab = make_vocab(4, 64, rng)

    with nengo.Network():
        test_am = AssociativeMemory(vocab)

    input_vectors = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
    test_am.add_input_mapping("test", input_vectors, input_scales=[1, 2, 3, 4])
    assert isinstance(test_am.test, nengo.Node)

    with pytest.raises(ValidationError, match="Name .* already exists as a node"):
        test_am.add_input_mapping("test", input_vectors, input_scales=[1, 2, 3, 4])

    # wrong input scales shape
    with pytest.raises(ValidationError, match="Number of input_scale values"):
        test_am.add_input_mapping("test2", input_vectors, input_scales=[1])
Code Example #10
def test_add_output_mapping(rng):
    """tests add_output_mapping edge cases and errors"""
    vocab = make_vocab(4, 64, rng)

    with nengo.Network():
        test_am = AssociativeMemory(vocab)

    output_vectors = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
    test_am.add_output_mapping("test", output_vectors)
    assert isinstance(test_am.test, nengo.Node)

    with pytest.raises(ValidationError, match="Name .* already exists as a node"):
        test_am.add_output_mapping("test", output_vectors)
Code Example #11
def test_am_threshold(Simulator, plt, seed, rng):
    """Associative memory thresholding with differing input/output vocabs."""
    D = 64
    vocab = make_vocab(4, D, rng)

    D2 = int(D / 2)
    vocab2 = make_vocab(4, D2, rng)

    def input_func(t):
        return 0.49 * vocab[0, :] if t < 0.1 else 0.8 * vocab[0, :]

    with nengo.Network("model", seed=seed) as m:
        am = AssociativeMemory(vocab, vocab2, threshold=0.5)
        in_node = nengo.Node(output=input_func, label="input")
        nengo.Connection(in_node, am.input)

        in_p = nengo.Probe(in_node)
        out_p = nengo.Probe(am.output, synapse=0.03)
        utils_p = nengo.Probe(am.utilities, synapse=0.03)

    with Simulator(m) as sim:
        sim.run(0.3)
    t = sim.trange()
    below_th = t < 0.1
    above_th = t > 0.25

    plt.subplot(2, 1, 1)
    plt.plot(t, np.dot(sim.data[in_p], vocab.T))
    plt.ylabel("Input")
    plt.subplot(2, 1, 2)
    plt.plot(t, np.dot(sim.data[out_p], vocab2.T))
    plt.plot(t[above_th], np.ones(t.shape)[above_th] * 0.9, c="g", lw=2)
    plt.ylabel("Output")

    assert similarity(sim.data[in_p][below_th], vocab[0, :]) > 0.48
    assert similarity(sim.data[in_p][above_th], vocab[0, :]) > 0.79
    assert np.mean(sim.data[out_p][below_th]) < 0.01
    assert similarity(sim.data[out_p][above_th], vocab2[0, :]) > 0.90
    assert similarity(sim.data[utils_p][above_th], np.array([1, 0, 0, 0])) > 0.95
    assert similarity(sim.data[utils_p][above_th], np.array([0, 1, 1, 1])) < 0.001
Code Example #12
    def __init__(
            self,
            input_vocab,
            output_vocab=None,  # noqa: C901
            input_keys=None,
            output_keys=None,
            default_output_key=None,
            threshold=0.3,
            inhibitable=False,
            wta_output=False,
            wta_inhibit_scale=3.0,
            wta_synapse=0.005,
            cleanup_output=False,
            replace_output_with_cleaned_output=True,
            label=None,
            **module_kwargs):
        super(AssociativeMemory, self).__init__(label=label, **module_kwargs)

        if input_keys is None:
            input_keys = input_vocab.keys
            input_vectors = input_vocab.vectors
        else:
            input_vectors = input_vocab.create_subset(input_keys).vectors

        # If output vocabulary is not specified, use input vocabulary
        # (i.e autoassociative memory)
        if output_vocab is None:
            output_vocab = input_vocab
            output_vectors = input_vectors
        else:
            if output_keys is None:
                output_keys = input_keys
            output_vectors = output_vocab.create_subset(output_keys).vectors

        if default_output_key is None:
            default_output_vector = None
        else:
            default_output_vector = output_vocab.parse(default_output_key).v

        # Create nengo network
        with self:
            self.am = AssocMem(input_vectors=input_vectors,
                               output_vectors=output_vectors,
                               threshold=threshold,
                               inhibitable=inhibitable,
                               label=label,
                               **module_kwargs)

            if default_output_vector is not None:
                self.am.add_default_output_vector(default_output_vector)

            if wta_output:
                self.am.add_wta_network(wta_inhibit_scale, wta_synapse)

            if cleanup_output:
                self.am.add_cleanup_output(
                    replace_output=replace_output_with_cleaned_output)

            self.input = self.am.input
            self.output = self.am.output

            if cleanup_output and not replace_output_with_cleaned_output:
                self.cleaned_output = self.am.cleaned_output

            if inhibitable:
                self.inhibit = self.am.inhibit

            self.utilities = self.am.output_utilities
            if cleanup_output:
                self.cleaned_utilities = self.am.cleaned_output_utilities

        self.inputs = dict(default=(self.input, input_vocab))
        self.outputs = dict(default=(self.output, output_vocab))
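
This constructor variant exposes cleanup_output and replace_output_with_cleaned_output instead of threshold_output. Below is a hypothetical instantiation sketch, not taken from the source project; it assumes the vocabulary behaves like nengo's spa.Vocabulary (providing keys, vectors, and create_subset, as the constructor above requires) and that the class is importable under the name AssociativeMemory.

import nengo
from nengo import spa  # assumed vocabulary implementation

D = 64
vocab = spa.Vocabulary(D)
vocab.parse("CAT + DOG + RAT")

with nengo.Network() as model:
    # Keep the raw output and expose a separately cleaned output.
    am = AssociativeMemory(input_vocab=vocab,
                           wta_output=True,
                           cleanup_output=True,
                           replace_output_with_cleaned_output=False)
    stim = nengo.Node(vocab.parse("0.9 * CAT").v)
    nengo.Connection(stim, am.input)
    raw_p = nengo.Probe(am.output, synapse=0.03)
    clean_p = nengo.Probe(am.cleaned_output, synapse=0.03)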
Code Example #13
File: test_assoc_mem.py  Project: nagyistoce/nengo
def test_am_complex(Simulator, plt, seed, rng):
    """Complex auto-associative memory test.

    Has a default output vector, outputs utilities, and becomes inhibited.
    """
    D = 64
    vocab = make_vocab(6, D, rng)
    vocab2 = vocab[:4, :]

    def input_func(t):
        if t < 0.25:
            return vocab[0, :] + 0.31 * vocab[1, :]
        elif t < 0.5:
            return 0.31 * vocab[0, :] + vocab[1, :]
        else:
            return vocab[4, :]

    def inhib_func(t):
        return int(t > 0.75)

    with nengo.Network('model', seed=seed) as m:
        am = AssociativeMemory(vocab2, inhibitable=True)
        am.add_default_output_vector(vocab[5, :])
        am.add_threshold_to_outputs()

        in_node = nengo.Node(output=input_func, label='input')
        inhib_node = nengo.Node(output=inhib_func, label='inhib')
        nengo.Connection(in_node, am.input)
        nengo.Connection(inhib_node, am.inhibit)

        in_p = nengo.Probe(in_node)
        out_p = nengo.Probe(am.output, synapse=0.03)
        utils_p = nengo.Probe(am.utilities, synapse=0.05)
        utils_th_p = nengo.Probe(am.thresholded_utilities, synapse=0.05)

    sim = Simulator(m)
    sim.run(1.0)
    t = sim.trange()
    # Input: A + 0.31B (more A)
    more_a = (t >= 0.2) & (t < 0.25)
    # Input: 0.31A + B (more B)
    more_b = (t >= 0.45) & (t < 0.5)
    # Input: E (but E isn't in the memory vocabulary, so should output F)
    all_e = (t >= 0.7) & (t < 0.75)
    # Input: E (but inhibited, so should output nothing)
    inhib = (t >= 0.95)

    def plot(i, y, ylabel):
        plt.subplot(4, 1, i)
        plt.plot(t, y)
        plt.axvline(0.25, c='k')
        plt.axvline(0.5, c='k')
        plt.axvline(0.75, c='k')
        plt.ylabel(ylabel)
    plot(1, np.dot(sim.data[in_p], vocab.T), "Input")
    plot(2, sim.data[utils_p], "Utilities")
    plot(3, sim.data[utils_th_p], "Thresholded utilities")
    plot(4, np.dot(sim.data[out_p], vocab.T), "Output")

    # Check that the output utilities (non-thresholded) are to be expected
    assert all(np.mean(sim.data[utils_p][more_a], axis=0)[:2] > [0.9, 0.35])
    assert all(np.mean(sim.data[utils_p][more_a], axis=0)[2:] < [0.01, 0.01])
    assert all(np.mean(sim.data[utils_p][more_b], axis=0)[:2] > [0.35, 0.9])
    assert all(np.mean(sim.data[utils_p][more_b], axis=0)[2:] < [0.01, 0.01])
    assert similarity(sim.data[utils_p][all_e], np.ones((1, 4))) < 0.05
    assert similarity(sim.data[utils_p][inhib], np.ones((1, 4))) < 0.05

    # Check that the thresholded output utilities are to be expected
    assert all(np.mean(sim.data[utils_th_p][more_a], axis=0)[:2] > [0.9, 0.9])
    assert all(
        np.mean(sim.data[utils_th_p][more_a], axis=0)[2:] < [0.01, 0.01])
    assert all(np.mean(sim.data[utils_th_p][more_b], axis=0)[:2] > [0.9, 0.9])
    assert all(
        np.mean(sim.data[utils_th_p][more_b], axis=0)[2:] < [0.01, 0.01])
    assert similarity(sim.data[utils_th_p][all_e], np.ones((1, 4))) < 0.05
    assert similarity(sim.data[utils_th_p][inhib], np.ones((1, 4))) < 0.05

    # Check that the output values are to be expected
    assert similarity(sim.data[out_p][more_a], vocab[0, :]) > 0.9
    assert similarity(sim.data[out_p][more_a], vocab[1, :]) > 0.9
    assert similarity(sim.data[out_p][more_b], vocab[0, :]) > 0.9
    assert similarity(sim.data[out_p][more_b], vocab[1, :]) > 0.9
    assert similarity(sim.data[out_p][all_e], vocab[5, :]) > 0.9
    assert similarity(sim.data[out_p][inhib], np.ones((1, D))) < 0.05
Code Example #14
class NeuralExtractor(Extractor):

    _type = "Neural"

    def __init__(self,
                 index_vectors,
                 stored_vectors,
                 threshold=0.3,
                 neurons_per_item=20,
                 neurons_per_dim=50,
                 timesteps=100,
                 dt=0.001,
                 tau_rc=0.02,
                 tau_ref=0.002,
                 synapse=0.005,
                 output_dir=".",
                 probe_keys=[],
                 plot=False,
                 ocl=[],
                 gpus=[],
                 identical=False,
                 collect_spikes=False):
        """
        index_vectors and stored_vectors are both dictionaries mapping from
        tuples of the form (POS, number), indicating a synset, to numpy
        ndarrays containing the assigned vector
        """

        then = datetime.datetime.now()

        self.ideal_dot = None
        self.second_dot = None

        self.return_vec = True

        self.output_dir = output_dir

        self.index_vectors = index_vectors
        self.stored_vectors = stored_vectors

        self.runtimes_file = open(self.output_dir + '/neural_runtimes', 'a')

        self.dimension = len(self.index_vectors.values()[0])
        self.num_items = len(self.index_vectors)
        self.neurons_per_item = neurons_per_item
        self.neurons_per_dim = neurons_per_dim
        self.dt = dt
        self.timesteps = timesteps
        self.plot = plot
        self.gpus = gpus
        self.ocl = ocl
        self.probe_keys = probe_keys
        self.identical = identical
        self.seed = np.random.randint(npext.maxint)
        self.collect_spikes = collect_spikes

        self.threshold = threshold
        self.threshold_func = lambda x: 1 if x > self.threshold else 0

        # association population parameters
        intercepts_low = 0.29
        intercepts_range = 0.00108
        n_eval_points = 750
        eval_point_mean = 0.39
        eval_point_std = 0.32

        intercepts = Uniform(intercepts_low, intercepts_low + intercepts_range)
        eval_points = np.random.normal(eval_point_mean, eval_point_std,
                                       (n_eval_points, 1))

        self.assoc_params = AssocParams(tau_rc=0.034,
                                        tau_ref=0.0026,
                                        synapse=0.005,
                                        radius=1.0,
                                        eval_points=eval_points,
                                        intercepts=intercepts)

        # other population parameters
        n_eval_points = 750

        # TODO: potentially use SubvectorLength distribution here.
        self.radius = 5.0 / np.sqrt(self.dimension)
        self.synapse = synapse

        self.A_input_vector = np.zeros(self.dimension)
        self.B_input_vector = np.zeros(self.dimension)

        self.setup_simulator()

        now = datetime.datetime.now()
        self.write_to_runtime_file(now - then, "setup")

    def setup_simulator(self):
        self.model = nengo.Network(label="Extractor", seed=self.seed)

        print "Specifiying model"
        # The order is important here
        self.build_unbind(self.model)
        self.build_output(self.model)
        self.build_association(self.model)

        print "Building simulator"
        self.simulator = self.build_simulator(self.model)
        print "Done building simulator"

    def build_unbind(self, model):
        A_input_func = make_func(self, "A_input_vector")
        B_input_func = make_func(self, "B_input_vector")

        neurons_per_dim = self.neurons_per_dim
        radius = self.radius
        synapse = self.synapse
        dimension = self.dimension

        with model:
            A_input = nengo.Node(output=A_input_func, size_out=dimension)
            B_input = nengo.Node(output=B_input_func, size_out=dimension)

            A = EnsembleArray(n_neurons=neurons_per_dim,
                              n_ensembles=dimension,
                              label="A",
                              radius=radius)

            B = EnsembleArray(n_neurons=neurons_per_dim,
                              n_ensembles=dimension,
                              label="B",
                              radius=radius)

            cconv = CircularConvolution(n_neurons=int(2 * neurons_per_dim),
                                        dimensions=dimension,
                                        invert_b=True)

            D = EnsembleArray(n_neurons=neurons_per_dim,
                              n_ensembles=dimension,
                              label="D",
                              radius=radius)

            A_output = A.output
            B_output = B.output
            D_output = D.output
            cconv_output = cconv.output

            nengo.Connection(A_input, A.input)
            nengo.Connection(B_input, B.input)

            nengo.Connection(A_output, cconv.A, synapse=synapse)
            nengo.Connection(B_output, cconv.B, synapse=synapse)
            nengo.Connection(cconv_output, D.input, synapse=synapse)

            assoc_synapse = self.assoc_params.synapse

            self.D_probe = nengo.Probe(D_output,
                                       'output',
                                       synapse=assoc_synapse)

            self.input_probe = nengo.Probe(A_output, 'output', synapse=synapse)

            self.D_output = D_output

            self.A = A
            self.B = B
            self.cconv = cconv
            self.D = D

    def build_association(self, model):

        tau_rc = self.assoc_params.tau_rc
        tau_ref = self.assoc_params.tau_ref
        synapse = self.assoc_params.synapse
        radius = self.assoc_params.radius
        eval_points = self.assoc_params.eval_points
        intercepts = self.assoc_params.intercepts

        neurons_per_item = self.neurons_per_item
        threshold = self.threshold

        assoc_probes = OrderedDict()
        threshold_probes = OrderedDict()
        assoc_spike_probes = OrderedDict()

        with model:
            if self.gpus:

                if not self.identical:
                    raise NotImplementedError(
                        "Currently, can only use gpu if --identical "
                        "is also specified")

                # Add a nengo.Node which calls out to a GPU library for
                # simulating the associative memory
                self.assoc_mem = \
                    AssociativeMemoryGPU(self.gpus, self.index_vectors,
                                         self.stored_vectors,
                                         threshold=threshold,
                                         neurons_per_item=neurons_per_item,
                                         tau_ref=tau_ref, tau_rc=tau_rc,
                                         eval_points=eval_points,
                                         intercepts=intercepts,
                                         radius=radius, do_print=False,
                                         identical=self.identical,
                                         probe_keys=self.probe_keys,
                                         seed=self.seed,
                                         collect_spikes=self.collect_spikes)

                def gpu_function(t, input_vector):
                    output_vector = self.assoc_mem.step(input_vector)
                    return output_vector

                assoc = nengo.Node(output=gpu_function,
                                   size_in=self.dimension,
                                   size_out=self.dimension)

                nengo.Connection(self.D_output, assoc, synapse=synapse)
                nengo.Connection(assoc, self.output.input, synapse=synapse)

                for k in self.probe_keys:
                    node = nengo.Node(output=self.assoc_mem.probe_func(k))
                    probe = nengo.Probe(node, synapse=synapse)

                    threshold_probes[k] = probe

                    node = nengo.Node(output=self.assoc_mem.spike_func(k))
                    assoc_spike_probes[k] = nengo.Probe(node, synapse=None)

            else:
                #print(self.index_vectors)
                #print(self.stored_vectors)
                '''
                index_vocab = Vocabulary(128)
                stored_vocab = Vocabulary(128)
                #index_vocab = Vocabulary(len(self.index_vectors.values()))
                #stored_vocab = Vocabulary(len(self.stored_vectors.values()))

                import string
                chars = list(string.ascii_uppercase)

                import random
                for idx, vector in enumerate(self.index_vectors.values()):
                    generated_key = ''.join([chars[i] for i in random.sample(range(len(chars)), len(chars)-1)])
                    print('index_len: {}'.format(np.shape(vector)))
                    print(vector)
                    index_vocab.add(generated_key, vector)
                for idx, vector in enumerate(self.stored_vectors.values()):
                    generated_key = ''.join([chars[i] for i in random.sample(range(len(chars)), len(chars)-1)])
                    print('stored_len: {}'.format(np.shape(vector)))
                    stored_vocab.add(generated_key, vector)
                '''

                self.assoc_mem = AssociativeMemory(
                    input_vectors=self.index_vectors.values(),
                    output_vectors=self.stored_vectors.values(),
                    threshold=self.threshold,
                    n_neurons=neurons_per_item)
                #neuron_type=nengo.LIF(tau_rc=tau_rc, tau_ref=tau_ref),
                #n_neurons_per_ensemble=neurons_per_item)
                self.assoc_mem.add_threshold_to_outputs(neurons_per_item)
                #self.assoc_mem = AssociativeMemory(
                #    input_vocab=index_vocab,
                #    output_vocab=stored_vocab,
                #    threshold=self.threshold,
                #    )
                #neuron_type=nengo.LIF(tau_rc=tau_rc, tau_ref=tau_ref),
                #n_neurons_per_ensemble=neurons_per_item)

                nengo.Connection(self.D_output,
                                 self.assoc_mem.input,
                                 synapse=synapse)
                nengo.Connection(self.assoc_mem.output,
                                 self.output.input,
                                 synapse=synapse)

                assoc_ensembles = (self.assoc_mem.thresh_ens.ensembles)
                #self.assoc_mem.thresholded_ens_array.ea_ensembles)

                for ens, k in zip(assoc_ensembles, self.index_vectors):
                    if k in self.probe_keys:
                        assoc_probes[k] = nengo.Probe(ens,
                                                      'decoded_output',
                                                      synapse=synapse)
                        assoc_spike_probes[k] = nengo.Probe(ens.neurons,
                                                            'spikes',
                                                            synapse=None)

        self.assoc_probes = assoc_probes
        self.threshold_probes = threshold_probes
        self.assoc_spike_probes = assoc_spike_probes

    def build_output(self, model):
        with model:

            self.output = EnsembleArray(n_neurons=self.neurons_per_dim,
                                        n_ensembles=self.dimension,
                                        label="output",
                                        radius=self.radius)

            output_output = self.output.output

            self.output_probe = nengo.Probe(output_output,
                                            'output',
                                            synapse=0.02)

    def extract(self, item, query, target_keys=None, *args, **kwargs):
        then = datetime.datetime.now()

        if target_keys:
            self.print_instance_difficulty(item, query, target_keys)

        self.reset()

        self.A_input_vector = item
        self.B_input_vector = query

        self.simulator.run(self.timesteps * self.dt)
        self.data = self.simulator.data

        now = datetime.datetime.now()
        self.write_to_runtime_file(now - then, "unbind")

        if self.plot:
            self.plot_simulation(target_keys)

        vector = self.simulator.data[self.output_probe][-1, :]
        return [vector]

    def reset(self):
        if hasattr(self.assoc_mem, 'reset'):
            self.assoc_mem.reset()

        if hasattr(self.simulator, 'reset'):
            self.simulator.reset()
        else:
            warnings.warn("Non-GPU ensembles could not be reset")

    def build_simulator(self, model):
        print('Neuron Count: {} neurons'.format(model.n_neurons))
        if ocl_imported and self.ocl:
            platforms = pyopencl.get_platforms()

            # 0 is the Nvidia platform
            devices = platforms[0].get_devices()
            devices = [devices[i] for i in self.ocl]
            devices.sort()

            ctx = pyopencl.Context(devices=devices)

            simulator = nengo_ocl.sim_ocl.Simulator(model, context=ctx)
        else:
            if self.ocl:
                print "Failed to import nengo_ocl"

            simulator = nengo.Simulator(model)

        return simulator

    def plot_simulation(self, target_keys):
        then = datetime.datetime.now()

        correct_key = None
        if target_keys:
            correct_key = target_keys[0]

        sim = self.simulator
        t = sim.trange()

        max_val = 5.0 / np.sqrt(self.dimension)

        gs = gridspec.GridSpec(7, 2)
        fig = plt.figure(figsize=(10, 10))

        ax = plt.subplot(gs[0, 0])

        plt.plot(t, self.data[self.D_probe], label='D')
        title = 'Input to associative memory'
        ax.text(.01,
                1.20,
                title,
                horizontalalignment='left',
                transform=ax.transAxes)
        plt.ylim((-max_val, max_val))

        ax = plt.subplot(gs[0, 1])
        plt.plot(t, self.data[self.output_probe], label='Output')
        title = 'Output of associative memory'
        ax.text(.01,
                1.20,
                title,
                horizontalalignment='left',
                transform=ax.transAxes)
        plt.ylim((-max_val, max_val))

        ax = plt.subplot(gs[1:3, :])

        if len(self.index_vectors) < 1000:
            for key, v in self.index_vectors.iteritems():
                input_sims = np.dot(self.data[self.D_probe], v)
                label = str(key[1])
                if key == correct_key:
                    plt.plot(t, input_sims, '--', label=label + '*')
                else:
                    plt.plot(t, input_sims, label=label)

            title = (
                'Dot product between id vectors and input to assoc memory.\n'
                'Target %s is dashed line.' % str(correct_key))

            ax.text(.01,
                    0.80,
                    title,
                    horizontalalignment='left',
                    transform=ax.transAxes)
            # plt.legend(bbox_to_anchor=(-0.03, 0.5), loc='center right')
            if self.ideal_dot:
                ax.text(.01,
                        0.10,
                        "Ideal dot: " + str(self.ideal_dot),
                        horizontalalignment='left',
                        transform=ax.transAxes)
            if self.second_dot:
                ax.text(.99,
                        0.10,
                        "Second dot: " + str(self.second_dot),
                        horizontalalignment='right',
                        transform=ax.transAxes)

            plt.ylim((-1.0, 1.5))
            plt.axhline(1.0, ls=':', c='k')

        ax = plt.subplot(gs[3:5, :])
        for key, p in self.assoc_probes.iteritems():
            if key == correct_key:
                plt.plot(t, self.data[p], '--')
            else:
                plt.plot(t, self.data[p])

        title = ('Decoded values of association populations.\n' +
                 'Target %s is dashed line.' % str(correct_key))

        ax.text(.01,
                0.80,
                title,
                horizontalalignment='left',
                transform=ax.transAxes)
        plt.ylim((-0.2, 1.5))
        plt.axhline(y=1.0, ls=':', c='k')

        ax = plt.subplot(gs[5:7, :])
        before_ls = '--'
        after_ls = '-'
        before_norms = [np.linalg.norm(v) for v in self.data[self.D_probe]]
        after_norms = [np.linalg.norm(v) for v in self.data[self.output_probe]]

        plt.plot(t, before_norms, before_ls, c='g', label='Norm - Before')
        plt.plot(t, after_norms, after_ls, c='g', label='Norm - After')

        if correct_key is not None:
            correct_index_hrr = HRR(data=self.index_vectors[correct_key])
            correct_stored_hrr = HRR(data=self.stored_vectors[correct_key])

            before_sims = [
                correct_index_hrr.compare(HRR(data=i))
                for i in self.data[self.D_probe]
            ]

            after_sims = [
                correct_stored_hrr.compare(HRR(data=o))
                for o in self.data[self.output_probe]
            ]

            plt.plot(t,
                     before_sims,
                     before_ls,
                     c='b',
                     label='Cosine Sim - Before')
            plt.plot(t,
                     after_sims,
                     after_ls,
                     c='b',
                     label='Cosine Sim - After')

        title = 'Before and After Associative Memory'
        ax.text(.01,
                0.90,
                title,
                horizontalalignment='left',
                transform=ax.transAxes)
        plt.ylim((-1.0, 1.5))
        plt.legend(loc=4, prop={'size': 6})
        plt.axhline(y=1.0, ls=':', c='k')
        ax.set_xlabel('Time (s)')

        date_time_string = str(datetime.datetime.now()).split('.')[0]
        date_time_string = reduce(lambda y, z: string.replace(y, z, "_"),
                                  [date_time_string, ":", ".", " ", "-"])

        plot_name = 'neural_extraction_' + date_time_string + ".png"
        plot_path = os.path.join(self.output_dir, plot_name)

        plt.savefig(plot_path)

        symlink_name = os.path.join(self.output_dir,
                                    'latest_neural_extraction')
        make_sym_link(plot_name, symlink_name)

        now = datetime.datetime.now()
        self.write_to_runtime_file(now - then, "plot")

        plt.close(fig)

    def write_to_runtime_file(self, delta, label=''):
        to_print = [
            self.dimension, self.num_items, self.neurons_per_item,
            self.neurons_per_dim, self.timesteps, "OCL: " + str(self.ocl),
            "GPUS: " + str(self.gpus), delta
        ]
        print >> self.runtimes_file, label, \
            ": " + ",".join([str(tp) for tp in to_print])

    def print_config(self, output_file):
        super(NeuralExtractor, self).print_config(output_file)

        output_file.write("Neural extractor config:\n")

        output_file.write("Neurons per item: " + str(self.neurons_per_item) +
                          "\n")
        output_file.write("Neurons per dimension: " +
                          str(self.neurons_per_dim) + "\n")

        output_file.write("Assoc params tau_rc: " +
                          str(self.assoc_params.tau_rc) + "\n")
        output_file.write("Assoc params tau_ref: " +
                          str(self.assoc_params.tau_ref) + "\n")
        output_file.write("Assoc params synapse: " +
                          str(self.assoc_params.synapse) + "\n")
        output_file.write("Assoc params radius: " +
                          str(self.assoc_params.radius) + "\n")
        output_file.write("Assoc params intercepts: " +
                          str(self.assoc_params.intercepts) + "\n")

        output_file.write("radius:" + str(self.radius) + "\n")
        output_file.write("synapse:" + str(self.synapse) + "\n")

        output_file.write("dimension:" + str(self.dimension) + "\n")
        output_file.write("num_items:" + str(self.num_items) + "\n")
        output_file.write("neurons_per_item:" + str(self.neurons_per_item) +
                          "\n")
        output_file.write("neurons_per_dim:" + str(self.neurons_per_dim) +
                          "\n")
        output_file.write("dt:" + str(self.dt) + "\n")
        output_file.write("timesteps:" + str(self.timesteps) + "\n")
        output_file.write("plot:" + str(self.plot) + "\n")
        output_file.write("gpus:" + str(self.gpus) + "\n")
        output_file.write("ocl:" + str(self.ocl) + "\n")
        output_file.write("probe_keys:" + str(self.probe_keys) + "\n")
        output_file.write("identical:" + str(self.identical) + "\n")
        output_file.write("seed:" + str(self.seed) + "\n")

        output_file.write("threshold:" + str(self.threshold) + "\n")
        output_file.write("threshold_func:" + str(self.threshold_func) + "\n")
Code Example #15
File: assoc_mem.py  Project: 4n6strider/nengo
class AssociativeMemory(Module):
    """Associative memory module.

    See :doc:`examples/associative_memory` for an introduction and examples.

    Parameters
    ----------
    input_vocab: list or Vocabulary
        The vocabulary (or list of vectors) to match.
    output_vocab: list or Vocabulary, optional (Default: None)
        The vocabulary (or list of vectors) to be produced for each match. If
        None, the associative memory will act like an autoassociative memory
        (cleanup memory).
    input_keys : list, optional (Default: None)
        A list of strings that correspond to the input vectors.
    output_keys : list, optional (Default: None)
        A list of strings that correspond to the output vectors.
    default_output_key: str, optional (Default: None)
        The semantic pointer string to be produced if the input value matches
        none of vectors in the input vector list.
    threshold: float, optional (Default: 0.3)
        The association activation threshold.
    inhibitable: bool, optional (Default: False)
        Flag to indicate if the entire associative memory module is
        inhibitable (i.e., the entire module can be inhibited).
    wta_output: bool, optional (Default: False)
        Flag to indicate whether the associative memory should restrict its
        output to a single vector. If True, only one vector's output will be
        produced, i.e. a winner-take-all (WTA) output.
        If False, combinations of vectors will be produced.
    wta_inhibit_scale: float, optional (Default: 3.0)
        Scaling factor on the winner-take-all (WTA) inhibitory connections.
    wta_synapse: float, optional (Default: 0.005)
        Synapse to use for the winner-take-all (wta) inhibitory connections.
    threshold_output: bool, optional (Default: False)
        Adds a thresholded output if True.
    label : str, optional (Default: None)
        A name for the ensemble. Used for debugging and visualization.
    seed : int, optional (Default: None)
        The seed used for random number generation.
    add_to_container : bool, optional (Default: None)
        Determines if this Network will be added to the current container.
        If None, will be true if currently within a Network.
    """

    def __init__(self, input_vocab, output_vocab=None,  # noqa: C901
                 input_keys=None, output_keys=None,
                 default_output_key=None, threshold=0.3,
                 inhibitable=False, wta_output=False,
                 wta_inhibit_scale=3.0, wta_synapse=0.005,
                 threshold_output=False, label=None, seed=None,
                 add_to_container=None):
        super(AssociativeMemory, self).__init__(label, seed, add_to_container)

        if input_keys is None:
            input_keys = input_vocab.keys
            input_vectors = input_vocab.vectors
        else:
            input_vectors = input_vocab.create_subset(input_keys).vectors

        # If output vocabulary is not specified, use input vocabulary
        # (i.e autoassociative memory)
        if output_vocab is None:
            output_vocab = input_vocab
            output_vectors = input_vectors
        else:
            if output_keys is None:
                output_keys = input_keys
            output_vectors = output_vocab.create_subset(output_keys).vectors

        if default_output_key is None:
            default_output_vector = None
        else:
            default_output_vector = output_vocab.parse(default_output_key).v

        # Create nengo network
        with self:
            self.am = AssocMem(input_vectors=input_vectors,
                               output_vectors=output_vectors,
                               threshold=threshold,
                               inhibitable=inhibitable,
                               label=label, seed=seed,
                               add_to_container=add_to_container)

            if default_output_vector is not None:
                self.am.add_default_output_vector(default_output_vector)

            if wta_output:
                self.am.add_wta_network(wta_inhibit_scale, wta_synapse)

            if threshold_output:
                self.am.add_threshold_to_outputs()

            self.input = self.am.input
            self.output = self.am.output

            if inhibitable:
                self.inhibit = self.am.inhibit

            self.utilities = self.am.utilities
            if threshold_output:
                self.thresholded_utilities = self.am.thresholded_utilities

        self.inputs = dict(default=(self.input, input_vocab))
        self.outputs = dict(default=(self.output, output_vocab))
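
The module above is the SPA-level wrapper around the networks-level associative memory. A minimal usage sketch follows; it assumes the nengo 2.x spa.Vocabulary API and is illustrative rather than taken from the project.

import nengo
from nengo import spa

D = 64
vocab = spa.Vocabulary(D)
vocab.parse("CAT + DOG + RAT")

with nengo.Network(seed=0) as model:
    # Autoassociative (cleanup) memory over the pointers in vocab,
    # restricted to a winner-take-all output.
    am = spa.AssociativeMemory(input_vocab=vocab,
                               wta_output=True,
                               threshold=0.3)
    stim = nengo.Node(vocab.parse("0.8 * CAT + 0.5 * DOG").v)
    nengo.Connection(stim, am.input)
    out_p = nengo.Probe(am.output, synapse=0.03)

with nengo.Simulator(model) as sim:
    sim.run(0.2)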
Code Example #16
File: assoc_mem.py  Project: mahmoodalmansooei/nengo
class AssociativeMemory(Module):
    """Associative memory module.

    Parameters
    ----------
    input_vocab: list of numpy.array, spa.Vocabulary
        The vocabulary (or list of vectors) to match.
    output_vocab: list of numpy.array, spa.Vocabulary, optional
        The vocabulary (or list of vectors) to be produced for each match. If
        not given, the associative memory will act like an auto-associative
        memory (cleanup memory).
    default_output_vector: numpy.array, spa.SemanticPointer, optional
        The vector to be produced if the input value matches none of vectors
        in the input vector list.
    threshold: float, optional
        The association activation threshold.
    input_scale: float, optional
        Scaling factor to apply on the input vectors.

    inhibitable: boolean, optional
        Flag to indicate if the entire associative memory module is
        inhibitable (entire thing can be shut off).
    inhibit_scale: float, optional
        Scaling factor on the gating connections (must have inhibitable =
        True). Setting a larger value will ensure that the cleanup memory
        output is inhibited at a faster rate, however, recovery of the
        network when inhibition is released will be slower.

    wta_output: boolean, optional
        Flag to indicate if output of the associative memory should contain
        more than one vector. Set to True if only one vector's output is
        desired -- i.e. a winner-take-all (wta) output. Leave as default
        (False) if (possible) combinations of vectors are desired.
    wta_inhibit_scale: float, optional
        Scaling factor on the winner-take-all (wta) inhibitory connections.
    wta_synapse: float, optional
        Synapse to use for the winner-take-all (wta) inhibitory connections.

    output_utilities: boolean, optional
        Flag to indicate if the direct utilities (in addition to the vectors)
        are output as well.
    output_thresholded_utilities: boolean, optional
        Flag to indicate if the direct thresholded utilities (in addition to
        the vectors) are output as well.

    neuron_type: nengo.Neurons, optional
        Neuron type to use in the associative memory.
    n_neurons_per_ensemble: int, optional
        Number of neurons per ensemble in the associative memory. There is
        one ensemble created per vector being compared.

    """

    def __init__(self, input_vocab, output_vocab=None,  # noqa: C901
                 input_keys=None, output_keys=None,
                 default_output_key=None, threshold=0.3,
                 inhibitable=False, wta_output=False,
                 wta_inhibit_scale=3.0, wta_synapse=0.005,
                 threshold_output=False, label=None, seed=None,
                 add_to_container=None):
        super(AssociativeMemory, self).__init__(label, seed, add_to_container)

        if input_keys is None:
            input_keys = input_vocab.keys
            input_vectors = input_vocab.vectors
        else:
            input_vectors = input_vocab.create_subset(input_keys).vectors

        # If output vocabulary is not specified, use input vocabulary
        # (i.e autoassociative memory)
        if output_vocab is None:
            output_vocab = input_vocab
            output_vectors = input_vectors
        else:
            if output_keys is None:
                output_keys = input_keys
            output_vectors = output_vocab.create_subset(output_keys).vectors

        if default_output_key is None:
            default_output_vector = None
        else:
            default_output_vector = output_vocab.parse(default_output_key).v

        # Create nengo network
        with self:
            self.am = AssocMem(input_vectors=input_vectors,
                               output_vectors=output_vectors,
                               threshold=threshold,
                               inhibitable=inhibitable,
                               label=label, seed=seed,
                               add_to_container=add_to_container)

            if default_output_vector is not None:
                self.am.add_default_output_vector(default_output_vector)

            if wta_output:
                self.am.add_wta_network(wta_inhibit_scale, wta_synapse)

            if threshold_output:
                self.am.add_threshold_to_outputs()

            self.input = self.am.input
            self.output = self.am.output

            if inhibitable:
                self.inhibit = self.am.inhibit

            self.utilities = self.am.utilities
            if threshold_output:
                self.thresholded_utilities = self.am.thresholded_utilities

        self.inputs = dict(default=(self.input, input_vocab))
        self.outputs = dict(default=(self.output, output_vocab))
Code Example #17
    def build_association(self, model):

        tau_rc = self.assoc_params.tau_rc
        tau_ref = self.assoc_params.tau_ref
        synapse = self.assoc_params.synapse
        radius = self.assoc_params.radius
        eval_points = self.assoc_params.eval_points
        intercepts = self.assoc_params.intercepts

        neurons_per_item = self.neurons_per_item
        threshold = self.threshold

        assoc_probes = OrderedDict()
        threshold_probes = OrderedDict()
        assoc_spike_probes = OrderedDict()

        with model:
            if self.gpus:

                if not self.identical:
                    raise NotImplementedError(
                        "Currently, can only use gpu if --identical "
                        "is also specified")

                # Add a nengo.Node which calls out to a GPU library for
                # simulating the associative memory
                self.assoc_mem = \
                    AssociativeMemoryGPU(self.gpus, self.index_vectors,
                                         self.stored_vectors,
                                         threshold=threshold,
                                         neurons_per_item=neurons_per_item,
                                         tau_ref=tau_ref, tau_rc=tau_rc,
                                         eval_points=eval_points,
                                         intercepts=intercepts,
                                         radius=radius, do_print=False,
                                         identical=self.identical,
                                         probe_keys=self.probe_keys,
                                         seed=self.seed,
                                         collect_spikes=self.collect_spikes)

                def gpu_function(t, input_vector):
                    output_vector = self.assoc_mem.step(input_vector)
                    return output_vector

                assoc = nengo.Node(output=gpu_function,
                                   size_in=self.dimension,
                                   size_out=self.dimension)

                nengo.Connection(self.D_output, assoc, synapse=synapse)
                nengo.Connection(assoc, self.output.input, synapse=synapse)

                for k in self.probe_keys:
                    node = nengo.Node(output=self.assoc_mem.probe_func(k))
                    probe = nengo.Probe(node, synapse=synapse)

                    threshold_probes[k] = probe

                    node = nengo.Node(output=self.assoc_mem.spike_func(k))
                    assoc_spike_probes[k] = nengo.Probe(node, synapse=None)

            else:
                # CPU path: build the associative memory directly in nengo.
                # list(...) so the dict views work under both Python 2 and 3.
                self.assoc_mem = AssociativeMemory(
                    input_vectors=list(self.index_vectors.values()),
                    output_vectors=list(self.stored_vectors.values()),
                    threshold=self.threshold,
                    n_neurons=neurons_per_item)
                self.assoc_mem.add_threshold_to_outputs(neurons_per_item)

                nengo.Connection(self.D_output,
                                 self.assoc_mem.input,
                                 synapse=synapse)
                nengo.Connection(self.assoc_mem.output,
                                 self.output.input,
                                 synapse=synapse)

                assoc_ensembles = self.assoc_mem.thresh_ens.ensembles

                for ens, k in zip(assoc_ensembles, self.index_vectors):
                    if k in self.probe_keys:
                        assoc_probes[k] = nengo.Probe(ens,
                                                      'decoded_output',
                                                      synapse=synapse)
                        assoc_spike_probes[k] = nengo.Probe(ens.neurons,
                                                            'spikes',
                                                            synapse=None)

        self.assoc_probes = assoc_probes
        self.threshold_probes = threshold_probes
        self.assoc_spike_probes = assoc_spike_probes
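
The GPU branch in the example above wraps an external simulator in a plain nengo.Node: because the node has size_in > 0, nengo calls the supplied function as f(t, x) each timestep and uses its return value as the node's output. A stripped-down sketch of that pattern follows; DummyAssocMem is a made-up stand-in for AssociativeMemoryGPU, so everything other than the nengo.Node/Connection/Probe calls is an illustrative assumption.

import numpy as np
import nengo

D = 16

class DummyAssocMem(object):
    # Illustrative stand-in for AssociativeMemoryGPU: just echoes its input.
    def step(self, input_vector):
        return np.asarray(input_vector)

assoc_mem = DummyAssocMem()

with nengo.Network() as net:
    stim = nengo.Node(output=np.ones(D) / np.sqrt(D))
    # size_in > 0, so nengo passes (t, x) to the callable every timestep
    assoc = nengo.Node(output=lambda t, x: assoc_mem.step(x),
                       size_in=D, size_out=D)
    nengo.Connection(stim, assoc, synapse=None)
    out_p = nengo.Probe(assoc, synapse=0.01)
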
Code example #18
File: assoc_mem.py  Project: Stanford-BIS/spaun2.0
class AssociativeMemory(Module):
    """Associative memory module.

    Parameters
    ----------
    input_vocab: list of numpy.array, spa.Vocabulary
        The vocabulary (or list of vectors) to match.
    output_vocab: list of numpy.array, spa.Vocabulary, optional
        The vocabulary (or list of vectors) to be produced for each match. If
        not given, the associative memory will act like an auto-associative
        memory (cleanup memory).

    input_keys: list of strings, optional
        List of keys (ordered) from the input vocabulary to use as the input
        semantic pointers for the associative memory.
    output_keys: list of strings, optional
        List of keys (ordered) from the output vocabulary to use as the output
        semantic pointers for the associative memory.

    default_output_key: str, optional
        The key of the semantic pointer to be produced if the input value
        matches none of the vectors in the input vector list.
    threshold: float, optional
        The association activation threshold.

    inhibitable: boolean, optional
        Flag to indicate if the entire associative memory module is
        inhibitable (entire thing can be shut off).

    wta_output: boolean, optional
        Flag to indicate whether the output of the associative memory may
        contain more than one vector. Set to True if only a single vector's
        output is desired -- i.e. a winner-take-all (WTA) output. Leave as the
        default (False) if combinations of vectors are acceptable.
    wta_inhibit_scale: float, optional
        Scaling factor on the winner-take-all (wta) inhibitory connections.
    wta_synapse: float, optional
        Synapse to use for the winner-take-all (wta) inhibitory connections.

    cleanup_output: boolean, optional
        Create the associative memory with cleaned outputs as well as the
        standard outputs.
    replace_output_with_cleaned_output: boolean, optional
        Set to true to use the cleaned outputs as the default output of the
        associative memory module.

    label : str, optional
        A name to assign this AssociativeMemory. Used for visualization and
        debugging. Also used as a label prefix for each of the internal
        ensembles in the AssociativeMemory network.

    Additional network parameters are passed to the Network constructor through
    **module_kwargs
    """

    def __init__(self, input_vocab, output_vocab=None,  # noqa: C901
                 input_keys=None, output_keys=None,
                 default_output_key=None, threshold=0.3,
                 inhibitable=False, wta_output=False,
                 wta_inhibit_scale=3.0, wta_synapse=0.005,
                 cleanup_output=False,
                 replace_output_with_cleaned_output=True,
                 label=None, **module_kwargs):
        super(AssociativeMemory, self).__init__(label=label, **module_kwargs)

        if input_keys is None:
            input_keys = input_vocab.keys
            input_vectors = input_vocab.vectors
        else:
            input_vectors = input_vocab.create_subset(input_keys).vectors

        # If output vocabulary is not specified, use input vocabulary
        # (i.e., an autoassociative memory)
        if output_vocab is None:
            output_vocab = input_vocab
            output_vectors = input_vectors
        else:
            if output_keys is None:
                output_keys = input_keys
            output_vectors = output_vocab.create_subset(output_keys).vectors

        if default_output_key is None:
            default_output_vector = None
        else:
            default_output_vector = output_vocab.parse(default_output_key).v

        # Create nengo network
        with self:
            self.am = AssocMem(input_vectors=input_vectors,
                               output_vectors=output_vectors,
                               threshold=threshold,
                               inhibitable=inhibitable,
                               label=label, **module_kwargs)

            if default_output_vector is not None:
                self.am.add_default_output_vector(default_output_vector)

            if wta_output:
                self.am.add_wta_network(wta_inhibit_scale, wta_synapse)

            if cleanup_output:
                self.am.add_cleanup_output(
                    replace_output=replace_output_with_cleaned_output)

            self.input = self.am.input
            self.output = self.am.output

            if cleanup_output and not replace_output_with_cleaned_output:
                self.cleaned_output = self.am.cleaned_output

            if inhibitable:
                self.inhibit = self.am.inhibit

            self.utilities = self.am.output_utilities
            if cleanup_output:
                self.cleaned_utilities = self.am.cleaned_output_utilities

        self.inputs = dict(default=(self.input, input_vocab))
        self.outputs = dict(default=(self.output, output_vocab))
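
A minimal usage sketch for the module defined above, assuming nengo and nengo.spa are importable and that the class itself is importable from assoc_mem; the vocabulary keys, dimensionality, and probes are illustrative, not taken from the project.

import nengo
from nengo import spa

D = 64
vocab = spa.Vocabulary(D)
vocab.parse('A + B + C')  # auto-creates the semantic pointers A, B, C

with nengo.Network() as model:
    am = AssociativeMemory(input_vocab=vocab,
                           default_output_key='C',
                           wta_output=True,
                           cleanup_output=True,
                           label='cleanup')
    stim = nengo.Node(output=vocab.parse('A').v)  # present a noiseless 'A'
    nengo.Connection(stim, am.input)
    out_p = nengo.Probe(am.output, synapse=0.03)
    utils_p = nengo.Probe(am.utilities, synapse=0.03)
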
Code example #20
def test_am_complex(Simulator, plt, seed, rng):
    """Complex auto-associative memory test.

    Has a default output vector, outputs utilities, and becomes inhibited.
    """
    D = 64
    vocab = make_vocab(6, D, rng)
    vocab2 = vocab[:4]

    def input_func(t):
        if t < 0.25:
            return 0.6 * vocab[0] + 0.4 * vocab[1]
        elif t < 0.5:
            return 0.4 * vocab[0] + 0.6 * vocab[1]
        else:
            return vocab[4]

    def inhib_func(t):
        return int(t > 0.75)

    with nengo.Network("model", seed=seed) as m:
        am = AssociativeMemory(vocab2, inhibitable=True)
        am.add_default_output_vector(vocab[5])
        am.add_threshold_to_outputs()

        in_node = nengo.Node(output=input_func, label="input")
        inhib_node = nengo.Node(output=inhib_func, label="inhib")
        nengo.Connection(in_node, am.input)
        nengo.Connection(inhib_node, am.inhibit)

        in_p = nengo.Probe(in_node)
        out_p = nengo.Probe(am.output, synapse=0.03)
        utils_p = nengo.Probe(am.utilities, synapse=0.05)
        utils_th_p = nengo.Probe(am.thresholded_utilities, synapse=0.05)

    with Simulator(m) as sim:
        sim.run(1.0)
    t = sim.trange()
    # Input: 0.6A + 0.4B
    more_a = (t >= 0.2) & (t < 0.25)
    # Input: 0.4A + 0.6B
    more_b = (t >= 0.45) & (t < 0.5)
    # Input: vocab[4] (not in the memory vocabulary, so the default vocab[5] is output)
    all_e = (t >= 0.7) & (t < 0.75)
    # Input: vocab[4], but the memory is inhibited, so nothing should be output
    inhib = t >= 0.95

    def plot(i, y, ylabel):
        plt.subplot(4, 1, i)
        plt.plot(t, y)
        plt.axvline(0.25, c="k")
        plt.axvline(0.5, c="k")
        plt.axvline(0.75, c="k")
        plt.ylabel(ylabel)

    plot(1, np.dot(sim.data[in_p], vocab.T), "Input")
    plot(2, sim.data[utils_p], "Utilities")
    plot(3, sim.data[utils_th_p], "Thresholded utilities")
    plot(4, np.dot(sim.data[out_p], vocab.T), "Output")

    # Check that the output utilities (non-thresholded) are to be expected
    assert all(np.mean(sim.data[utils_p][more_a], axis=0)[:2] > [0.9, 0.35])
    assert all(np.mean(sim.data[utils_p][more_a], axis=0)[2:] < [0.01, 0.01])
    assert all(np.mean(sim.data[utils_p][more_b], axis=0)[:2] > [0.35, 0.9])
    assert all(np.mean(sim.data[utils_p][more_b], axis=0)[2:] < [0.01, 0.01])
    assert similarity(sim.data[utils_p][all_e], np.ones((1, 4))) < 0.05
    assert similarity(sim.data[utils_p][inhib], np.ones((1, 4))) < 0.05

    # Check that the thresholded output utilities are to be expected
    assert all(np.mean(sim.data[utils_th_p][more_a], axis=0)[:2] > [0.9, 0.9])
    assert all(np.mean(sim.data[utils_th_p][more_a], axis=0)[2:] < [0.01, 0.01])
    assert all(np.mean(sim.data[utils_th_p][more_b], axis=0)[:2] > [0.9, 0.9])
    assert all(np.mean(sim.data[utils_th_p][more_b], axis=0)[2:] < [0.01, 0.01])
    assert similarity(sim.data[utils_th_p][all_e], np.ones((1, 4))) < 0.05
    assert similarity(sim.data[utils_th_p][inhib], np.ones((1, 4))) < 0.05

    # Check that the output values are to be expected
    assert similarity(sim.data[out_p][more_a], vocab[0]) > 0.7
    assert similarity(sim.data[out_p][more_a], vocab[1]) > 0.7
    assert similarity(sim.data[out_p][more_b], vocab[0]) > 0.7
    assert similarity(sim.data[out_p][more_b], vocab[1]) > 0.7
    assert similarity(sim.data[out_p][all_e], vocab[5]) > 0.7
    assert similarity(sim.data[out_p][inhib], np.ones((1, D))) < 0.05
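
The assertions above lean on two helpers defined elsewhere in the test module, make_vocab and similarity. A plausible minimal implementation consistent with how they are used here (unit-length random vectors; mean projection of probed data onto a target) is sketched below — this is an assumption, not the project's exact code.

import numpy as np

def make_vocab(n, D, rng):
    # n random unit-length D-dimensional vectors
    vocab = rng.normal(size=(n, D))
    vocab /= np.linalg.norm(vocab, axis=1, keepdims=True)
    return vocab

def similarity(data, target):
    # mean dot product between each probed sample and the target vector(s)
    return np.mean(np.dot(data, np.asarray(target).T))
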
Code example #21
File: assoc_mem.py  Project: shaunren/nengo
class AssociativeMemory(Module):
    """Associative memory module.

    See :doc:`examples/associative_memory` for an introduction and examples.

    Parameters
    ----------
    input_vocab: list or Vocabulary
        The vocabulary (or list of vectors) to match.
    output_vocab: list or Vocabulary, optional (Default: None)
        The vocabulary (or list of vectors) to be produced for each match. If
        None, the associative memory will act like an autoassociative memory
        (cleanup memory).
    input_keys : list, optional (Default: None)
        A list of strings that correspond to the input vectors.
    output_keys : list, optional (Default: None)
        A list of strings that correspond to the output vectors.
    default_output_key: str, optional (Default: None)
        The semantic pointer string to be produced if the input value matches
        none of vectors in the input vector list.
    threshold: float, optional (Default: 0.3)
        The association activation threshold.
    inhibitable: bool, optional (Default: False)
        Flag to indicate if the entire associative memory module is
        inhibitable (i.e., the entire module can be inhibited).
    wta_output: bool, optional (Default: False)
        Flag to indicate whether the output of the associative memory may
        contain more than one vector. If True, only one vector's output will
        be produced; i.e. produce a winner-take-all (WTA) output.
        If False, combinations of vectors will be produced.
    wta_inhibit_scale: float, optional (Default: 3.0)
        Scaling factor on the winner-take-all (WTA) inhibitory connections.
    wta_synapse: float, optional (Default: 0.005)
        Synapse to use for the winner-take-all (WTA) inhibitory connections.
    threshold_output: bool, optional (Default: False)
        Adds a thresholded output if True.
    label : str, optional (Default: None)
        A name for the ensemble. Used for debugging and visualization.
    seed : int, optional (Default: None)
        The seed used for random number generation.
    add_to_container : bool, optional (Default: None)
        Determines if this Network will be added to the current container.
        If None, will be true if currently within a Network.
    """
    def __init__(  # noqa: C901
            self,
            input_vocab,
            output_vocab=None,
            input_keys=None,
            output_keys=None,
            default_output_key=None,
            threshold=0.3,
            inhibitable=False,
            wta_output=False,
            wta_inhibit_scale=3.0,
            wta_synapse=0.005,
            threshold_output=False,
            label=None,
            seed=None,
            add_to_container=None):
        super(AssociativeMemory, self).__init__(label, seed, add_to_container)

        if input_keys is None:
            input_keys = input_vocab.keys
            input_vectors = input_vocab.vectors
        else:
            input_vectors = input_vocab.create_subset(input_keys).vectors

        # If output vocabulary is not specified, use input vocabulary
        # (i.e., an autoassociative memory)
        if output_vocab is None:
            output_vocab = input_vocab
            output_vectors = input_vectors
        else:
            if output_keys is None:
                output_keys = input_keys
            output_vectors = output_vocab.create_subset(output_keys).vectors

        if default_output_key is None:
            default_output_vector = None
        else:
            default_output_vector = output_vocab.parse(default_output_key).v

        # Create nengo network
        with self:
            self.am = AssocMem(input_vectors=input_vectors,
                               output_vectors=output_vectors,
                               threshold=threshold,
                               inhibitable=inhibitable,
                               label=label,
                               seed=seed,
                               add_to_container=add_to_container)

            if default_output_vector is not None:
                self.am.add_default_output_vector(default_output_vector)

            if wta_output:
                self.am.add_wta_network(wta_inhibit_scale, wta_synapse)

            if threshold_output:
                self.am.add_threshold_to_outputs()

            self.input = self.am.input
            self.output = self.am.output

            if inhibitable:
                self.inhibit = self.am.inhibit

            self.utilities = self.am.utilities
            if threshold_output:
                self.thresholded_utilities = self.am.thresholded_utilities

        self.inputs = dict(default=(self.input, input_vocab))
        self.outputs = dict(default=(self.output, output_vocab))
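
A short usage sketch for this variant, assuming nengo and nengo.spa are importable; the keys and timing below are illustrative assumptions. threshold_output=True exposes am.thresholded_utilities, and inhibitable=True exposes am.inhibit, matching the attributes assigned in __init__ above.

import nengo
from nengo import spa

D = 64
vocab = spa.Vocabulary(D)
vocab.parse('CAT + DOG + UNKNOWN')

with nengo.Network() as model:
    am = AssociativeMemory(input_vocab=vocab,
                           input_keys=['CAT', 'DOG'],
                           default_output_key='UNKNOWN',
                           threshold_output=True,
                           inhibitable=True)
    stim = nengo.Node(output=vocab.parse('CAT').v)
    nengo.Connection(stim, am.input)
    # shut the memory off after 0.5 s
    gate = nengo.Node(output=lambda t: 1.0 if t > 0.5 else 0.0)
    nengo.Connection(gate, am.inhibit)
    utils_p = nengo.Probe(am.thresholded_utilities, synapse=0.05)
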
Code example #22
class AssociativeMemory(Module):
    """Associative memory module.

    Parameters
    ----------
    input_vocab: list of numpy.array, spa.Vocabulary
        The vocabulary (or list of vectors) to match.
    output_vocab: list of numpy.array, spa.Vocabulary, optional
        The vocabulary (or list of vectors) to be produced for each match. If
        not given, the associative memory will act like an auto-associative
        memory (cleanup memory).

    input_keys: list of strings, optional
        List of keys (ordered) from the input vocabulary to use as the input
        semantic pointers for the associative memory.
    output_keys: list of strings, optional
        List of keys (ordered) from the output vocabulary to use as the output
        semantic pointers for the associative memory.

    default_output_key: str, optional
        The key of the semantic pointer to be produced if the input value
        matches none of the vectors in the input vector list.
    threshold: float, optional
        The association activation threshold.

    inhibitable: boolean, optional
        Flag to indicate if the entire associative memory module is
        inhibitable (entire thing can be shut off).

    wta_output: boolean, optional
        Flag to indicate whether the output of the associative memory may
        contain more than one vector. Set to True if only a single vector's
        output is desired -- i.e. a winner-take-all (WTA) output. Leave as the
        default (False) if combinations of vectors are acceptable.
    wta_inhibit_scale: float, optional
        Scaling factor on the winner-take-all (wta) inhibitory connections.
    wta_synapse: float, optional
        Synapse to use for the winner-take-all (wta) inhibitory connections.

    cleanup_output: boolean, optional
        Create the associative memory with cleaned outputs as well as the
        standard outputs.
    replace_output_with_cleaned_output: boolean, optional
        Set to true to use the cleaned outputs as the default output of the
        associative memory module.

    label : str, optional
        A name to assign this AssociativeMemory. Used for visualization and
        debugging. Also used as a label prefix for each of the internal
        ensembles in the AssociativeMemory network.

    Additional network parameters are passed to the Network constructor through
    **module_kwargs
    """
    def __init__(  # noqa: C901
            self,
            input_vocab,
            output_vocab=None,
            input_keys=None,
            output_keys=None,
            default_output_key=None,
            threshold=0.3,
            inhibitable=False,
            wta_output=False,
            wta_inhibit_scale=3.0,
            wta_synapse=0.005,
            cleanup_output=False,
            replace_output_with_cleaned_output=True,
            label=None,
            **module_kwargs):
        super(AssociativeMemory, self).__init__(label=label, **module_kwargs)

        if input_keys is None:
            input_keys = input_vocab.keys
            input_vectors = input_vocab.vectors
        else:
            input_vectors = input_vocab.create_subset(input_keys).vectors

        # If output vocabulary is not specified, use input vocabulary
        # (i.e., an autoassociative memory)
        if output_vocab is None:
            output_vocab = input_vocab
            output_vectors = input_vectors
        else:
            if output_keys is None:
                output_keys = input_keys
            output_vectors = output_vocab.create_subset(output_keys).vectors

        if default_output_key is None:
            default_output_vector = None
        else:
            default_output_vector = output_vocab.parse(default_output_key).v

        # Create nengo network
        with self:
            self.am = AssocMem(input_vectors=input_vectors,
                               output_vectors=output_vectors,
                               threshold=threshold,
                               inhibitable=inhibitable,
                               label=label,
                               **module_kwargs)

            if default_output_vector is not None:
                self.am.add_default_output_vector(default_output_vector)

            if wta_output:
                self.am.add_wta_network(wta_inhibit_scale, wta_synapse)

            if cleanup_output:
                self.am.add_cleanup_output(
                    replace_output=replace_output_with_cleaned_output)

            self.input = self.am.input
            self.output = self.am.output

            if cleanup_output and not replace_output_with_cleaned_output:
                self.cleaned_output = self.am.cleaned_output

            if inhibitable:
                self.inhibit = self.am.inhibit

            self.utilities = self.am.output_utilities
            if cleanup_output:
                self.cleaned_utilities = self.am.cleaned_output_utilities

        self.inputs = dict(default=(self.input, input_vocab))
        self.outputs = dict(default=(self.output, output_vocab))
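
For completeness, a sketch of the cleanup options documented above: with cleanup_output=True and replace_output_with_cleaned_output=False the module keeps its graded output and additionally exposes cleaned_output (and cleaned_utilities). As before, nengo/nengo.spa availability and the keys below are assumptions, not project code.

import nengo
from nengo import spa

D = 64
vocab = spa.Vocabulary(D)
vocab.parse('RED + GREEN')

with nengo.Network() as model:
    am = AssociativeMemory(input_vocab=vocab,
                           cleanup_output=True,
                           replace_output_with_cleaned_output=False)
    stim = nengo.Node(output=0.7 * vocab.parse('RED').v)  # a weak 'RED'
    nengo.Connection(stim, am.input)
    raw_p = nengo.Probe(am.output, synapse=0.03)             # graded output
    clean_p = nengo.Probe(am.cleaned_output, synapse=0.03)   # cleaned output
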