Example #1
def test_am_spa_keys_as_expressions(Simulator, plt, seed, rng):
    """Provide semantic pointer expressions as input and output keys."""
    D = 64

    vocab_in = Vocabulary(D, rng=rng)
    vocab_out = Vocabulary(D, rng=rng)

    vocab_in.parse("A+B")
    vocab_out.parse("C+D")

    in_keys = ["A", "A*B"]
    out_keys = ["C*D", "C+D"]

    with nengo.spa.SPA(seed=seed) as model:
        model.am = AssociativeMemory(
            input_vocab=vocab_in,
            output_vocab=vocab_out,
            input_keys=in_keys,
            output_keys=out_keys,
        )

        model.inp = Input(am=lambda t: "A" if t < 0.1 else "A*B")

        in_p = nengo.Probe(model.am.input)
        out_p = nengo.Probe(model.am.output, synapse=0.03)

    with Simulator(model) as sim:
        sim.run(0.2)

    # Specify t ranges
    t = sim.trange()
    t_item1 = (t > 0.075) & (t < 0.1)
    t_item2 = (t > 0.175) & (t < 0.2)

    # Modify vocabularies (for plotting purposes)
    vocab_in.add(in_keys[1], vocab_in.parse(in_keys[1]).v)
    vocab_out.add(out_keys[0], vocab_out.parse(out_keys[0]).v)

    plt.subplot(2, 1, 1)
    plt.plot(t, similarity(sim.data[in_p], vocab_in))
    plt.ylabel("Input: " + ", ".join(in_keys))
    plt.legend(vocab_in.keys, loc="best")
    plt.ylim(top=1.1)
    plt.subplot(2, 1, 2)
    plt.plot(t, similarity(sim.data[out_p], vocab_out))
    plt.plot(t[t_item1], np.ones(t.shape)[t_item1] * 0.9, c="r", lw=2)
    plt.plot(t[t_item2], np.ones(t.shape)[t_item2] * 0.91, c="g", lw=2)
    plt.plot(t[t_item2], np.ones(t.shape)[t_item2] * 0.89, c="b", lw=2)
    plt.ylabel("Output: " + ", ".join(out_keys))
    plt.legend(vocab_out.keys, loc="best")

    assert (np.mean(
        similarity(sim.data[out_p][t_item1],
                   vocab_out.parse(out_keys[0]).v,
                   normalize=True)) > 0.9)
    assert (np.mean(
        similarity(sim.data[out_p][t_item2],
                   vocab_out.parse(out_keys[1]).v,
                   normalize=True)) > 0.9)
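A minimal standalone sketch of why expression keys work, assuming the same legacy nengo.spa Vocabulary API used above: parse() evaluates the expression, so "A*B" yields the circular convolution of the base pointers and can serve as a key like any plain pointer.

import numpy as np
from nengo.spa import Vocabulary

rng = np.random.RandomState(0)
vocab = Vocabulary(16, rng=rng)
vocab.parse("A+B")  # create the base pointers A and B

# parse() evaluates expressions, so "A*B" is the circular convolution
# (binding) of A and B
bound = vocab.parse("A*B")
assert np.allclose(bound.v, (vocab["A"] * vocab["B"]).v)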
Example #3
def generate(input_signal, alpha=1000.0):
    beta = alpha / 4.0

    # Read in the class mean for numbers from vision network
    weights_data = np.load('models/mnist_vision_data/params.npz')
    weights = weights_data['Wc']
    means_data = np.load('models/mnist_vision_data/class_means.npz')
    means = np.matrix(1.0 / means_data['means'])
    sps = np.multiply(weights.T, means.T)[:10]
    sps_labels = [
        'ZERO', 'ONE', 'TWO', 'THREE', 'FOUR', 'FIVE', 'SIX', 'SEVEN', 'EIGHT',
        'NINE'
    ]
    dimensions = weights.shape[0]

    # generate the Function Space
    forces, _, goals = forcing_functions.load_folder(
        'models/handwriting_trajectories',
        rhythmic=False,
        alpha=alpha,
        beta=beta)
    # make an array out of all the possible functions we want to represent
    force_space = np.vstack(forces)
    # use this array as our space to perform svd over
    fs = nengo.FunctionSpace(space=force_space, n_basis=10)

    # store the weights for each number
    weights_x = []
    weights_y = []
    for ii in range(len(goals)):
        forces = force_space[ii * 2:ii * 2 + 2]
        # load up the forces to be output by the forcing function
        # calculate the corresponding weights over the basis functions
        weights_x.append(np.dot(fs.basis.T, forces[0]))
        weights_y.append(np.dot(fs.basis.T, forces[1]))

    # Create our vocabularies
    rng = np.random.RandomState(0)
    vocab_vision = Vocabulary(dimensions=dimensions, rng=rng)
    vocab_dmp_weights_x = Vocabulary(dimensions=fs.n_basis + 2, rng=rng)
    vocab_dmp_weights_y = Vocabulary(dimensions=fs.n_basis + 2, rng=rng)
    for label, sp, wx, wy, goal in zip(sps_labels, sps, weights_x, weights_y,
                                       goals):
        vocab_vision.add(label,
                         np.array(sp)[0] / np.linalg.norm(np.array(sp)[0]))
        vocab_dmp_weights_x.add(label, np.hstack([wx, goal[0][0], goal[1][0]]))
        vocab_dmp_weights_y.add(label, np.hstack([wy, goal[0][1], goal[1][1]]))

    net = spa.SPA()
    # net.config[nengo.Ensemble].neuron_type = nengo.Direct()
    with net:

        # def input_func(t):
        #     return vocab_vision.parse(input_signal).v
        # net.input = nengo.Node(input_func, label='input')
        net.input = spa.State(dimensions, subdimensions=10, vocab=vocab_vision)

        time_func = lambda t: min(max((t * 2) % 4 - 2.5, -1), 1)
        timer_node = nengo.Node(output=time_func, label='timer')

        # ------------------- Point Attractors --------------------

        def goals_func(t, x):
            if (x[0] + 1) < 1e-5:
                return x[1], x[2]
            return x[3], x[4]

        goal_node = nengo.Node(goals_func,
                               size_in=5,
                               size_out=2,
                               label='goals')
        nengo.Connection(timer_node, goal_node[0])

        net.x = point_attractor.generate(n_neurons=1000,
                                         alpha=alpha,
                                         beta=beta)
        nengo.Connection(goal_node[0], net.x.input[0], synapse=None)
        net.y = point_attractor.generate(n_neurons=1000,
                                         alpha=alpha,
                                         beta=beta)
        nengo.Connection(goal_node[1], net.y.input[0], synapse=None)

        # -------------------- Ramp ------------------------------
        ramp_node = nengo.Node(output=time_func, label='ramp node')
        ramp = nengo.Ensemble(n_neurons=1000, dimensions=1, label='ramp')
        nengo.Connection(ramp_node, ramp)

        # ------------------- Forcing Functions --------------------

        net.assoc_mem_x = spa.AssociativeMemory(
            input_vocab=vocab_vision,
            output_vocab=vocab_dmp_weights_x,
            wta_output=False)
        nengo.Connection(net.input.output, net.assoc_mem_x.input)
        nengo.Connection(net.assoc_mem_x.output[[-2, -1]], goal_node[[1, 3]])

        net.assoc_mem_y = spa.AssociativeMemory(
            input_vocab=vocab_vision,
            output_vocab=vocab_dmp_weights_y,
            wta_output=False)
        nengo.Connection(net.input.output, net.assoc_mem_y.input)
        nengo.Connection(net.assoc_mem_y.output[[-2, -1]], goal_node[[2, 4]])

        # -------------------- Product for decoding -----------------------

        net.product_x = nengo.Network('Product X')
        nengo.networks.Product(n_neurons=1000,
                               dimensions=fs.n_basis,
                               net=net.product_x,
                               input_magnitude=1.0)
        net.product_y = nengo.Network('Product Y')
        nengo.networks.Product(n_neurons=1000,
                               dimensions=fs.n_basis,
                               net=net.product_y,
                               input_magnitude=1.0)

        # get the largest basis function value for normalization
        max_basis = np.max(fs.basis * fs.scale)
        domain = np.linspace(-1, 1, fs.basis.shape[0])

        for ff, product in zip(
            [net.assoc_mem_x.output, net.assoc_mem_y.output],
            [net.product_x, net.product_y]):
            for ii in range(fs.n_basis):
                # find the value of a basis function at a value of x
                def basis_fn(x, jj=ii):
                    index = int(x[0] * len(domain) / 2.0 + len(domain) / 2.0)
                    index = max(min(index, len(domain) - 1), 0)
                    return fs.basis[index][jj] * fs.scale / max_basis

                # multiply the value of each basis function at x by its weight
                nengo.Connection(ramp, product.B[ii], function=basis_fn)
                nengo.Connection(ff[ii], product.A[ii])

        def relay_func(t, x):
            t = time_func(t)
            if t <= -1:
                return [0, 0]
            return x

        relay = nengo.Node(output=relay_func,
                           size_in=2,
                           size_out=2,
                           label='relay')

        nengo.Connection(net.product_x.output,
                         relay[0],
                         transform=np.ones((1, fs.n_basis)) * max_basis,
                         synapse=None)
        nengo.Connection(net.product_y.output,
                         relay[1],
                         transform=np.ones((1, fs.n_basis)) * max_basis,
                         synapse=None)

        nengo.Connection(relay[0], net.x.input[1], synapse=None)
        nengo.Connection(relay[1], net.y.input[1], synapse=None)

        # -------------------- Output ------------------------------

        net.output = nengo.Node(size_in=2)
        nengo.Connection(net.x.output, net.output[0], synapse=0.01)
        nengo.Connection(net.y.output, net.output[1], synapse=0.01)

        # create a node to give a plot of the represented function
        ff_plot = fs.make_plot_node(domain=domain, lines=2, ylim=[-75, 75])
        nengo.Connection(net.assoc_mem_x.output[:fs.n_basis],
                         ff_plot[:fs.n_basis],
                         synapse=0.1)
        nengo.Connection(net.assoc_mem_y.output[:fs.n_basis],
                         ff_plot[fs.n_basis:],
                         synapse=0.1)

    return net
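A hedged usage sketch for the generator above; the data files and the point_attractor / forcing_functions helpers are external assumptions. Note that input_signal is only consumed by the commented-out input node, so the digit is actually supplied through net.input at runtime.

net = generate(input_signal='ONE')
with nengo.Simulator(net) as sim:
    sim.run(1.0)
# net.output carries the decoded (x, y) pen position of the drawn digit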
Example #4
class SpaunVocabulary(object):
    def __init__(self):
        self.main = None

        self.sp_dim = 512
        self.mtr_dim = 50
        self.vis_dim = 200

        # ############ Semantic pointer (strings) definitions #################
        # --- Numerical semantic pointers ---
        self.num_sp_strs = ['ZER', 'ONE', 'TWO', 'THR', 'FOR',
                            'FIV', 'SIX', 'SEV', 'EIG', 'NIN']
        self.n_num_sp = len(self.num_sp_strs)

        # --- Task semantic pointer list ---
        # W - Drawing (Copying visual input)
        # R - Recognition
        # L - Learning (Bandit Task)
        # M - Memory (forward serial recall)
        # C - Counting
        # A - Answering
        # V - Rapid Variable Creation
        # F - Fluid Induction (Ravens)
        # X - Task precursor
        # DEC - Decoding task (output to motor system)
        self.ps_task_sp_strs = ['W', 'R', 'L', 'M', 'C', 'A', 'V', 'F', 'X',
                                'DEC', 'REACT', 'INSTR', 'CMP']
        self.ps_task_vis_sp_strs = ['A', 'C', 'F', 'K', 'L', 'M', 'P', 'R',
                                    'V', 'W']
        # --- Task visual semantic pointer usage ---
        # A - Task initialization
        # F - Forward recall
        # R - Reverse recall
        # K - Q&A 'kind' probe
        # P - Q&A 'position' probe

        # --- Production system semantic pointers ---
        # DECW - Decoding state (output to motor system, but for drawing task)
        # DECI - Decoding state (output to motor system, but for inductn tasks)
        self.ps_state_sp_strs = ['QAP', 'QAK', 'TRANS0', 'TRANS1', 'TRANS2',
                                 'CNT0', 'CNT1', 'LEARN', 'DIRECT', 'INSTRP',
                                 'INSTRV', 'TRANSC']
        self.ps_dec_sp_strs = ['FWD', 'REV', 'CNT', 'DECW', 'DECI', 'NONE']

        # --- Misc actions semantic pointers
        self.ps_action_sp_strs = None
        self.min_num_ps_actions = 3

        # --- Misc visual semantic pointers ---
        self.misc_vis_sp_strs = ['OPEN', 'CLOSE', 'SPACE', 'QM']

        # --- Misc state semantic pointers ---
        self.misc_ps_sp_strs = ['NO_MATCH', 'MATCH']

        # --- 'I don't know' motor response vector
        self.mtr_sp_strs = ['UNK']

        # --- List of all visual semantic pointers ---
        # self.vis_sp_strs = list(self.num_sp_strs)
        # self.vis_sp_strs.extend(self.misc_vis_sp_strs)
        # self.vis_sp_strs.extend(self.ps_task_vis_sp_strs)

        # --- Position (enumerated) semantic pointers ---
        self.pos_sp_strs = None
        self.max_enum_list_pos = 8

        # --- Operations semantic pointers
        self.ops_sp_strs = ['ADD', 'INC']

        # --- Reward semantic pointers
        self.reward_n_sp_str = self.num_sp_strs[0]
        self.reward_y_sp_str = self.num_sp_strs[1]
        self.reward_sp_strs = [self.reward_n_sp_str, self.reward_y_sp_str]

        # --- Instruction processing input and output tags
        self.instr_tag_strs = ['VIS', 'TASK', 'STATE', 'DEC', 'DATA', 'ENABLE']

    def write_header(self):
        logger.write('# Spaun Vocabulary Options:\n')
        logger.write('# -------------------------\n')
        for param_name in sorted(self.__dict__.keys()):
            param_value = getattr(self, param_name)
            if not callable(param_value) and not isinstance(param_value, list)\
               and not isinstance(param_value, Vocabulary) \
               and not isinstance(param_value, SemanticPointer) \
               and not isinstance(param_value, np.ndarray):
                logger.write('# - %s = %s\n' % (param_name, param_value))
        logger.write('\n')

    def initialize(self, stim_SP_labels, num_learn_actions=3, rng=0):
        if rng == 0:
            rng = np.random.RandomState(int(time.time()))

        # ############### Semantic pointer list definitions ###################
        # --- Position (enumerated) semantic pointers ---
        self.pos_sp_strs = ['POS%i' % (i + 1)
                            for i in range(self.max_enum_list_pos)]

        # --- Unitary semantic pointers
        self.unitary_sp_strs = [self.num_sp_strs[0], self.pos_sp_strs[0]]
        self.unitary_sp_strs.extend(self.ops_sp_strs)

        # --- Production system (action) semantic pointers ---
        self.ps_action_learn_sp_strs = ['A%d' % (i + 1) for i in
                                        range(num_learn_actions)]
        self.ps_action_misc_sp_strs = []
        self.ps_action_sp_strs = (self.ps_action_learn_sp_strs +
                                  self.ps_action_misc_sp_strs)

        # #################### Vocabulary definitions #########################
        # --- Primary vocabulary ---
        self.main = Vocabulary(self.sp_dim, unitary=self.unitary_sp_strs,
                               max_similarity=0.2, rng=rng)

        # --- Add in visual sp's ---
        self.main.parse('+'.join(self.misc_vis_sp_strs))
        self.main.parse('+'.join(self.ps_task_vis_sp_strs))
        for sp_str in list(stim_SP_labels):
            if sp_str not in self.num_sp_strs and \
               sp_str not in self.pos_sp_strs:
                self.main.parse(sp_str)

        # --- Add numerical sp's ---
        self.main.parse('%s+%s' % (self.ops_sp_strs[0], self.num_sp_strs[0]))
        add_sp = self.main[self.ops_sp_strs[0]]
        num_sp = self.main[self.num_sp_strs[0]].copy()
        for i in range(len(self.num_sp_strs) - 1):
            num_sp = num_sp.copy() * add_sp
            self.main.add(self.num_sp_strs[i + 1], num_sp)

        self.add_sp = add_sp

        # --- Add positional sp's ---
        self.main.parse('%s+%s' % (self.ops_sp_strs[1], self.pos_sp_strs[0]))
        inc_sp = self.main[self.ops_sp_strs[1]]
        pos_sp = self.main[self.pos_sp_strs[0]].copy()
        for i in range(len(self.pos_sp_strs) - 1):
            pos_sp = pos_sp.copy() * inc_sp
            self.main.add(self.pos_sp_strs[i + 1], pos_sp)

        self.inc_sp = inc_sp

        # --- Add production system sp's ---
        self.main.parse('+'.join(self.ps_task_sp_strs))
        self.main.parse('+'.join(self.ps_state_sp_strs))
        self.main.parse('+'.join(self.ps_dec_sp_strs))
        if len(self.ps_action_sp_strs) > 0:
            self.main.parse('+'.join(self.ps_action_sp_strs))
        self.main.parse('+'.join(self.misc_ps_sp_strs))

        # --- Add instruction processing system sp's ---
        self.main.parse('+'.join(self.instr_tag_strs))

        # ################### Visual Vocabulary definitions ###################
        self.vis_sp_strs = list(stim_SP_labels)

        # Visual sp str vocab check
        if (not all(x in self.vis_sp_strs for x in self.num_sp_strs)):
            raise RuntimeError("Vocabulator - Stimulus vocabulary does not " +
                               "contain necessary Spaun NUM semantic pointer" +
                               " definitions.")
        if (not all(x in self.vis_sp_strs for x in self.misc_vis_sp_strs)):
            raise RuntimeError("Vocabulator - Stimulus vocabulary does not " +
                               "contain necessary Spaun MISC semantic " +
                               "pointer definitions.")
        if (not all(x in self.vis_sp_strs for x in self.ps_task_vis_sp_strs)):
            raise RuntimeError("Vocabulator - Stimulus vocabulary does not " +
                               "contain necessary Spaun PS semantic " +
                               "pointer definitions.")

        # ################# Sub-vocabulary definitions ########################
        self.vis_main = self.main.create_subset(self.vis_sp_strs)

        self.pos = self.main.create_subset(self.pos_sp_strs)

        self.item = self.main.create_subset(self.num_sp_strs)
        self.item_1_index = self.main.create_subset(self.num_sp_strs[1:])

        self.ps_task = self.main.create_subset(self.ps_task_sp_strs)
        self.ps_state = self.main.create_subset(self.ps_state_sp_strs)
        self.ps_dec = self.main.create_subset(self.ps_dec_sp_strs)
        self.ps_cmp = self.main.create_subset(self.misc_ps_sp_strs)
        self.ps_action = self.main.create_subset(self.ps_action_sp_strs)
        self.ps_action_learn = \
            self.main.create_subset(self.ps_action_learn_sp_strs)

        self.reward = self.main.create_subset(self.reward_sp_strs)

        self.instr = self.main.create_subset(self.instr_tag_strs)

        # ############ Enumerated vocabulary definitions ######################
        # --- Enumerated vocabulary, enumerates all possible combinations of
        #     position and item vectors (for debug purposes)
        self.enum = Vocabulary(self.sp_dim, rng=rng)
        for pos in self.pos_sp_strs:
            for num in self.num_sp_strs:
                sp_str = '%s*%s' % (pos, num)
                self.enum.add(sp_str, self.main.parse(sp_str))

        self.pos1 = Vocabulary(self.sp_dim, rng=rng)
        for num in self.num_sp_strs:
            sp_str = '%s*%s' % (self.pos_sp_strs[0], num)
            self.pos1.add(sp_str, self.main.parse(sp_str))

        # ############ Instruction vocabulary definitions #####################
        # --- ANTECEDENT and CONSEQUENCE permutation transforms
        self.perm_ant = np.arange(self.sp_dim)
        self.perm_con = np.arange(self.sp_dim)
        np.random.shuffle(self.perm_ant)
        np.random.shuffle(self.perm_con)

        self.perm_ant_inv = np.argsort(self.perm_ant)
        self.perm_con_inv = np.argsort(self.perm_con)

    def initialize_mtr_vocab(self, mtr_dim, mtr_sps):
        self.mtr_dim = mtr_dim

        self.mtr = Vocabulary(self.mtr_dim)
        for i, sp_str in enumerate(self.num_sp_strs):
            self.mtr.add(sp_str, mtr_sps[i, :])

        self.mtr_unk = Vocabulary(self.mtr_dim)
        self.mtr_unk.add(self.mtr_sp_strs[0], mtr_sps[-1, :])

        self.mtr_disp = self.mtr.create_subset(self.num_sp_strs)
        self.mtr_disp.readonly = False
        # Disable read-only flag for display vocab so that things can be added
        self.mtr_disp.add(self.mtr_sp_strs[0],
                          self.mtr_unk[self.mtr_sp_strs[0]].v)

    def initialize_vis_vocab(self, vis_dim, vis_sps):
        if vis_sps.shape[0] != len(self.vis_sp_strs):
            raise RuntimeError('Vocabulator.initialize_vis_vocab: '
                               'Mismatch in shape of raw vision SPs and '
                               'number of vision SP labels.')

        self.vis_dim = vis_dim

        self.vis = Vocabulary(self.vis_dim)
        for i, sp_str in enumerate(self.vis_sp_strs):
            self.vis.add(sp_str, vis_sps[i, :])

    def parse_instr_sps_list(self, instr_sps_list):
        instr_sps_list_sp = self.main.parse('0')

        if len(instr_sps_list) > self.max_enum_list_pos:
            raise ValueError('Vocabulator: Too many sequential instructions. '
                             'Max: %d, Got: %d' %
                             (self.max_enum_list_pos, len(instr_sps_list)))

        for i, instr_sps in enumerate(instr_sps_list):
            instr_sps_list_sp += (self.main.parse('POS%i' % (i + 1)) *
                                  self.parse_instr_sps(*instr_sps))
        return instr_sps_list_sp

    def parse_instr_sps(self, ant_sp='0', cons_sp='0'):
        # Note: The ant and con permutations are used here to separate the
        #       possible POS tags in the ant/con from the instruction POS tag.
        #       This permutation is not necessary if the instruction POS tags
        #       differ from the number-representation POS tags.
        return SemanticPointer(self.main.parse(ant_sp).v[self.perm_ant] +
                               self.main.parse(cons_sp).v[self.perm_con])
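The permutation trick in parse_instr_sps works because index permutations are exactly invertible; a minimal standalone check, assuming only NumPy:

import numpy as np

rng = np.random.RandomState(0)
v = rng.randn(8)
perm = np.arange(8)
rng.shuffle(perm)
perm_inv = np.argsort(perm)  # perm[perm_inv] == arange(8)
# scrambling with perm and re-indexing with perm_inv recovers v exactly
assert np.allclose(v[perm][perm_inv], v)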
Example #5
import nengo
import nengo.spa as spa
from nengo.spa import Vocabulary
import numpy as np

D = 32  # the dimensionality of the vectors
rng = np.random.RandomState(7)
vocab = Vocabulary(dimensions=D, rng=rng, max_similarity=0.1)

#Adding semantic pointers to the vocabulary
CIRCLE = vocab.parse('CIRCLE')
BLUE = vocab.parse('BLUE')
RED = vocab.parse('RED')
SQUARE = vocab.parse('SQUARE')
ZERO = vocab.add('ZERO', [0] * D)

model = spa.SPA(label="Question Answering with Memory", vocabs=[vocab])
with model:

    model.A = spa.State(D, label="color")
    model.B = spa.State(D, label="shape")
    model.C = spa.State(D, label="cue")
    model.D = spa.State(D, label="bound")
    model.E = spa.State(D, label="output")
    model.memory = spa.State(D, feedback=1, label="memory")

    actions = spa.Actions('D = A * B', 'memory = D', 'E = memory * ~C')

    model.cortical = spa.Cortical(actions)
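A hedged way to exercise this question-answering model (the fixed inputs below are illustrative assumptions, not part of the original example): bind a color to a shape, store the pair in memory, then cue with the shape to recover the color on the output state.

with model:
    model.inp = spa.Input(A='RED', B='CIRCLE', C='CIRCLE')
    out_probe = nengo.Probe(model.E.output, synapse=0.03)

with nengo.Simulator(model) as sim:
    sim.run(0.5)
# spa.similarity(sim.data[out_probe], vocab) should now peak at RED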
Example #6
def main():

    print("Loading Word2Vec model...")
    word2vec_model = gensim.models.Word2Vec.load("word2vec_model_1_cleaned")
    word2vec_model.init_sims(replace=True)
    word2vec_vocab = word2vec_model.index2word
    
    import readline
    readline.parse_and_bind("tab: complete")

    def complete(text, state):
        results = [x for x in word2vec_vocab if x.startswith(text)] + [None]
        return results[state]

    readline.set_completer(complete)

    print "This program uses an SPA network in Nengo to perform vector operations on a semantically structured word-vector space *learned* from a sentence corpus."
    print "When trained on a large corpus of English sentences, for example, it should produce: Vector[king] - Vector[man] + Vector[woman] = Vector[king]"
    
    print "For now, it just does subtraction..."
    print "\nPress <tab> twice to see all your autocomplete options."
    print "_______________________________________________________"
    line1 = raw_input('\nFirst word:> ')
    line2 = raw_input('\nSecond word:> ')

    if line1 not in word2vec_vocab or line2 not in word2vec_vocab:
        print("Both words must be in the Word2Vec vocabulary.")
        return
    val1 = word2vec_model[line1]
    val2 = word2vec_model[line2]
    diff = val1 - val2
    dot_products = [np.dot(word2vec_model[word], diff)
                    for word in word2vec_vocab]
    closest_word = word2vec_vocab[dot_products.index(max(dot_products))]
    print("\nWhat the Nengo model SHOULD return is something like: %s"
          % closest_word)
    
    print "\nDefining SPA network..."
    model = spa.SPA(label = "Vector Storage")
    with model:
        
        # Dimensionality of each representation
        num_dimensions = 100
        sub_dimensions = 1
        
        # Create the vocabulary
        vocab = Vocabulary(num_dimensions, randomize=False)
                
        stored_value_1 = val1
        vocab.add("Stored_value_1", stored_value_1)
        
        stored_value_2 = val2
        vocab.add("Stored_value_2", stored_value_2)
        
        # Create a semantic pointer corresponding to the "correct" answer for the operation
        sum_vector = np.subtract(stored_value_1, stored_value_2)
        sum_vector = sum_vector/np.linalg.norm(sum_vector)
        vocab.add("Correct_target", sum_vector)

        # Define the control signal inputs as random vectors
        r1 = [1] * num_dimensions
        r1 = r1 / np.linalg.norm(r1)
        r2 = [(-1)**k for k in range(num_dimensions)]
        r2 = r2 / np.linalg.norm(r2)
        vocab.add("Hold_signal", r1)
        vocab.add("Start_signal", r2)

        # Control when the vector operation takes place
        def control_input(t):
            if t < 1:
                return "Hold_signal"
            else:
                return "Start_signal"
                
        # Control buffer
        model.control = spa.Buffer(dimensions=num_dimensions,
                                   subdimensions=sub_dimensions,
                                   neurons_per_dimension=200,
                                   direct=True,
                                   vocab=vocab)
        control_probe = nengo.Probe(model.control.state.output)
        
        # Inputs to the word input buffers
        def first_input(t):
            return "Stored_value_1"
        def second_input(t):
            return "Stored_value_2"
        
        # Buffers to store the inputs:
        model.word_buffer1 = spa.Buffer(dimensions=num_dimensions,
                                        subdimensions=sub_dimensions,
                                        neurons_per_dimension=200,
                                        direct=True,
                                        vocab=vocab)
        model.word_buffer2 = spa.Buffer(dimensions=num_dimensions,
                                        subdimensions=sub_dimensions,
                                        neurons_per_dimension=200,
                                        direct=True,
                                        vocab=vocab)
        
        # Probe to visualize the values stored in the buffers
        buffer_1_probe = nengo.Probe(model.word_buffer1.state.output)
        buffer_2_probe = nengo.Probe(model.word_buffer2.state.output)
        
        # Buffer to hold the result:
        model.result = spa.Buffer(dimensions=num_dimensions,
                                  subdimensions=sub_dimensions,
                                  neurons_per_dimension=200,
                                  direct=True,
                                  vocab=vocab)
        result_probe = nengo.Probe(model.result.state.output)
        
        # Control system
        actions = spa.Actions(
            'dot(control, Start_signal) --> result = word_buffer1 - word_buffer2',
            'dot(control, Hold_signal) --> result = Hold_signal')
        model.bg = spa.BasalGanglia(actions)
        model.thalamus = spa.Thalamus(model.bg, subdim_channel=sub_dimensions)

        # Connect up the inputs
        model.input = spa.Input(control=control_input,
                                word_buffer1=first_input,
                                word_buffer2=second_input)
        
    # Start the simulator
    sim = nengo.Simulator(model)
    
    # Dynamic plotting
    plt.ion()  # Dynamic updating of plots
    fig = plt.figure(figsize=(15, 8))
    plt.show()
    ax = fig.gca()
    ax.set_title("Vector Storage")

    while True:
        sim.run(0.5)  # Run for an additional 0.5 seconds
        plt.clf()  # Clear the figure
        plt.plot(sim.trange(),
                 similarity(sim.data, result_probe, vocab),
                 label="Result Value")
        plt.legend(vocab.keys, loc=2)
        plt.draw()  # Re-draw

        # Go back to our manually-stored vocabulary and see how well it did
        diff = sim.data[result_probe][-1]
        dot_products = [np.dot(word2vec_model[word], diff)
                        for word in word2vec_vocab]
        closest_word = word2vec_vocab[dot_products.index(max(dot_products))]
        print("Time: %f" % sim.trange()[-1])
        print("\nWhat the Nengo model DID return is something like: %s"
              % closest_word)
        print("\n")
        
    plt.show()
Example #8
class SpaunVocabulary(object):
    def __init__(self):
        self.main = None

        self.sp_dim = 512
        self.mtr_dim = 50
        self.vis_dim = 200

        # ############ Semantic pointer (strings) definitions #################
        # --- Numerical semantic pointers ---
        self.num_sp_strs = ['ZER', 'ONE', 'TWO', 'THR', 'FOR',
                            'FIV', 'SIX', 'SEV', 'EIG', 'NIN']
        self.n_num_sp = len(self.num_sp_strs)

        # --- Task semantic pointer list ---
        # W - Drawing (Copying visual input)
        # R - Recognition
        # L - Learning (Bandit Task)
        # M - Memory (forward serial recall)
        # C - Counting
        # A - Answering
        # V - Rapid Variable Creation
        # F - Fluid Induction (Ravens)
        # X - Task precursor
        # DEC - Decoding task (output to motor system)
        self.ps_task_sp_strs = ['W', 'R', 'L', 'M', 'C', 'A', 'V', 'F', 'X',
                                'DEC']
        self.ps_task_vis_sp_strs = ['A', 'C', 'F', 'K', 'L', 'M', 'P', 'R',
                                    'V', 'W']
        # --- Task visual semantic pointer usage ---
        # A - Task initialization
        # F - Forward recall
        # R - Reverse recall
        # K - Q&A 'kind' probe
        # P - Q&A 'position' probe

        # --- Production system semantic pointers ---
        # DECW - Decoding state (output to motor system, but for drawing task)
        # DECI - Decoding state (output to motor system, but for inductn tasks)
        self.ps_state_sp_strs = ['QAP', 'QAK', 'TRANS0', 'TRANS1', 'TRANS2',
                                 'CNT0', 'CNT1', 'LEARN']
        self.ps_dec_sp_strs = ['FWD', 'REV', 'CNT', 'DECW', 'DECI', 'NONE']

        # --- Misc actions semantic pointers
        self.ps_action_sp_strs = None
        self.min_num_ps_actions = 3

        # --- Misc visual semantic pointers ---
        self.misc_vis_sp_strs = ['OPEN', 'CLOSE', 'SPACE', 'QM']

        # --- Misc state semantic pointers ---
        self.misc_ps_sp_strs = ['MATCH', 'NO_MATCH']

        # --- 'I don't know' motor response vector
        self.mtr_sp_strs = ['UNK']

        # --- List of all visual semantic pointers ---
        self.vis_sp_strs = list(self.num_sp_strs)
        self.vis_sp_strs.extend(self.misc_vis_sp_strs)
        self.vis_sp_strs.extend(self.ps_task_vis_sp_strs)

        # --- Position (enumerated) semantic pointers ---
        self.pos_sp_strs = None
        self.max_enum_list_pos = 8

        # --- Operations semantic pointers
        self.ops_sp_strs = ['ADD', 'INC']

        # --- Reward semantic pointers
        self.reward_n_sp_str = self.num_sp_strs[0]
        self.reward_y_sp_str = self.num_sp_strs[1]
        self.reward_sp_strs = [self.reward_n_sp_str, self.reward_y_sp_str]

    def write_header(self):
        logger.write('# Spaun Vocabulary Options:\n')
        logger.write('# -------------------------\n')
        for param_name in sorted(self.__dict__.keys()):
            param_value = getattr(self, param_name)
            if not callable(param_value) and not isinstance(param_value, list)\
               and not isinstance(param_value, Vocabulary) \
               and not isinstance(param_value, SemanticPointer) \
               and not isinstance(param_value, np.ndarray):
                logger.write('# - %s = %s\n' % (param_name, param_value))
        logger.write('\n')

    def initialize(self, num_learn_actions=3, rng=0):
        if rng == 0:
            rng = np.random.RandomState(int(time.time()))

        # ############### Semantic pointer list definitions ###################
        # --- Position (enumerated) semantic pointers ---
        self.pos_sp_strs = ['POS%i' % (i + 1)
                            for i in range(self.max_enum_list_pos)]

        # --- Unitary semantic pointers
        self.unitary_sp_strs = [self.num_sp_strs[0], self.pos_sp_strs[0]]
        self.unitary_sp_strs.extend(self.ops_sp_strs)

        # --- Production system (action) semantic pointers ---
        self.ps_action_learn_sp_strs = ['A%d' % (i + 1) for i in
                                        range(num_learn_actions)]
        self.ps_action_misc_sp_strs = []
        self.ps_action_sp_strs = (self.ps_action_learn_sp_strs +
                                  self.ps_action_misc_sp_strs)
        # #################### Vocabulary definitions #########################
        # --- Primary vocabulary ---
        self.main = Vocabulary(self.sp_dim, unitary=self.unitary_sp_strs,
                               rng=rng)

        # --- Add numerical sp's ---
        self.main.parse('%s+%s' % (self.ops_sp_strs[0], self.num_sp_strs[0]))
        add_sp = self.main[self.ops_sp_strs[0]]
        num_sp = self.main[self.num_sp_strs[0]].copy()
        for i in range(len(self.num_sp_strs) - 1):
            num_sp = num_sp.copy() * add_sp
            self.main.add(self.num_sp_strs[i + 1], num_sp)

        self.add_sp = add_sp

        # --- Add positional sp's ---
        self.main.parse('%s+%s' % (self.ops_sp_strs[1], self.pos_sp_strs[0]))
        inc_sp = self.main[self.ops_sp_strs[1]]
        pos_sp = self.main[self.pos_sp_strs[0]].copy()
        for i in range(len(self.pos_sp_strs) - 1):
            pos_sp = pos_sp.copy() * inc_sp
            self.main.add(self.pos_sp_strs[i + 1], pos_sp)

        self.inc_sp = inc_sp

        # --- Add other visual sp's ---
        self.main.parse('+'.join(self.misc_vis_sp_strs))
        self.main.parse('+'.join(self.ps_task_vis_sp_strs))

        # --- Add production system sp's ---
        self.main.parse('+'.join(self.ps_task_sp_strs))
        self.main.parse('+'.join(self.ps_state_sp_strs))
        self.main.parse('+'.join(self.ps_dec_sp_strs))
        if len(self.ps_action_sp_strs) > 0:
            self.main.parse('+'.join(self.ps_action_sp_strs))
        self.main.parse('+'.join(self.misc_ps_sp_strs))

        # ################# Sub-vocabulary definitions ########################
        self.vis_main = self.main.create_subset(self.vis_sp_strs)

        self.pos = self.main.create_subset(self.pos_sp_strs)

        self.item = self.main.create_subset(self.num_sp_strs)

        self.ps_task = self.main.create_subset(self.ps_task_sp_strs)
        self.ps_state = self.main.create_subset(self.ps_state_sp_strs)
        self.ps_dec = self.main.create_subset(self.ps_dec_sp_strs)
        self.ps_cmp = self.main.create_subset(self.misc_ps_sp_strs)
        self.ps_action = self.main.create_subset(self.ps_action_sp_strs)
        self.ps_action_learn = \
            self.main.create_subset(self.ps_action_learn_sp_strs)

        self.reward = self.main.create_subset(self.reward_sp_strs)

        # ############ Enumerated vocabulary definitions ######################
        # --- Enumerated vocabulary, enumerates all possible combinations of
        #     position and item vectors (for debug purposes)
        self.enum = Vocabulary(self.sp_dim, rng=rng)
        for pos in self.pos_sp_strs:
            for num in self.num_sp_strs:
                sp_str = '%s*%s' % (pos, num)
                self.enum.add(sp_str, self.main.parse(sp_str))

        self.pos1 = Vocabulary(self.sp_dim, rng=rng)
        for num in self.num_sp_strs:
            sp_str = '%s*%s' % (self.pos_sp_strs[0], num)
            self.pos1.add(sp_str, self.main.parse(sp_str))

    def initialize_mtr_vocab(self, mtr_dim, mtr_sps):
        self.mtr_dim = mtr_dim

        self.mtr = Vocabulary(self.mtr_dim)
        for i, sp_str in enumerate(self.num_sp_strs):
            self.mtr.add(sp_str, mtr_sps[i, :])

        self.mtr_unk = Vocabulary(self.mtr_dim)
        self.mtr_unk.add(self.mtr_sp_strs[0], mtr_sps[-1, :])

        self.mtr_disp = self.mtr.create_subset(self.num_sp_strs)
        self.mtr_disp.readonly = False
        # Disable read-only flag for display vocab so that things can be added
        self.mtr_disp.add(self.mtr_sp_strs[0],
                          self.mtr_unk[self.mtr_sp_strs[0]].v)

    def initialize_vis_vocab(self, vis_dim, vis_sps):
        self.vis_dim = vis_dim

        self.vis = Vocabulary(self.vis_dim)
        for i, sp_str in enumerate(self.vis_sp_strs):
            self.vis.add(sp_str, vis_sps[i, :])
Example #9
# --- Unitary semantic pointers
unitary_sp_strs = [num_sp_strs[0], pos_sp_strs[0]]
unitary_sp_strs.extend(ops_sp_strs)


# ####################### Vocabulary definitions ##############################
# --- Primary vocabulary ---
vocab = Vocabulary(cfg.sp_dim, unitary=unitary_sp_strs, rng=cfg.rng)

# --- Add numerical sp's ---
vocab.parse('%s+%s' % (ops_sp_strs[0], num_sp_strs[0]))
add_sp = vocab[ops_sp_strs[0]]
num_sp = vocab[num_sp_strs[0]].copy()
for i in range(len(num_sp_strs) - 1):
    num_sp = num_sp.copy() * add_sp
    vocab.add(num_sp_strs[i + 1], num_sp)

# --- Add positional sp's ---
vocab.parse('%s+%s' % (ops_sp_strs[1], pos_sp_strs[0]))
inc_sp = vocab[ops_sp_strs[1]]
pos_sp = vocab[pos_sp_strs[0]].copy()
for i in range(len(pos_sp_strs) - 1):
    pos_sp = pos_sp.copy() * inc_sp
    vocab.add(pos_sp_strs[i + 1], pos_sp)

# --- Add other visual sp's ---
vocab.parse('+'.join(misc_vis_sp_strs))
vocab.parse('+'.join(ps_task_vis_sp_strs))

# --- Add production system sp's ---
vocab.parse('+'.join(ps_task_sp_strs))
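A quick follow-up sketch (reusing vocab, num_sp_strs, and ops_sp_strs from the snippet above): because ADD is unitary, binding with it steps forward through the number chain built above, and binding with its inverse steps back exactly.

two = vocab.parse('%s*%s' % (num_sp_strs[1], ops_sp_strs[0]))  # ONE*ADD
sims = np.dot(vocab.vectors, two.v)
print(vocab.keys[int(np.argmax(sims))])  # expected: num_sp_strs[2]

# ~ADD is an exact inverse for a unitary pointer, so TWO*~ADD == ONE
one = vocab.parse('%s*~%s' % (num_sp_strs[2], ops_sp_strs[0]))
print(np.dot(one.v, vocab[num_sp_strs[1]].v))  # close to 1.0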
Example #10
# show the sum of A and B as represented by the object Sum (left - shows the
# semantic pointer representation in Sum, right - shows high similarity with
# vectors A and B).

# Setup the environment
import nengo
import nengo.spa as spa
from nengo.spa import Vocabulary
import numpy as np

D = 32  # the dimensionality of the vectors

#Creating a vocabulary
rng = np.random.RandomState(0)
vocab = Vocabulary(dimensions=D, rng=rng)
vocab.add('C', vocab.parse('A * B'))

model = spa.SPA(label="structure", vocabs=[vocab])
with model:
    model.A = spa.State(D)
    model.B = spa.State(D)
    model.C = spa.State(D, feedback=1)
    model.Sum = spa.State(D)

    actions = spa.Actions(
        'C = A * B',
        'Sum = A',
        'Sum = B'
        )

    model.cortical = spa.Cortical(actions)
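An illustrative way to drive this structure example (the fixed inputs are assumptions added for demonstration): present A and B, then watch C converge to the bound pair while Sum stays similar to both inputs.

with model:
    model.inp = spa.Input(A='A', B='B')
    c_probe = nengo.Probe(model.C.output, synapse=0.03)
    sum_probe = nengo.Probe(model.Sum.output, synapse=0.03)

with nengo.Simulator(model) as sim:
    sim.run(0.5)
# similarity(sim.data[c_probe], vocab) should rise for C (= A*B)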
Example #11
def generate(input_signal, alpha=1000.0):
    beta = alpha / 4.0

    # generate the Function Space
    forces, _, goals = forcing_functions.load_folder(
        'models/locomotion_trajectories',
        rhythmic=True,
        alpha=alpha,
        beta=beta)
    # make an array out of all the possible functions we want to represent
    force_space = np.vstack(forces)
    # use this array as our space to perform svd over
    fs = nengo.FunctionSpace(space=force_space, n_basis=10)

    # store the weights for each movement
    weights_a = []  # ankle
    weights_k = []  # knee
    weights_h = []  # hip
    # NOTE: things are added to weights based on the order files are read
    for ii in range(int(len(goals) / 6)):
        forces = force_space[ii * 6:ii * 6 + 6]
        # load up the forces to be output by the forcing function
        # calculate the corresponding weights over the basis functions
        weights_a.append(
            np.hstack([
                np.dot(fs.basis.T, forces[0]),  # ankle 1
                np.dot(fs.basis.T, forces[1])
            ]))  # ankle 2
        weights_h.append(
            np.hstack([
                np.dot(fs.basis.T, forces[2]),  # hip 1
                np.dot(fs.basis.T, forces[3])
            ]))  # hip 2
        weights_k.append(
            np.hstack([
                np.dot(fs.basis.T, forces[4]),  # knee 1
                np.dot(fs.basis.T, forces[5])
            ]))  # knee 2

    # Create our vocabularies
    sps_labels = ['GALLOP', 'RUNNING', 'WALKING']
    rng = np.random.RandomState(0)
    dimensions = 50  # some arbitrary number
    vocab_input = Vocabulary(dimensions=dimensions, rng=rng)
    vocab_dmp_weights_a = Vocabulary(dimensions=fs.n_basis * 2, rng=rng)
    vocab_dmp_weights_k = Vocabulary(dimensions=fs.n_basis * 2, rng=rng)
    vocab_dmp_weights_h = Vocabulary(dimensions=fs.n_basis * 2, rng=rng)

    for ii, (label, wa, wk,
             wh) in enumerate(zip(sps_labels, weights_a, weights_k,
                                  weights_h)):
        vocab_input.parse(label)  # randomly generate input vector
        vocab_dmp_weights_a.add(label, wa)
        vocab_dmp_weights_k.add(label, wk)
        vocab_dmp_weights_h.add(label, wh)

    net = spa.SPA()
    net.config[nengo.Ensemble].neuron_type = nengo.LIFRate()
    with net:

        config = nengo.Config(nengo.Ensemble)
        config[nengo.Ensemble].neuron_type = nengo.Direct()
        with config:
            # --------------------- Inputs --------------------------

            # def input_func(t):
            #     return vocab_input.parse(input_signal).v
            # net.input = nengo.Node(input_func)
            net.input = spa.State(dimensions,
                                  subdimensions=10,
                                  vocab=vocab_input)

            # ------------------- Point Attractors --------------------

            zero = nengo.Node([0])
            net.a1 = point_attractor.generate(n_neurons=1000,
                                              alpha=alpha,
                                              beta=beta)
            nengo.Connection(zero, net.a1.input[0], synapse=None)
            net.a2 = point_attractor.generate(n_neurons=1000,
                                              alpha=alpha,
                                              beta=beta)
            nengo.Connection(zero, net.a2.input[0], synapse=None)

            net.k1 = point_attractor.generate(n_neurons=1000,
                                              alpha=alpha,
                                              beta=beta)
            nengo.Connection(zero, net.k1.input[0], synapse=None)
            net.k2 = point_attractor.generate(n_neurons=1000,
                                              alpha=alpha,
                                              beta=beta)
            nengo.Connection(zero, net.k2.input[0], synapse=None)

            net.h1 = point_attractor.generate(n_neurons=1000,
                                              alpha=alpha,
                                              beta=beta)
            nengo.Connection(zero, net.h1.input[0], synapse=None)
            net.h2 = point_attractor.generate(n_neurons=1000,
                                              alpha=alpha,
                                              beta=beta)
            nengo.Connection(zero, net.h2.input[0], synapse=None)

        # -------------------- Oscillators ----------------------

        kick = nengo.Node(nengo.utils.functions.piecewise({0: 1, .05: 0}),
                          label='kick')

        osc = oscillator.generate(net, n_neurons=3000, speed=.01)
        osc.label = 'oscillator'
        nengo.Connection(kick, osc[0])

        # ------------------- Forcing Functions --------------------

        with config:
            net.assoc_mem_a = spa.AssociativeMemory(
                input_vocab=vocab_input,
                output_vocab=vocab_dmp_weights_a,
                wta_output=False)
            nengo.Connection(net.input.output, net.assoc_mem_a.input)

            net.assoc_mem_k = spa.AssociativeMemory(
                input_vocab=vocab_input,
                output_vocab=vocab_dmp_weights_k,
                wta_output=False)
            nengo.Connection(net.input.output, net.assoc_mem_k.input)

            net.assoc_mem_h = spa.AssociativeMemory(
                input_vocab=vocab_input,
                output_vocab=vocab_dmp_weights_h,
                wta_output=False)
            nengo.Connection(net.input.output, net.assoc_mem_h.input)

            # -------------------- Product for decoding -----------------------

            product_a1 = nengo.Network('Product A1')
            nengo.networks.Product(n_neurons=1000,
                                   dimensions=fs.n_basis,
                                   net=product_a1)
            product_a2 = nengo.Network('Product A2')
            nengo.networks.Product(n_neurons=1000,
                                   dimensions=fs.n_basis,
                                   net=product_a2)

            product_h1 = nengo.Network('Product H1')
            nengo.networks.Product(n_neurons=1000,
                                   dimensions=fs.n_basis,
                                   net=product_h1)
            product_h2 = nengo.Network('Product H2')
            nengo.networks.Product(n_neurons=1000,
                                   dimensions=fs.n_basis,
                                   net=product_h2)

            product_k1 = nengo.Network('Product K1')
            nengo.networks.Product(n_neurons=1000,
                                   dimensions=fs.n_basis,
                                   net=product_k1)
            product_k2 = nengo.Network('Product K2')
            nengo.networks.Product(n_neurons=1000,
                                   dimensions=fs.n_basis,
                                   net=product_k2)

            # get the largest basis function value for normalization
            max_basis = np.max(fs.basis * fs.scale)
            domain = np.linspace(-np.pi, np.pi, fs.basis.shape[0])
            domain_cossin = np.array([np.cos(domain), np.sin(domain)]).T
            for ff, product in zip([
                    net.assoc_mem_a.output[:fs.n_basis],
                    net.assoc_mem_a.output[fs.n_basis:],
                    net.assoc_mem_k.output[:fs.n_basis],
                    net.assoc_mem_k.output[fs.n_basis:],
                    net.assoc_mem_h.output[:fs.n_basis],
                    net.assoc_mem_h.output[fs.n_basis:]
            ], [
                    product_a1, product_a2, product_k1, product_k2, product_h1,
                    product_h2
            ]):
                for ii in range(fs.n_basis):
                    # find the value of a basis function at a value of (x, y)
                    target_function = nengo.utils.connection.target_function(
                        domain_cossin, fs.basis[:, ii] * fs.scale / max_basis)
                    nengo.Connection(osc, product.B[ii], **target_function)
                # multiply the value of each basis function at x by its weight
                nengo.Connection(ff, product.A)

            nengo.Connection(product_a1.output,
                             net.a1.input[1],
                             transform=np.ones((1, fs.n_basis)) * max_basis)
            nengo.Connection(product_a2.output,
                             net.a2.input[1],
                             transform=np.ones((1, fs.n_basis)) * max_basis)

            nengo.Connection(product_k1.output,
                             net.k1.input[1],
                             transform=np.ones((1, fs.n_basis)) * max_basis)
            nengo.Connection(product_k2.output,
                             net.k2.input[1],
                             transform=np.ones((1, fs.n_basis)) * max_basis)

            nengo.Connection(product_h1.output,
                             net.h1.input[1],
                             transform=np.ones((1, fs.n_basis)) * max_basis)
            nengo.Connection(product_h2.output,
                             net.h2.input[1],
                             transform=np.ones((1, fs.n_basis)) * max_basis)

            # -------------------- Output ------------------------------

            net.output = nengo.Node(size_in=6, label='output')
            nengo.Connection(net.a1.output, net.output[0], synapse=0.01)
            nengo.Connection(net.a2.output, net.output[1], synapse=0.01)
            nengo.Connection(net.k1.output, net.output[2], synapse=0.01)
            nengo.Connection(net.k2.output, net.output[3], synapse=0.01)
            nengo.Connection(net.h1.output, net.output[4], synapse=0.01)
            nengo.Connection(net.h2.output, net.output[5], synapse=0.01)

            # add in the goal offsets
            nengo.Connection(net.assoc_mem_a.output[[-2, -1]],
                             net.output[[0, 1]],
                             synapse=None)
            nengo.Connection(net.assoc_mem_k.output[[-2, -1]],
                             net.output[[2, 3]],
                             synapse=None)
            nengo.Connection(net.assoc_mem_h.output[[-2, -1]],
                             net.output[[4, 5]],
                             synapse=None)

            # create a node to give a plot of the represented function
            ff_plot_a = fs.make_plot_node(domain=domain,
                                          lines=2,
                                          ylim=[-1000000, 1000000])
            nengo.Connection(net.assoc_mem_a.output, ff_plot_a, synapse=0.1)

            ff_plot_k = fs.make_plot_node(domain=domain,
                                          lines=2,
                                          ylim=[-1000000, 1000000])
            nengo.Connection(net.assoc_mem_k.output, ff_plot_k, synapse=0.1)

            ff_plot_h = fs.make_plot_node(domain=domain,
                                          lines=2,
                                          ylim=[-1000000, 1000000])
            nengo.Connection(net.assoc_mem_h.output, ff_plot_h, synapse=0.1)

    return net
Example #12
def test_add():
    v = Vocabulary(3)
    v.add("A", [1, 2, 3])
    v.add("B", [4, 5, 6])
    v.add("C", [7, 8, 9])
    assert np.allclose(v.vectors, [[1, 2, 3], [4, 5, 6], [7, 8, 9]])
Example #13
def main():

    model = spa.SPA(label="Vector Storage")
    with model:

        # Dimensionality of each representation
        num_dimensions = 2
        sub_dimensions = 2

        # Create the vocabulary
        vocab = Vocabulary(num_dimensions, randomize=False)

        # Form the inputs
        stored_value_1 = [1] * num_dimensions
        stored_value_1 = [
            s / np.linalg.norm(stored_value_1) for s in stored_value_1
        ]
        vocab.add("Stored_value_1", stored_value_1)

        stored_value_2 = [(-1)**i for i in range(num_dimensions)]
        stored_value_2 = [
            s / np.linalg.norm(stored_value_2) for s in stored_value_2
        ]
        vocab.add("Stored_value_2", stored_value_2)

        def first_input(t):
            if t < 10:
                return "Stored_value_2"
            else:
                return "Stored_value_1"

        def second_input(t):
            if t < 5:
                return "Stored_value_1"
            else:
                return "Stored_value_2"

        # Buffers to store the input
        model.buffer1 = spa.Buffer(dimensions=num_dimensions,
                                   subdimensions=sub_dimensions,
                                   neurons_per_dimension=200,
                                   direct=True)
        model.buffer2 = spa.Buffer(dimensions=num_dimensions,
                                   subdimensions=sub_dimensions,
                                   neurons_per_dimension=200,
                                   direct=True)

        # Probe to visualize the values stored in the buffers
        buffer_1_probe = nengo.Probe(model.buffer1.state.output)
        buffer_2_probe = nengo.Probe(model.buffer2.state.output)

        # Connect up the inputs
        model.input = spa.Input(buffer1=first_input, buffer2=second_input)

        # Buffer to store the output
        model.buffer3 = spa.Buffer(dimensions=num_dimensions,
                                   subdimensions=sub_dimensions,
                                   neurons_per_dimension=200,
                                   direct=True)
        buffer_3_probe = nengo.Probe(model.buffer3.state.output)

        # Control system
        actions = spa.Actions(
            'dot(buffer1, Stored_value_2) --> buffer3=Stored_value_2',
            'dot(buffer1, Stored_value_1) --> buffer3=Stored_value_1+Stored_value_2'
        )
        model.bg = spa.BasalGanglia(actions)
        model.thalamus = spa.Thalamus(model.bg)

    # Start the simulator
    sim = nengo.Simulator(model)

    # Dynamic plotting
    plt.ion()  # Dynamic updating of plots
    fig = plt.figure(figsize=(15, 8))
    plt.show()
    ax = fig.gca()
    ax.set_title("Vector Storage")

    while True:
        sim.run(1)  # Run for an additional 1 second
        plt.clf()  # Clear the figure
        plt.plot(sim.trange(),
                 similarity(sim.data[buffer_1_probe], vocab),
                 label="Buffer 1 Value")  # Plot the entire dataset so far
        plt.plot(sim.trange(),
                 similarity(sim.data[buffer_2_probe], vocab),
                 label="Buffer 2 Value")
        plt.plot(sim.trange(),
                 similarity(sim.data[buffer_3_probe], vocab),
                 label="Buffer 3 Value")
        print(sim.data[buffer_1_probe][-1])
        print(sim.data[buffer_2_probe][-1])
        print(sim.data[buffer_3_probe][-1])
        plt.legend(vocab.keys * 3, loc=2)
        plt.draw()  # Re-draw
Beispiel #14
0
def setup_probes_generic(model):
    with model:
        model.config[nengo.Probe].synapse = Lowpass(0.005)
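        # Filter every probe with a 5 ms low-pass synapse by default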

        vocab_dict = {}
        graph_list = []
        anim_config = []

        sub_vocab1 = enum_vocab.create_subset(['POS1*ONE', 'POS2*TWO',
                                               'POS3*THR', 'POS4*FOR',
                                               'POS5*FIV'])

        sub_vocab2 = vocab.create_subset(['ADD'])
        sub_vocab2.readonly = False
        sub_vocab2.add('N_ADD', vocab.parse('~ADD'))
        sub_vocab2.add('ADD*ADD', vocab.parse('ADD*ADD'))
        sub_vocab2.add('ADD*ADD*ADD', vocab.parse('ADD*ADD*ADD'))
        # sub_vocab2.add('ADD*ADD*ADD*ADD', vocab.parse('ADD*ADD*ADD*ADD'))
        # sub_vocab2.add('ADD*ADD*ADD*ADD*ADD',
        #                vocab.parse('ADD*ADD*ADD*ADD*ADD'))

        sub_vocab3 = vocab.create_subset([])
        sub_vocab3.readonly = False
        # sub_vocab3.add('N_POS1*ONE', vocab.parse('~(POS1*ONE)'))
        # sub_vocab3.add('N_POS1*TWO', vocab.parse('~(POS1*TWO)'))
        # sub_vocab3.add('N_POS1*THR', vocab.parse('~(POS1*THR)'))
        # sub_vocab3.add('N_POS1*FOR', vocab.parse('~(POS1*FOR)'))
        # sub_vocab3.add('N_POS1*FIV', vocab.parse('~(POS1*FIV)'))
        sub_vocab3.add('ADD', vocab.parse('ADD'))
        sub_vocab3.add('INC', vocab.parse('INC'))

        vocab_seq_list = vocab.create_subset([])
        vocab_seq_list.readonly = False
        for sp_str in ['POS1*ONE', 'POS2*TWO', 'POS3*THR', 'POS4*FOR',
                       'POS5*FIV', 'POS6*SIX', 'POS7*SEV', 'POS8*EIG']:
            vocab_seq_list.add(sp_str, vocab.parse(sp_str))

        vocab_rpm = vocab.create_subset([])
        vocab_rpm.readonly = False
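        # For each chosen digit X, store POS1*X + POS2*X + POS3*X (the
        # 'A_' keys) and its approximate inverse under circular convolution
        # (the 'N_' keys, built with ~)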
        for i in [1, 3, 8]:
            sp_str = num_sp_strs[i]
            vocab_rpm.add('A_(P1+P2+P3)*%s' % sp_str,
                          vocab.parse('POS1*%s+POS2*%s+POS3*%s' %
                                      (sp_str, sp_str, sp_str)))
            vocab_rpm.add('N_(P1+P2+P3)*%s' % sp_str,
                          vocab.parse('~(POS1*%s+POS2*%s+POS3*%s)' %
                                      (sp_str, sp_str, sp_str)))

        # Debug override: display the memory buffers with the RPM vocab
        vocab_seq_list = vocab_rpm

        if hasattr(model, 'stim'):
            p0 = nengo.Probe(model.stim.output, synapse=None)

            add_to_anim_config(anim_config, key='vis',
                               data_func_name='generic_single',
                               data_func_params={'data': p0},
                               plot_type_name='imshow',
                               plot_type_params={'shape': (28, 28)})
        else:
            p0 = 0

        if hasattr(model, 'vis') and True:
            pvs1 = nengo.Probe(model.vis.output)
            pvs2 = nengo.Probe(model.vis.neg_attention)
            pvs3 = nengo.Probe(model.vis.am_utilities)
            pvs4 = nengo.Probe(model.vis.mb_output)
            pvs5 = nengo.Probe(model.vis.vis_out)

            # probes = gen_graph_list(['vis', p0, pvs1, pvs2, pvs3])
            # vocab_dict[idstr(pvs1)] = vis_vocab

            add_to_graph_list(graph_list, ['vis', p0, pvs1, pvs2, pvs3, 0,
                                           'vis net', pvs4, pvs5])
            add_to_vocab_dict(vocab_dict, {pvs1: vis_vocab})

        # ############ FOR DEBUGGING VIS DETECT SYSTEM ########################
        # if hasattr(model, 'vis') and True:
        #     pvsd1 = nengo.Probe(model.vis.detect_change_net.input_diff)
        #     pvsd2 = nengo.Probe(model.vis.detect_change_net.item_detect)
        #     pvsd3 = nengo.Probe(model.vis.detect_change_net.blank_detect)

        #     probes = gen_graph_list(['vis detect', p0, pvsd1, pvsd2, pvsd3])
        #     graph_list.extend(probes)

        if hasattr(model, 'ps') and True:
            pps1 = nengo.Probe(model.ps.task)
            pps2 = nengo.Probe(model.ps.state)
            pps3 = nengo.Probe(model.ps.dec)

            pps4 = nengo.Probe(model.ps.ps_task_mb.mem1.output)
            pps5 = nengo.Probe(model.ps.ps_task_mb.mem2.output)
            pps6 = nengo.Probe(model.ps.ps_task_mb.mem1.input, synapse=None)
            pps6b = nengo.Probe(model.ps.task_init.output)

            pps7 = nengo.Probe(model.ps.ps_state_mb.mem1.output)
            pps8 = nengo.Probe(model.ps.ps_state_mb.mem2.output)
            pps9 = nengo.Probe(model.ps.ps_state_mb.mem1.input, synapse=None)

            pps10 = nengo.Probe(model.ps.ps_dec_mb.mem1.output)
            pps11 = nengo.Probe(model.ps.ps_dec_mb.mem2.output)
            pps12 = nengo.Probe(model.ps.ps_dec_mb.mem1.input, synapse=None)

            pps13 = nengo.Probe(model.ps.ps_task_mb.gate)
            pps14 = nengo.Probe(model.ps.ps_state_mb.gate)
            pps15 = nengo.Probe(model.ps.ps_dec_mb.gate)

            # probes = gen_graph_list(['ps', p0, pps1, pps2, pps3, 0,
            #                          'ps_task', p0, pps1, pps6, pps4, pps5, pps6b, pps13, 0,  # noqa
            #                          'ps_state', p0, pps2, pps9, pps7, pps8, pps14, 0,  # noqa
            #                          'ps_dec', p0, pps3, pps12, pps10, pps11, pps15],  # noqa
            #                         [pps1, pps2, pps3, pps4, pps5, pps6])
            # graph_list.extend(probes)
            # vocab_dict[idstr(pps1)] = ps_task_vocab
            # vocab_dict[idstr(pps2)] = ps_state_vocab
            # vocab_dict[idstr(pps3)] = ps_dec_vocab
            # vocab_dict[idstr(pps4)] = ps_task_vocab
            # vocab_dict[idstr(pps5)] = ps_task_vocab
            # vocab_dict[idstr(pps6)] = ps_task_vocab
            # vocab_dict[idstr(pps7)] = ps_state_vocab
            # vocab_dict[idstr(pps8)] = ps_state_vocab
            # vocab_dict[idstr(pps9)] = ps_state_vocab
            # vocab_dict[idstr(pps10)] = ps_dec_vocab
            # vocab_dict[idstr(pps11)] = ps_dec_vocab
            # vocab_dict[idstr(pps12)] = ps_dec_vocab

            add_to_graph_list(graph_list,
                              ['ps', p0, pps1, pps2, pps3, 0,
                               'ps_task', p0, pps1, pps6, pps4, pps5, pps6b, pps13, 0,  # noqa
                               'ps_state', p0, pps2, pps9, pps7, pps8, pps14, 0,  # noqa
                               'ps_dec', p0, pps3, pps12, pps10, pps11, pps15],  # noqa
                              [pps1, pps2, pps3, pps4, pps5, pps6])
            add_to_vocab_dict(vocab_dict, {pps1: ps_task_vocab,
                                           pps2: ps_state_vocab,
                                           pps3: ps_dec_vocab,
                                           pps4: ps_task_vocab,
                                           pps5: ps_task_vocab,
                                           pps6: ps_task_vocab,
                                           pps7: ps_state_vocab,
                                           pps8: ps_state_vocab,
                                           pps9: ps_state_vocab,
                                           pps10: ps_dec_vocab,
                                           pps11: ps_dec_vocab,
                                           pps12: ps_dec_vocab})

        if hasattr(model, 'enc') and True:
            pen1 = nengo.Probe(model.enc.pos_inc.gate)
            # pen2 = nengo.Probe(model.enc.pos_mb.gateX)
            # pen3 = nengo.Probe(model.enc.pos_mb.gateN)
            pen4 = nengo.Probe(model.enc.pos_inc.output)
            # pen5 = nengo.Probe(model.enc.pos_mb.mem1.output)
            # pen6 = nengo.Probe(model.enc.pos_mb.am.output)
            pen6 = nengo.Probe(model.enc.pos_inc.reset)
            pen7 = nengo.Probe(model.enc.pos_mb_acc.output)
            pen7a = nengo.Probe(model.enc.pos_mb_acc.input)
            pen8 = nengo.Probe(model.enc.pos_output)

            # probes = gen_graph_list(['enc', p0, pen4, pen7, pen6, 0,
            #                          'enc gate', pen1, pen2, pen3],
            #                         [pen4, pen7])
            # graph_list.extend(probes)
            # vocab_dict[idstr(pen4)] = pos_vocab
            # vocab_dict[idstr(pen7)] = pos_vocab

            add_to_graph_list(graph_list,
                              ['enc', p0, pen1, pen4, pen7, pen7a, pen6],
                              [pen4])
            add_to_vocab_dict(vocab_dict, {pen4: pos_vocab,
                                           pen7: pos_vocab,
                                           pen7a: pos_vocab,
                                           pen8: pos_vocab})

        if hasattr(model, 'mem') and True:
            pmm1 = nengo.Probe(model.mem.mb1)
            pmm1a = nengo.Probe(model.mem.mb1_net.mb_reh)
            pmm1b = nengo.Probe(model.mem.mb1_net.mb_dcy)
            pmm2 = nengo.Probe(model.mem.mb1_net.gate)
            pmm3 = nengo.Probe(model.mem.mb1_net.reset)
            pmm4 = nengo.Probe(model.mem.mb2)
            pmm5 = nengo.Probe(model.mem.mb2_net.gate)
            pmm6 = nengo.Probe(model.mem.mb2_net.reset)
            pmm7 = nengo.Probe(model.mem.mb3)
            pmm8 = nengo.Probe(model.mem.mb3_net.gate)
            pmm9 = nengo.Probe(model.mem.mb3_net.reset)

            # probes = gen_graph_list(['mb1', p0, pmm1, pmm2, pmm3, 0,
            #                          'mb2', p0, pmm4, pmm5, pmm6, 0,
            #                          'mb3', p0, pmm7, pmm8, pmm9],
            #                         [pmm1, pmm4, pmm7])
            # graph_list.extend(probes)
            # vocab_dict[idstr(pmm1)] = sub_vocab1
            # vocab_dict[idstr(pmm4)] = sub_vocab1
            # vocab_dict[idstr(pmm7)] = sub_vocab1

            # add_to_graph_list(graph_list,
            #                   ['mb1', p0, pmm1, pmm2, pmm3, 0,
            #                    'mb2', p0, pmm4, pmm5, pmm6, 0,
            #                    'mb3', p0, pmm7, pmm8, pmm9],
            #                   [pmm1, pmm4, pmm7])
            add_to_graph_list(graph_list,
                              ['mb1', p0, pmm1, pmm1a, pmm1b, pmm2, pmm3, 0,
                               'mb2', p0, pmm4, pmm5, pmm6, 0,
                               'mb3', p0, pmm7, pmm8, pmm9])
            add_to_vocab_dict(vocab_dict, {pmm1: vocab_seq_list,
                                           pmm1a: vocab_seq_list,
                                           pmm1b: vocab_seq_list,
                                           pmm4: vocab_seq_list,
                                           pmm7: vocab_seq_list})

        if hasattr(model, 'mem') and True:
            pmm10 = nengo.Probe(model.mem.mbave_net.input)
            pmm11 = nengo.Probe(model.mem.mbave_net.gate)
            pmm12 = nengo.Probe(model.mem.mbave_net.reset)
            pmm13 = nengo.Probe(model.mem.mbave)
            # pmm14 = nengo.Probe(model.mem.mbave_norm.output)
            # pmm15 = nengo.Probe(model.mem.mbave_in_init.output)

            # probes = gen_graph_list(['mbave', p0, pmm10, pmm11, pmm12, pmm13,
            #                          pmm15, pmm14],
            #                         pmm10)
            # graph_list.extend(probes)
            # vocab_dict[idstr(pmm10)] = sub_vocab2
            # vocab_dict[idstr(pmm13)] = sub_vocab2
            # vocab_dict[idstr(pmm15)] = sub_vocab2

            add_to_graph_list(graph_list,
                              ['mbave', p0, pmm10, pmm11, pmm12, pmm13],
                              [pmm10])
            add_to_vocab_dict(vocab_dict, {pmm10: sub_vocab2,
                                           pmm13: sub_vocab3})

        # if (hasattr(model, 'mem') and not isinstance(model.mem,
        #                                              WorkingMemoryDummy)):
        #     mem = model.mem.mb3a
        #     pmm11 = nengo.Probe(mem.gateX)
        #     pmm12 = nengo.Probe(mem.gateN)
        #     pmm13 = nengo.Probe(mem.gate)
        #     pmm14 = nengo.Probe(mem.reset)
        #     pmm15 = nengo.Probe(model.mem.gate_sig_bias.output)
        #     pmm16 = nengo.Probe(mem.input)

        #     probes = gen_graph_list(['mb3a sigs', p0, pmm11, pmm12, pmm13,
        #                              pmm14, pmm15, pmm16],
        #                             [pmm16])
        #     graph_list.extend(probes)
        #     vocab_dict[idstr(pmm16)] = sub_vocab1

        if hasattr(model, 'trfm') and \
           not isinstance(model.trfm, TransformationSystemDummy):
            ptf1 = nengo.Probe(model.trfm.select_in_a.output)
            ptf2 = nengo.Probe(model.trfm.select_in_b.output)
            ptf3 = nengo.Probe(model.trfm.cconv1.output)
            ptf3b = nengo.Probe(model.trfm.cconv1.output)
            ptf3c = nengo.Probe(model.trfm.cconv1.output)
            ptf3d = nengo.Probe(model.trfm.cconv1.output)
            ptf4 = nengo.Probe(model.trfm.output)
            ptf4b = nengo.Probe(model.trfm.output)
            ptf5 = nengo.Probe(model.trfm.compare.output)
            ptf6 = nengo.Probe(model.trfm.norm_a.output)
            ptf7 = nengo.Probe(model.trfm.norm_b.output)
            ptf8 = nengo.Probe(model.trfm.norm_a.input)
            ptf9 = nengo.Probe(model.trfm.norm_b.input)
            ptf10 = nengo.Probe(model.trfm.compare.dot_prod)
            ptf11a = nengo.Probe(model.trfm.cconv1.A)
            ptf11b = nengo.Probe(model.trfm.cconv1.B)

            # probes = gen_graph_list(['trfm io', p0, ptf1, ptf2, ptf4, ptf4b, 0,
            #                          'trfm cc', p0, ptf3, ptf3b, ptf3c, ptf3d, 0, # noqa
            #                          'trfm cmp', ptf5, ptf8, ptf6, ptf9, ptf7,
            #                          ptf10],
            #                         [ptf1, ptf2, ptf3, ptf3b, ptf3c, ptf4,
            #                          ptf4b, ptf6, ptf7])
            # graph_list.extend(probes)

            # vocab_dict[idstr(ptf1)] = sub_vocab1
            # vocab_dict[idstr(ptf2)] = sub_vocab3
            # vocab_dict[idstr(ptf3)] = item_vocab
            # vocab_dict[idstr(ptf3b)] = pos_vocab
            # vocab_dict[idstr(ptf3c)] = sub_vocab2
            # vocab_dict[idstr(ptf3d)] = sub_vocab1
            # vocab_dict[idstr(ptf4)] = sub_vocab2
            # vocab_dict[idstr(ptf4b)] = sub_vocab1
            # vocab_dict[idstr(ptf5)] = ps_cmp_vocab
            # vocab_dict[idstr(ptf6)] = sub_vocab1
            # vocab_dict[idstr(ptf7)] = sub_vocab1
            # vocab_dict[idstr(ptf8)] = sub_vocab1
            # vocab_dict[idstr(ptf9)] = sub_vocab1

            add_to_graph_list(graph_list,
                              ['trfm io', p0, ptf1, ptf2, ptf4, 0,
                               'trfm cc', p0, pmm11, ptf3, ptf11a, ptf11b, pmm13, 0, # noqa
                               'trfm cmp', ptf5, ptf8, ptf6, ptf9, ptf7, ptf10], # noqa
                              [ptf1, ptf4, ptf6, ptf7])
            # add_to_vocab_dict(vocab_dict, {ptf1: sub_vocab1,
            #                                ptf2: sub_vocab3,
            #                                ptf3: item_vocab,
            #                                ptf3b: pos_vocab,
            #                                ptf3c: sub_vocab2,
            #                                ptf3d: sub_vocab1,
            #                                ptf4: sub_vocab2,
            #                                ptf4b: sub_vocab1,
            #                                ptf5: ps_cmp_vocab,
            #                                ptf6: sub_vocab1,
            #                                ptf7: sub_vocab1,
            #                                ptf8: sub_vocab1,
            #                                ptf9: sub_vocab1})
            add_to_vocab_dict(vocab_dict, {ptf1: vocab_rpm,
                                           ptf2: vocab_rpm,
                                           ptf3: vocab_rpm,
                                           ptf4: vocab_rpm,
                                           ptf5: ps_cmp_vocab,
                                           ptf6: sub_vocab1,
                                           ptf7: sub_vocab1,
                                           ptf8: sub_vocab1,
                                           ptf9: sub_vocab1,
                                           ptf11a: vocab_rpm,
                                           ptf11b: vocab_rpm})

        if hasattr(model, 'trfm') and \
                not isinstance(model.trfm, TransformationSystemDummy):
            ptf5 = nengo.Probe(model.trfm.am_trfms.pos1_to_pos)
            ptf6 = nengo.Probe(model.trfm.am_trfms.pos1_to_num)
            ptf7 = nengo.Probe(model.trfm.am_trfms.num_to_pos1)
            ptf8 = nengo.Probe(model.trfm.am_trfms.pos_to_pos1)
            # ptf9 = nengo.Probe(model.trfm.vis_trfm_utils)
            # ptf10 = nengo.Probe(model.trfm.vis_trfm_in)

            # probes = gen_graph_list(['trfm am1', p0, ptf5, ptf6, 0,
            #                          'trfm am2', p0, ptf7, ptf8],
            #                         [ptf5, ptf6, ptf7, ptf8])
            # graph_list.extend(probes)

            # vocab_dict[idstr(ptf5)] = pos_vocab
            # vocab_dict[idstr(ptf6)] = item_vocab
            # vocab_dict[idstr(ptf7)] = pos1_vocab
            # vocab_dict[idstr(ptf8)] = pos1_vocab

            add_to_graph_list(graph_list,
                              ['trfm am1', p0, ptf5, ptf6, 0,
                               'trfm am2', p0, ptf7, ptf8, 0],
                              # 'trfm vis', p0, ptf9, ptf10],
                              [ptf5, ptf6, ptf7, ptf8])
            add_to_vocab_dict(vocab_dict, {ptf5: pos_vocab,
                                           ptf6: item_vocab,
                                           ptf7: pos1_vocab,
                                           ptf8: pos1_vocab})

        if hasattr(model, 'bg') and True:
            pbg1 = nengo.Probe(model.bg.input)
            pbg2 = nengo.Probe(model.bg.output)

            # probes = gen_graph_list(['bg', p0, pbg1, pbg2])
            # graph_list.extend(probes)

            add_to_graph_list(graph_list, ['bg', p0, pbg1, pbg2])

        if hasattr(model, 'dec') and True:
            pde1 = nengo.Probe(model.dec.item_dcconv)
            # pde2 = nengo.Probe(model.dec.select_am)
            # pde3 = nengo.Probe(model.dec.select_vis)
            pde4 = nengo.Probe(model.dec.am_out, synapse=0.01)
            # pde5 = nengo.Probe(model.dec.vt_out)
            pde6 = nengo.Probe(model.dec.pos_mb_gate_sig.output)
            # pde7 = nengo.Probe(model.dec.util_diff_neg)
            pde8 = nengo.Probe(model.dec.am_utils)
            pde9 = nengo.Probe(model.dec.am2_utils)
            pde10 = nengo.Probe(model.dec.util_diff)
            pde11 = nengo.Probe(model.dec.pos_recall_mb)
            pde11a = nengo.Probe(model.dec.pos_recall_mb_in)
            pde11b = nengo.Probe(model.dec.fr_recall_mb.gate)
            pde11c = nengo.Probe(model.dec.fr_recall_mb.reset)
            pde11d = nengo.Probe(model.dec.fr_recall_mb.mem1.input)
            pde11e = nengo.Probe(model.dec.fr_recall_mb.mem2.input)

            # pde12 = nengo.Probe(model.dec.recall_mb.gateX)
            # pde13 = nengo.Probe(model.dec.recall_mb.gateN)
            # pde14a = nengo.Probe(model.dec.recall_mb.mem1.input)
            # pde14b = nengo.Probe(model.dec.recall_mb.mem1.output)
            # pde14c = nengo.Probe(model.dec.recall_mb.mem2.input)
            # pde14d = nengo.Probe(model.dec.recall_mb.mem2.output)
            # pde14e = nengo.Probe(model.dec.recall_mb.mem1.diff.output)
            # pde14f = nengo.Probe(model.dec.recall_mb.reset)
            # pde14g = nengo.Probe(model.dec.recall_mb.mem1.gate)
            # pde12 = nengo.Probe(model.dec.dec_am_fr.output)
            # pde13 = nengo.Probe(model.dec.dec_am.item_output)
            # pde14 = nengo.Probe(model.dec.recall_mb.mem1.output)
            pde15 = nengo.Probe(model.dec.output_know)
            pde16 = nengo.Probe(model.dec.output_unk)
            pde18 = nengo.Probe(model.dec.output_stop)
            # pde19 = nengo.Probe(model.dec.am_th_utils)
            # pde20 = nengo.Probe(model.dec.fr_th_utils)
            pde21 = nengo.Probe(model.dec.output)
            # pde22 = nengo.Probe(model.dec.dec_am_fr.input)
            # pde23 = nengo.Probe(model.dec.am_def_th_utils)
            # pde24 = nengo.Probe(model.dec.fr_def_th_utils)
            pde25 = nengo.Probe(model.dec.fr_utils)
            pde26 = nengo.Probe(model.dec.pos_mb_gate_bias.output)
            pde27 = nengo.Probe(model.dec.pos_acc_input)
            pde28 = nengo.Probe(model.dec.item_dcconv_a)
            pde29 = nengo.Probe(model.dec.item_dcconv_b)

            pde30 = nengo.Probe(model.dec.sel_signals)
            pde31 = nengo.Probe(model.dec.select_out.input0)
            pde32 = nengo.Probe(model.dec.select_out.input1)
            pde33 = nengo.Probe(model.dec.select_out.input3)

            pde34 = nengo.Probe(model.dec.out_class_sr_y)
            pde35 = nengo.Probe(model.dec.out_class_sr_diff)
            pde36 = nengo.Probe(model.dec.out_class_sr_n)

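            # 5-D one-hot vocabulary so the selection signals display as
            # discrete SEL0..SEL4 labels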
            sel_out_vocab = Vocabulary(5)
            for n in range(5):
                vec = np.zeros(5)
                vec[n] = 1
                sel_out_vocab.add('SEL%d' % n, vec)

            # probes = gen_graph_list(['dec decconv', pde1, pde4, pde21, 0,
            #                          'dec kn unk st', pde15, pde16, pde18, 0,
            #                          'dec am utils', pde8, pde9, pde25, 0,
            #                          'dec sigs', pde6, pde26, pde11, pde27],
            #                         [pde21])
            # # 'dec mb sigs', pde12, pde13, pde14f, pde14g, pde14a, pde14e, pde14b, pde14c, pde14d],  # noqa
            # graph_list.extend(probes)

            # vocab_dict[idstr(pde1)] = item_vocab
            # vocab_dict[idstr(pde4)] = mtr_vocab
            # vocab_dict[idstr(pde21)] = mtr_vocab
            # vocab_dict[idstr(pde11)] = pos_vocab
            # vocab_dict[idstr(pde27)] = pos_vocab
            # # vocab_dict[idstr(pde14a)] = pos_vocab
            # # vocab_dict[idstr(pde14b)] = pos_vocab
            # # vocab_dict[idstr(pde14c)] = pos_vocab
            # # vocab_dict[idstr(pde14d)] = pos_vocab

            add_to_graph_list(graph_list,
                              ['dec decconv', pde28, pde29, pde1, pde4, pde21, 0,
                               'dec kn unk st', pde15, pde16, pde18, 0,
                               'dec am utils', pde8, pde9, pde10, pde25, 0,
                               'dec sigs', pde6, pde26, pde11, pde27, 0,
                               'dec fr', pde11b, pde11c, pde11, pde11a, pde11d, pde11e, 0,
                               'dec sel', pde30, pde31, pde32, pde33, 0,
                               'dec out class', pde34, pde35, pde36],
                              [pde21, pde30])
            add_to_vocab_dict(vocab_dict, {pde1: item_vocab,
                                           pde4: mtr_vocab,
                                           pde21: mtr_vocab,
                                           pde11: pos_vocab,
                                           pde11a: pos_vocab,
                                           pde11d: pos_vocab,
                                           pde11e: pos_vocab,
                                           pde27: pos_vocab,
                                           pde28: sub_vocab2,
                                           pde29: pos_vocab,
                                           pde30: sel_out_vocab,
                                           pde31: mtr_disp_vocab,
                                           pde32: mtr_disp_vocab,
                                           pde33: mtr_disp_vocab})

        if hasattr(model, 'mtr'):
            pmt1 = nengo.Probe(model.mtr.ramp)
            pmt2 = nengo.Probe(model.mtr.ramp_reset_hold)
            pmt3 = nengo.Probe(model.mtr.motor_stop_input.output)
            pmt4 = nengo.Probe(model.mtr.motor_init.output)
            pmt5 = nengo.Probe(model.mtr.motor_go)
            pmt6 = nengo.Probe(model.mtr.ramp_sig.stop)
            # pmt6 = nengo.Probe(model.mtr.ramp_int_stop)
            pmt7a = nengo.Probe(model.mtr.arm_px_node)
            pmt7b = nengo.Probe(model.mtr.arm_py_node)
            pmt8 = nengo.Probe(model.mtr.pen_down)
            pmt9 = nengo.Probe(model.mtr.zero_centered_arm_ee_loc,
                               synapse=0.01)
            pmt10 = nengo.Probe(model.mtr.zero_centered_tgt_ee_loc,
                                synapse=0.03)
            pmt11 = nengo.Probe(model.mtr.motor_bypass.output)

            add_to_graph_list(graph_list,
                              ['mtr', p0, pmt1, pmt2, pmt6, pmt3, pmt4, pmt5, pmt11, 0,  # noqa
                               'arm', pmt7a, pmt7b, pmt8])

            add_to_anim_config(anim_config, key='mtr',
                               data_func_name='arm_path',
                               data_func_params={'ee_path_data': pmt9,
                                                 'target_path_data': pmt10,
                                                 'pen_status_data': pmt8},
                                                 # 'arm_posx_data': pmt7a,
                                                 # 'arm_posy_data': pmt7b,
                                                 # 'arm_pos_bias': [cfg.mtr_arm_rest_x_bias, cfg.mtr_arm_rest_y_bias]}, # noqa
                               plot_type_name='arm_path_plot',
                               plot_type_params={'show_tick_labels': True,
                                                 'xlim': (-mtr_sp_scale_factor, mtr_sp_scale_factor),  # noqa
                                                 'ylim': (-mtr_sp_scale_factor, mtr_sp_scale_factor)})  # noqa

        # --------------------- ANIMATION CONFIGURATION -----------------------
        anim_config.append({'subplot_width': 5,
                            'subplot_height': 5,
                            'max_subplot_cols': 4,
                            'generator_func_params': {'t_index_step': 10}})

    return (graph_list[:-1], vocab_dict, anim_config)
Beispiel #15
0
def generate(input_signal, alpha=1000.0):
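    # beta = alpha/4 is the conventional critical-damping choice for
    # DMP-style point attractors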
    beta = alpha / 4.0

    # Read in the class mean for numbers from vision network
    weights_data = np.load('models/mnist_vision_data/params.npz')
    weights = weights_data['Wc']
    means_data = np.load('models/mnist_vision_data/class_means.npz')
    means = np.matrix(1.0 / means_data['means'])
    sps = np.multiply(weights.T, means.T)[:10]
    sps_labels = [
        'ZERO', 'ONE', 'TWO', 'THREE', 'FOUR',
        'FIVE', 'SIX', 'SEVEN', 'EIGHT', 'NINE']
    dimensions = weights.shape[0]

    # generate the Function Space
    forces, _, _ = forcing_functions.load_folder(
        'models/handwriting_trajectories', rhythmic=True,
        alpha=alpha, beta=beta)
    # make an array out of all the possible functions we want to represent
    force_space = np.vstack(forces)
    # use this array as our space to perform svd over
    fs = nengo.FunctionSpace(space=force_space, n_basis=10)
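    # fs.basis now spans the force space with n_basis=10 singular vectors,
    # so each trajectory reduces to 10 coefficients per dimension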

    # store the weights for each trajectory
    weights_x = []
    weights_y = []
    for ii in range(len(forces)):
        # each trajectory contributes an (x, y) pair of force profiles;
        # use a distinct name so the forces list is not clobbered
        forces_xy = force_space[ii*2:ii*2+2]
        # load up the forces to be output by the forcing function
        # calculate the corresponding weights over the basis functions
        weights_x.append(np.dot(fs.basis.T, forces_xy[0]))
        weights_y.append(np.dot(fs.basis.T, forces_xy[1]))

    # Create our vocabularies
    rng = np.random.RandomState(0)
    vocab_vision = Vocabulary(dimensions=dimensions, rng=rng)
    vocab_dmp_weights_x = Vocabulary(dimensions=fs.n_basis, rng=rng)
    vocab_dmp_weights_y = Vocabulary(dimensions=fs.n_basis, rng=rng)
    for label, sp, wx, wy in zip(
            sps_labels, sps, weights_x, weights_y):
        vocab_vision.add(
            label, np.array(sp)[0] / np.linalg.norm(np.array(sp)[0]))
        vocab_dmp_weights_x.add(label, wx)
        vocab_dmp_weights_y.add(label, wy)


    net = spa.SPA()
    # net.config[nengo.Ensemble].neuron_type = nengo.Direct()
    with net:

        # --------------------- Inputs --------------------------
        # def input_func(t):
        #     return vocab_vision.parse(input_signal).v
        # net.input = nengo.Node(input_func, label='input')
        net.input = spa.State(dimensions, subdimensions=10,
                              vocab=vocab_vision)

        number = nengo.Node(output=[0], label='number')

        # ------------------- Point Attractors --------------------
        net.x = point_attractor.generate(
            n_neurons=1000, alpha=alpha, beta=beta)
        net.y = point_attractor.generate(
            n_neurons=1000, alpha=alpha, beta=beta)

        # -------------------- Oscillators ----------------------
        kick = nengo.Node(nengo.utils.functions.piecewise({0: 1, .05: 0}),
                          label='kick')
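        # piecewise output: 1 for the first 50 ms, then 0 -- a brief pulse
        # into osc[0] that knocks the oscillator off its zero fixed point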
        osc = oscillator.generate(net, n_neurons=2000, speed=.05)
        osc.label = 'oscillator'
        nengo.Connection(kick, osc[0])

        # ------------------- Forcing Functions --------------------

        net.assoc_mem_x = spa.AssociativeMemory(
            input_vocab=vocab_vision,
            output_vocab=vocab_dmp_weights_x,
            wta_output=False)
        nengo.Connection(net.input.output, net.assoc_mem_x.input)

        net.assoc_mem_y = spa.AssociativeMemory(
            input_vocab=vocab_vision,
            output_vocab=vocab_dmp_weights_y,
            wta_output=False)
        nengo.Connection(net.input.output, net.assoc_mem_y.input)

        # -------------------- Product for decoding -----------------------

        net.product_x = nengo.Network('Product X')
        nengo.networks.Product(n_neurons=1000,
                               dimensions=fs.n_basis,
                               net=net.product_x,
                               input_magnitude=1.0)
        net.product_y = nengo.Network('Product Y')
        nengo.networks.Product(n_neurons=1000,
                               dimensions=fs.n_basis,
                               net=net.product_y,
                               input_magnitude=1.0)

        # get the largest basis function value for normalization
        max_basis = np.max(fs.basis*fs.scale)
        domain = np.linspace(-np.pi, np.pi, fs.basis.shape[0])
        domain_cossin = np.array([np.cos(domain), np.sin(domain)]).T
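        # osc represents the phase as (cos, sin); each connection below
        # decodes one basis function's value at the current phase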
        for ff, product in zip([net.assoc_mem_x.output,
                                net.assoc_mem_y.output],
                               [net.product_x, net.product_y]):
            for ii in range(fs.n_basis):
                # find the value of a basis function at a value of (x, y)
                target_function = nengo.utils.connection.target_function(
                    domain_cossin, fs.basis[:, ii]*fs.scale/max_basis)
                nengo.Connection(osc, product.B[ii], **target_function)
                # multiply the value of each basis function at x by its weight
            nengo.Connection(ff, product.A)

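        # sum the weighted basis values, rescaling by max_basis to undo
        # the normalization applied when decoding each basis function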
        nengo.Connection(net.product_x.output, net.x.input[1],
                         transform=np.ones((1, fs.n_basis)) * max_basis,
                         synapse=None)
        nengo.Connection(net.product_y.output, net.y.input[1],
                         transform=np.ones((1, fs.n_basis)) * max_basis,
                         synapse=None)

        # -------------------- Output ------------------------------

        net.output = nengo.Node(size_in=2, label='output')
        nengo.Connection(net.x.output, net.output[0])
        nengo.Connection(net.y.output, net.output[1])

        # create a node to give a plot of the represented function
        ff_plot = fs.make_plot_node(domain=domain, lines=2,
                                    ylim=[-1000000, 1000000])
        nengo.Connection(net.assoc_mem_x.output,
                         ff_plot[:fs.n_basis], synapse=0.1)
        nengo.Connection(net.assoc_mem_y.output,
                         ff_plot[fs.n_basis:], synapse=0.1)

    return net
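
A stripped-down sketch of the product-decoding idea above (a standalone toy
with made-up weights and basis functions; the real example decodes the basis
values from the oscillator rather than computing them in a node): each
Product pair multiplies a constant weight by a basis function of the phase,
and a summing transform reconstructs f(t) = w[0]*B0(t) + w[1]*B1(t).

    import nengo
    import numpy as np

    w = np.array([0.5, -0.25])  # toy basis weights

    with nengo.Network() as toy:
        # toy basis functions of the phase, computed directly in a node
        basis_fns = nengo.Node(
            lambda t: [np.cos(2 * np.pi * t), np.sin(2 * np.pi * t)])
        weights = nengo.Node(w)
        prod = nengo.networks.Product(n_neurons=200, dimensions=2)
        nengo.Connection(weights, prod.A, synapse=None)
        nengo.Connection(basis_fns, prod.B, synapse=None)
        # summing transform: f(t) ~= w[0]*B0(t) + w[1]*B1(t)
        f_out = nengo.Node(size_in=1)
        nengo.Connection(prod.output, f_out, transform=np.ones((1, 2)))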
Beispiel #16
0
def test_add(rng):
    v = Vocabulary(3, rng=rng)
    v.add('A', [1, 2, 3])
    v.add('B', [4, 5, 6])
    v.add('C', [7, 8, 9])
    assert np.allclose(v.vectors, [[1, 2, 3], [4, 5, 6], [7, 8, 9]])
Beispiel #17
0
    def initialize_probes(self):
        # ===================== MAKE DISPLAY VOCABS ===========================
        sub_vocab1 = self.v.enum.create_subset(
            ['POS1*ONE', 'POS2*TWO', 'POS3*THR', 'POS4*FOR', 'POS5*FIV'])

        sub_vocab2 = self.v.main.create_subset(['ADD'])
        sub_vocab2.readonly = False
        sub_vocab2.add('N_ADD', self.v.main.parse('~ADD'))
        sub_vocab2.add('ADD*ADD', self.v.main.parse('ADD*ADD'))
        sub_vocab2.add('ADD*ADD*ADD', self.v.main.parse('ADD*ADD*ADD'))
        # sub_vocab2.add('ADD*ADD*ADD*ADD',
        #                self.v.main.parse('ADD*ADD*ADD*ADD'))
        # sub_vocab2.add('ADD*ADD*ADD*ADD*ADD',
        #                self.v.main.parse('ADD*ADD*ADD*ADD*ADD'))

        sub_vocab3 = self.v.main.create_subset([])
        sub_vocab3.readonly = False
        # sub_vocab3.add('N_POS1*ONE', self.v.main.parse('~(POS1*ONE)'))
        # sub_vocab3.add('N_POS1*TWO', self.v.main.parse('~(POS1*TWO)'))
        # sub_vocab3.add('N_POS1*THR', self.v.main.parse('~(POS1*THR)'))
        # sub_vocab3.add('N_POS1*FOR', self.v.main.parse('~(POS1*FOR)'))
        # sub_vocab3.add('N_POS1*FIV', self.v.main.parse('~(POS1*FIV)'))
        sub_vocab3.add('ADD', self.v.main.parse('ADD'))
        sub_vocab3.add('INC', self.v.main.parse('INC'))

        vocab_seq_list = self.v.main.create_subset([])
        vocab_seq_list.readonly = False
        for sp_str in [
                'POS1*ONE', 'POS2*TWO', 'POS3*THR', 'POS4*FOR', 'POS5*FIV',
                'POS6*SIX', 'POS7*SEV', 'POS8*EIG'
        ]:
            vocab_seq_list.add(sp_str, self.v.main.parse(sp_str))

        vocab_rpm = self.v.main.create_subset([])
        vocab_rpm.readonly = False
        for i in [1, 3, 8]:
            sp_str = self.v.num_sp_strs[i]
            vocab_rpm.add(
                'A_(P1+P2+P3)*%s' % sp_str,
                self.v.main.parse('POS1*%s+POS2*%s+POS3*%s' %
                                  (sp_str, sp_str, sp_str)))
            vocab_rpm.add(
                'N_(P1+P2+P3)*%s' % sp_str,
                self.v.main.parse('~(POS1*%s+POS2*%s+POS3*%s)' %
                                  (sp_str, sp_str, sp_str)))

        vocab_pos1 = self.v.main.create_subset([])
        vocab_pos1.readonly = False
        for sp_str in self.v.num_sp_strs:
            p1_str = 'POS1*%s' % sp_str
            vocab_pos1.add(p1_str, self.v.main.parse(p1_str))

        # ----------- Default vocabs ------------------
        mem_vocab = vocab_seq_list
        # mem_vocab = vocab_pos1
        # vocab_seq_list = vocab_rpm

        # ========================= MAKE PROBES ===============================
        if hasattr(self.m, 'stim'):
            p0 = self.probe_image(self.m.stim.output,
                                  synapse=None,
                                  shape=(28, 28))
        else:
            with self.m:
                self.m.null_node = nengo.Node(0)
            p0 = self.probe_value(self.m.null_node)

        if hasattr(self.m, 'vis') and True:
            net = self.m.vis
            pvs1 = self.probe_value(net.output, vocab=self.v.vis_main)
            pvs2 = self.probe_value(net.neg_attention)
            pvs3 = self.probe_value(net.am_utilities)
            pvs4 = self.probe_value(net.mb_output)
            pvs5 = self.probe_value(net.vis_out)

            self.add_graph('vis', [p0, pvs1, pvs2, pvs3])
            self.add_graph('vis net', [pvs4, pvs5])

        # ############ FOR DEBUGGING VIS DETECT SYSTEM ########################
        # if hasattr(self.m, 'vis') and True:
        #     net = self.m.vis
        #     pvsd1 = self.probe_value(net.detect_change_net.input_diff)
        #     pvsd2 = self.probe_value(net.detect_change_net.item_detect)
        #     pvsd3 = self.probe_value(net.detect_change_net.blank_detect)
        #
        #     self.add_graph('vis detect', [p0, pvsd1, pvsd2, pvsd3])

        if hasattr(self.m, 'ps') and True:
            net = self.m.ps
            pps1 = self.probe_value(net.task, vocab=self.v.ps_task)
            pps2 = self.probe_value(net.state, vocab=self.v.ps_state)
            pps3 = self.probe_value(net.dec, vocab=self.v.ps_dec)

            pps4 = self.probe_value(net.task_mb.mem1.output,
                                    vocab=self.v.ps_task)
            pps5 = self.probe_value(net.task_mb.mem2.output,
                                    vocab=self.v.ps_task)
            pps6 = self.probe_value(net.task_mb.mem1.input,
                                    synapse=None,
                                    vocab=self.v.ps_task)
            pps6b = self.probe_value(net.task_init.output)

            pps7 = self.probe_value(net.state_mb.mem1.output,
                                    vocab=self.v.ps_state)
            pps8 = self.probe_value(net.state_mb.mem2.output,
                                    vocab=self.v.ps_state)
            pps9 = self.probe_value(net.state_mb.mem1.input,
                                    synapse=None,
                                    vocab=self.v.ps_state)

            pps10 = self.probe_value(net.dec_mb.mem1.output,
                                     vocab=self.v.ps_dec)
            pps11 = self.probe_value(net.dec_mb.mem2.output,
                                     vocab=self.v.ps_dec)
            pps12 = self.probe_value(net.dec_mb.mem1.input,
                                     synapse=None,
                                     vocab=self.v.ps_dec)

            pps13 = self.probe_value(net.task_mb.gate)
            pps14 = self.probe_value(net.state_mb.gate)
            pps15 = self.probe_value(net.dec_mb.gate)
            pps13b = self.probe_value(net.task_mb.mem1.gate)
            pps14b = self.probe_value(net.state_mb.mem1.gate)
            pps15b = self.probe_value(net.dec_mb.mem1.gate)

            pps13r = self.probe_value(net.task_mb.reset)
            pps14r = self.probe_value(net.state_mb.reset)
            pps15r = self.probe_value(net.dec_mb.reset)

            pps16 = self.probe_value(net.action, vocab=self.v.ps_action_learn)
            pps17 = self.probe_value(net.action_in,
                                     vocab=self.v.ps_action_learn)

            self.add_graph('ps', [p0, pps1, pps2, pps3], [pps1, pps2, pps3])
            self.add_graph(
                'ps_task',
                [p0, pps6, pps4, pps5, pps6b, pps13, pps13b, pps13r],
                [pps4, pps5, pps6])
            self.add_graph('ps_state',
                           [p0, pps9, pps7, pps8, pps14, pps14b, pps14r])
            self.add_graph('ps_dec',
                           [p0, pps12, pps10, pps11, pps15, pps15b, pps15r])
            self.add_graph('ps_action', [p0, pps17, pps16], [pps16])

        if hasattr(self.m, 'enc') and True:
            net = self.m.enc
            pen1 = self.probe_value(net.pos_inc.gate)
            pen2 = self.probe_value(net.pos_inc.pos_mb.gateX)
            pen4 = self.probe_value(net.pos_inc.output, vocab=self.v.pos)
            pen5 = self.probe_value(net.pos_inc.pos_mb.mem1.output,
                                    vocab=self.v.pos)
            pen5b = self.probe_value(net.pos_inc.pos_mb.mem2.output,
                                     vocab=self.v.pos)
            pen6 = self.probe_value(net.pos_inc.reset)
            # pen7 = self.probe_value(net.pos_mb_acc.output, vocab=self.v.pos)
            # pen7a = self.probe_value(net.pos_mb_acc.input, vocab=self.v.pos)
            # pen8 = self.probe_value(net.pos_output, vocab=self.v.pos)

            self.add_graph('enc', [p0, pen1, pen2, pen4, pen5, pen5b, pen6],
                           [pen4])

        if hasattr(self.m, 'mem') and True:
            net = self.m.mem
            pmm1 = self.probe_value(net.mb1, vocab=mem_vocab)
            pmm1a = self.probe_value(net.mb1_net.mb_reh, vocab=mem_vocab)
            pmm1b = self.probe_value(net.mb1_net.mb_dcy, vocab=mem_vocab)
            pmm2 = self.probe_value(net.mb1_net.gate)
            pmm3 = self.probe_value(net.mb1_net.reset)
            pmm4 = self.probe_value(net.mb2, vocab=vocab_seq_list)
            pmm5 = self.probe_value(net.mb2_net.gate)
            pmm6 = self.probe_value(net.mb2_net.reset)
            pmm7 = self.probe_value(net.mb3, vocab=vocab_seq_list)
            pmm8 = self.probe_value(net.mb3_net.gate)
            pmm9 = self.probe_value(net.mb3_net.reset)

            self.add_graph('mb1', [p0, pmm1, pmm1a, pmm1b, pmm2, pmm3])
            self.add_graph('mb2', [p0, pmm4, pmm5, pmm6])
            self.add_graph('mb3', [p0, pmm7, pmm8, pmm9])

        if hasattr(self.m, 'mem') and True:
            net = self.m.mem
            pmm1i = self.probe_value(net.input, vocab=mem_vocab)
            pmm1ai = self.probe_value(net.mb1_net.mba.mem1.input,
                                      vocab=mem_vocab)
            pmm1bi = self.probe_value(net.mb1_net.mba.mem2.input,
                                      vocab=mem_vocab)
            pmm1g = self.probe_value(net.mb1_net.gate)
            pmm1gx = self.probe_value(net.mb1_net.mba.gateX)
            pmm1gn = self.probe_value(net.mb1_net.mba.gateN)
            pmm1ag = self.probe_value(net.mb1_net.mba.mem1.gate)
            pmm1bg = self.probe_value(net.mb1_net.mba.mem2.gate)

            self.add_graph('mb1 details', [
                pmm1i, pmm1ai, pmm1bi, pmm1a, pmm1g, pmm1gx, pmm1gn, pmm1ag,
                pmm1bg
            ])

        if hasattr(self.m, 'mem') and True:
            net = self.m.mem
            pmm10 = self.probe_value(net.mbave_net.input, vocab=sub_vocab2)
            pmm11 = self.probe_value(net.mbave_net.gate)
            pmm12 = self.probe_value(net.mbave_net.reset)
            pmm13 = self.probe_value(net.mbave, vocab=sub_vocab2)
            pmm13a = self.probe_value(net.mbave)

            self.add_graph('mbave', [p0, pmm10, pmm11, pmm12, pmm13, pmm13a],
                           [pmm10])

        if hasattr(self.m, 'trfm') and \
           not isinstance(self.m.trfm, TransformationSystemDummy):
            net = self.m.trfm
            ptf1 = self.probe_value(net.select_in_a.output, vocab=mem_vocab)
            ptf2 = self.probe_value(net.select_in_b.output, vocab=mem_vocab)
            ptf3 = self.probe_value(net.cconv1.output, vocab=vocab_rpm)
            ptf3b = self.probe_value(ptf3)
            # ptf3c = self.probe_value(ptf3)
            # ptf3d = self.probe_value(ptf3)
            ptf4 = self.probe_value(net.output, vocab=mem_vocab)
            # ptf4b = self.probe_value(net.output)
            ptf5 = self.probe_value(net.compare.output, vocab=self.v.ps_cmp)
            ptf6 = self.probe_value(net.norm_a.output, vocab=sub_vocab1)
            ptf7 = self.probe_value(net.norm_b.output, vocab=sub_vocab1)
            ptf8 = self.probe_value(net.norm_a.input, vocab=sub_vocab1)
            ptf9 = self.probe_value(net.norm_b.input, vocab=sub_vocab1)
            ptf10 = self.probe_value(net.compare.dot_prod)
            ptf11a = self.probe_value(net.cconv1.A, vocab=vocab_rpm)
            ptf11b = self.probe_value(net.cconv1.B, vocab=vocab_rpm)

            self.add_graph('trfm io', [p0, ptf1, ptf2, ptf4], [ptf1, ptf4])
            self.add_graph('trfm cc', [p0, pmm11, ptf3, ptf3b, ptf11a, ptf11b])
            self.add_graph('trfm cmp', [ptf5, ptf8, ptf6, ptf9, ptf7, ptf10],
                           [ptf6, ptf7])

        if hasattr(self.m, 'trfm') and \
           not isinstance(self.m.trfm, TransformationSystemDummy):
            nt = self.m.trfm
            ptf5 = self.probe_value(nt.am_trfms.pos1_to_pos, vocab=self.v.pos)
            ptf6 = self.probe_value(nt.am_trfms.pos1_to_num, vocab=self.v.item)
            ptf7 = self.probe_value(nt.am_trfms.num_to_pos1, vocab=self.v.pos1)
            ptf8 = self.probe_value(nt.am_trfms.pos_to_pos1, vocab=self.v.pos1)

            self.add_graph('trfm ams', [p0, ptf5, ptf6, ptf7, ptf8],
                           [ptf5, ptf6, ptf7, ptf8])

        if hasattr(self.m, 'bg') and True:
            pbg1 = self.probe_value(self.m.bg.input)
            pbg2 = self.probe_value(self.m.bg.output)

            self.add_graph('bg', [p0, pbg1, pbg2])

        if hasattr(self.m, 'dec') and True:
            net = self.m.dec
            pde1 = self.probe_value(net.item_dcconv, vocab=self.v.item)
            # pde2 = self.probe_value(net.select_am)
            # pde3 = self.probe_value(net.select_vis)
            pde4 = self.probe_value(net.am_out, synapse=0.01, vocab=self.v.mtr)
            # pde5 = self.probe_value(net.vt_out)
            pde6 = self.probe_value(net.pos_mb_gate_sig.output)
            # pde7 = self.probe_value(net.util_diff_neg)
            pde8 = self.probe_value(net.am_utils)
            pde9 = self.probe_value(net.am2_utils)
            pde10 = self.probe_value(net.util_diff)
            pde11 = self.probe_value(net.pos_recall_mb, vocab=self.v.pos)
            # pde11a = self.probe_value(net.pos_recall_mb_in, vocab=self.v.pos)
            pde11b = self.probe_value(net.fr_recall_mb.gate)
            # pde11c = self.probe_value(net.fr_recall_mb.reset)
            # pde11d = self.probe_value(net.fr_recall_mb.mem1.input,
            #                           vocab=self.v.pos)
            # pde11e = self.probe_value(net.fr_recall_mb.mem2.input,
            #                           vocab=self.v.pos)
            pde12 = self.probe_value(net.fr_dcconv.A, vocab=mem_vocab)
            pde13 = self.probe_value(net.fr_dcconv.B, vocab=self.v.pos)
            pde14 = self.probe_value(net.fr_dcconv.output, vocab=self.v.item)

            # pde12 = self.probe_value(net.recall_mb.gateX)
            # pde13 = self.probe_value(net.recall_mb.gateN)
            # pde14a = self.probe_value(net.recall_mb.mem1.input)
            # pde14b = self.probe_value(net.recall_mb.mem1.output)
            # pde14c = self.probe_value(net.recall_mb.mem2.input)
            # pde14d = self.probe_value(net.recall_mb.mem2.output)
            # pde14e = self.probe_value(net.recall_mb.mem1.diff.output)
            # pde14f = self.probe_value(net.recall_mb.reset)
            # pde14g = self.probe_value(net.recall_mb.mem1.gate)
            # pde12 = self.probe_value(net.dec_am_fr.output)
            # pde13 = self.probe_value(net.dec_am.item_output)
            # pde14 = self.probe_value(net.recall_mb.mem1.output)
            pde15 = self.probe_value(net.output_know)
            pde16 = self.probe_value(net.output_unk)
            pde18 = self.probe_value(net.output_stop)
            # pde19 = self.probe_value(net.am_th_utils)
            # pde20 = self.probe_value(net.fr_th_utils)
            pde21 = self.probe_value(net.output, vocab=self.v.mtr)
            # pde22 = self.probe_value(net.dec_am_fr.input)
            # pde23 = self.probe_value(net.am_def_th_utils)
            # pde24 = self.probe_value(net.fr_def_th_utils)
            pde25 = self.probe_value(net.fr_utils)
            pde26 = self.probe_value(net.pos_mb_gate_bias.output)
            pde27 = self.probe_value(net.pos_acc_input, vocab=self.v.pos)
            pde28 = self.probe_value(net.item_dcconv_a, vocab=mem_vocab)
            pde29 = self.probe_value(net.item_dcconv_b, vocab=self.v.pos)

            sel_out_vocab = Vocabulary(5)
            for n in range(5):
                vec = np.zeros(5)
                vec[n] = 1
                sel_out_vocab.add('SEL%d' % n, vec)

            pde30 = self.probe_value(net.sel_signals, vocab=sel_out_vocab)
            pde31 = self.probe_value(net.select_out.input0,
                                     vocab=self.v.mtr_disp)
            pde32 = self.probe_value(net.select_out.input1,
                                     vocab=self.v.mtr_disp)
            pde33 = self.probe_value(net.select_out.input3,
                                     vocab=self.v.mtr_disp)

            pde34 = self.probe_value(net.out_class_sr_y)
            pde35 = self.probe_value(net.out_class_sr_diff)
            pde36 = self.probe_value(net.out_class_sr_n)
            pde39 = self.probe_value(net.output_classify.fr_utils_n)

            pde37 = self.probe_value(net.serial_decode.inhibit)
            pde38 = self.probe_value(net.free_recall_decode.inhibit)

            # separate name so this does not shadow the output_classify
            # probe (pde39) used in the 'dec out class' graph below
            pde40 = self.probe_value(net.pos_change.output, label='POS Change')

            self.add_graph('dec decconv', [pde28, pde29, pde1, pde4, pde21],
                           [pde21])
            self.add_graph('dec kn unk st', [pde15, pde16, pde18])
            self.add_graph('dec am utils', [pde8, pde9, pde10, pde25])
            self.add_graph('dec sigs', [pde6, pde26, pde11, pde27, pde40])
            self.add_graph('dec sr', [p0, pde37, pde38])
            self.add_graph('dec fr', [pde11b, pde11, pde12, pde13, pde14])
            self.add_graph('dec sel', [pde30, pde31, pde32, pde33], [pde30])
            self.add_graph('dec out class', [pde34, pde35, pde36, pde39])

        if hasattr(self.m, 'mtr'):
            net = self.m.mtr
            pmt1 = self.probe_value(net.ramp)
            pmt2 = self.probe_value(net.ramp_reset_hold)
            pmt2b = self.probe_value(net.ramp_sig.init_hold)
            pmt3 = self.probe_value(net.motor_stop_input.output)
            pmt4 = self.probe_value(net.motor_init.output)
            pmt5 = self.probe_value(net.motor_go)
            pmt6 = self.probe_value(net.ramp_sig.stop)
            pmt7a = self.probe_value(net.arm_px_node)
            pmt7b = self.probe_value(net.arm_py_node)
            pmt8 = self.probe_value(net.pen_down, synapse=0.05)
            pmt11 = self.probe_value(net.motor_bypass.output)

            if hasattr(net, 'zero_centered_arm_ee_loc'):
                pmt12 = self.probe_path(net.zero_centered_arm_ee_loc,
                                        pmt8,
                                        synapse=0.05)
            else:
                pmt12 = self.probe_null()

            self.add_graph(
                'mtr', [p0, pmt1, pmt2, pmt2b, pmt6, pmt3, pmt4, pmt5, pmt11])
            self.add_graph('arm', [pmt7a, pmt7b, pmt8, pmt12])
Beispiel #18
0
def test_add():
    v = Vocabulary(3)
    v.add('A', [1, 2, 3])
    v.add('B', [4, 5, 6])
    v.add('C', [7, 8, 9])
    assert np.allclose(v.vectors, [[1, 2, 3], [4, 5, 6], [7, 8, 9]])
Beispiel #19
0
    def initialize_probes(self):
        # ===================== MAKE DISPLAY VOCABS ===========================
        sub_vocab1 = self.v.enum.create_subset(['POS1*ONE', 'POS2*TWO',
                                                'POS3*THR', 'POS4*FOR',
                                                'POS5*FIV'])

        sub_vocab2 = self.v.main.create_subset(['ADD'])
        sub_vocab2.readonly = False
        sub_vocab2.add('N_ADD', self.v.main.parse('~ADD'))
        sub_vocab2.add('ADD*ADD', self.v.main.parse('ADD*ADD'))
        sub_vocab2.add('ADD*ADD*ADD', self.v.main.parse('ADD*ADD*ADD'))
        # sub_vocab2.add('ADD*ADD*ADD*ADD',
        #                self.v.main.parse('ADD*ADD*ADD*ADD'))
        # sub_vocab2.add('ADD*ADD*ADD*ADD*ADD',
        #                self.v.main.parse('ADD*ADD*ADD*ADD*ADD'))

        sub_vocab3 = self.v.main.create_subset([])
        sub_vocab3.readonly = False
        # sub_vocab3.add('N_POS1*ONE', self.v.main.parse('~(POS1*ONE)'))
        # sub_vocab3.add('N_POS1*TWO', self.v.main.parse('~(POS1*TWO)'))
        # sub_vocab3.add('N_POS1*THR', self.v.main.parse('~(POS1*THR)'))
        # sub_vocab3.add('N_POS1*FOR', self.v.main.parse('~(POS1*FOR)'))
        # sub_vocab3.add('N_POS1*FIV', self.v.main.parse('~(POS1*FIV)'))
        sub_vocab3.add('ADD', self.v.main.parse('ADD'))
        sub_vocab3.add('INC', self.v.main.parse('INC'))

        vocab_seq_list = self.v.main.create_subset([])
        vocab_seq_list.readonly = False
        for sp_str in ['POS1*ONE', 'POS2*TWO', 'POS3*THR', 'POS4*FOR',
                       'POS5*FIV', 'POS6*SIX', 'POS7*SEV', 'POS8*EIG']:
            vocab_seq_list.add(sp_str, self.v.main.parse(sp_str))

        vocab_rpm = self.v.main.create_subset([])
        vocab_rpm.readonly = False
        for i in [1, 3, 8]:
            sp_str = self.v.num_sp_strs[i]
            vocab_rpm.add('A_(P1+P2+P3)*%s' % sp_str,
                          self.v.main.parse('POS1*%s+POS2*%s+POS3*%s' %
                                            (sp_str, sp_str, sp_str)))
            vocab_rpm.add('N_(P1+P2+P3)*%s' % sp_str,
                          self.v.main.parse('~(POS1*%s+POS2*%s+POS3*%s)' %
                                            (sp_str, sp_str, sp_str)))

        vocab_pos1 = self.v.main.create_subset([])
        vocab_pos1.readonly = False
        for sp_str in self.v.num_sp_strs:
            p1_str = 'POS1*%s' % sp_str
            vocab_pos1.add(p1_str, self.v.main.parse(p1_str))

        # ----------- Default vocabs ------------------
        mem_vocab = vocab_seq_list
        # mem_vocab = vocab_pos1
        # vocab_seq_list = vocab_rpm

        # ========================= MAKE PROBES ===============================
        if hasattr(self.m, 'stim'):
            p0 = self.probe_image(self.m.stim.output, synapse=None,
                                  shape=(28, 28))
        else:
            with self.m:
                self.m.null_node = nengo.Node(0)
            p0 = self.probe_value(self.m.null_node)

        if hasattr(self.m, 'vis') and True:
            net = self.m.vis
            pvs1 = self.probe_value(net.output, vocab=self.v.vis_main)
            pvs2 = self.probe_value(net.neg_attention)
            pvs3 = self.probe_value(net.am_utilities)
            pvs4 = self.probe_value(net.mb_output)
            pvs5 = self.probe_value(net.vis_out)

            self.add_graph('vis', [p0, pvs1, pvs2, pvs3])
            self.add_graph('vis net', [pvs4, pvs5])

        # ############ FOR DEBUGGING VIS DETECT SYSTEM ########################
        # if hasattr(self.m, 'vis') and True:
        #     net = self.m.vis
        #     pvsd1 = self.probe_value(net.detect_change_net.input_diff)
        #     pvsd2 = self.probe_value(net.detect_change_net.item_detect)
        #     pvsd3 = self.probe_value(net.detect_change_net.blank_detect)
        #
        #     self.add_graph('vis detect', [p0, pvsd1, pvsd2, pvsd3])

        if hasattr(self.m, 'ps') and True:
            net = self.m.ps
            pps1 = self.probe_value(net.task, vocab=self.v.ps_task)
            pps2 = self.probe_value(net.state, vocab=self.v.ps_state)
            pps3 = self.probe_value(net.dec, vocab=self.v.ps_dec)

            pps4 = self.probe_value(net.task_mb.mem1.output,
                                    vocab=self.v.ps_task)
            pps5 = self.probe_value(net.task_mb.mem2.output,
                                    vocab=self.v.ps_task)
            pps6 = self.probe_value(net.task_mb.mem1.input, synapse=None,
                                    vocab=self.v.ps_task)
            pps6b = self.probe_value(net.task_init.output)

            pps7 = self.probe_value(net.state_mb.mem1.output,
                                    vocab=self.v.ps_state)
            pps8 = self.probe_value(net.state_mb.mem2.output,
                                    vocab=self.v.ps_state)
            pps9 = self.probe_value(net.state_mb.mem1.input, synapse=None,
                                    vocab=self.v.ps_state)

            pps10 = self.probe_value(net.dec_mb.mem1.output,
                                     vocab=self.v.ps_dec)
            pps11 = self.probe_value(net.dec_mb.mem2.output,
                                     vocab=self.v.ps_dec)
            pps12 = self.probe_value(net.dec_mb.mem1.input, synapse=None,
                                     vocab=self.v.ps_dec)

            pps13 = self.probe_value(net.task_mb.gate)
            pps14 = self.probe_value(net.state_mb.gate)
            pps15 = self.probe_value(net.dec_mb.gate)
            pps13b = self.probe_value(net.task_mb.mem1.gate)
            pps14b = self.probe_value(net.state_mb.mem1.gate)
            pps15b = self.probe_value(net.dec_mb.mem1.gate)

            pps13r = self.probe_value(net.task_mb.reset)
            pps14r = self.probe_value(net.state_mb.reset)
            pps15r = self.probe_value(net.dec_mb.reset)

            pps16 = self.probe_value(net.action, vocab=self.v.ps_action_learn)
            pps17 = self.probe_value(net.action_in,
                                     vocab=self.v.ps_action_learn)

            self.add_graph('ps', [p0, pps1, pps2, pps3], [pps1, pps2, pps3])
            self.add_graph(
                'ps_task',
                [p0, pps6, pps4, pps5, pps6b, pps13, pps13b, pps13r],
                [pps4, pps5, pps6])
            self.add_graph(
                'ps_state', [p0, pps9, pps7, pps8, pps14, pps14b, pps14r])
            self.add_graph(
                'ps_dec', [p0, pps12, pps10, pps11, pps15, pps15b, pps15r])
            self.add_graph('ps_action', [p0, pps17, pps16], [pps16])

        if hasattr(self.m, 'enc') and True:
            net = self.m.enc
            pen1 = self.probe_value(net.pos_inc.gate)
            pen2 = self.probe_value(net.pos_inc.pos_mb.gateX)
            pen4 = self.probe_value(net.pos_inc.output, vocab=self.v.pos)
            pen5 = self.probe_value(net.pos_inc.pos_mb.mem1.output,
                                    vocab=self.v.pos)
            pen5b = self.probe_value(net.pos_inc.pos_mb.mem2.output,
                                     vocab=self.v.pos)
            pen6 = self.probe_value(net.pos_inc.reset)
            # pen7 = self.probe_value(net.pos_mb_acc.output, vocab=self.v.pos)
            # pen7a = self.probe_value(net.pos_mb_acc.input, vocab=self.v.pos)
            # pen8 = self.probe_value(net.pos_output, vocab=self.v.pos)

            self.add_graph('enc', [p0, pen1, pen2, pen4, pen5, pen5b, pen6],
                           [pen4])

        if hasattr(self.m, 'mem') and True:
            net = self.m.mem
            pmm1 = self.probe_value(net.mb1, vocab=mem_vocab)
            pmm1a = self.probe_value(net.mb1_net.mb_reh, vocab=mem_vocab)
            pmm1b = self.probe_value(net.mb1_net.mb_dcy, vocab=mem_vocab)
            pmm2 = self.probe_value(net.mb1_net.gate)
            pmm3 = self.probe_value(net.mb1_net.reset)
            pmm4 = self.probe_value(net.mb2, vocab=vocab_seq_list)
            pmm5 = self.probe_value(net.mb2_net.gate)
            pmm6 = self.probe_value(net.mb2_net.reset)
            pmm7 = self.probe_value(net.mb3, vocab=vocab_seq_list)
            pmm8 = self.probe_value(net.mb3_net.gate)
            pmm9 = self.probe_value(net.mb3_net.reset)

            self.add_graph('mb1', [p0, pmm1, pmm1a, pmm1b, pmm2, pmm3])
            self.add_graph('mb2', [p0, pmm4, pmm5, pmm6])
            self.add_graph('mb3', [p0, pmm7, pmm8, pmm9])

        if hasattr(self.m, 'mem') and True:
            net = self.m.mem
            pmm1i = self.probe_value(net.input, vocab=mem_vocab)
            pmm1ai = self.probe_value(net.mb1_net.mba.mem1.input,
                                      vocab=mem_vocab)
            pmm1bi = self.probe_value(net.mb1_net.mba.mem2.input,
                                      vocab=mem_vocab)
            pmm1g = self.probe_value(net.mb1_net.gate)
            pmm1gx = self.probe_value(net.mb1_net.mba.gateX)
            pmm1gn = self.probe_value(net.mb1_net.mba.gateN)
            pmm1ag = self.probe_value(net.mb1_net.mba.mem1.gate)
            pmm1bg = self.probe_value(net.mb1_net.mba.mem2.gate)

            self.add_graph(
                'mb1 details',
                [pmm1i, pmm1ai, pmm1bi, pmm1a, pmm1g, pmm1gx, pmm1gn, pmm1ag,
                 pmm1bg])

        if hasattr(self.m, 'mem') and True:
            net = self.m.mem
            pmm10 = self.probe_value(net.mbave_net.input, vocab=sub_vocab2)
            pmm11 = self.probe_value(net.mbave_net.gate)
            pmm12 = self.probe_value(net.mbave_net.reset)
            pmm13 = self.probe_value(net.mbave, vocab=sub_vocab2)
            pmm13a = self.probe_value(net.mbave)

            self.add_graph('mbave', [p0, pmm10, pmm11, pmm12, pmm13, pmm13a],
                           [pmm10])

        if hasattr(self.m, 'trfm') and \
           not isinstance(self.m.trfm, TransformationSystemDummy):
            net = self.m.trfm
            ptf1 = self.probe_value(net.select_in_a.output, vocab=mem_vocab)
            ptf2 = self.probe_value(net.select_in_b.output, vocab=mem_vocab)
            ptf3 = self.probe_value(net.cconv1.output, vocab=vocab_rpm)
            ptf3b = self.probe_value(net.cconv1.output)
            # ptf3c = self.probe_value(net.cconv1.output)
            # ptf3d = self.probe_value(net.cconv1.output)
            ptf4 = self.probe_value(net.output, vocab=mem_vocab)
            # ptf4b = self.probe_value(net.output)
            ptf5 = self.probe_value(net.compare.output, vocab=self.v.ps_cmp)
            ptf6 = self.probe_value(net.norm_a.output, vocab=sub_vocab1)
            ptf7 = self.probe_value(net.norm_b.output, vocab=sub_vocab1)
            ptf8 = self.probe_value(net.norm_a.input, vocab=sub_vocab1)
            ptf9 = self.probe_value(net.norm_b.input, vocab=sub_vocab1)
            ptf10 = self.probe_value(net.compare.dot_prod)
            ptf11a = self.probe_value(net.cconv1.A, vocab=vocab_rpm)
            ptf11b = self.probe_value(net.cconv1.B, vocab=vocab_rpm)

            self.add_graph('trfm io', [p0, ptf1, ptf2, ptf4], [ptf1, ptf4])
            self.add_graph('trfm cc', [p0, pmm11, ptf3, ptf3b, ptf11a, ptf11b])
            self.add_graph(
                'trfm cmp', [ptf5, ptf8, ptf6, ptf9, ptf7, ptf10],
                [ptf6, ptf7])

        if hasattr(self.m, 'trfm') and \
           not isinstance(self.m.trfm, TransformationSystemDummy):
            nt = self.m.trfm
            ptf5 = self.probe_value(nt.am_trfms.pos1_to_pos, vocab=self.v.pos)
            ptf6 = self.probe_value(nt.am_trfms.pos1_to_num, vocab=self.v.item)
            ptf7 = self.probe_value(nt.am_trfms.num_to_pos1, vocab=self.v.pos1)
            ptf8 = self.probe_value(nt.am_trfms.pos_to_pos1, vocab=self.v.pos1)

            self.add_graph('trfm ams', [p0, ptf5, ptf6, ptf7, ptf8],
                           [ptf5, ptf6, ptf7, ptf8])

        if hasattr(self.m, 'bg') and True:
            pbg1 = self.probe_value(self.m.bg.input)
            pbg2 = self.probe_value(self.m.bg.output)

            self.add_graph('bg', [p0, pbg1, pbg2])

        if hasattr(self.m, 'dec') and True:
            net = self.m.dec
            pde1 = self.probe_value(net.item_dcconv, vocab=self.v.item)
            # pde2 = self.probe_value(net.select_am)
            # pde3 = self.probe_value(net.select_vis)
            pde4 = self.probe_value(net.am_out, synapse=0.01, vocab=self.v.mtr)
            # pde5 = self.probe_value(net.vt_out)
            pde6 = self.probe_value(net.pos_mb_gate_sig.output)
            # pde7 = self.probe_value(net.util_diff_neg)
            pde8 = self.probe_value(net.am_utils)
            pde9 = self.probe_value(net.am2_utils)
            pde10 = self.probe_value(net.util_diff)
            pde11 = self.probe_value(net.pos_recall_mb, vocab=self.v.pos)
            # pde11a = self.probe_value(net.pos_recall_mb_in, vocab=self.v.pos)
            pde11b = self.probe_value(net.fr_recall_mb.gate)
            # pde11c = self.probe_value(net.fr_recall_mb.reset)
            # pde11d = self.probe_value(net.fr_recall_mb.mem1.input,
            #                           vocab=self.v.pos)
            # pde11e = self.probe_value(net.fr_recall_mb.mem2.input,
            #                           vocab=self.v.pos)
            pde12 = self.probe_value(net.fr_dcconv.A, vocab=mem_vocab)
            pde13 = self.probe_value(net.fr_dcconv.B, vocab=self.v.pos)
            pde14 = self.probe_value(net.fr_dcconv.output, vocab=self.v.item)

            # pde12 = self.probe_value(net.recall_mb.gateX)
            # pde13 = self.probe_value(net.recall_mb.gateN)
            # pde14a = self.probe_value(net.recall_mb.mem1.input)
            # pde14b = self.probe_value(net.recall_mb.mem1.output)
            # pde14c = self.probe_value(net.recall_mb.mem2.input)
            # pde14d = self.probe_value(net.recall_mb.mem2.output)
            # pde14e = self.probe_value(net.recall_mb.mem1.diff.output)
            # pde14f = self.probe_value(net.recall_mb.reset)
            # pde14g = self.probe_value(net.recall_mb.mem1.gate)
            # pde12 = self.probe_value(net.dec_am_fr.output)
            # pde13 = self.probe_value(net.dec_am.item_output)
            # pde14 = self.probe_value(net.recall_mb.mem1.output)
            pde15 = self.probe_value(net.output_know)
            pde16 = self.probe_value(net.output_unk)
            pde18 = self.probe_value(net.output_stop)
            # pde19 = self.probe_value(net.am_th_utils)
            # pde20 = self.probe_value(net.fr_th_utils)
            pde21 = self.probe_value(net.output, vocab=self.v.mtr)
            # pde22 = self.probe_value(net.dec_am_fr.input)
            # pde23 = self.probe_value(net.am_def_th_utils)
            # pde24 = self.probe_value(net.fr_def_th_utils)
            pde25 = self.probe_value(net.fr_utils)
            pde26 = self.probe_value(net.pos_mb_gate_bias.output)
            pde27 = self.probe_value(net.pos_acc_input, vocab=self.v.pos)
            pde28 = self.probe_value(net.item_dcconv_a, vocab=mem_vocab)
            pde29 = self.probe_value(net.item_dcconv_b, vocab=self.v.pos)

            # Build a 5-D one-hot vocabulary so the scalar select signals
            # can be plotted with labelled components
            sel_out_vocab = Vocabulary(5)
            for n in range(5):
                vec = np.zeros(5)
                vec[n] = 1
                sel_out_vocab.add('SEL%d' % n, vec)

            pde30 = self.probe_value(net.sel_signals, vocab=sel_out_vocab)
            pde31 = self.probe_value(net.select_out.input0,
                                     vocab=self.v.mtr_disp)
            pde32 = self.probe_value(net.select_out.input1,
                                     vocab=self.v.mtr_disp)
            pde33 = self.probe_value(net.select_out.input3,
                                     vocab=self.v.mtr_disp)

            pde34 = self.probe_value(net.out_class_sr_y)
            pde35 = self.probe_value(net.out_class_sr_diff)
            pde36 = self.probe_value(net.out_class_sr_n)
            pde40 = self.probe_value(net.output_classify.fr_utils_n)

            pde37 = self.probe_value(net.serial_decode.inhibit)
            pde38 = self.probe_value(net.free_recall_decode.inhibit)

            pde39 = self.probe_value(net.pos_change.output, label='POS Change')

            self.add_graph(
                'dec decconv', [pde28, pde29, pde1, pde4, pde21], [pde21])
            self.add_graph('dec kn unk st', [pde15, pde16, pde18])
            self.add_graph('dec am utils', [pde8, pde9, pde10, pde25])
            self.add_graph('dec sigs', [pde6, pde26, pde11, pde27, pde39])
            self.add_graph('dec sr', [p0, pde37, pde38])
            self.add_graph('dec fr', [pde11b, pde11, pde12, pde13, pde14])
            self.add_graph('dec sel', [pde30, pde31, pde32, pde33], [pde30])
            self.add_graph('dec out class', [pde34, pde35, pde36, pde40])

        if hasattr(self.m, 'mtr'):
            net = self.m.mtr
            pmt1 = self.probe_value(net.ramp)
            pmt2 = self.probe_value(net.ramp_reset_hold)
            pmt2b = self.probe_value(net.ramp_sig.init_hold)
            pmt3 = self.probe_value(net.motor_stop_input.output)
            pmt4 = self.probe_value(net.motor_init.output)
            pmt5 = self.probe_value(net.motor_go)
            pmt6 = self.probe_value(net.ramp_sig.stop)
            pmt7a = self.probe_value(net.arm_px_node)
            pmt7b = self.probe_value(net.arm_py_node)
            pmt8 = self.probe_value(net.pen_down, synapse=0.05)
            pmt11 = self.probe_value(net.motor_bypass.output)

            pmt12 = self.probe_path(net.zero_centered_arm_ee_loc,
                                    pmt8, synapse=0.05)

            self.add_graph(
                'mtr', [p0, pmt1, pmt2, pmt2b, pmt6, pmt3, pmt4, pmt5, pmt11])
            self.add_graph('arm', [pmt7a, pmt7b, pmt8, pmt12])
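
        # NOTE: `probe_value`, `probe_image`, `probe_path`, and `add_graph`
        # are helper methods of the surrounding class; their definitions are
        # not part of this excerpt. As a rough, hypothetical sketch of the
        # pattern (not the model's actual implementation), `probe_value`
        # presumably wraps `nengo.Probe` and records the vocabulary to use
        # when the graph is rendered:
        #
        #     def probe_value(self, target, synapse=0.005, vocab=None, **kw):
        #         with self.m:
        #             p = nengo.Probe(target, synapse=synapse, **kw)
        #         self.probe_vocabs[p] = vocab
        #         return p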
def main():

    model = spa.SPA(label="Vector Storage")
    with model:

        # Dimensionality of each representation
        num_dimensions = 30
        sub_dimensions = 1

        # Create the vocabulary
        vocab = Vocabulary(num_dimensions, randomize=False)

        # Form the inputs manually by directly defining their vectors
        stored_value_1 = np.random.rand(num_dimensions) - 0.5
        stored_value_1 /= np.linalg.norm(stored_value_1)
        vocab.add("Stored_value_1", stored_value_1)

        stored_value_2 = np.random.rand(num_dimensions) - 0.5
        stored_value_2 /= np.linalg.norm(stored_value_2)
        vocab.add("Stored_value_2", stored_value_2)

        stored_value_3 = np.random.rand(num_dimensions) - 0.5
        stored_value_3 /= np.linalg.norm(stored_value_3)
        vocab.add("Stored_value_3", stored_value_3)

        # Create a semantic pointer corresponding to the "correct" answer
        # for the operation
        sum_vector = stored_value_1 + stored_value_2 - stored_value_3
        sum_vector = sum_vector / np.linalg.norm(sum_vector)
        vocab.add("Sum", sum_vector)

        # Define the control signals as two fixed unit vectors (orthogonal
        # for even dimensionality)
        r1 = np.ones(num_dimensions)
        r1 /= np.linalg.norm(r1)
        r2 = np.array([(-1.0)**k for k in range(num_dimensions)])
        r2 /= np.linalg.norm(r2)
        vocab.add("Hold_signal", r1)
        vocab.add("Start_signal", r2)

        # Control when the vector operation takes place
        def control_input(t):
            if t < 1:
                return "Hold_signal"
            else:
                return "Start_signal"

        # Inputs to the word input buffers
        def first_input(t):
            return "Stored_value_1"

        def second_input(t):
            return "Stored_value_2"

        def third_input(t):
            return "Stored_value_3"

        # Control buffer
        model.control = spa.Buffer(dimensions=num_dimensions,
                                   subdimensions=sub_dimensions,
                                   neurons_per_dimension=200,
                                   direct=True,
                                   vocab=vocab)
        control_probe = nengo.Probe(model.control.state.output)

        # Buffers to store the inputs: e.g., King, Man, Woman
        model.word_buffer1 = spa.Buffer(dimensions=num_dimensions,
                                        subdimensions=sub_dimensions,
                                        neurons_per_dimension=200,
                                        direct=True,
                                        vocab=vocab)
        model.word_buffer2 = spa.Buffer(dimensions=num_dimensions,
                                        subdimensions=sub_dimensions,
                                        neurons_per_dimension=200,
                                        direct=True,
                                        vocab=vocab)
        model.word_buffer3 = spa.Buffer(dimensions=num_dimensions,
                                        subdimensions=sub_dimensions,
                                        neurons_per_dimension=200,
                                        direct=True,
                                        vocab=vocab)

        # Probe to visualize the values stored in the buffers
        buffer_1_probe = nengo.Probe(model.word_buffer1.state.output)
        buffer_2_probe = nengo.Probe(model.word_buffer2.state.output)
        buffer_3_probe = nengo.Probe(model.word_buffer3.state.output)

        # Buffer to hold the result: e.g. Queen
        model.result = spa.Buffer(dimensions=num_dimensions,
                                  subdimensions=sub_dimensions,
                                  neurons_per_dimension=200,
                                  direct=True,
                                  vocab=vocab)
        result_probe = nengo.Probe(model.result.state.output)

        # Control system
        actions = spa.Actions(
            'dot(control, Start_signal) --> result = word_buffer1 + word_buffer2 - word_buffer3'
        )
        model.bg = spa.BasalGanglia(actions)
        model.thalamus = spa.Thalamus(model.bg, subdim_channel=sub_dimensions)

        # Connect up the inputs
        model.input = spa.Input(control=control_input,
                                word_buffer1=first_input,
                                word_buffer2=second_input,
                                word_buffer3=third_input)

    # Start the simulator
    sim = nengo.Simulator(model)

    # Dynamic plotting
    plt.ion()  # Dynamic updating of plots
    fig = plt.figure(figsize=(15, 8))
    plt.show()
    ax = fig.gca()
    ax.set_title("Vector Storage")

    while True:
        sim.run(1)  # Run for an additional 1 second
        plt.clf()  # Clear the figure
        plt.plot(sim.trange(), similarity(sim.data[result_probe], vocab))
        plt.legend(vocab.keys, loc=2)
        plt.draw()  # Re-draw

        print(sim.data[buffer_1_probe][-1])
        print(sim.data[buffer_2_probe][-1])
        print(sim.data[buffer_3_probe][-1])
        print(sim.data[result_probe][-1])
        print("\n")

    plt.show()
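
    # A quick numeric sanity check (sketch): after the Start_signal period
    # has run, the result buffer should align with the "Sum" pointer defined
    # above; `vocab["Sum"].v` is the stored target vector.
    #
    #     result_vec = sim.data[result_probe][-1]
    #     result_vec = result_vec / np.linalg.norm(result_vec)
    #     print(np.dot(result_vec, vocab["Sum"].v))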
Example #21
0
# --- Unitary semantic pointers
unitary_sp_strs = [num_sp_strs[0], pos_sp_strs[0]]
unitary_sp_strs.extend(ops_sp_strs)


# ####################### Vocabulary definitions ##############################
# --- Primary vocabulary ---
vocab = Vocabulary(cfg.sp_dim, unitary=unitary_sp_strs, rng=cfg.rng)

# --- Add numerical sp's ---
vocab.parse('%s+%s' % (ops_sp_strs[0], num_sp_strs[0]))
add_sp = vocab[ops_sp_strs[0]]
num_sp = vocab[num_sp_strs[0]].copy()
for i in range(len(num_sp_strs) - 1):
    num_sp = num_sp.copy() * add_sp
    vocab.add(num_sp_strs[i + 1], num_sp)

# --- Add positional sp's ---
vocab.parse('%s+%s' % (ops_sp_strs[1], pos_sp_strs[0]))
inc_sp = vocab[ops_sp_strs[1]]
pos_sp = vocab[pos_sp_strs[0]].copy()
for i in range(len(pos_sp_strs) - 1):
    pos_sp = pos_sp.copy() * inc_sp
    vocab.add(pos_sp_strs[i + 1], pos_sp)

# --- Add other visual sp's ---
vocab.parse('+'.join(misc_vis_sp_strs))
vocab.parse('+'.join(ps_task_vis_sp_strs))

# --- Add production system sp's ---
vocab.parse('+'.join(ps_task_sp_strs))
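
# The two loops above encode ordinal structure by repeated circular
# convolution with a unitary pointer: NUMn = NUM1 * ADD^(n-1) and
# POSn = POS1 * INC^(n-1). A minimal, self-contained sketch of the same
# technique (the names below are illustrative, not taken from the model):
import numpy as np
from nengo.spa import Vocabulary

demo_vocab = Vocabulary(64, unitary=['P1', 'INC'])
demo_vocab.parse('P1 + INC')
p = demo_vocab['P1'].copy()
for n in range(2, 5):
    p = p * demo_vocab['INC']  # circular convolution with the unitary INC
    demo_vocab.add('P%d' % n, p)

# Convolving with a unitary pointer preserves length, so the chain of
# positions stays near-orthogonal and each one is separately decodable:
print(np.dot(demo_vocab['P2'].v, demo_vocab['P3'].v))  # close to 0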
Example #22
0
def test_add(rng):
    v = Vocabulary(3, rng=rng)
    v.add("A", [1, 2, 3])
    v.add("B", [4, 5, 6])
    v.add("C", [7, 8, 9])
    assert np.allclose(v.vectors, [[1, 2, 3], [4, 5, 6], [7, 8, 9]])
Example #23
0
def main():

    print "Loading Word2Vec model..."
    word2vec_model = gensim.models.Word2Vec.load("word2vec_model_1_cleaned")
    word2vec_model.init_sims(replace=True)
    word2vec_vocab = word2vec_model.index2word

    import readline
    readline.parse_and_bind("tab: complete")

    def complete(text, state):
        results = [x for x in word2vec_vocab if x.startswith(text)] + [None]
        return results[state]

    readline.set_completer(complete)

    print "This program uses an SPA network in Nengo to perform vector operations on a semantically structured word-vector space *learned* from a sentence corpus."
    print "When trained on a large corpus of English sentences, for example, it should produce: Vector[king] - Vector[man] + Vector[woman] = Vector[king]"

    print "For now, it just does subtraction..."
    print "\nPress <tab> twice to see all your autocomplete options."
    print "_______________________________________________________"
    line1 = raw_input('\nFirst word:> ')
    line2 = raw_input('\nSecond word:> ')

    if line1 not in word2vec_vocab or line2 not in word2vec_vocab:
        raise SystemExit("Both words must be in the Word2Vec vocabulary.")

    val1 = word2vec_model[line1]
    val2 = word2vec_model[line2]
    diff = val1 - val2
    dot_products = [
        np.dot(word2vec_model[word2vec_model.index2word[i]], diff)
        for i in range(len(word2vec_model.index2word))
    ]
    closest_word = word2vec_model.index2word[dot_products.index(
        max(dot_products))]
    print("\nWhat the Nengo model SHOULD return is something like: %s"
          % closest_word)

    print "\nDefining SPA network..."
    model = spa.SPA(label="Vector Storage")
    with model:

        # Dimensionality of each representation
        num_dimensions = 100
        sub_dimensions = 1

        # Create the vocabulary
        vocab = Vocabulary(num_dimensions, randomize=False)

        stored_value_1 = val1
        vocab.add("Stored_value_1", stored_value_1)

        stored_value_2 = val2
        vocab.add("Stored_value_2", stored_value_2)

        # Create a semantic pointer corresponding to the "correct" answer
        # for the operation
        diff_vector = stored_value_1 - stored_value_2
        diff_vector = diff_vector / np.linalg.norm(diff_vector)
        vocab.add("Correct_target", diff_vector)

        # Define the control signals as two fixed unit vectors (orthogonal
        # for even dimensionality)
        r1 = np.ones(num_dimensions)
        r1 /= np.linalg.norm(r1)
        r2 = np.array([(-1.0)**k for k in range(num_dimensions)])
        r2 /= np.linalg.norm(r2)
        vocab.add("Hold_signal", r1)
        vocab.add("Start_signal", r2)

        # Control when the vector operation takes place
        def control_input(t):
            if t < 1:
                return "Hold_signal"
            else:
                return "Start_signal"

        # Control buffer
        model.control = spa.Buffer(dimensions=num_dimensions,
                                   subdimensions=sub_dimensions,
                                   neurons_per_dimension=200,
                                   direct=True,
                                   vocab=vocab)
        control_probe = nengo.Probe(model.control.state.output)

        # Inputs to the word input buffers
        def first_input(t):
            return "Stored_value_1"

        def second_input(t):
            return "Stored_value_2"

        # Buffers to store the inputs:
        model.word_buffer1 = spa.Buffer(dimensions=num_dimensions,
                                        subdimensions=sub_dimensions,
                                        neurons_per_dimension=200,
                                        direct=True,
                                        vocab=vocab)
        model.word_buffer2 = spa.Buffer(dimensions=num_dimensions,
                                        subdimensions=sub_dimensions,
                                        neurons_per_dimension=200,
                                        direct=True,
                                        vocab=vocab)

        # Probe to visualize the values stored in the buffers
        buffer_1_probe = nengo.Probe(model.word_buffer1.state.output)
        buffer_2_probe = nengo.Probe(model.word_buffer2.state.output)

        # Buffer to hold the result:
        model.result = spa.Buffer(dimensions=num_dimensions,
                                  subdimensions=sub_dimensions,
                                  neurons_per_dimension=200,
                                  direct=True,
                                  vocab=vocab)
        result_probe = nengo.Probe(model.result.state.output)

        # Control system
        actions = spa.Actions(
            'dot(control, Start_signal) --> result = word_buffer1 - word_buffer2',
            'dot(control, Hold_signal) --> result = Hold_signal')
        model.bg = spa.BasalGanglia(actions)
        model.thalamus = spa.Thalamus(model.bg, subdim_channel=sub_dimensions)

        # Connect up the inputs
        model.input = spa.Input(control=control_input,
                                word_buffer1=first_input,
                                word_buffer2=second_input)

    # Start the simulator
    sim = nengo.Simulator(model)

    # Dynamic plotting
    plt.ion()  # Dynamic updating of plots
    fig = plt.figure(figsize=(15, 8))
    plt.show()
    ax = fig.gca()
    ax.set_title("Vector Storage")

    while True:
        sim.run(0.5)  # Run for an additional 0.5 seconds
        plt.clf()  # Clear the figure
        plt.plot(sim.trange(), similarity(sim.data[result_probe], vocab))
        plt.legend(vocab.keys, loc=2)
        plt.draw()  # Re-draw

        # Go back to our manually-stored vocabulary and see how well it did
        diff = sim.data[result_probe][-1]
        dot_products = [
            np.dot(word2vec_model[word2vec_model.index2word[i]], diff)
            for i in range(len(word2vec_model.index2word))
        ]
        closest_word = word2vec_model.index2word[dot_products.index(
            max(dot_products))]
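
        # The loop above is a brute-force dot-product search over the whole
        # vocabulary (valid as a cosine-similarity search because
        # init_sims(replace=True) normalized the stored vectors). Most gensim
        # versions can do this lookup directly; a hedged alternative:
        #
        #     closest_word = word2vec_model.most_similar(
        #         positive=[diff], topn=1)[0][0]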
        print "Time: %f" % sim.trange()[-1]
        print "\nWhat the Nengo model DID return is something like: %s" % closest_word
        print "\n"

    plt.show()