Пример #1
0
def AuditoryPeriphery(freqs,
                      sound_process,
                      auditory_filter,
                      neurons_per_freq=12,
                      fs=50000.,
                      adaptive_neurons=False,
                      net=None):
    """Build a model of the auditory periphery as a nengo network.

    The sound is passed through a filter bank (inner hair cell activity),
    and the result drives an ensemble array standing in for the cochlear
    neurons of the auditory nerve.  Construction parameters are stored as
    attributes on the returned network.
    """
    if net is None:
        net = nengo.Network(label="Auditory Periphery")

    # Keep the construction parameters around for later inspection.
    net.freqs = freqs
    net.sound_process = sound_process
    net.auditory_filter = auditory_filter
    net.fs = fs

    with net:
        # Inner hair cell activity, produced by the filter bank process.
        net.fb = AuditoryFilterBank(freqs,
                                    sound_process,
                                    filterbank=auditory_filter,
                                    samplerate=fs)
        net.ihc = nengo.Node(output=net.fb, size_out=freqs.size)

        # Cochlear neurons projecting down the auditory nerve; optionally
        # use adapting neurons for more realistic firing behavior.
        if adaptive_neurons:
            neuron_type = nengo.AdaptiveLIF()
        else:
            neuron_type = nengo.LIF()
        net.an = nengo.networks.EnsembleArray(neurons_per_freq,
                                              freqs.size,
                                              intercepts=Uniform(-0.1, 0.5),
                                              encoders=Choice([[1]]),
                                              neuron_type=neuron_type)
        nengo.Connection(net.ihc, net.an.input)
    return net
Пример #2
0
def test_merged_generic(Simulator, seed):
    """Identical AdaptiveLIF ensembles should merge into one generic neuron op."""
    with nengo.Network(seed=seed) as net:
        probes = []
        for _ in range(2):
            node = nengo.Node([1])
            ens = nengo.Ensemble(10, 1, neuron_type=nengo.AdaptiveLIF())
            nengo.Connection(node, ens, synapse=None)
            probes.append(nengo.Probe(ens.neurons))

    # Reference run with the core nengo simulator.
    with nengo.Simulator(net) as canonical:
        canonical.run_steps(100)

    with Simulator(net) as sim:
        # Both SimNeurons operators should have been merged into one plan
        # entry, built with the generic (non-specialized) builder.
        neuron_plans = [
            plan
            for plan in sim.tensor_graph.plan
            if isinstance(plan[0], nengo.builder.neurons.SimNeurons)
        ]
        assert len(neuron_plans) == 1
        built = sim.tensor_graph.op_builder.op_builds[neuron_plans[0]]
        assert isinstance(built.built_neurons,
                          neuron_builders.GenericNeuronBuilder)

        sim.run_steps(100)

        # Output must match the canonical simulator exactly.
        for probe in probes:
            assert np.allclose(sim.data[probe], canonical.data[probe])
Пример #3
0
 def convert_dense(self, model, pre_layer, input_shape, index,
                   onnx_model_graph):
     """Convert an ONNX dense (fully-connected) node into a nengo_dl layer.

     Parameters
     ----------
     model : nengo.Network
         The network being assembled; layers are added inside ``with model``.
     pre_layer
         Output of the previously converted layer.
     input_shape : list
         Shape of the incoming activations (unused here; kept for a
         uniform convert_* signature).
     index : int
         Position of this node in the ONNX graph.
     onnx_model_graph
         The ONNX graph being converted.

     Returns
     -------
     (model, output_shape, x) so the caller can keep chaining layers.
     """
     onnx_model_graph_node = onnx_model_graph.node
     node_info = onnx_model_graph_node[index]
     dense_num = self.get_dense_num(node_info, onnx_model_graph)
     neuron_type = self.get_neuronType(index, onnx_model_graph_node)

     # Map activation names onto nengo neuron models.  None means the ONNX
     # model specified no recognized activation; default to LIF.
     neuron_classes = {
         "lif": nengo.LIF,
         "lifrate": nengo.LIFRate,
         "adaptivelif": nengo.AdaptiveLIF,
         "adaptivelifrate": nengo.AdaptiveLIFRate,
         "izhikevich": nengo.Izhikevich,
         "softlifrate": nengo_dl.neurons.SoftLIFRate,
         None: nengo.LIF,  # default neuron_type = LIF
     }

     with model:
         x = nengo_dl.tensor_layer(pre_layer,
                                   tf.layers.dense,
                                   units=dense_num)
         # Softmax is applied elsewhere; any other known activation gets a
         # nengo neuron layer.  Unknown names fall through unchanged, as in
         # the original if/elif chain.
         if neuron_type != "softmax":
             neuron_cls = neuron_classes.get(neuron_type)
             if neuron_cls is not None:
                 x = nengo_dl.tensor_layer(
                     x, neuron_cls(amplitude=self.amplitude))
     output_shape = [dense_num, 1]
     return model, output_shape, x
Пример #4
0
def test_configure_all_nengo_parameters():
    """Every configurable nengo parameter can be set through ``net.config``.

    For each parameter type a non-default value is constructed, then we
    check that setting it through the config system round-trips (reading
    it back returns the value we set).
    """

    # make up a non-default value for the parameter
    conv_func = {
        params.BoolParam:
        lambda attr: not attr.default,
        params.NumberParam:
        lambda attr: (1 if attr.default is None else attr.default + 1),
        params.StringParam:
        lambda attr: "abc",
        params.NdarrayParam:
        lambda attr: np.zeros([1] * len(attr.shape)),
        nengo.base.ProcessParam:
        lambda attr: nengo.processes.WhiteNoise(),
        nengo.node.OutputParam:
        lambda attr: lambda x: x + 1,
        nengo.synapses.SynapseParam:
        lambda attr: nengo.synapses.Alpha(0.1),
        nengo.solvers.SolverParam:
        lambda attr: nengo.solvers.LstsqL2nz(weights=isinstance(
            attr, nengo.connection.ConnectionSolverParam)),
        nengo.connection.ConnectionFunctionParam:
        lambda attr: lambda x: x + 1,
        nengo.connection.ConnectionTransformParam:
        lambda attr: 2.0,
        nengo.learning_rules.LearningRuleTypeParam:
        (lambda attr: nengo.learning_rules.PES()),
        nengo.neurons.NeuronTypeParam:
        lambda attr: nengo.AdaptiveLIF(),
    }

    net = nengo.Network()

    for obj in net.objects:
        for name, attr in obj.__dict__.items():
            # Only configurable Parameter descriptors are testable.
            if (not isinstance(attr, params.Parameter)
                    or attr.default is params.Unconfigurable):
                continue

            # Find the first converter that handles this parameter type.
            # Native dict.items() replaces six's iteritems (Python 3 only,
            # consistent with the print() calls below).
            for param, func in conv_func.items():
                if isinstance(attr, param):
                    val = func(attr)
                    break
            else:
                raise NotImplementedError

            try:
                # manually set the attribute to its default
                setattr(net.config[obj], name, attr.default)

                # set it to a non-default value
                setattr(net.config[obj], name, val)
                assert getattr(net.config[obj], name) == val

            except Exception:
                print("Error setting %s.%s" % (obj, name))
                raise
Пример #5
0
def test_alif(Simulator):
    """Test ALIF and ALIFRate by comparing them to each other"""

    n = 100
    intercepts = np.linspace(-0.99, 0.99, n)
    nparams = dict(tau_n=1, inc_n=10e-3)
    eparams = dict(n_neurons=n,
                   max_rates=50 * np.ones(n),
                   intercepts=intercepts,
                   encoders=np.ones((n, 1)))

    model = nengo.Network()
    with model:
        u = nengo.Node(output=0.5)
        # a: rate-mode reference; b: spiking version under comparison.
        a = nengo.Ensemble(neuron_type=nengo.AdaptiveLIFRate(**nparams),
                           dimensions=1,
                           **eparams)
        b = nengo.Ensemble(neuron_type=nengo.AdaptiveLIF(**nparams),
                           dimensions=1,
                           **eparams)
        nengo.Connection(u, a, synapse=0)
        nengo.Connection(u, b, synapse=0)
        ap = nengo.Probe(a, "spikes", synapse=0)
        bp = nengo.Probe(b, "spikes", synapse=0)

    dt = 1e-3
    sim = Simulator(model, dt=dt)
    sim.run(2.)

    t = sim.trange()
    a_rates = sim.data[ap] / dt
    # Smooth the spike trains into rates for comparison against rate mode.
    b_rates = rates_kernel(t, sim.data[bp])

    # Ignore startup transients and the tail when computing the error.
    tmask = (t > 0.1) & (t < 1.7)
    rel_rmse = rms(b_rates[tmask] - a_rates[tmask]) / rms(a_rates[tmask])

    with Plotter(Simulator) as plt:
        panels = [
            (311, a_rates.T),
            (312, b_rates.T),
            (313, (b_rates - a_rates)[tmask].T),
        ]
        for code, img in panels:
            ax = plt.subplot(code)
            implot(plt, t, intercepts[::-1], img, ax=ax)
            ax.set_ylabel('input')
        ax.set_xlabel('time [s]')
        plt.savefig('test_neurons.test_alif.pdf')
        plt.close()

    assert rel_rmse < 0.07
Пример #6
0
 def convert_conv2d(self, model, pre_layer, input_shape, index,
                    onnx_model_graph):
     """Convert an ONNX Conv node into a nengo_dl conv2d tensor layer.

     Reads kernel_shape / strides / auto_pad from the node's attributes,
     computes the output shape, and appends the conv layer (plus an
     optional nengo neuron layer for the following activation).

     Returns (model, output_shape, x) so the caller can keep chaining
     layers.
     """
     onnx_model_graph_node = onnx_model_graph.node
     node_info = onnx_model_graph_node[index]
     neuron_type = self.get_neuronType(index, onnx_model_graph_node)
     filters = self.get_filterNum(node_info, onnx_model_graph)

     # Defaults in case the ONNX node omits an attribute.  The original
     # code raised UnboundLocalError when e.g. auto_pad was absent; ONNX's
     # default (NOTSET with no pads) corresponds to "valid".  A default
     # kernel_size/strides of 1 matches ONNX's stride default — kernel_shape
     # is in practice always present on Conv nodes (TODO confirm).
     kernel_size = 1
     strides = 1
     padding = "valid"
     # Use a dedicated loop variable: the original reused ``index`` here,
     # clobbering the graph-position parameter of the same name.
     for attr_idx in range(len(node_info.attribute)):
         attribute = node_info.attribute[attr_idx]
         if attribute.name == "kernel_shape":
             kernel_size = attribute.ints[0]
         elif attribute.name == "strides":
             strides = attribute.ints[0]
         elif attribute.name == "auto_pad":
             padding = attribute.s.decode('ascii').lower()
             # Anything other than VALID (e.g. SAME_UPPER/SAME_LOWER)
             # is treated as "same" padding.
             if padding != "valid":
                 padding = "same"

     if padding == "same":
         output_shape = [input_shape[0], input_shape[1], filters]
     else:
         output_shape = [
             int((input_shape[0] - kernel_size) / strides + 1),
             int((input_shape[1] - kernel_size) / strides + 1), filters
         ]

     # Map activation names onto nengo neuron models; None (no recognized
     # activation) defaults to LIF, unknown names fall through unchanged.
     neuron_classes = {
         "lif": nengo.LIF,
         "lifrate": nengo.LIFRate,
         "adaptivelif": nengo.AdaptiveLIF,
         "adaptivelifrate": nengo.AdaptiveLIFRate,
         "izhikevich": nengo.Izhikevich,
         "softlifrate": nengo_dl.neurons.SoftLIFRate,
         None: nengo.LIF,  # default neuron_type = LIF
     }

     with model:
         x = nengo_dl.tensor_layer(pre_layer,
                                   tf.layers.conv2d,
                                   shape_in=(input_shape[0], input_shape[1],
                                             input_shape[2]),
                                   filters=filters,
                                   kernel_size=kernel_size,
                                   padding=padding)
         neuron_cls = neuron_classes.get(neuron_type)
         if neuron_cls is not None:
             x = nengo_dl.tensor_layer(
                 x, neuron_cls(amplitude=self.amplitude))
     return model, output_shape, x
Пример #7
0
    def build_classifier(self, net):
        """Add an adaptive classifier ensemble that inhibits all syllable DMPs."""
        net.classifier = nengo.Ensemble(
            20,
            dimensions=1,
            encoders=Choice([[1]]),
            neuron_type=nengo.AdaptiveLIF(),
            intercepts=Exponential(0.15, self.classifier.reset_th, 1.0))

        # Broad inhibitory projection from classifier neurons onto the
        # state neurons of every syllable DMP.
        inhibit = -self.classifier.inhib_scale
        for dmp in net.syllables:
            weights = inhibit * np.ones(
                (dmp.state.n_neurons, net.classifier.n_neurons))
            nengo.Connection(net.classifier.neurons,
                             dmp.state.neurons,
                             transform=weights,
                             synapse=0.01)
Пример #8
0
def test_minibatch(Simulator, seed):
    """Five sequential runs must match one run with minibatch_size=5."""
    with nengo.Network(seed=seed) as net:
        inp = [
            nengo.Node(output=[0.5]),
            nengo.Node(output=np.sin),
            nengo.Node(output=nengo.processes.WhiteSignal(5, 0.5, seed=seed))
        ]

        ens = [
            nengo.Ensemble(10, 1, neuron_type=nengo.AdaptiveLIF()),
            nengo.Ensemble(10, 1, neuron_type=nengo.LIFRate()),
            nengo.Ensemble(10, 2, noise=nengo.processes.WhiteNoise(seed=seed))
        ]

        nengo.Connection(inp[0], ens[0])
        nengo.Connection(inp[1], ens[1], synapse=None)
        nengo.Connection(inp[2],
                         ens[2],
                         synapse=nengo.Alpha(0.1),
                         transform=[[1], [1]])
        conn = nengo.Connection(ens[0], ens[1], learning_rule_type=nengo.PES())
        nengo.Connection(inp[0], conn.learning_rule)

        ps = [nengo.Probe(e) for e in ens]

    # Collect five independent, sequential runs without minibatching.
    with Simulator(net, minibatch_size=None) as sim:
        runs = []
        for _ in range(5):
            sim.run_steps(100)
            runs.append([sim.data[p] for p in ps])
            sim.reset()
        # Stack the run axis first so it lines up with the minibatch axis.
        probe_data = [np.stack([run[j] for run in runs], axis=0)
                      for j in range(len(ps))]

    with Simulator(net, minibatch_size=5) as sim:
        sim.run_steps(100)

    for p, expected in zip(ps, probe_data):
        assert np.allclose(sim.data[p], expected, atol=1e-6)
Пример #9
0
def test_temporal_solver(plt, Simulator, seed):
    """The temporal solver should beat (or match) the standard solver."""
    # Spiking LIF: a clear improvement is expected with either decoder
    # or weight solving (1.5153... at dev time).
    plt.subplot(3, 1, 1)
    for use_weights in (False, True):
        improvement = _test_temporal_solver(
            plt, Simulator, seed, nengo.LIF(), 0.005, lambda x: x,
            nengo.solvers.LstsqL2(weights=use_weights))
        assert improvement > 1.2

    # LIFRate has no internal dynamics, and so the two solvers
    # are actually numerically equivalent
    plt.subplot(3, 1, 2)
    ratio = _test_temporal_solver(plt, Simulator, seed, nengo.LIFRate(), None,
                                  lambda x: 1 - 2 * x**2,
                                  nengo.solvers.LstsqL2())
    assert np.allclose(1, ratio)

    # We'll need to overfit slightly (small reg) to see the improvement for
    # AdaptiveLIF (see thesis for a more principled way to improve)
    plt.subplot(3, 1, 3)
    alif_improvement = _test_temporal_solver(  # 2.2838... at dev time
        plt, Simulator, seed, nengo.AdaptiveLIF(), 0.1, np.sin,
        nengo.solvers.LstsqL2(reg=1e-5))
    assert alif_improvement > 2.0
    def convert_dense(self, model, pre_layer, input_shape, index,
                      onnx_model_graph):
        """Convert an ONNX dense (fully-connected) node into a nengo_dl
        Dense layer.

        ``model`` is the nengo network under construction, ``pre_layer``
        the output of the previous layer, and ``index`` the node's position
        in the ONNX graph.  Returns (model, output_shape, x), where ``x``
        is the new layer's output so the caller can keep stacking layers.
        """
        onnx_model_graph_node = onnx_model_graph.node
        node_info = onnx_model_graph_node[index]
        dense_num = self.get_dense_num(node_info, onnx_model_graph)
        # Walk the graph nodes to determine which activation (op_type)
        # follows this layer.
        neuron_type = self.get_neuronType(index, onnx_model_graph_node)

        # Map activation names onto nengo neuron models.  None (no
        # recognized activation) defaults to LIF; unknown names fall
        # through with no neuron layer, as in the original if/elif chain.
        neuron_classes = {
            "lif": nengo.LIF,
            "lifrate": nengo.LIFRate,
            "adaptivelif": nengo.AdaptiveLIF,
            "adaptivelifrate": nengo.AdaptiveLIFRate,
            "izhikevich": nengo.Izhikevich,
            "softlifrate": nengo_dl.neurons.SoftLIFRate,
            None: nengo.LIF,  # default neuron_type = LIF
        }

        with model:
            x = nengo_dl.Layer(
                tf.keras.layers.Dense(units=dense_num))(pre_layer)
            # Softmax is handled elsewhere, so no neuron layer is added.
            if neuron_type != "softmax":
                neuron_cls = neuron_classes.get(neuron_type)
                if neuron_cls is not None:
                    x = nengo_dl.Layer(
                        neuron_cls(amplitude=self.amplitude))(x)
        output_shape = [dense_num, 1]
        print('convert Dense finish')
        return model, output_shape, x
Пример #11
0
def create_model():

    #print trial_info
    print('---- INTIALIZING MODEL ----')
    global model

    model = spa.SPA()
    with model:

        #display current stimulus pair (not part of model)
        if nengo_gui_on and True:
            model.pair_input = nengo.Node(present_pair)
            model.pair_display = nengo.Node(
                display_func,
                size_in=model.pair_input.size_out)  # to show input
            nengo.Connection(model.pair_input,
                             model.pair_display,
                             synapse=None)

        # control
        model.control_net = nengo.Network()
        with model.control_net:
            #assuming the model knows which hand to use (which was blocked)
            model.hand_input = nengo.Node(get_hand)
            model.target_hand = spa.State(Dmid, vocab=vocab_motor, feedback=1)
            nengo.Connection(model.hand_input,
                             model.target_hand.input,
                             synapse=None)

            model.attend = spa.State(D, vocab=vocab_attend,
                                     feedback=.5)  # vocab_attend
            model.goal = spa.State(Dlow, vocab=vocab_goal,
                                   feedback=.7)  # current goal

        ### vision ###

        # set up network parameters
        n_vis = X_train.shape[1]  # nr of pixels, dimensions of network
        n_hid = 1000  # nr of gabor encoders/neurons

        # random state to start
        rng = np.random.RandomState(9)
        encoders = Gabor().generate(
            n_hid, (4, 4), rng=rng)  # gabor encoders, 11x11 apparently, why?
        encoders = Mask(
            (14, 90)).populate(encoders, rng=rng,
                               flatten=True)  # use them on part of the image

        model.visual_net = nengo.Network()
        with model.visual_net:

            #represent currently attended item
            model.attended_item = nengo.Node(present_item2, size_in=D)
            nengo.Connection(model.attend.output, model.attended_item)

            model.vision_gabor = nengo.Ensemble(
                n_hid,
                n_vis,
                eval_points=X_train,
                #    neuron_type=nengo.LIF(),
                neuron_type=nengo.AdaptiveLIF(
                    tau_n=.01, inc_n=.05
                ),  #to get a better fit, use more realistic neurons that adapt to input
                intercepts=nengo.dists.Uniform(-0.1, 0.1),
                #intercepts=nengo.dists.Choice([-0.5]), #should we comment this out? not sure what's happening
                #max_rates=nengo.dists.Choice([100]),
                encoders=encoders)
            #recurrent connection (time constant 500 ms)
            # strength = 1 - (100/500) = .8

            zeros = np.zeros_like(X_train)
            nengo.Connection(
                model.vision_gabor,
                model.vision_gabor,
                synapse=0.005,  #.1
                eval_points=np.vstack(
                    [X_train, zeros,
                     np.random.randn(*X_train.shape)]),
                transform=.5)

            model.visual_representation = nengo.Ensemble(n_hid,
                                                         dimensions=Dmid)

            model.visconn = nengo.Connection(
                model.vision_gabor,
                model.visual_representation,
                synapse=0.005,  #was .005
                eval_points=X_train,
                function=train_targets,
                solver=nengo.solvers.LstsqL2(reg=0.01))
            nengo.Connection(model.attended_item,
                             model.vision_gabor,
                             synapse=.02)  #.03) #synapse?

            # display attended item, only in gui
            if nengo_gui_on:
                # show what's being looked at
                model.display_attended = nengo.Node(
                    display_func,
                    size_in=model.attended_item.size_out)  # to show input
                nengo.Connection(model.attended_item,
                                 model.display_attended,
                                 synapse=None)
                #add node to plot total visual activity
                model.visual_activation = nengo.Node(None, size_in=1)
                nengo.Connection(model.vision_gabor.neurons,
                                 model.visual_activation,
                                 transform=np.ones((1, n_hid)),
                                 synapse=None)

        ### central cognition ###

        ##### Concepts #####
        model.concepts = spa.AssociativeMemory(
            vocab_all_words,  #vocab_concepts,
            wta_output=True,
            wta_inhibit_scale=1,  #was 1
            #default_output_key='NONE', #what to say if input doesn't match
            threshold=0.3
        )  # how strong does input need to be for it to recognize
        nengo.Connection(
            model.visual_representation,
            model.concepts.input,
            transform=.8 * vision_mapping
        )  #not too fast to concepts, might have to be increased to have model react faster to first word.

        #concepts accumulator
        model.concepts_evidence = spa.State(
            1, feedback=1, feedback_synapse=0.005
        )  #the lower the synapse, the faster it accumulates (was .1)
        concepts_evidence_scale = 2.5
        nengo.Connection(model.concepts.am.elem_output,
                         model.concepts_evidence.input,
                         transform=concepts_evidence_scale * np.ones(
                             (1, model.concepts.am.elem_output.size_out)),
                         synapse=0.005)

        #concepts switch
        model.do_concepts = spa.AssociativeMemory(vocab_reset,
                                                  default_output_key='CLEAR',
                                                  threshold=.2)
        nengo.Connection(
            model.do_concepts.am.ensembles[-1],
            model.concepts_evidence.all_ensembles[0].neurons,
            transform=np.ones(
                (model.concepts_evidence.all_ensembles[0].n_neurons, 1)) * -10,
            synapse=0.005)

        ###### Visual Representation ######
        model.vis_pair = spa.State(
            D, vocab=vocab_all_words, feedback=1.0, feedback_synapse=.05
        )  #was 2, 1.6 works ok, but everything gets activated.

        ##### Familiarity #####

        # Assoc Mem with Learned Words
        # - familiarity signal should be continuous over all items, so no wta
        model.dm_learned_words = spa.AssociativeMemory(vocab_learned_words,
                                                       threshold=.2)
        nengo.Connection(model.dm_learned_words.output,
                         model.dm_learned_words.input,
                         transform=.4,
                         synapse=.02)

        # Familiarity Accumulator

        model.familiarity = spa.State(
            1, feedback=.9,
            feedback_synapse=0.1)  #fb syn influences speed of acc
        #familiarity_scale = 0.2 #keep stable for negative fam

        # familiarity accumulator switch
        model.do_fam = spa.AssociativeMemory(vocab_reset,
                                             default_output_key='CLEAR',
                                             threshold=.2)
        # reset
        nengo.Connection(
            model.do_fam.am.ensembles[-1],
            model.familiarity.all_ensembles[0].neurons,
            transform=np.ones(
                (model.familiarity.all_ensembles[0].n_neurons, 1)) * -10,
            synapse=0.005)

        #first a sum to represent summed similarity
        model.summed_similarity = nengo.Ensemble(n_neurons=100, dimensions=1)
        nengo.Connection(
            model.dm_learned_words.am.elem_output,
            model.summed_similarity,
            transform=np.ones(
                (1,
                 model.dm_learned_words.am.elem_output.size_out)))  #take sum

        #then a connection to accumulate this summed sim
        def familiarity_acc_transform(summed_sim):
            fam_scale = .5
            fam_threshold = 0  #actually, kind of bias
            fam_max = 1
            return fam_scale * (2 * ((summed_sim - fam_threshold) /
                                     (fam_max - fam_threshold)) - 1)

        nengo.Connection(model.summed_similarity,
                         model.familiarity.input,
                         function=familiarity_acc_transform)

        ##### Recollection & Representation #####

        model.dm_pairs = spa.AssociativeMemory(
            vocab_learned_pairs, wta_output=True)  #input_keys=list_of_pairs
        nengo.Connection(model.dm_pairs.output,
                         model.dm_pairs.input,
                         transform=.5,
                         synapse=.05)

        #representation
        rep_scale = 0.5
        model.representation = spa.State(D,
                                         vocab=vocab_all_words,
                                         feedback=1.0)
        model.rep_filled = spa.State(
            1, feedback=.9,
            feedback_synapse=.1)  #fb syn influences speed of acc
        model.do_rep = spa.AssociativeMemory(vocab_reset,
                                             default_output_key='CLEAR',
                                             threshold=.2)
        nengo.Connection(
            model.do_rep.am.ensembles[-1],
            model.rep_filled.all_ensembles[0].neurons,
            transform=np.ones(
                (model.rep_filled.all_ensembles[0].n_neurons, 1)) * -10,
            synapse=0.005)

        nengo.Connection(model.representation.output,
                         model.rep_filled.input,
                         transform=rep_scale *
                         np.reshape(sum(vocab_learned_pairs.vectors),
                                    ((1, D))))

        ###### Comparison #####

        model.comparison = spa.Compare(D,
                                       vocab=vocab_all_words,
                                       neurons_per_multiply=500,
                                       input_magnitude=.3)

        #turns out comparison is not an accumulator - we also need one of those.
        model.comparison_accumulator = spa.State(
            1, feedback=.9,
            feedback_synapse=0.05)  #fb syn influences speed of acc
        model.do_compare = spa.AssociativeMemory(vocab_reset,
                                                 default_output_key='CLEAR',
                                                 threshold=.2)

        #reset
        nengo.Connection(
            model.do_compare.am.ensembles[-1],
            model.comparison_accumulator.all_ensembles[0].neurons,
            transform=np.ones(
                (model.comparison_accumulator.all_ensembles[0].n_neurons, 1)) *
            -10,
            synapse=0.005)

        #error because we apply a function to a 'passthrough' node, inbetween ensemble as a solution:
        model.comparison_result = nengo.Ensemble(n_neurons=100, dimensions=1)
        nengo.Connection(model.comparison.output, model.comparison_result)

        def comparison_acc_transform(comparison):
            comparison_scale = .6
            comparison_threshold = 0  #actually, kind of bias
            comparison_max = .6
            return comparison_scale * (2 * (
                (comparison - comparison_threshold) /
                (comparison_max - comparison_threshold)) - 1)

        nengo.Connection(model.comparison_result,
                         model.comparison_accumulator.input,
                         function=comparison_acc_transform)

        #motor
        model.motor_net = nengo.Network()
        with model.motor_net:

            #input multiplier
            model.motor_input = spa.State(Dmid, vocab=vocab_motor)

            #higher motor area (SMA?)
            model.motor = spa.State(Dmid, vocab=vocab_motor, feedback=.7)

            #connect input multiplier with higher motor area
            nengo.Connection(model.motor_input.output,
                             model.motor.input,
                             synapse=.1,
                             transform=2)

            #finger area
            model.fingers = spa.AssociativeMemory(
                vocab_fingers,
                input_keys=['L1', 'L2', 'R1', 'R2'],
                wta_output=True)
            nengo.Connection(model.fingers.output,
                             model.fingers.input,
                             synapse=0.1,
                             transform=0.3)  #feedback

            #conncetion between higher order area (hand, finger), to lower area
            nengo.Connection(model.motor.output,
                             model.fingers.input,
                             transform=.25 * motor_mapping)  #was .2

            #finger position (spinal?)
            model.finger_pos = nengo.networks.EnsembleArray(n_neurons=50,
                                                            n_ensembles=4)
            nengo.Connection(model.finger_pos.output,
                             model.finger_pos.input,
                             synapse=0.1,
                             transform=0.8)  #feedback

            #connection between finger area and finger position
            nengo.Connection(model.fingers.am.elem_output,
                             model.finger_pos.input,
                             transform=1.0 *
                             np.diag([0.55, .54, .56, .55]))  #fix these

        model.bg = spa.BasalGanglia(
            spa.Actions(
                #wait & start
                a_aa_wait='dot(goal,WAIT) - .9 --> goal=0',
                a_attend_item1=
                'dot(goal,DO_TASK) - .0 --> goal=RECOG, attend=ITEM1, do_concepts=GO',

                #attend words
                b_attending_item1=
                'dot(goal,RECOG) + dot(attend,ITEM1) - concepts_evidence - .3 --> goal=RECOG, attend=ITEM1, do_concepts=GO',  # vis_pair=2.5*(ITEM1*concepts)',
                c_attend_item2=
                'dot(goal,RECOG) + dot(attend,ITEM1) + concepts_evidence - 1.6 --> goal=RECOG2, attend=ITEM2, vis_pair=3*(ITEM1*concepts)',
                # NOTE(review): this span is the tail of a spa.Actions(...) call whose
                # opening lines fall outside this chunk. Each keyword argument below is
                # one production rule of the form 'utility --> effects': the utility is
                # a scalar expression (dot products with goal/attend states plus scalar
                # evidence accumulators minus a threshold), and the effects route
                # semantic pointers between SPA buffers when the rule wins in the
                # basal ganglia. Rule names are prefixed (d_, e_, ...) — presumably to
                # fix their display order; verify against the rest of the model.

                # Attend the second item once concept evidence for item 1 is low enough.
                d_attending_item2=
                'dot(goal,RECOG2+RECOG) + dot(attend,ITEM2) - concepts_evidence - .4 --> goal=RECOG2, attend=ITEM2, do_concepts=GO, dm_learned_words=1.0*(~ITEM1*vis_pair)',  #vis_pair=1.2*(ITEM2*concepts)
                # Once item 2 is recognised with enough evidence, move to familiarity.
                e_start_familiarity=
                'dot(goal,RECOG2) + dot(attend,ITEM2) + concepts_evidence - 1.8 --> goal=FAMILIARITY, do_fam=GO, vis_pair=1.9*(ITEM2*concepts), dm_learned_words=2.0*(~ITEM1*vis_pair+~ITEM2*vis_pair)',

                #judge familiarity
                # Keep accumulating familiarity evidence while in the FAMILIARITY goal.
                f_accumulate_familiarity=
                '1.1*dot(goal,FAMILIARITY) - 0.2 --> goal=FAMILIARITY-RECOG2, do_fam=GO, dm_learned_words=.8*(~ITEM1*vis_pair+~ITEM2*vis_pair)',
                # Low familiarity and no finger pressed yet -> respond "mismatch".
                g_respond_unfamiliar=
                'dot(goal,FAMILIARITY) - familiarity - .5*dot(fingers,L1+L2+R1+R2) - .6 --> goal=RESPOND_MISMATCH-FAMILIARITY, do_fam=GO, motor_input=1.6*(target_hand+MIDDLE)',
                #g2_respond_familiar =   'dot(goal,FAMILIARITY) + familiarity - .5*dot(fingers,L1+L2+R1+R2) - .6 --> goal=RESPOND, do_fam=GO, motor_input=1.6*(target_hand+INDEX)',

                #recollection & representation
                # High familiarity -> recollect the studied pair from declarative memory.
                h_recollection=
                'dot(goal,FAMILIARITY) + familiarity - .5*dot(fingers,L1+L2+R1+R2) - .6 --> goal=RECOLLECTION-FAMILIARITY, dm_pairs = vis_pair',
                # Build the internal representation until rep_filled signals completion.
                i_representation=
                'dot(goal,RECOLLECTION) - rep_filled - .1 --> goal=RECOLLECTION, dm_pairs = vis_pair, representation=3*dm_pairs, do_rep=GO',

                #comparison & respond
                # Compare item 1 of the visual pair against item 1 of the recollection.
                j_10_compare_word1=
                'dot(goal,RECOLLECTION+1.4*COMPARE_ITEM1) + rep_filled - .9 --> goal=COMPARE_ITEM1-RECOLLECTION, do_rep=GO, do_compare=GO, comparison_A = ~ITEM1*vis_pair, comparison_B = ~ITEM1*representation',
                # Item 1 matched -> move on to comparing item 2.
                k_11_match_word1=
                'dot(goal,COMPARE_ITEM1) + comparison_accumulator - .7 --> goal=COMPARE_ITEM2-COMPARE_ITEM1, do_rep=GO, comparison_A = ~ITEM1*vis_pair, comparison_B = ~ITEM1*representation',
                # Item 1 mismatched -> respond "mismatch" (middle finger of target hand).
                l_12_mismatch_word1=
                'dot(goal,COMPARE_ITEM1) + .4 * dot(goal,RESPOND_MISMATCH) - comparison_accumulator - .7 --> goal=RESPOND_MISMATCH-COMPARE_ITEM1, do_rep=GO, motor_input=1.6*(target_hand+MIDDLE), do_compare=GO, comparison_A = ~ITEM1*vis_pair, comparison_B = ~ITEM1*representation',
                # Keep the item-2 comparison running while in COMPARE_ITEM2.
                compare_word2=
                'dot(goal,COMPARE_ITEM2) - .5 --> goal=COMPARE_ITEM2, do_compare=GO, comparison_A = ~ITEM2*vis_pair, comparison_B = ~ITEM2*representation',
                # Item 2 matched -> respond "match" (index finger of target hand).
                m_match_word2=
                'dot(goal,COMPARE_ITEM2) + comparison_accumulator - .7 --> goal=RESPOND_MATCH-COMPARE_ITEM2, motor_input=1.6*(target_hand+INDEX), do_compare=GO, comparison_A = ~ITEM2*vis_pair, comparison_B = ~ITEM2*representation',
                # Item 2 mismatched (and no finger pressed) -> respond "mismatch".
                n_mismatch_word2=
                'dot(goal,COMPARE_ITEM2) - comparison_accumulator - dot(fingers,L1+L2+R1+R2)- .7 --> goal=RESPOND_MISMATCH-COMPARE_ITEM2, motor_input=1.6*(target_hand+MIDDLE),do_compare=GO, comparison_A = ~ITEM2*vis_pair, comparison_B = ~ITEM2*representation',

                #respond
                # Sustain the motor command until the finger press registers.
                o_respond_match=
                'dot(goal,RESPOND_MATCH) - .1 --> goal=RESPOND_MATCH, motor_input=1.6*(target_hand+INDEX)',
                p_respond_mismatch=
                'dot(goal,RESPOND_MISMATCH) - .1 --> goal=RESPOND_MISMATCH, motor_input=1.6*(target_hand+MIDDLE)',

                #finish
                # A finger press (dot with fingers) ends the trial.
                x_response_done=
                'dot(goal,RESPOND_MATCH) + dot(goal,RESPOND_MISMATCH) + 2*dot(fingers,L1+L2+R1+R2) - .7 --> goal=2*END',
                y_end=
                'dot(goal,END)-.1 --> goal=END-RESPOND_MATCH-RESPOND_MISMATCH',
                # Default rule: small constant utility so some action always wins.
                z_threshold='.05 --> goal=0'

                #possible to match complete buffer, ie is representation filled?
                # motor_input=1.5*target_hand+MIDDLE,
            ))

        # Diagnostic: number of production rules handed to the basal ganglia.
        print(model.bg.actions.count)
        #print(model.bg.dimensions)

        # Thalamus gates the winning basal-ganglia action back onto the buffers.
        model.thalamus = spa.Thalamus(model.bg)

        model.cortical = spa.Cortical(  # cortical connection: shorthand for doing everything with states and connections
            spa.Actions(
                # Currently empty: all candidate cortical routings are disabled below.
                #  'motor_input = .04*target_hand',
                # 'dm_learned_words = .1*vis_pair',
                #'dm_pairs = 2*stimulus'
                #'vis_pair = 2*attend*concepts+concepts',
                #fam 'comparison_A = 2*vis_pair',
                #fam 'comparison_B = 2*representation*~attend',
            ))

        #probes
        # Record finger position and finger state vectors with a 10 ms filter.
        model.pr_motor_pos = nengo.Probe(
            model.finger_pos.output,
            synapse=.01)  #raw vector (dimensions x time)
        model.pr_motor = nengo.Probe(model.fingers.output, synapse=.01)
        #model.pr_motor1 = nengo.Probe(model.motor.output, synapse=.01)

        # Heavier probes (raw neurons / memory elements) only outside the GUI,
        # where nengo_gui_on is False.
        if not nengo_gui_on:
            model.pr_vision_gabor = nengo.Probe(
                model.vision_gabor.neurons, synapse=.005
            )  #do we need synapse, or should we do something with the spikes
            model.pr_familiarity = nengo.Probe(
                model.dm_learned_words.am.elem_output,
                synapse=.01)  #element output, don't include default
            model.pr_concepts = nengo.Probe(
                model.concepts.am.elem_output,
                synapse=.01)  # element output, don't include default

        #multiply spikes with the connection weights

        #input
        # Drive the goal buffer from goal_func (defined outside this chunk).
        model.input = spa.Input(goal=goal_func)

        #print(sum(ens.n_neurons for ens in model.all_ensembles))

        #return model

        #to show select BG rules
        # get names rules
        # GUI-only: build a one-hot vocabulary of rule names so the thalamus
        # output can be displayed as the currently selected action.
        if nengo_gui_on:
            vocab_actions = spa.Vocabulary(model.bg.output.size_out)
            for i, action in enumerate(model.bg.actions.actions):
                vocab_actions.add(action.name.upper(),
                                  np.eye(model.bg.output.size_out)[i])
            model.actions = spa.State(model.bg.output.size_out,
                                      subdimensions=model.bg.output.size_out,
                                      vocab=vocab_actions)
            nengo.Connection(model.thalamus.output, model.actions.input)

            # Blank out auto-generated 'channel*' labels to declutter the GUI.
            for net in model.networks:
                if net.label is not None and net.label.startswith('channel'):
                    net.label = ''
            label='Rate Signal',
            neuron_type=nengo.RectifiedLinear(),
            seed=2)
    # Drive the rate ensemble from the shared input ensemble `a`
    # (both `a` and `n_neurons` are defined earlier, outside this chunk).
    # Every ensemble below uses seed=2 — presumably so all neuron types get
    # identical generated parameters for a fair comparison; verify upstream.
    nengo.Connection(a, b_rate)

    # and another ensemble with spiking neurons
    b_spike = nengo.Ensemble(n_neurons, 1,
            label='LIF',
            neuron_type=nengo.LIF(), 
            seed=2)
    nengo.Connection(a, b_spike)
    
    # Spiking counterpart of the rectified-linear rate model.
    c_spike = nengo.Ensemble(n_neurons, 1,
            label='Spiking Rectified Linear',
            neuron_type=nengo.SpikingRectifiedLinear(), 
            seed=2)
    nengo.Connection(a, c_spike)
    
    # LIF variant with an adaptation term.
    d_spike = nengo.Ensemble(n_neurons, 1,
            label='Adaptive LIF',
            neuron_type=nengo.AdaptiveLIF(), 
            seed=2)
    nengo.Connection(a, d_spike)
    
    # Izhikevich neuron model.
    e_spike = nengo.Ensemble(n_neurons, 1,
            label='Izhikevich',
            neuron_type=nengo.Izhikevich(), 
            seed=2)
    nengo.Connection(a, e_spike)