Example #1
def make_gabors():
    import numpy as np
    import matplotlib.pyplot as plt

    from nengo.dists import Uniform
    from nengo_extras.matplotlib import tile
    from nengo_extras.vision import Gabor
    from hunse_thesis.dists import LogUniform

    rng = np.random.RandomState(3)

    # r, c = 10, 20
    r, c = 9, 12

    # gabor = Gabor()
    # gabor = Gabor(freq=Uniform(0.5, 1.5))
    gabor = Gabor(freq=LogUniform(np.log(0.5), np.log(1.5)))

    gabors = gabor.generate(r * c, (32, 32), rng=rng)

    tile(gabors, rows=r, cols=c)

    plt.savefig('gabors.pdf')
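
A quick shape check when adapting this snippet: Gabor().generate(n, (h, w), rng=rng) returns an (n, h, w) array of filters, which tile() lays out on a grid. A minimal sketch, assuming the same imports:

import numpy as np
from nengo_extras.vision import Gabor

gabors = Gabor().generate(6, (32, 32), rng=np.random.RandomState(0))
assert gabors.shape == (6, 32, 32)  # one 32x32 filter per generated Gabor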
Example #2
def make_vision_system(images, outputs, n_neurons=1000,
                       AIT_V1_strength=0.06848695023305285,
                       V1_r_transform=0.11090645719111913,
                       AIT_r_transform=0.8079719992231219):

    vision_system = nengo.Network(label='vision_system')
    with vision_system:
        # node presenting the currently attended item
        presentation_node = nengo.Node(None, size_in=images.shape[1],
                                       label='presentation_node')
        vision_system.presentation_node = presentation_node
        rng = np.random.RandomState(9)
        # Gabor encoders on 11x11 patches, placed on the 14x90 image
        encoders = Gabor().generate(n_neurons, (11, 11), rng=rng)
        encoders = Mask((14, 90)).populate(encoders, rng=rng, flatten=True)
        
        V1 = nengo.Ensemble(n_neurons, images.shape[1],  # dimensions = nr of pixels
                            eval_points=images,
                            neuron_type=nengo.LIFRate(),
                            intercepts=nengo.dists.Choice([-0.5]),  # can switch these off
                            max_rates=nengo.dists.Choice([100]),
                            encoders=encoders,
                            label='V1')

        # visual_representation = nengo.Node(size_in=Dmid)  # output, in this case 466 outputs
        AIT = nengo.Ensemble(n_neurons, dimensions=outputs.shape[1],
                             label='AIT')  # output, in this case 466 outputs
        
        visconn = nengo.Connection(V1, AIT, synapse=0.005,
                                   eval_points=images, function=outputs,
                                   solver=nengo.solvers.LstsqL2(reg=0.01))
        # the transform makes this connection much weaker than the forward one
        AIT_V1_backwards_conn = nengo.Connection(
            AIT, V1, synapse=0.005,
            eval_points=outputs, function=images,
            solver=nengo.solvers.LstsqL2(reg=0.01),
            transform=AIT_V1_strength)
        nengo.Connection(presentation_node, V1, synapse=None)
        nengo.Connection(AIT, AIT, synapse=0.1, transform=AIT_r_transform)
        nengo.Connection(V1, V1, synapse=0.1, transform=V1_r_transform)
        
        # display attended item (display_func is assumed to be defined in the
        # enclosing module)
        display_node = nengo.Node(display_func,
                                  size_in=presentation_node.size_out,
                                  label='display_node')
        nengo.Connection(presentation_node, display_node, synapse=None)

        # expose the ensembles on the network; external code relies on these
        vision_system.AIT = AIT
        vision_system.V1 = V1
        
    return vision_system
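
A usage sketch with placeholder data (the sizes are assumptions inferred from the code: Mask((14, 90)) implies flattened 14x90 stimuli, and the comments mention 466 outputs; nengo, Gabor, Mask, and display_func must be in scope):

import numpy as np

images = np.random.rand(100, 14 * 90)  # 100 flattened 14x90 stimuli
outputs = np.random.rand(100, 466)     # matching target representations
vision_system = make_vision_system(images, outputs, n_neurons=1000)
# vision_system.presentation_node, .V1, and .AIT can now be wired into a larger model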
Example #3
def test_conv2d_weights(channels_last, hw_opts, request, plt, seed, rng,
                        allclose):
    def loihi_rates_n(neuron_type, x, gain, bias, dt):
        """Compute Loihi rates on higher dimensional inputs"""
        y = x.reshape(-1, x.shape[-1])
        gain = np.asarray(gain)
        bias = np.asarray(bias)
        if gain.ndim == 0:
            gain = gain * np.ones(x.shape[-1])
        if bias.ndim == 0:
            bias = bias * np.ones(x.shape[-1])
        rates = loihi_rates(neuron_type, y, gain, bias, dt)
        return rates.reshape(*x.shape)

    if channels_last:
        plt.saveas = None
        pytest.xfail("Blocked by CxBase cannot be > 256 bug")

    target = request.config.getoption("--target")
    if target != 'loihi' and len(hw_opts) > 0:
        pytest.skip("Hardware options only available on hardware")

    pop_type = 32

    # load data
    with open(os.path.join(test_dir, 'mnist10.pkl'), 'rb') as f:
        test10 = pickle.load(f)

    test_x = test10[0][0].reshape(28, 28)
    test_x = test_x[3:24, 3:24]
    test_x = 1.999 * test_x - 0.999

    filters = Gabor(freq=Uniform(0.5, 1)).generate(8, (7, 7), rng=rng)
    sti, stj = 2, 2
    tau_rc = 0.02
    tau_ref = 0.002
    tau_s = 0.005
    dt = 0.001

    encode_type = nengo.SpikingRectifiedLinear()
    encode_gain = 1. / dt
    encode_bias = 0.
    neuron_type = nengo.LIF(tau_rc=tau_rc, tau_ref=tau_ref)
    neuron_gain = 1.
    neuron_bias = 1.

    pres_time = 0.2

    # --- compute ideal outputs
    def conv_pm(x, kernel):
        y0 = scipy.signal.correlate2d(x[0], kernel, mode='valid')[::sti, ::stj]
        y1 = scipy.signal.correlate2d(x[1], kernel, mode='valid')[::sti, ::stj]
        return [y0, -y1]

    ref_out = np.array([test_x, -test_x])
    ref_out = loihi_rates_n(encode_type, ref_out, encode_gain, encode_bias, dt)
    ref_out = ref_out / encode_gain
    ref_out = np.array([conv_pm(ref_out, kernel) for kernel in filters])
    ref_out = ref_out.sum(axis=1)  # sum positive and negative parts
    ref_out = loihi_rates_n(neuron_type, ref_out, neuron_gain, neuron_bias, dt)

    # --- compute nengo_loihi outputs
    inp_biases = np.stack([test_x, -test_x], axis=-1 if channels_last else 0)
    inp_shape = nengo_transforms.ChannelShape(inp_biases.shape,
                                              channels_last=channels_last)

    kernel = np.array([filters, -filters])  # two channels, pos and neg
    kernel = np.transpose(kernel, (2, 3, 0, 1))
    conv2d_transform = nengo_transforms.Convolution(
        8,
        inp_shape,
        strides=(sti, stj),
        channels_last=channels_last,
        kernel_size=(7, 7),
        init=kernel)

    out_size = ref_out.size
    nf, nyi, nyj = ref_out.shape
    assert out_size <= 1024

    model = Model()

    # input block
    inp = LoihiBlock(inp_shape.size, label='inp')
    assert inp.n_neurons <= 1024
    inp.compartment.configure_relu()
    inp.compartment.bias[:] = inp_biases.ravel()

    inp_ax = Axon(np.prod(inp_shape.spatial_shape), label='inp_ax')
    inp_ax.set_compartment_axon_map(target_axons=conv.pixel_idxs(inp_shape),
                                    atoms=conv.channel_idxs(inp_shape))
    inp.add_axon(inp_ax)

    model.add_block(inp)

    # conv block
    neurons = LoihiBlock(out_size, label='neurons')
    assert neurons.n_neurons <= 1024
    neurons.compartment.configure_lif(tau_rc=tau_rc, tau_ref=tau_ref, dt=dt)
    neurons.compartment.configure_filter(tau_s, dt=dt)
    neurons.compartment.bias[:] = neuron_bias

    synapse = Synapse(np.prod(inp_shape.spatial_shape), label='synapse')
    weights, indices, axon_to_weight_map, bases = conv.conv2d_loihi_weights(
        conv2d_transform)
    synapse.set_population_weights(weights,
                                   indices,
                                   axon_to_weight_map,
                                   bases,
                                   pop_type=pop_type)

    neurons.add_synapse(synapse)

    out_probe = Probe(target=neurons, key='spiked')
    neurons.add_probe(out_probe)

    inp_ax.target = synapse
    model.add_block(neurons)

    # simulation
    discretize_model(model)

    n_steps = int(pres_time / dt)
    if target == 'loihi':
        with HardwareInterface(model, use_snips=False, seed=seed,
                               **hw_opts) as sim:
            sim.run_steps(n_steps)
            sim_out = sim.get_probe_output(out_probe)
    else:
        with EmulatorInterface(model, seed=seed) as sim:
            sim.run_steps(n_steps)
            sim_out = sim.get_probe_output(out_probe)

    sim_out = np.sum(sim_out, axis=0) / pres_time
    if channels_last:
        sim_out.shape = (nyi, nyj, nf)
        sim_out = np.transpose(sim_out, (2, 0, 1))
    else:
        sim_out.shape = (nf, nyi, nyj)

    out_max = max(ref_out.max(), sim_out.max())

    # --- plot results
    rows = 2
    cols = 2

    ax = plt.subplot(rows, cols, 1)
    tile(filters, cols=8, ax=ax)

    ax = plt.subplot(rows, cols, 2)
    tile(ref_out, vmin=0, vmax=out_max, cols=8, ax=ax)

    ax = plt.subplot(rows, cols, 3)
    plt.hist(ref_out.ravel(), bins=31)
    plt.hist(sim_out.ravel(), bins=31)

    ax = plt.subplot(rows, cols, 4)
    # tile(sim_out, vmin=0, vmax=1, cols=8, ax=ax)
    tile(sim_out, vmin=0, vmax=out_max, cols=8, ax=ax)

    assert allclose(sim_out, ref_out, atol=10, rtol=1e-3)
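
The reference computation above reduces each filter to a strided 'valid' correlation; a standalone sketch of the resulting output geometry (a 21x21 input with a 7x7 kernel at stride 2 yields an 8x8 map):

import numpy as np
import scipy.signal

x = np.random.rand(21, 21)
k = np.random.rand(7, 7)
y = scipy.signal.correlate2d(x, k, mode='valid')[::2, ::2]
assert y.shape == (8, 8)  # 21 - 7 + 1 = 15 valid positions, strided by 2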
Example #4
                yield cropped.reshape((per_batch, n_vis)), targets


# --- set up network parameters
method = 'gabor'

print("Encoders (n_hid = %d, method=%r)" % (n_hid, method))

if method == 'full':
    encoders = rng.normal(size=(n_hid, ) + shape).reshape(n_hid, -1)
elif method == 'mask':
    encoders = Mask(shape).populate(rng.normal(size=(n_hid, c, 9, 9)),
                                    rng=rng,
                                    flatten=True)
elif method == 'gabor':
    gabors = Gabor().generate(n_hid, (13, 13), rng=rng)
    colors = nengo.dists.UniformHypersphere(surface=True).sample(n_hid,
                                                                 c,
                                                                 rng=rng)
    gabors = gabors[:, None, :, :] * colors[:, :, None, None]
    encoders = Mask(shape).populate(gabors, rng=rng, flatten=True)
else:
    raise ValueError(method)

encoded = np.dot(next(batches())[0], encoders.T)

neuron_type = nengo.LIFRate()
intercepts = np.percentile(encoded, 50, axis=0)
max_rates = 100 * np.ones(n_hid)
gain, bias = neuron_type.gain_bias(max_rates, intercepts)
print("Intercepts: %0.2e (%0.2e)" % (intercepts.mean(), intercepts.std()))
Example #5
def generate_gabors():

    global e_cued
    global U_cued
    global compressed_im_cued

    global e_uncued
    global U_uncued
    global compressed_im_uncued

    # to speed things up, load previously generated ones
    if load_gabors_svd and os.path.isfile('Stimuli/gabors_svd_cued.npz'):
        gabors_svd_cued = np.load(
            'Stimuli/gabors_svd_cued.npz')  # load stims if previously generated
        e_cued = gabors_svd_cued['e_cued']
        U_cued = gabors_svd_cued['U_cued']
        compressed_im_cued = gabors_svd_cued['compressed_im_cued']
        print("SVD cued loaded")

    else:  #or generate and save

        #cued module
        #for each neuron in the sensory layer, generate a Gabor of 1/3 of the image size
        gabors_cued = Gabor().generate(Ns, (int(col / 3), int(row / 3)))
        #put gabors on image and make them the same shape as the stimuli
        gabors_cued = Mask((col, row)).populate(gabors_cued,
                                                flatten=True).reshape(Ns, -1)
        #normalize
        gabors_cued = gabors_cued / abs(
            max(np.amax(gabors_cued), abs(np.amin(gabors_cued))))
        #gabors are added to imagearr for SVD
        x_cued = np.vstack((imagearr, gabors_cued))

        #SVD
        print("SVD cued started...")
        U_cued, S_cued, V_cued = np.linalg.svd(x_cued.T)
        print("SVD cued done")

        #Use result of SVD to create encoders
        e_cued = np.dot(gabors_cued, U_cued[:, :D])  #encoders
        compressed_im_cued = np.dot(
            imagearr[:1800, :] / 100,
            U_cued[:, :D])  #D-dimensional vector reps of the images
        compressed_im_cued = np.vstack(
            (compressed_im_cued, np.dot(imagearr[-1, :] / 50, U_cued[:, :D])))

        np.savez('Stimuli/gabors_svd_cued.npz',
                 e_cued=e_cued,
                 U_cued=U_cued,
                 compressed_im_cued=compressed_im_cued)

    #same for uncued module
    if uncued:

        if load_gabors_svd and os.path.isfile('Stimuli/gabors_svd_uncued.npz'):
            gabors_svd_uncued = np.load('Stimuli/gabors_svd_uncued.npz'
                                        )  #load stims if previously generated
            e_uncued = gabors_svd_uncued['e_uncued']
            U_uncued = gabors_svd_uncued['U_uncued']
            compressed_im_uncued = gabors_svd_uncued['compressed_im_uncued']
            print("SVD uncued loaded")
        else:
            gabors_uncued = Gabor().generate(
                Ns, (int(col / 3), int(row / 3)))  #.reshape(N, -1)
            gabors_uncued = Mask(
                (col, row)).populate(gabors_uncued,
                                     flatten=True).reshape(Ns, -1)
            gabors_uncued = gabors_uncued / abs(
                max(np.amax(gabors_uncued), abs(np.amin(gabors_uncued))))
            x_uncued = np.vstack((imagearr, gabors_uncued))

            print("SVD uncued started...")
            U_uncued, S_uncued, V_uncued = np.linalg.svd(x_uncued.T)
            print("SVD uncued done")
            e_uncued = np.dot(gabors_uncued, U_uncued[:, :D])
            compressed_im_uncued = np.dot(imagearr[:1800, :] / 100,
                                          U_uncued[:, :D])
            compressed_im_uncued = np.vstack((compressed_im_uncued,
                                              np.dot(imagearr[-1, :] / 50,
                                                     U_uncued[:, :D])))

            np.savez('Stimuli/gabors_svd_uncued.npz',
                     e_uncued=e_uncued,
                     U_uncued=U_uncued,
                     compressed_im_uncued=compressed_im_uncued)
Example #6
def generate_gabors(load_gabors_svd=False, Ns=None, D=None):

    # global e_first
    # global U_first
    # global compressed_im_first

    # global e_second
    # global U_second
    # global compressed_im_second

    #to speed things up, load previously generated ones
    if load_gabors_svd and os.path.isfile('Stimuli/gabors_svd_first_exp2.npz'):
        gabors_svd_first = np.load('Stimuli/gabors_svd_first_exp2.npz'
                                   )  #load stims if previously generated
        e_first = gabors_svd_first['e_first']
        U_first = gabors_svd_first['U_first']
        compressed_im_first = gabors_svd_first['compressed_im_first']
        print("SVD first loaded")

    else:  #or generate and save

        #cued module
        #for each neuron in the sensory layer, generate a Gabor of 1/3 of the image size
        gabors_first = Gabor().generate(Ns, (int(col / 3), int(row / 3)))
        #put gabors on image and make them the same shape as the stimuli
        gabors_first = Mask((col, row)).populate(gabors_first,
                                                 flatten=True).reshape(Ns, -1)
        #normalize
        gabors_first = gabors_first / abs(
            max(np.amax(gabors_first), abs(np.amin(gabors_first))))
        #gabors are added to imagearr for SVD
        x_first = np.vstack((imagearr, gabors_first))

        #SVD
        print("SVD first started...")
        U_first, S_first, V_first = np.linalg.svd(x_first.T)
        print("SVD first done")

        #Use result of SVD to create encoders
        e_first = np.dot(gabors_first, U_first[:, :D])  #encoders
        compressed_im_first = np.dot(
            imagearr[:1800, :] / 100,
            U_first[:, :D])  #D-dimensional vector reps of the images
        compressed_im_first = np.vstack(
            (compressed_im_first, np.dot(imagearr[-1, :] / 50,
                                         U_first[:, :D])))

        np.savez('Stimuli/gabors_svd_first_exp2.npz',
                 e_first=e_first,
                 U_first=U_first,
                 compressed_im_first=compressed_im_first)

    #same for secondary module

    if load_gabors_svd and os.path.isfile('Stimuli/gabors_svd_second_exp2.npz'):
        gabors_svd_second = np.load('Stimuli/gabors_svd_second_exp2.npz'
                                    )  #load stims if previously generated
        e_second = gabors_svd_second['e_second']
        U_second = gabors_svd_second['U_second']
        compressed_im_second = gabors_svd_second['compressed_im_second']
        print("SVD second loaded")
    else:
        gabors_second = Gabor().generate(
            Ns, (int(col / 3), int(row / 3)))  #.reshape(N, -1)
        gabors_second = Mask(
            (col, row)).populate(gabors_second, flatten=True).reshape(Ns, -1)
        gabors_second = gabors_second / abs(
            max(np.amax(gabors_second), abs(np.amin(gabors_second))))
        x_second = np.vstack((imagearr, gabors_second))

        print("SVD second started...")
        U_second, S_second, V_second = np.linalg.svd(x_second.T)
        print("SVD second done")
        e_second = np.dot(gabors_second, U_second[:, :D])
        compressed_im_second = np.dot(imagearr[:1800, :] / 100,
                                      U_second[:, :D])
        compressed_im_second = np.vstack(
            (compressed_im_second, np.dot(imagearr[-1, :] / 50,
                                          U_second[:, :D])))

        np.savez('Stimuli/gabors_svd_second_exp2.npz',
                 e_second=e_second,
                 U_second=U_second,
                 compressed_im_second=compressed_im_second)

    return e_first, U_first, compressed_im_first, e_second, U_second, compressed_im_second
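
Both versions implement the same encoder construction: stack the stimuli and the Gabor filters, take an SVD of pixel space, and project both through the first D singular vectors. A stripped-down sketch with hypothetical sizes:

import numpy as np

rng = np.random.RandomState(0)
n_pix, Ns, D = 64, 20, 10
imagearr = rng.randn(30, n_pix)              # stimuli, one image per row
gabors = rng.randn(Ns, n_pix)                # one flattened Gabor per neuron
U, S, Vt = np.linalg.svd(np.vstack((imagearr, gabors)).T)
e = np.dot(gabors, U[:, :D])                 # (Ns, D) encoders
compressed_im = np.dot(imagearr, U[:, :D])   # (30, D) image representations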
Example #7
X_train = 2 * X_train - 1  # normalize to -1 to 1
X_test = 2 * X_test - 1  # normalize to -1 to 1

train_targets = one_hot(y_train, 10)
test_targets = one_hot(y_test, 10)

# --- set up network parameters
n_vis = X_train.shape[1]
n_out = train_targets.shape[1]
# n_hid = 300
n_hid = 1000
# n_hid = 3000

# encoders = rng.normal(size=(n_hid, 11, 11))
encoders = Gabor().generate(n_hid, (11, 11), rng=rng)
encoders = Mask((28, 28)).populate(encoders, rng=rng, flatten=True)

ens_params = dict(
    eval_points=X_train,
    neuron_type=nengo.LIFRate(),
    intercepts=nengo.dists.Choice([-0.5]),
    max_rates=nengo.dists.Choice([100]),
    encoders=encoders,
    )

solver = nengo.solvers.LstsqL2(reg=0.01)
# solver = nengo.solvers.LstsqL2(reg=0.0001)

with nengo.Network(seed=3) as model:
    a = nengo.Ensemble(n_hid, n_vis, **ens_params)
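
The snippet ends inside the network definition; a typical continuation (a sketch, not part of the original) decodes the one-hot targets from the ensemble using the solver defined above:

    v = nengo.Node(size_in=n_out, label='v')
    conn = nengo.Connection(a, v, eval_points=X_train,
                            function=train_targets, solver=solver)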
Example #8
import matplotlib.pyplot as plt
import numpy as np

from nengo.dists import Uniform
from nengo_extras.matplotlib import tile
from nengo_extras.vision import Gabor
from hunse_thesis.dists import LogUniform

rng = np.random.RandomState(3)

# r, c = 10, 20
r, c = 9, 12

# gabor = Gabor()
# gabor = Gabor(freq=Uniform(0.5, 1.5))
gabor = Gabor(freq=LogUniform(np.log(0.5), np.log(1.5)))

gabors = gabor.generate(r * c, (32, 32), rng=rng)

tile(gabors, rows=r, cols=c)

plt.savefig('gabors.pdf')
plt.show()
Example #9
def create_model():

    #print trial_info
    print('---- INITIALIZING MODEL ----')
    global model

    model = spa.SPA()
    with model:

        #display current stimulus pair (not part of model)
        if nengo_gui_on:
            model.pair_input = nengo.Node(present_pair)
            model.pair_display = nengo.Node(
                display_func,
                size_in=model.pair_input.size_out)  # to show input
            nengo.Connection(model.pair_input,
                             model.pair_display,
                             synapse=None)

        # control
        model.control_net = nengo.Network()
        with model.control_net:
            #assuming the model knows which hand to use (which was blocked)
            model.hand_input = nengo.Node(get_hand)
            model.target_hand = spa.State(Dmid, vocab=vocab_motor, feedback=1)
            nengo.Connection(model.hand_input,
                             model.target_hand.input,
                             synapse=None)

            model.attend = spa.State(D, vocab=vocab_attend,
                                     feedback=.5)  # vocab_attend
            model.goal = spa.State(D, vocab_goal, feedback=1)  # current goal

        ### vision ###

        # set up network parameters
        n_vis = X_train.shape[1]  # nr of pixels, dimensions of network
        n_hid = 1000  # nr of gabor encoders/neurons

        # random state to start
        rng = np.random.RandomState(9)
        encoders = -1 * Gabor().generate(
            n_hid, (9, 9),
            rng=rng)  # gabor encoders on 9x9 patches
        encoders = Mask(
            (14, 90)).populate(encoders, rng=rng,
                               flatten=True)  # use them on part of the image

        model.visual_net = nengo.Network()
        with model.visual_net:

            #represent currently attended item
            model.attended_item = nengo.Node(present_item, size_in=D)
            nengo.Connection(model.attend.output, model.attended_item)

            model.vision_gabor = nengo.Ensemble(
                n_hid,
                n_vis,
                eval_points=X_train,
                #neuron_type=nengo.LIFRate(),
                neuron_type=nengo.LIF(),
                #intercepts=nengo.dists.Choice([-0.5]),  # fixed-intercept alternative
                intercepts=nengo.dists.Uniform(-0.1, 0.1),
                #max_rates=nengo.dists.Choice([100]),
                encoders=encoders)

            model.visual_representation = nengo.Ensemble(n_hid,
                                                         dimensions=Dmid)

            model.visconn = nengo.Connection(
                model.vision_gabor,
                model.visual_representation,
                synapse=0.01,  #was .005
                eval_points=X_train,
                function=train_targets,
                solver=nengo.solvers.LstsqL2(reg=0.01))
            nengo.Connection(model.attended_item,
                             model.vision_gabor,
                             synapse=None)  #synapse?

            # display attended item, only in gui
            if nengo_gui_on:
                model.display_attended = nengo.Node(
                    display_func,
                    size_in=model.attended_item.size_out)  # to show input
                nengo.Connection(model.attended_item,
                                 model.display_attended,
                                 synapse=None)

        #print(model.vision_gabor.neurons.probeable)

        ### central cognition ###

        # concepts
        #model.concepts = spa.AssociativeMemory(vocab_concepts,
        #                                       wta_output=True,
        #                              wta_inhibit_scale=1, #was 1
        #                              default_output_key='NONE', #what to say if input doesn't match
        #                              threshold=0.3)  # how strong does input need to be for it to recognize
        #nengo.Connection(model.visual_representation, model.concepts.input, transform=.8*vision_mapping) #not too fast to concepts, might have to be increased to have model react faster to first word.

        #concepts accumulator
        #model.concepts_evidence = spa.State(1, feedback=1, feedback_synapse=0.03) #the lower the synapse, the faster it accumulates (was .1)
        #concepts_evidence_scale = 2.5
        #nengo.Connection(model.concepts.am.elem_output, model.concepts_evidence.input,
        #    transform=concepts_evidence_scale * np.ones((1, model.concepts.am.elem_output.size_out)),synapse=0.005)

        #reset if concepts is NONE (default)
        #nengo.Connection(model.concepts.am.ensembles[-1], model.concepts_evidence.all_ensembles[0].neurons,
        #       transform=np.ones((model.concepts_evidence.all_ensembles[0].n_neurons, 1)) * -40, # was -10
        #       synapse=0.005) #lower synapse gives shorter impact of reset - makes the reaction a little slower

        # pair representation
        #model.vis_pair = spa.State(D, vocab=vocab_concepts, feedback=1.4) #was 2, 1.6 works ok, but everything gets activated.

        #model.dm_learned_words = spa.AssociativeMemory(vocab_learned_words,default_output_key='NONE',threshold=.3) #familiarity should be continuous over all items, so no wta
        #nengo.Connection(model.dm_learned_words.output,model.dm_learned_words.input,transform=.4,synapse=.01)

        # this stores the accumulated evidence for or against familiarity
        #model.familiarity = spa.State(1, feedback=1, feedback_synapse=0.1) #fb syn influences speed of acc
        #familiarity_scale = 0.2
        #nengo.Connection(model.dm_learned_words.am.ensembles[-1], model.familiarity.input, transform=-familiarity_scale) #accumulate to -1
        #nengo.Connection(model.dm_learned_words.am.elem_output, model.familiarity.input, #am.element_output == all outputs, we sum
        #                transform=familiarity_scale * np.ones((1, model.dm_learned_words.am.elem_output.size_out))) #accumulate to 1

        #model.do_fam = spa.AssociativeMemory(vocab_reset, default_output_key='CLEAR', threshold=.2)
        #nengo.Connection(model.do_fam.am.ensembles[-1], model.familiarity.all_ensembles[0].neurons,
        #                transform=np.ones((model.familiarity.all_ensembles[0].n_neurons, 1)) * -10,
        #               synapse=0.005)

        #fam model.dm_pairs = spa.AssociativeMemory(vocab_learned_pairs, input_keys=list_of_pairs,wta_output=True)
        #fam nengo.Connection(model.dm_pairs.output,model.dm_pairs.input,transform=.5)

        #this works:
        #fam model.representation = spa.AssociativeMemory(vocab_learned_pairs, input_keys=list_of_pairs, wta_output=True)
        #fam nengo.Connection(model.representation.output, model.representation.input, transform=2)
        #fam model.rep_filled = spa.State(1,feedback_synapse=.005) #no fb syn specified
        #fam nengo.Connection(model.representation.am.elem_output,model.rep_filled.input, #am.element_output == all outputs, we sum
        #fam                  transform=.8*np.ones((1,model.representation.am.elem_output.size_out)),synapse=0)

        #this doesn't:
        #model.representation = spa.State(D,feedback=1)
        #model.rep_filled = spa.State(1,feedback_synapse=.005) #no fb syn specified
        #nengo.Connection(model.representation.output,model.rep_filled.input, #am.element_output == all outputs, we sum
        #                 transform=.8*np.ones((1,model.representation.output.size_out)),synapse=0)

        # this shouldn't really be fixed I think
        #fam model.comparison = spa.Compare(D, vocab=vocab_concepts)

        #motor
        #motor model.motor_net = nengo.Network()
        #motor with model.motor_net:

        #motor    #input multiplier
        #motor     model.motor_input = spa.State(Dmid,vocab=vocab_motor)

        #motor    #higher motor area (SMA?)
        #motor    model.motor = spa.State(Dmid, vocab=vocab_motor,feedback=.7)

        #motor    #connect input multiplier with higher motor area
        #motor     nengo.Connection(model.motor_input.output,model.motor.input,synapse=.1,transform=2)

        #motor    #finger area
        #motor    model.fingers = spa.AssociativeMemory(vocab_fingers, input_keys=['L1', 'L2', 'R1', 'R2'], wta_output=True)

        #motor    #connection between higher order area (hand, finger), to lower area
        #motor    nengo.Connection(model.motor.output, model.fingers.input, transform=.2*motor_mapping)

        #motor    #finger position (spinal?)
        #motor    model.finger_pos = nengo.networks.EnsembleArray(n_neurons=50, n_ensembles=4)
        #motor    nengo.Connection(model.finger_pos.output, model.finger_pos.input, synapse=0.1, transform=0.3) #feedback

        #motor    #connection between finger area and finger position
        #motor    nengo.Connection(model.fingers.am.elem_output, model.finger_pos.input, transform=1.5*np.diag([0.55, .54, .56, .55])) #fix these

        #model.bg = spa.BasalGanglia(
        #   spa.Actions(
        # a_attend_item1    =    'dot(goal,DO_TASK) - dot(attend,ITEM1) --> goal=RECOG, attend=ITEM1',
        # b_attending_item1 =    'dot(goal,RECOG) + dot(attend,ITEM1) - concepts_evidence - .2 --> goal=RECOG, attend=ITEM1, vis_pair=2*attend*concepts+2*concepts', #, dm_learned_words=vis_pair',
        #c_attend_item2    =    'dot(goal,RECOG) + dot(attend,ITEM1) + concepts_evidence - 1.8 --> goal=RECOG, attend=ITEM2, vis_pair=2*attend*concepts+2*concepts, dm_learned_words=vis_pair',

        #d_attending_item2 =    'dot(goal,RECOG) + dot(attend,ITEM2) - concepts_evidence - .3 --> goal=RECOG, attend=ITEM2, vis_pair=2*attend*concepts+2*concepts, dm_learned_words=vis_pair',
        #e_judge_familiarity =  'dot(goal,RECOG) + dot(attend,ITEM2) + concepts_evidence - 2.1 --> goal=FAMILIARITY, attend=ITEM2, vis_pair=2*attend*concepts+2*concepts, dm_learned_words=vis_pair, do_fam=GO',

        #   fa_judge_familiarityA = 'dot(goal,FAMILIARITY) - .0 --> goal=FAMILIARITY, dm_learned_words=vis_pair, do_fam=GO',

        #motor   g_respond_unfamiliar = 'dot(goal,FAMILIARITY+RESPOND) - familiarity - .9 --> goal=RESPOND, dm_learned_words=vis_pair, do_fam=GO, motor_input=1.5*target_hand+MIDDLE',
        #motor h_respond_familiar =   'dot(goal,FAMILIARITY+RESPOND) + familiarity - .9 --> goal=RESPOND, dm_learned_words=vis_pair, do_fam=GO, motor_input=1.5*target_hand+INDEX,vis_pair=dm_learned_words',

        #fam 'dot(goal,RECOG2)+dot(attend,ITEM2)+familiarity-1.3 --> goal=RECOLLECTION,dm_pairs = 2*vis_pair, representation=3*dm_pairs',# vis_pair=ITEM2*concepts',
        #fam 'dot(goal,RECOLLECTION) - .5 --> goal=RECOLLECTION, representation=2*dm_pairs',

        #fam 'dot(goal,RECOLLECTION) + 2*rep_filled - 1.3 --> goal=COMPARE_ITEM1, attend=ITEM1, comparison_A = 2*vis_pair,comparison_B = 2*representation*~attend',
        #fam 'dot(goal,COMPARE_ITEM1) + rep_filled + comparison -1 --> goal=COMPARE_ITEM2, attend=ITEM2, comparison_A = 2*vis_pair',#comparison_B = 2*representation*~attend',
        #fam 'dot(goal,COMPARE_ITEM1) + rep_filled + (1-comparison) -1 --> goal=RESPOND,motor_input=1.0*target_hand+MIDDLE',#comparison_A = 2*vis_pair,comparison_B = 2*representation*~attend',
        #fam 'dot(goal,COMPARE_ITEM2) + rep_filled + comparison - 1 --> goal=RESPOND,motor_input=1.0*target_hand+INDEX',#comparison_A = 2*vis_pair,comparison_B = 2*representation*~attend',
        #fam 'dot(goal,COMPARE_ITEM2) + rep_filled + (1-comparison) -1 --> goal=RESPOND,motor_input=1.0*target_hand+MIDDLE',#comparison_A = 2*vis_pair,comparison_B = 2*representation*~attend',

        #fam 'dot(goal,RESPOND) + comparison - 1 --> goal=RESPOND, motor_input=1.0*target_hand+INDEX', #comparison_A = 2*vis_pair,comparison_B = 2*representation*~attend',
        #fam 'dot(goal,RESPOND) + (1-comparison) - 1 --> goal=RESPOND, motor_input=1.0*target_hand+MIDDLE', #comparison_A = 2*vis_pair,comparison_B = 2*representation*~attend',

        # 'dot(goal,RECOLLECTION) + (1 - dot(representation,vis_pair)) - 1.3 --> goal=RESPOND, motor_input=1.0*target_hand+MIDDLE',
        #motor x_response_done = 'dot(goal,RESPOND) + dot(motor,MIDDLE+INDEX) - .5 --> goal=END',
        #motor y_end = 'dot(goal,END)-1 --> goal=END',
        #z_threshold = '.1 -->'

        #possible to match complete buffer, ie is representation filled?
        # motor_input=1.5*target_hand+MIDDLE,

        # ))

        #'dot(attention, W1) - evidence - 0.8 --> motor=NO, attention=W1',
        #'dot(attention, W1) + evidence - 0.8 --> attention=W2, reset=EVIDENCE',
        #'dot(attention, W1) --> attention=W1',  # if we don't set attention it goes back to 0
        #'dot(attention, W2) - evidence - 0.8 --> motor=NO, attention=W2',
        #'dot(attention, W2) + evidence - 0.8 --> motor=YES, attention=W2',
        #'dot(attention, W2) --> attention=W2',  # option might be feedback on attention, then no rule 3/6 but default rule

        #model.thalamus = spa.Thalamus(model.bg)

        #model.cortical = spa.Cortical( # cortical connection: shorthand for doing everything with states and connections
        #   spa.Actions(
        #  'motor_input = .04*target_hand',
        #'dm_learned_words = .8*concepts', #.5
        #'dm_pairs = 2*stimulus'
        #'vis_pair = 2*attend*concepts+concepts',
        #fam 'comparison_A = 2*vis_pair',
        #fam 'comparison_B = 2*representation*~attend',

        # ))

        #probes
        #model.pr_goal = nengo.Probe(model.goal.output,synapse=.01)
        #motor model.pr_motor_pos = nengo.Probe(model.finger_pos.output,synapse=.01) #raw vector (dimensions x time)
        #motor model.pr_motor = nengo.Probe(model.fingers.output,synapse=.01)
        #model.pr_motor1 = nengo.Probe(model.motor.output, synapse=.01)
        #model.pr_target = nengo.Probe(model.target_hand.output, synapse=.01)
        #model.pr_attend = nengo.Probe(model.attend.output, synapse=.01)
        model.pr_vision_gabor = nengo.Probe(model.vision_gabor.neurons)
        # ,synapse=.01) #do we need synapse, or should we do something with the spikes
        #model.pr_familiarity = nengo.Probe(model.dm_learned_words.am.elem_output,synapse=.01) #element output, don't include default

        #multiply spikes with the connection weights

        #input
        model.input = spa.Input(goal=lambda t: 'DO_TASK'
                                if t < 0.02 else '0', )
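
The input presents DO_TASK for the first 20 ms of the trial and nothing afterwards; an equivalent named function (hypothetical, mirroring the goal_func used in the later example) would be:

def goal_func(t):
    return 'DO_TASK' if t < 0.02 else '0'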
Example #10
def create_model():

    #print trial_info
    print('---- INITIALIZING MODEL ----')
    global model

    model = spa.SPA()
    with model:

        #display current stimulus pair (not part of model)
        if nengo_gui_on:
            model.pair_input = nengo.Node(present_pair)
            model.pair_display = nengo.Node(
                display_func,
                size_in=model.pair_input.size_out)  # to show input
            nengo.Connection(model.pair_input,
                             model.pair_display,
                             synapse=None)

        # control
        model.control_net = nengo.Network()
        with model.control_net:
            #assuming the model knows which hand to use (which was blocked)
            model.hand_input = nengo.Node(get_hand)
            model.target_hand = spa.State(Dmid, vocab=vocab_motor, feedback=1)
            nengo.Connection(model.hand_input,
                             model.target_hand.input,
                             synapse=None)

            model.attend = spa.State(D, vocab=vocab_attend,
                                     feedback=.5)  # vocab_attend
            model.goal = spa.State(Dlow, vocab=vocab_goal,
                                   feedback=.7)  # current goal

        ### vision ###

        # set up network parameters
        n_vis = X_train.shape[1]  # nr of pixels, dimensions of network
        n_hid = 1000  # nr of gabor encoders/neurons

        # random state to start
        rng = np.random.RandomState(9)
        encoders = Gabor().generate(
            n_hid, (4, 4), rng=rng)  # gabor encoders on 4x4 patches
        encoders = Mask(
            (14, 90)).populate(encoders, rng=rng,
                               flatten=True)  # use them on part of the image

        model.visual_net = nengo.Network()
        with model.visual_net:

            #represent currently attended item
            model.attended_item = nengo.Node(present_item2, size_in=D)
            nengo.Connection(model.attend.output, model.attended_item)

            model.vision_gabor = nengo.Ensemble(
                n_hid,
                n_vis,
                eval_points=X_train,
                #    neuron_type=nengo.LIF(),
                neuron_type=nengo.AdaptiveLIF(
                    tau_n=.01, inc_n=.05
                ),  #to get a better fit, use more realistic neurons that adapt to input
                intercepts=nengo.dists.Uniform(-0.1, 0.1),
                #intercepts=nengo.dists.Choice([-0.5]), #should we comment this out? not sure what's happening
                #max_rates=nengo.dists.Choice([100]),
                encoders=encoders)
            # recurrent connection; the .5 transform sets the feedback strength

            zeros = np.zeros_like(X_train)
            nengo.Connection(
                model.vision_gabor,
                model.vision_gabor,
                synapse=0.005,  #.1
                eval_points=np.vstack(
                    [X_train, zeros,
                     np.random.randn(*X_train.shape)]),
                transform=.5)

            model.visual_representation = nengo.Ensemble(n_hid,
                                                         dimensions=Dmid)

            model.visconn = nengo.Connection(
                model.vision_gabor,
                model.visual_representation,
                synapse=0.005,  #was .005
                eval_points=X_train,
                function=train_targets,
                solver=nengo.solvers.LstsqL2(reg=0.01))
            nengo.Connection(model.attended_item,
                             model.vision_gabor,
                             synapse=.02)  #.03) #synapse?

            # display attended item, only in gui
            if nengo_gui_on:
                # show what's being looked at
                model.display_attended = nengo.Node(
                    display_func,
                    size_in=model.attended_item.size_out)  # to show input
                nengo.Connection(model.attended_item,
                                 model.display_attended,
                                 synapse=None)
                #add node to plot total visual activity
                model.visual_activation = nengo.Node(None, size_in=1)
                nengo.Connection(model.vision_gabor.neurons,
                                 model.visual_activation,
                                 transform=np.ones((1, n_hid)),
                                 synapse=None)

        ### central cognition ###

        ##### Concepts #####
        model.concepts = spa.AssociativeMemory(
            vocab_all_words,  #vocab_concepts,
            wta_output=True,
            wta_inhibit_scale=1,  #was 1
            #default_output_key='NONE', #what to say if input doesn't match
            threshold=0.3
        )  # how strong does input need to be for it to recognize
        nengo.Connection(
            model.visual_representation,
            model.concepts.input,
            transform=.8 * vision_mapping
        )  #not too fast to concepts, might have to be increased to have model react faster to first word.

        #concepts accumulator
        model.concepts_evidence = spa.State(
            1, feedback=1, feedback_synapse=0.005
        )  #the lower the synapse, the faster it accumulates (was .1)
        concepts_evidence_scale = 2.5
        nengo.Connection(model.concepts.am.elem_output,
                         model.concepts_evidence.input,
                         transform=concepts_evidence_scale * np.ones(
                             (1, model.concepts.am.elem_output.size_out)),
                         synapse=0.005)

        #concepts switch
        model.do_concepts = spa.AssociativeMemory(vocab_reset,
                                                  default_output_key='CLEAR',
                                                  threshold=.2)
        nengo.Connection(
            model.do_concepts.am.ensembles[-1],
            model.concepts_evidence.all_ensembles[0].neurons,
            transform=np.ones(
                (model.concepts_evidence.all_ensembles[0].n_neurons, 1)) * -10,
            synapse=0.005)

        ###### Visual Representation ######
        model.vis_pair = spa.State(
            D, vocab=vocab_all_words, feedback=1.0, feedback_synapse=.05
        )  #was 2, 1.6 works ok, but everything gets activated.

        ##### Familiarity #####

        # Assoc Mem with Learned Words
        # - familiarity signal should be continuous over all items, so no wta
        model.dm_learned_words = spa.AssociativeMemory(vocab_learned_words,
                                                       threshold=.2)
        nengo.Connection(model.dm_learned_words.output,
                         model.dm_learned_words.input,
                         transform=.4,
                         synapse=.02)

        # Familiarity Accumulator

        model.familiarity = spa.State(
            1, feedback=.9,
            feedback_synapse=0.1)  #fb syn influences speed of acc
        #familiarity_scale = 0.2 #keep stable for negative fam

        # familiarity accumulator switch
        model.do_fam = spa.AssociativeMemory(vocab_reset,
                                             default_output_key='CLEAR',
                                             threshold=.2)
        # reset
        nengo.Connection(
            model.do_fam.am.ensembles[-1],
            model.familiarity.all_ensembles[0].neurons,
            transform=np.ones(
                (model.familiarity.all_ensembles[0].n_neurons, 1)) * -10,
            synapse=0.005)

        #first a sum to represent summed similarity
        model.summed_similarity = nengo.Ensemble(n_neurons=100, dimensions=1)
        nengo.Connection(
            model.dm_learned_words.am.elem_output,
            model.summed_similarity,
            transform=np.ones(
                (1,
                 model.dm_learned_words.am.elem_output.size_out)))  #take sum

        #then a connection to accumulate this summed sim
        def familiarity_acc_transform(summed_sim):
            fam_scale = .5
            fam_threshold = 0  #actually, kind of bias
            fam_max = 1
            return fam_scale * (2 * ((summed_sim - fam_threshold) /
                                     (fam_max - fam_threshold)) - 1)

        nengo.Connection(model.summed_similarity,
                         model.familiarity.input,
                         function=familiarity_acc_transform)

        ##### Recollection & Representation #####

        model.dm_pairs = spa.AssociativeMemory(
            vocab_learned_pairs, wta_output=True)  #input_keys=list_of_pairs
        nengo.Connection(model.dm_pairs.output,
                         model.dm_pairs.input,
                         transform=.5,
                         synapse=.05)

        #representation
        rep_scale = 0.5
        model.representation = spa.State(D,
                                         vocab=vocab_all_words,
                                         feedback=1.0)
        model.rep_filled = spa.State(
            1, feedback=.9,
            feedback_synapse=.1)  #fb syn influences speed of acc
        model.do_rep = spa.AssociativeMemory(vocab_reset,
                                             default_output_key='CLEAR',
                                             threshold=.2)
        nengo.Connection(
            model.do_rep.am.ensembles[-1],
            model.rep_filled.all_ensembles[0].neurons,
            transform=np.ones(
                (model.rep_filled.all_ensembles[0].n_neurons, 1)) * -10,
            synapse=0.005)

        nengo.Connection(model.representation.output,
                         model.rep_filled.input,
                         transform=rep_scale *
                         np.reshape(sum(vocab_learned_pairs.vectors),
                                    ((1, D))))

        ###### Comparison #####

        model.comparison = spa.Compare(D,
                                       vocab=vocab_all_words,
                                       neurons_per_multiply=500,
                                       input_magnitude=.3)

        #turns out comparison is not an accumulator - we also need one of those.
        model.comparison_accumulator = spa.State(
            1, feedback=.9,
            feedback_synapse=0.05)  #fb syn influences speed of acc
        model.do_compare = spa.AssociativeMemory(vocab_reset,
                                                 default_output_key='CLEAR',
                                                 threshold=.2)

        #reset
        nengo.Connection(
            model.do_compare.am.ensembles[-1],
            model.comparison_accumulator.all_ensembles[0].neurons,
            transform=np.ones(
                (model.comparison_accumulator.all_ensembles[0].n_neurons, 1)) *
            -10,
            synapse=0.005)

        # applying a function to a 'passthrough' node raises an error, so an ensemble is inserted in between:
        model.comparison_result = nengo.Ensemble(n_neurons=100, dimensions=1)
        nengo.Connection(model.comparison.output, model.comparison_result)

        def comparison_acc_transform(comparison):
            comparison_scale = .6
            comparison_threshold = 0  #actually, kind of bias
            comparison_max = .6
            return comparison_scale * (2 * (
                (comparison - comparison_threshold) /
                (comparison_max - comparison_threshold)) - 1)

        nengo.Connection(model.comparison_result,
                         model.comparison_accumulator.input,
                         function=comparison_acc_transform)

        #motor
        model.motor_net = nengo.Network()
        with model.motor_net:

            #input multiplier
            model.motor_input = spa.State(Dmid, vocab=vocab_motor)

            #higher motor area (SMA?)
            model.motor = spa.State(Dmid, vocab=vocab_motor, feedback=.7)

            #connect input multiplier with higher motor area
            nengo.Connection(model.motor_input.output,
                             model.motor.input,
                             synapse=.1,
                             transform=2)

            #finger area
            model.fingers = spa.AssociativeMemory(
                vocab_fingers,
                input_keys=['L1', 'L2', 'R1', 'R2'],
                wta_output=True)
            nengo.Connection(model.fingers.output,
                             model.fingers.input,
                             synapse=0.1,
                             transform=0.3)  #feedback

            #connection between higher order area (hand, finger), to lower area
            nengo.Connection(model.motor.output,
                             model.fingers.input,
                             transform=.25 * motor_mapping)  #was .2

            #finger position (spinal?)
            model.finger_pos = nengo.networks.EnsembleArray(n_neurons=50,
                                                            n_ensembles=4)
            nengo.Connection(model.finger_pos.output,
                             model.finger_pos.input,
                             synapse=0.1,
                             transform=0.8)  #feedback

            #connection between finger area and finger position
            nengo.Connection(model.fingers.am.elem_output,
                             model.finger_pos.input,
                             transform=1.0 *
                             np.diag([0.55, .54, .56, .55]))  #fix these

        model.bg = spa.BasalGanglia(
            spa.Actions(
                #wait & start
                a_aa_wait='dot(goal,WAIT) - .9 --> goal=0',
                a_attend_item1=
                'dot(goal,DO_TASK) - .0 --> goal=RECOG, attend=ITEM1, do_concepts=GO',

                #attend words
                b_attending_item1=
                'dot(goal,RECOG) + dot(attend,ITEM1) - concepts_evidence - .3 --> goal=RECOG, attend=ITEM1, do_concepts=GO',  # vis_pair=2.5*(ITEM1*concepts)',
                c_attend_item2=
                'dot(goal,RECOG) + dot(attend,ITEM1) + concepts_evidence - 1.6 --> goal=RECOG2, attend=ITEM2, vis_pair=3*(ITEM1*concepts)',
                d_attending_item2=
                'dot(goal,RECOG2+RECOG) + dot(attend,ITEM2) - concepts_evidence - .4 --> goal=RECOG2, attend=ITEM2, do_concepts=GO, dm_learned_words=1.0*(~ITEM1*vis_pair)',  #vis_pair=1.2*(ITEM2*concepts)
                e_start_familiarity=
                'dot(goal,RECOG2) + dot(attend,ITEM2) + concepts_evidence - 1.8 --> goal=FAMILIARITY, do_fam=GO, vis_pair=1.9*(ITEM2*concepts), dm_learned_words=2.0*(~ITEM1*vis_pair+~ITEM2*vis_pair)',

                #judge familiarity
                f_accumulate_familiarity=
                '1.1*dot(goal,FAMILIARITY) - 0.2 --> goal=FAMILIARITY-RECOG2, do_fam=GO, dm_learned_words=.8*(~ITEM1*vis_pair+~ITEM2*vis_pair)',
                g_respond_unfamiliar=
                'dot(goal,FAMILIARITY) - familiarity - .5*dot(fingers,L1+L2+R1+R2) - .6 --> goal=RESPOND_MISMATCH-FAMILIARITY, do_fam=GO, motor_input=1.6*(target_hand+MIDDLE)',
                #g2_respond_familiar =   'dot(goal,FAMILIARITY) + familiarity - .5*dot(fingers,L1+L2+R1+R2) - .6 --> goal=RESPOND, do_fam=GO, motor_input=1.6*(target_hand+INDEX)',

                #recollection & representation
                h_recollection=
                'dot(goal,FAMILIARITY) + familiarity - .5*dot(fingers,L1+L2+R1+R2) - .6 --> goal=RECOLLECTION-FAMILIARITY, dm_pairs = vis_pair',
                i_representation=
                'dot(goal,RECOLLECTION) - rep_filled - .1 --> goal=RECOLLECTION, dm_pairs = vis_pair, representation=3*dm_pairs, do_rep=GO',

                #comparison & respond
                j_10_compare_word1=
                'dot(goal,RECOLLECTION+1.4*COMPARE_ITEM1) + rep_filled - .9 --> goal=COMPARE_ITEM1-RECOLLECTION, do_rep=GO, do_compare=GO, comparison_A = ~ITEM1*vis_pair, comparison_B = ~ITEM1*representation',
                k_11_match_word1=
                'dot(goal,COMPARE_ITEM1) + comparison_accumulator - .7 --> goal=COMPARE_ITEM2-COMPARE_ITEM1, do_rep=GO, comparison_A = ~ITEM1*vis_pair, comparison_B = ~ITEM1*representation',
                l_12_mismatch_word1=
                'dot(goal,COMPARE_ITEM1) + .4 * dot(goal,RESPOND_MISMATCH) - comparison_accumulator - .7 --> goal=RESPOND_MISMATCH-COMPARE_ITEM1, do_rep=GO, motor_input=1.6*(target_hand+MIDDLE), do_compare=GO, comparison_A = ~ITEM1*vis_pair, comparison_B = ~ITEM1*representation',
                compare_word2=
                'dot(goal,COMPARE_ITEM2) - .5 --> goal=COMPARE_ITEM2, do_compare=GO, comparison_A = ~ITEM2*vis_pair, comparison_B = ~ITEM2*representation',
                m_match_word2=
                'dot(goal,COMPARE_ITEM2) + comparison_accumulator - .7 --> goal=RESPOND_MATCH-COMPARE_ITEM2, motor_input=1.6*(target_hand+INDEX), do_compare=GO, comparison_A = ~ITEM2*vis_pair, comparison_B = ~ITEM2*representation',
                n_mismatch_word2=
                'dot(goal,COMPARE_ITEM2) - comparison_accumulator - dot(fingers,L1+L2+R1+R2)- .7 --> goal=RESPOND_MISMATCH-COMPARE_ITEM2, motor_input=1.6*(target_hand+MIDDLE),do_compare=GO, comparison_A = ~ITEM2*vis_pair, comparison_B = ~ITEM2*representation',

                #respond
                o_respond_match=
                'dot(goal,RESPOND_MATCH) - .1 --> goal=RESPOND_MATCH, motor_input=1.6*(target_hand+INDEX)',
                p_respond_mismatch=
                'dot(goal,RESPOND_MISMATCH) - .1 --> goal=RESPOND_MISMATCH, motor_input=1.6*(target_hand+MIDDLE)',

                #finish
                x_response_done=
                'dot(goal,RESPOND_MATCH) + dot(goal,RESPOND_MISMATCH) + 2*dot(fingers,L1+L2+R1+R2) - .7 --> goal=2*END',
                y_end=
                'dot(goal,END)-.1 --> goal=END-RESPOND_MATCH-RESPOND_MISMATCH',
                z_threshold='.05 --> goal=0'

                #possible to match complete buffer, ie is representation filled?
                # motor_input=1.5*target_hand+MIDDLE,
            ))

        print(model.bg.actions.count)
        #print(model.bg.dimensions)

        model.thalamus = spa.Thalamus(model.bg)

        model.cortical = spa.Cortical(  # cortical connection: shorthand for doing everything with states and connections
            spa.Actions(
                #  'motor_input = .04*target_hand',
                # 'dm_learned_words = .1*vis_pair',
                #'dm_pairs = 2*stimulus'
                #'vis_pair = 2*attend*concepts+concepts',
                #fam 'comparison_A = 2*vis_pair',
                #fam 'comparison_B = 2*representation*~attend',
            ))

        #probes
        model.pr_motor_pos = nengo.Probe(
            model.finger_pos.output,
            synapse=.01)  #raw vector (dimensions x time)
        model.pr_motor = nengo.Probe(model.fingers.output, synapse=.01)
        #model.pr_motor1 = nengo.Probe(model.motor.output, synapse=.01)

        if not nengo_gui_on:
            model.pr_vision_gabor = nengo.Probe(
                model.vision_gabor.neurons, synapse=.005
            )  #do we need synapse, or should we do something with the spikes
            model.pr_familiarity = nengo.Probe(
                model.dm_learned_words.am.elem_output,
                synapse=.01)  #element output, don't include default
            model.pr_concepts = nengo.Probe(
                model.concepts.am.elem_output,
                synapse=.01)  # element output, don't include default

        #multiply spikes with the connection weights

        #input
        model.input = spa.Input(goal=goal_func)

        #print(sum(ens.n_neurons for ens in model.all_ensembles))

        #return model

        #to show select BG rules
        # get names rules
        if nengo_gui_on:
            vocab_actions = spa.Vocabulary(model.bg.output.size_out)
            for i, action in enumerate(model.bg.actions.actions):
                vocab_actions.add(action.name.upper(),
                                  np.eye(model.bg.output.size_out)[i])
            model.actions = spa.State(model.bg.output.size_out,
                                      subdimensions=model.bg.output.size_out,
                                      vocab=vocab_actions)
            nengo.Connection(model.thalamus.output, model.actions.input)

            for net in model.networks:
                if net.label is not None and net.label.startswith('channel'):
                    net.label = ''
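
A run sketch (assuming the globals that create_model reads, such as X_train and the vocabularies, are defined; the duration here is illustrative):

create_model()
with nengo.Simulator(model) as sim:
    sim.run(1.0)
finger_output = sim.data[model.pr_motor]  # probed finger vectors over time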
Example #11
def test_conv_connection(channels, Simulator, seed, rng, plt, allclose):
    # channels_last = True
    channels_last = False
    if channels > 1:
        pytest.xfail("Cannot send population spikes to chip")

    # load data
    with open(os.path.join(test_dir, 'mnist10.pkl'), 'rb') as f:
        test10 = pickle.load(f)

    test_x = test10[0][0].reshape(28, 28)
    test_x = 1.999 * test_x - 0.999  # range (-1, 1)
    test_x = test_x[:, :, None]  # single channel
    input_shape = ImageShape(test_x.shape[0],
                             test_x.shape[1],
                             channels,
                             channels_last=channels_last)

    filters = Gabor(freq=Uniform(0.5, 1)).generate(8, (7, 7), rng=rng)
    filters = filters[None, :, :, :]  # single channel
    filters = np.transpose(filters, (0, 2, 3, 1))  # filters last
    strides = (2, 2)
    tau_rc = 0.02
    tau_ref = 0.002
    tau_s = 0.005
    dt = 0.001

    neuron_type = LoihiLIF(tau_rc=tau_rc, tau_ref=tau_ref)

    pres_time = 0.1

    with nengo.Network(seed=seed) as model:
        nengo_loihi.add_params(model)

        u = nengo.Node(nengo.processes.PresentInput([test_x.ravel()],
                                                    pres_time),
                       label='u')

        a = nengo.Ensemble(input_shape.size,
                           1,
                           neuron_type=LoihiSpikingRectifiedLinear(),
                           max_rates=nengo.dists.Choice([40 / channels]),
                           intercepts=nengo.dists.Choice([0]),
                           label='a')
        model.config[a].on_chip = False

        if channels == 1:
            nengo.Connection(u, a.neurons, transform=1, synapse=None)
        elif channels == 2:
            # encode image into spikes using two channels (on/off)
            if input_shape.channels_last:
                nengo.Connection(u, a.neurons[0::2], transform=1, synapse=None)
                nengo.Connection(u,
                                 a.neurons[1::2],
                                 transform=-1,
                                 synapse=None)
            else:
                k = input_shape.rows * input_shape.cols
                nengo.Connection(u, a.neurons[:k], transform=1, synapse=None)
                nengo.Connection(u, a.neurons[k:], transform=-1, synapse=None)

            filters = np.vstack([filters, -filters])
        else:
            raise ValueError("Test not configured for more than two channels")

        conv2d_transform = Conv2D.from_kernel(filters,
                                              input_shape,
                                              strides=strides)
        output_shape = conv2d_transform.output_shape

        gain, bias = neuron_type.gain_bias(max_rates=100, intercepts=0)
        gain = gain * 0.01  # account for `a` max_rates
        b = nengo.Ensemble(output_shape.size,
                           1,
                           neuron_type=neuron_type,
                           gain=nengo.dists.Choice([gain[0]]),
                           bias=nengo.dists.Choice([bias[0]]),
                           label='b')
        nengo.Connection(a.neurons,
                         b.neurons,
                         synapse=tau_s,
                         transform=conv2d_transform)

        bp = nengo.Probe(b.neurons)

    with nengo.Simulator(model, dt=dt, optimize=False) as sim:
        sim.run(pres_time)
    ref_out = sim.data[bp].mean(axis=0).reshape(output_shape.shape())

    # Currently, default TensorFlow does not support channels first in conv
    use_nengo_dl = nengo_dl is not None and channels_last
    ndl_out = np.zeros_like(ref_out)
    if use_nengo_dl:
        with nengo_dl.Simulator(model, dt=dt) as sim:
            sim.run(pres_time)
        ndl_out = sim.data[bp].mean(axis=0).reshape(output_shape.shape())

    with nengo_loihi.Simulator(model, dt=dt, target='simreal') as sim:
        sim.run(pres_time)
    real_out = sim.data[bp].mean(axis=0).reshape(output_shape.shape())

    with Simulator(model, dt=dt) as sim:
        sim.run(pres_time)
    sim_out = sim.data[bp].mean(axis=0).reshape(output_shape.shape())

    if not output_shape.channels_last:
        ref_out = np.transpose(ref_out, (1, 2, 0))
        ndl_out = np.transpose(ndl_out, (1, 2, 0))
        real_out = np.transpose(real_out, (1, 2, 0))
        sim_out = np.transpose(sim_out, (1, 2, 0))

    out_max = max(ref_out.max(), sim_out.max())

    # --- plot results
    rows = 2
    cols = 3

    ax = plt.subplot(rows, cols, 1)
    imshow(test_x, vmin=0, vmax=1, ax=ax)

    ax = plt.subplot(rows, cols, 2)
    tile(np.transpose(filters[0], (2, 0, 1)), cols=8, ax=ax)

    ax = plt.subplot(rows, cols, 3)
    plt.hist(ref_out.ravel(), bins=31)
    plt.hist(sim_out.ravel(), bins=31)

    ax = plt.subplot(rows, cols, 4)
    tile(np.transpose(ref_out, (2, 0, 1)), vmin=0, vmax=out_max, cols=8, ax=ax)

    ax = plt.subplot(rows, cols, 5)
    tile(np.transpose(ndl_out, (2, 0, 1)), vmin=0, vmax=out_max, cols=8, ax=ax)

    ax = plt.subplot(rows, cols, 6)
    tile(np.transpose(sim_out, (2, 0, 1)), vmin=0, vmax=out_max, cols=8, ax=ax)

    if use_nengo_dl:
        assert allclose(ndl_out, ref_out, atol=1e-5, rtol=1e-5)
    assert allclose(real_out, ref_out, atol=1, rtol=1e-3)
    assert allclose(sim_out, ref_out, atol=10, rtol=1e-3)
Example #12
0
def test_conv2d_weights(request, plt, seed, rng, allclose):
    pop_type = 32
    out_channels_last = False

    # load data
    with open(os.path.join(test_dir, 'mnist10.pkl'), 'rb') as f:
        test10 = pickle.load(f)

    test_x = test10[0][0].reshape(28, 28)
    test_x = test_x[3:24, 3:24]
    test_x = 1.999 * test_x - 0.999

    filters = Gabor(freq=Uniform(0.5, 1)).generate(8, (7, 7), rng=rng)
    sti, stj = 2, 2
    tau_rc = 0.02
    tau_ref = 0.002
    tau_s = 0.005
    dt = 0.001

    encode_type = nengo.SpikingRectifiedLinear()
    encode_gain = 1. / dt
    encode_bias = 0.
    neuron_type = nengo.LIF(tau_rc=tau_rc, tau_ref=tau_ref)
    neuron_gain = 1.
    neuron_bias = 1.

    pres_time = 0.2

    # --- compute ideal outputs
    def conv_pm(x, kernel):
        y0 = scipy.signal.correlate2d(x[0], kernel, mode='valid')[::sti, ::stj]
        y1 = scipy.signal.correlate2d(x[1], kernel, mode='valid')[::sti, ::stj]
        return [y0, -y1]

    ref_out = np.array([test_x, -test_x])
    ref_out = loihi_rates(encode_type, ref_out, encode_gain, encode_bias, dt)
    ref_out = ref_out / encode_gain
    ref_out = np.array([conv_pm(ref_out, kernel) for kernel in filters])
    ref_out = ref_out.sum(axis=1)  # sum positive and negative parts
    ref_out = loihi_rates(neuron_type, ref_out, neuron_gain, neuron_bias, dt)

    # --- compute nengo_loihi outputs
    inp_biases = np.array([test_x, -test_x])
    inp_shape = ImageShape.from_shape(inp_biases.shape, channels_last=False)

    kernel = np.array([filters, -filters])  # two channels, pos and neg
    kernel = np.transpose(kernel, (0, 2, 3, 1))
    conv2d_transform = Conv2D.from_kernel(
        kernel,
        inp_shape,
        strides=(sti, stj),
        output_channels_last=out_channels_last)

    ni, nj, nk = inp_shape.shape(channels_last=True)
    out_size = ref_out.size
    nf, nyi, nyj = ref_out.shape
    assert out_size <= 1024

    model = loihi_cx.CxModel()

    # input group
    inp = loihi_cx.CxGroup(inp_shape.size, label='inp')
    assert inp.n <= 1024
    inp.configure_relu()
    inp.bias[:] = inp_biases.ravel()

    inp_ax = loihi_cx.CxAxons(inp_shape.n_pixels, label='inp_ax')
    inp_ax.set_axon_map(inp_shape.pixel_idxs(), inp_shape.channel_idxs())
    inp.add_axons(inp_ax)

    model.add_group(inp)

    # conv group
    neurons = loihi_cx.CxGroup(out_size, label='neurons')
    assert neurons.n <= 1024
    neurons.configure_lif(tau_rc=tau_rc, tau_ref=tau_ref, dt=dt)
    neurons.configure_filter(tau_s, dt=dt)
    neurons.bias[:] = neuron_bias

    synapses = loihi_cx.CxSynapses(inp_shape.n_pixels, label='synapses')
    weights, indices, axon_to_weight_map, cx_bases = conv2d_loihi_weights(
        conv2d_transform)
    synapses.set_population_weights(weights,
                                    indices,
                                    axon_to_weight_map,
                                    cx_bases,
                                    pop_type=pop_type)

    neurons.add_synapses(synapses)

    out_probe = loihi_cx.CxProbe(target=neurons, key='s')
    neurons.add_probe(out_probe)

    inp_ax.target = synapses
    model.add_group(neurons)

    # simulation
    model.discretize()

    n_steps = int(pres_time / dt)
    target = request.config.getoption("--target")
    if target == 'loihi':
        with LoihiSimulator(model, use_snips=False, seed=seed) as sim:
            sim.run_steps(n_steps)
            sim_out = sim.get_probe_output(out_probe)
    else:
        with CxSimulator(model, seed=seed) as sim:
            sim.run_steps(n_steps)
            sim_out = sim.get_probe_output(out_probe)

    sim_out = np.sum(sim_out, axis=0) / pres_time
    if out_channels_last:
        sim_out.shape = (nyi, nyj, nf)
        sim_out = np.transpose(sim_out, (2, 0, 1))
    else:
        sim_out.shape = (nf, nyi, nyj)

    out_max = max(ref_out.max(), sim_out.max())

    # --- plot results
    rows = 2
    cols = 2

    ax = plt.subplot(rows, cols, 1)
    tile(filters, cols=8, ax=ax)

    ax = plt.subplot(rows, cols, 2)
    tile(ref_out, vmin=0, vmax=out_max, cols=8, ax=ax)

    ax = plt.subplot(rows, cols, 3)
    plt.hist(ref_out.ravel(), bins=31)
    plt.hist(sim_out.ravel(), bins=31)

    ax = plt.subplot(rows, cols, 4)
    # tile(sim_out, vmin=0, vmax=1, cols=8, ax=ax)
    tile(sim_out, vmin=0, vmax=out_max, cols=8, ax=ax)

    assert allclose(sim_out, ref_out, atol=10, rtol=1e-3)
Example #13
0
model = nengo.Network(seed=3)
with model:
    stim = nengo.Node(ONE)
    # alternatives: lambda t: ONE if t < 0.1 else 0,
    # or nengo.processes.PresentInput(labels, 1)

    ens_params = dict(
        eval_points=X_train,
        neuron_type=nengo.LIFRate(),  #Why not use LIF?
        intercepts=nengo.dists.Choice([-0.5]),
        max_rates=nengo.dists.Choice([100]),
    )

    # linear filter used for edge detection as encoders, more plausible for human visual system
    encoders = Gabor().generate(1000, (11, 11), rng=rng)
    encoders = Mask((28, 28)).populate(encoders, rng=rng, flatten=True)

    ens = nengo.Ensemble(1000, dim**2, seed=3, encoders=encoders, **ens_params)

    ens2 = nengo.Ensemble(1000,
                          dim**2,
                          seed=3,
                          encoders=encoders,
                          **ens_params)

    #nengo.Connection(ens.neurons, ens.neurons, transform = rotated_after_encoder_weights, synapse=0.1)
    nengo.Connection(ens.neurons,
                     ens2.neurons,
                     transform=rotated_after_encoder_weights,
                     synapse=0.1)
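Example #13 relies on a precomputed rotated_after_encoder_weights matrix that is not shown. A minimal sketch of one way to build such a neuron-to-neuron transform, assuming X_rot holds rotated copies of the X_train images and reusing the ensemble parameters above (the function name and arguments here are illustrative, not the original author's code):

import nengo
import numpy as np

def rotation_weights(X_train, X_rot, encoders, reg=0.1):
    n_neurons = encoders.shape[0]
    neuron_type = nengo.LIFRate()
    gain, bias = neuron_type.gain_bias(100 * np.ones(n_neurons),
                                       -0.5 * np.ones(n_neurons))

    # rate responses of the first ensemble to each training image
    activities = neuron_type.rates(np.dot(X_train, encoders.T), gain, bias)

    # decoders mapping those activities to the rotated images
    decoders, _ = nengo.solvers.LstsqL2(reg=reg)(activities, X_rot)

    # re-encode through the second ensemble's encoders; gain is applied
    # here because a .neurons connection bypasses the encoder/gain stage
    return gain[:, None] * np.dot(encoders, decoders.T)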
Example #14
0
train_targets = one_hot_from_labels(train_labels, classes=10)
test_targets = one_hot_from_labels(test_labels, classes=10)

assert train_images.shape[1] == n_in
assert train_targets.shape[1] == n_out

# --- network
neuron_type = nengo.LIF()
n_hids = [1000, 1000]

print("Encoders")
max_rates0 = 100 * np.ones(n_hids[0])
intercepts0 = 0.1 * np.ones(n_hids[0])
gain0, bias0 = neuron_type.gain_bias(max_rates0, intercepts0)
encoders0 = Mask(s_in).populate(Gabor().generate(n_hids[0], (11, 11), rng=rng),
                                rng=rng,
                                flatten=True)
h0 = neuron_type.rates(np.dot(train_images, encoders0.T), gain0, bias0)

max_rates1 = 100 * np.ones(n_hids[1])
intercepts1 = 0.1 * np.ones(n_hids[1])
gain1, bias1 = neuron_type.gain_bias(max_rates1, intercepts1)
encoders1 = ciw_encoders(n_hids[1], h0, train_labels, rng=rng)
# encoders1 *= Mask(s_in).generate(
#     n_hid, rf_shape, rng=rng, flatten=True)
encoders1 /= npext.norm(encoders1, axis=1, keepdims=True)
h1 = neuron_type.rates(np.dot(h0, encoders1.T), gain1, bias1)

print("Solving")
solver = hunse_thesis.solvers.LstsqClassifier(reg=0.01)
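Example #14 stops right after constructing the classifier solver. Assuming LstsqClassifier follows Nengo's standard solver(A, Y) calling convention (returning a decoder matrix and an info dict), and that test_images and test_labels exist alongside the training arrays, the remaining train-and-evaluate step would look roughly like:

weights, info = solver(h1, train_targets)  # solve for output weights

# propagate the test set through both fixed-encoder layers
h0_test = neuron_type.rates(np.dot(test_images, encoders0.T), gain0, bias0)
h1_test = neuron_type.rates(np.dot(h0_test, encoders1.T), gain1, bias1)

# classify by the largest output and report the error rate
predictions = np.argmax(np.dot(h1_test, weights), axis=1)
print("Test error: %.2f%%" % (100 * (predictions != test_labels).mean()))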
Example #15
0
    'learning_rate_EE': 1e-4
}

model = nengo.Network()

with model:

    sensor = nengo.Node(env_iface.sensor)
    sensor_net = nengo.Ensemble(n_neurons=n_input,
                                dimensions=np.prod(env_iface.state_dim),
                                radius=sensor_radius)

    gabor_size = (5, 5)  # Size of the gabor filter

    # Generate the encoders for the sensory ensemble
    sensor_encoders = Gabor().generate(n_input, gabor_size, rng=rng)
    sensor_encoders = Mask(image_shape).populate(sensor_encoders,
                                                 rng=rng,
                                                 flatten=True)
    sensor_net.encoders = sensor_encoders

    srf_net = PRF(n_excitatory=n_input,
                  n_inhibitory=n_inhibitory,
                  connect_exc_inh_input=True,
                  n_outputs=n_place,
                  dimensions=env_iface.n_actions,
                  label="Spatial receptive field network",
                  seed=seed,
                  **srf_params)

    actor_net = nengo.Ensemble(n_neurons=n_actor,
Example #16
0
def test_conv_connection(channels, channels_last, Simulator, seed, rng, plt,
                         allclose):
    if channels_last:
        plt.saveas = None
        pytest.xfail("Blocked by CxBase cannot be > 256 bug")

    # load data
    with open(os.path.join(test_dir, 'mnist10.pkl'), 'rb') as f:
        test10 = pickle.load(f)

    test_x = test10[0][0].reshape(28, 28)
    test_x = 1.999 * test_x - 0.999  # range (-1, 1)
    input_shape = nengo_transforms.ChannelShape(
        (test_x.shape + (channels, )) if channels_last else
        ((channels, ) + test_x.shape),
        channels_last=channels_last)

    filters = Gabor(freq=Uniform(0.5, 1)).generate(8, (7, 7), rng=rng)
    filters = filters[None, :, :, :]  # single channel
    filters = np.transpose(filters, (2, 3, 0, 1))
    strides = (2, 2)
    tau_rc = 0.02
    tau_ref = 0.002
    tau_s = 0.005
    dt = 0.001

    neuron_type = LoihiLIF(tau_rc=tau_rc, tau_ref=tau_ref)

    pres_time = 0.1

    with nengo.Network(seed=seed) as model:
        nengo_loihi.add_params(model)

        u = nengo.Node(test_x.ravel(), label="u")

        a = nengo.Ensemble(input_shape.size,
                           1,
                           neuron_type=LoihiSpikingRectifiedLinear(),
                           max_rates=nengo.dists.Choice([40 / channels]),
                           intercepts=nengo.dists.Choice([0]),
                           label='a')
        model.config[a].on_chip = False

        if channels == 1:
            nengo.Connection(u, a.neurons, transform=1, synapse=None)
        elif channels == 2:
            # encode image into spikes using two channels (on/off)
            if input_shape.channels_last:
                nengo.Connection(u, a.neurons[0::2], transform=1, synapse=None)
                nengo.Connection(u,
                                 a.neurons[1::2],
                                 transform=-1,
                                 synapse=None)
            else:
                k = input_shape.spatial_shape[0] * input_shape.spatial_shape[1]
                nengo.Connection(u, a.neurons[:k], transform=1, synapse=None)
                nengo.Connection(u, a.neurons[k:], transform=-1, synapse=None)

            filters = np.concatenate([filters, -filters], axis=2)
        else:
            raise ValueError("Test not configured for more than two channels")

        conv2d_transform = nengo_transforms.Convolution(
            8,
            input_shape,
            strides=strides,
            kernel_size=(7, 7),
            channels_last=channels_last,
            init=filters)

        output_shape = conv2d_transform.output_shape

        gain, bias = neuron_type.gain_bias(max_rates=100, intercepts=0)
        gain = gain * 0.01  # account for `a` max_rates
        b = nengo.Ensemble(output_shape.size,
                           1,
                           neuron_type=neuron_type,
                           gain=nengo.dists.Choice([gain[0]]),
                           bias=nengo.dists.Choice([bias[0]]),
                           label='b')
        nengo.Connection(a.neurons,
                         b.neurons,
                         synapse=tau_s,
                         transform=conv2d_transform)

        bp = nengo.Probe(b.neurons)

    with nengo.Simulator(model, dt=dt, optimize=False) as sim:
        sim.run(pres_time)
    ref_out = sim.data[bp].mean(axis=0).reshape(output_shape.shape)

    # Currently, non-gpu TensorFlow does not support channels first in conv
    use_nengo_dl = HAS_DL and channels_last
    ndl_out = np.zeros_like(ref_out)
    if use_nengo_dl:
        with nengo_dl.Simulator(model, dt=dt) as sim_dl:
            sim_dl.run(pres_time)
        ndl_out = sim_dl.data[bp].mean(axis=0).reshape(output_shape.shape)

    with nengo_loihi.Simulator(model, dt=dt, target='simreal') as sim_real:
        sim_real.run(pres_time)
    real_out = sim_real.data[bp].mean(axis=0).reshape(output_shape.shape)

    with Simulator(model, dt=dt) as sim_loihi:
        if "loihi" in sim_loihi.sims:
            sim_loihi.sims["loihi"].snip_max_spikes_per_step = 800
        sim_loihi.run(pres_time)
    sim_out = sim_loihi.data[bp].mean(axis=0).reshape(output_shape.shape)

    if not output_shape.channels_last:
        ref_out = np.transpose(ref_out, (1, 2, 0))
        ndl_out = np.transpose(ndl_out, (1, 2, 0))
        real_out = np.transpose(real_out, (1, 2, 0))
        sim_out = np.transpose(sim_out, (1, 2, 0))

    out_max = max(ref_out.max(), sim_out.max())

    # --- plot results
    rows = 2
    cols = 3

    ax = plt.subplot(rows, cols, 1)
    imshow(test_x, vmin=0, vmax=1, ax=ax)

    ax = plt.subplot(rows, cols, 2)
    tile(np.transpose(filters[0], (2, 0, 1)), cols=8, ax=ax)

    ax = plt.subplot(rows, cols, 3)
    plt.hist(ref_out.ravel(), bins=31)
    plt.hist(sim_out.ravel(), bins=31)

    ax = plt.subplot(rows, cols, 4)
    tile(np.transpose(ref_out, (2, 0, 1)), vmin=0, vmax=out_max, cols=8, ax=ax)

    ax = plt.subplot(rows, cols, 5)
    tile(np.transpose(ndl_out, (2, 0, 1)), vmin=0, vmax=out_max, cols=8, ax=ax)

    ax = plt.subplot(rows, cols, 6)
    tile(np.transpose(sim_out, (2, 0, 1)), vmin=0, vmax=out_max, cols=8, ax=ax)

    if use_nengo_dl:
        assert allclose(ndl_out, ref_out, atol=1e-5, rtol=1e-5)
    assert allclose(real_out, ref_out, atol=1, rtol=1e-3)
    assert allclose(sim_out, ref_out, atol=10, rtol=1e-3)
Example #17
0
def test_conv_split(Simulator, rng, plt, allclose):
    channels_last = False

    # load data
    with open(os.path.join(test_dir, 'mnist10.pkl'), 'rb') as f:
        test10 = pickle.load(f)

    input_shape = nengo_transforms.ChannelShape((1, 28, 28),
                                                channels_last=channels_last)

    n_filters = 8
    kernel_size = (7, 7)
    kernel = Gabor(freq=Uniform(0.5, 1)).generate(n_filters,
                                                  kernel_size,
                                                  rng=rng)
    kernel = kernel[None, :, :, :]  # single channel
    kernel = np.transpose(kernel, (2, 3, 0, 1))
    strides = (2, 2)

    seed = 3  # fix seed to do the same computation for both channel positions

    with nengo.Network(seed=seed) as net:
        nengo_loihi.add_params(net)

        a = nengo.Node(test10[0][0].ravel())

        # --- make population to turn image into spikes
        nc = 1
        in_kernel = np.array([1.]).reshape((1, 1, 1, nc))
        transform = nengo_transforms.Convolution(1,
                                                 input_shape,
                                                 kernel_size=(1, 1),
                                                 init=in_kernel,
                                                 channels_last=channels_last)
        b = nengo.Ensemble(transform.output_shape.size,
                           1,
                           neuron_type=nengo.SpikingRectifiedLinear(),
                           max_rates=nengo.dists.Choice([50]),
                           intercepts=nengo.dists.Choice([0]))
        net.config[b].on_chip = False
        nengo.Connection(a, b.neurons, transform=transform)
        in_shape = transform.output_shape

        transform = nengo_transforms.Convolution(n_filters,
                                                 in_shape,
                                                 kernel_size=kernel_size,
                                                 strides=strides,
                                                 init=kernel,
                                                 channels_last=channels_last)
        out_shape = transform.output_shape
        split_slices = conv.split_channels(out_shape,
                                           max_size=1024,
                                           max_channels=4)

        # --- make convolution population, split across ensembles
        cc = []
        cp = []
        out_shapes = []
        xslice = conv.ImageSlice(in_shape)
        for yslice in split_slices:
            transform_xy = conv.split_transform(transform, xslice, yslice)
            out_shapes.append(transform_xy.output_shape)
            c = nengo.Ensemble(transform_xy.output_shape.size,
                               1,
                               neuron_type=nengo.LIF(),
                               max_rates=nengo.dists.Choice([15]),
                               intercepts=nengo.dists.Choice([0]))
            nengo.Connection(b.neurons, c.neurons, transform=transform_xy)
            cc.append(c)
            cp.append(nengo.Probe(c.neurons))

    simtime = 0.3

    with nengo.Simulator(net, optimize=False) as sim_nengo:
        sim_nengo.run(simtime)

    hw_opts = dict(snip_max_spikes_per_step=100)
    with Simulator(net, seed=seed, hardware_options=hw_opts) as sim_loihi:
        sim_loihi.run(simtime)

    nengo_out = []
    loihi_out = []
    for p, out_shape_i in zip(cp, out_shapes):
        nengo_out.append(
            (sim_nengo.data[p] > 0).sum(axis=0).reshape(out_shape_i.shape))
        loihi_out.append(
            (sim_loihi.data[p] > 0).sum(axis=0).reshape(out_shape_i.shape))

    if channels_last:
        nengo_out = np.concatenate(nengo_out, axis=2)
        loihi_out = np.concatenate(loihi_out, axis=2)

        # put channels first to display them separately
        nengo_out = np.transpose(nengo_out, (2, 0, 1))
        loihi_out = np.transpose(loihi_out, (2, 0, 1))
    else:
        nengo_out = np.concatenate(nengo_out, axis=0)
        loihi_out = np.concatenate(loihi_out, axis=0)

    out_max = np.maximum(nengo_out.max(), loihi_out.max())

    # --- plot results
    rows = 2
    cols = 3

    ax = plt.subplot(rows, cols, 1)
    imshow(test10[0][0].reshape((28, 28)), vmin=0, vmax=1, ax=ax)

    ax = plt.subplot(rows, cols, 2)
    tile(np.transpose(kernel[0], (2, 0, 1)), cols=8, ax=ax)

    ax = plt.subplot(rows, cols, 3)
    plt.hist(nengo_out.ravel(), bins=31)
    plt.hist(loihi_out.ravel(), bins=31)

    ax = plt.subplot(rows, cols, 4)
    tile(nengo_out, vmin=0, vmax=out_max, cols=8, ax=ax)

    ax = plt.subplot(rows, cols, 6)
    tile(loihi_out, vmin=0, vmax=out_max, cols=8, ax=ax)

    assert allclose(loihi_out, nengo_out, atol=0.05 * out_max, rtol=0.15)
Example #18
0
def test_conv_connection(channels, channels_last, Simulator, seed, rng, plt,
                         allclose):
    # load data
    with open(os.path.join(test_dir, "mnist10.pkl"), "rb") as f:
        test10 = pickle.load(f)

    test_x = test10[0][0].reshape((28, 28))
    test_x = 1.999 * test_x - 0.999  # range (-1, 1)
    input_shape = make_channel_shape(test_x.shape, channels, channels_last)

    filters = Gabor(freq=Uniform(0.5, 1)).generate(8, (7, 7), rng=rng)
    filters = filters[None, :, :, :]  # single channel
    filters = np.transpose(filters, (2, 3, 0, 1))
    strides = (2, 2)
    tau_rc = 0.02
    tau_ref = 0.002
    tau_s = 0.005

    neuron_type = LoihiLIF(tau_rc=tau_rc, tau_ref=tau_ref)

    pres_time = 0.1

    with nengo.Network(seed=seed) as model:
        nengo_loihi.add_params(model)

        u = nengo.Node(test_x.ravel(), label="u")

        a = nengo.Ensemble(
            input_shape.size,
            1,
            neuron_type=LoihiSpikingRectifiedLinear(),
            max_rates=nengo.dists.Choice([40 / channels]),
            intercepts=nengo.dists.Choice([0]),
            label="a",
        )
        model.config[a].on_chip = False

        if channels == 1:
            nengo.Connection(u, a.neurons, transform=1, synapse=None)
        elif channels == 2:
            # encode image into spikes using two channels (on/off)
            if input_shape.channels_last:
                nengo.Connection(u, a.neurons[0::2], transform=1, synapse=None)
                nengo.Connection(u,
                                 a.neurons[1::2],
                                 transform=-1,
                                 synapse=None)
            else:
                k = input_shape.spatial_shape[0] * input_shape.spatial_shape[1]
                nengo.Connection(u, a.neurons[:k], transform=1, synapse=None)
                nengo.Connection(u, a.neurons[k:], transform=-1, synapse=None)

            filters = np.concatenate([filters, -filters], axis=2)
        else:
            raise ValueError("Test not configured for more than two channels")

        conv2d_transform = nengo_transforms.Convolution(
            8,
            input_shape,
            strides=strides,
            kernel_size=(7, 7),
            channels_last=channels_last,
            init=filters,
        )

        output_shape = conv2d_transform.output_shape

        gain, bias = neuron_type.gain_bias(max_rates=100, intercepts=0)
        gain = gain * 0.01  # account for `a` max_rates
        b = nengo.Ensemble(
            output_shape.size,
            1,
            neuron_type=neuron_type,
            gain=nengo.dists.Choice([gain[0]]),
            bias=nengo.dists.Choice([bias[0]]),
            label="b",
        )
        nengo.Connection(a.neurons,
                         b.neurons,
                         synapse=tau_s,
                         transform=conv2d_transform)

        bp = nengo.Probe(b.neurons)

    with nengo.Simulator(model, optimize=False) as sim_nengo:
        sim_nengo.run(pres_time)
    ref_out = sim_nengo.data[bp].mean(axis=0).reshape(output_shape.shape)

    with Simulator(model, target="simreal") as sim_emu:
        sim_emu.run(pres_time)
    emu_out = sim_emu.data[bp].mean(axis=0).reshape(output_shape.shape)

    with Simulator(model, hardware_options={"snip_max_spikes_per_step":
                                            800}) as sim_loihi:
        sim_loihi.run(pres_time)
    sim_out = sim_loihi.data[bp].mean(axis=0).reshape(output_shape.shape)

    if not output_shape.channels_last:
        ref_out = np.transpose(ref_out, (1, 2, 0))
        emu_out = np.transpose(emu_out, (1, 2, 0))
        sim_out = np.transpose(sim_out, (1, 2, 0))

    out_max = max(ref_out.max(), emu_out.max(), sim_out.max())

    # --- plot results
    rows = 2
    cols = 3

    ax = plt.subplot(rows, cols, 1)
    imshow(test_x, vmin=0, vmax=1, ax=ax)

    ax = plt.subplot(rows, cols, 2)
    tile(np.transpose(filters[0], (2, 0, 1)), cols=8, ax=ax)

    ax = plt.subplot(rows, cols, 3)
    plt.hist(ref_out.ravel(), bins=31)
    plt.hist(sim_out.ravel(), bins=31)

    ax = plt.subplot(rows, cols, 4)
    tile(np.transpose(ref_out, (2, 0, 1)), vmin=0, vmax=out_max, cols=8, ax=ax)

    ax = plt.subplot(rows, cols, 5)
    tile(np.transpose(emu_out, (2, 0, 1)), vmin=0, vmax=out_max, cols=8, ax=ax)

    ax = plt.subplot(rows, cols, 6)
    tile(np.transpose(sim_out, (2, 0, 1)), vmin=0, vmax=out_max, cols=8, ax=ax)

    assert allclose(emu_out, ref_out, atol=10, rtol=1e-3)
    assert allclose(sim_out, ref_out, atol=10, rtol=1e-3)
Example #19
0
def test_conv_deepnet(
    channels_last,
    pop_type,
    precompute,
    Simulator,
    request,
    rng,
    seed,
    plt,
    allclose,
):
    """Run a convolutional network with two layers on the chip.

    Checks that a network with block splitting on the target matches one
    without it on the emulator.
    """
    # TODO: This case fails in NxSDK 0.9.0 but will be fixed in the next version.
    # Remove this check once the next version is released.
    if pop_type == 32:
        pytest.skip("Pop32 multichip test requires latest NxSDK")

    def set_partition(partition):
        os.environ["PARTITION"] = partition

    request.addfinalizer(lambda: set_partition(""))
    # multichip pop_type = 16 works only on nahuku32 board currently
    if pop_type == 16:
        set_partition("nahuku32")

    def conv_layer(x,
                   input_shape,
                   array_init=None,
                   label=None,
                   conn_args=None,
                   **conv_args):
        conn_args = {} if conn_args is None else conn_args

        if array_init is not None:
            assert all(a not in conv_args
                       for a in ("init", "kernel_size", "n_filters"))
            assert array_init.ndim == 4
            conv_args["init"] = array_init
            conv_args["kernel_size"] = array_init.shape[:2]
            assert array_init.shape[2] == input_shape.n_channels
            conv_args["n_filters"] = array_init.shape[3]

        conv = nengo.Convolution(input_shape=input_shape, **conv_args)

        # add an ensemble to implement the activation function
        layer = nengo.Ensemble(conv.output_shape.size, 1, label=label)

        # connect up the input object to the new layer
        conn = nengo.Connection(x, layer.neurons, transform=conv)

        return layer, conv, conn

    channels = 1
    n_filters0 = 1
    n_filters1 = 4
    n_filters2 = 4

    # load data
    with open(os.path.join(test_dir, "mnist10.pkl"), "rb") as f:
        test10 = pickle.load(f)

    test_x = test10[0][0].reshape(28, 28)  # range (0, 1)
    input_shape = make_channel_shape(test_x.shape, channels, channels_last)

    filters0 = np.ones((1, 1, channels, n_filters0))

    # use Gabor filters for first layer
    filters1 = Gabor(freq=Uniform(0.5, 1),
                     sigma_x=Choice([0.9]),
                     sigma_y=Choice([0.9])).generate(n_filters1, (7, 7),
                                                     rng=rng)
    assert n_filters0 == 1
    filters1 = filters1[None, :, :, :]  # single channel
    filters1 = np.transpose(filters1,
                            (2, 3, 0, 1))  # rows, cols, in_chan, out_chan

    # use random combinations of first-layer channels in 1x1 convolution
    filters2 = rng.uniform(-0.2, 1,
                           size=(n_filters1, n_filters2)).clip(0, None)
    filters2 *= 2 / filters2.sum(axis=0,
                                 keepdims=True)  # each filter sums to 2
    filters2 = filters2[None, None, :, :]  # rows, cols, in_chan, out_chan

    tau_s = 0.001
    max_rate = 100
    amp = 1 / max_rate
    f_split = 2 if pop_type == 32 else 4

    # use Loihi neuron type so Nengo sim mimics Loihi neuron effects
    neuron_type = LoihiSpikingRectifiedLinear(amplitude=amp)

    pres_time = 0.2

    with nengo.Network(seed=seed) as net:
        nengo_loihi.add_params(net)

        net.config[nengo.Ensemble].neuron_type = neuron_type
        net.config[nengo.Ensemble].max_rates = Choice([max_rate])
        net.config[nengo.Ensemble].intercepts = Choice([0])
        net.config[nengo.Connection].synapse = tau_s

        u = nengo.Node(test_x.ravel(), label="u")

        layer0, conv0, conn0 = conv_layer(
            u,
            input_shape=input_shape,
            array_init=filters0,
            strides=(1, 1),
            channels_last=channels_last,
            label="layer0",
            conn_args=dict(synapse=None),
        )
        net.config[layer0].on_chip = False

        layer1, conv1, conn1 = conv_layer(
            layer0.neurons,
            input_shape=conv0.output_shape,
            array_init=filters1,
            strides=(2, 2),
            channels_last=channels_last,
            label="layer1",
        )
        net.config[layer1].block_shape = nengo_loihi.BlockShape(
            make_shape((4, 4), f_split, channels_last), conv1)
        net.config[conn1].pop_type = pop_type

        layer2, conv2, conn2 = conv_layer(
            layer1.neurons,
            input_shape=conv1.output_shape,
            array_init=filters2,
            strides=(1, 1),
            channels_last=channels_last,
            label="layer2",
        )
        net.config[layer2].block_shape = nengo_loihi.BlockShape(
            make_shape((4, 4), f_split, channels_last), conv2)
        net.config[conn2].pop_type = pop_type

        output_p = nengo.Probe(layer2.neurons)
        output_shape = conv2.output_shape

    with nengo.Simulator(net, optimize=False) as sim_nengo:
        sim_nengo.run(pres_time)
        ref_out = (sim_nengo.data[output_p] > 0).sum(axis=0).reshape(
            output_shape.shape)

    with Simulator(net, target="sim") as sim_emu:
        sim_emu.run(pres_time)
        emu_out = (sim_emu.data[output_p] > 0).sum(axis=0).reshape(
            output_shape.shape)

    # TODO: Remove the if condition when configurable timeout parameter
    # is available in nxsdk
    if (pop_type == 32 or
            os.popen("sinfo -h --partition=nahuku32").read().find("idle") > 0):
        with Simulator(
                net,
                precompute=precompute,
                hardware_options={
                    "allocator": RoundRobin(),
                    "snip_max_spikes_per_step": 800,
                },
        ) as sim_loihi:
            sim_loihi.run(pres_time)
            sim_out = ((sim_loihi.data[output_p] > 0).sum(axis=0).reshape(
                output_shape.shape))
    elif nengo_loihi.version.dev is None:
        pytest.fail(
            "Pop16 multichip test failed since Nahuku32 is unavailable")
    else:
        pytest.skip(
            "Pop16 multichip test skipped since Nahuku32 is unavailable")

    out_max = ref_out.max()
    ref_out = ref_out / out_max
    emu_out = emu_out / out_max
    sim_out = sim_out / out_max

    if channels_last:
        # channels first, to display channels in separate plots
        ref_out = np.transpose(ref_out, (2, 0, 1))
        emu_out = np.transpose(emu_out, (2, 0, 1))
        sim_out = np.transpose(sim_out, (2, 0, 1))

    # --- plot results
    rows = 2
    cols = 3

    ax = plt.subplot(rows, cols, 1)
    imshow(test_x, vmin=0, vmax=1, ax=ax)

    ax = plt.subplot(rows, cols, 2)
    tile(np.transpose(filters1, (2, 3, 0, 1))[0],
         rows=2,
         cols=2,
         grid=True,
         ax=ax)

    ax = plt.subplot(rows, cols, 3)
    plt.hist((ref_out.ravel(), emu_out.ravel(), sim_out.ravel()), bins=21)

    ax = plt.subplot(rows, cols, 4)
    tile(ref_out, rows=2, cols=2, grid=True, ax=ax)

    ax = plt.subplot(rows, cols, 5)
    tile(emu_out, rows=2, cols=2, grid=True, ax=ax)

    ax = plt.subplot(rows, cols, 6)
    tile(sim_out, rows=2, cols=2, grid=True, ax=ax)

    assert allclose(sim_out, ref_out, atol=0.15, rtol=1e-3)
    assert allclose(sim_out, emu_out, atol=1e-3, rtol=1e-3)
Example #20
0
    im_size = im_size_new

# Generate the MNIST training and test data
train_targets = one_hot(y_train, 10)
test_targets = one_hot(y_test, 10)

# Set up the vision network parameters
n_vis = x_train.shape[1]  # Number of input dimensions (pixels per image)
n_out = train_targets.shape[1]  # Number of output classes
n_hid = 16000 // (im_size**2)  # Number of neurons to use
# Note: the number of neurons to use is limited such that NxD <= 16000,
#       where D = im_size * im_size, and N is the number of neurons to use
gabor_size = (int(im_size / 2.5), int(im_size / 2.5))  # Size of the Gabor filter

# Generate the encoders for the neural ensemble
encoders = Gabor().generate(n_hid, gabor_size, rng=rng)
encoders = Mask((im_size, im_size)).populate(encoders, rng=rng, flatten=True)

# Ensemble parameters
max_firing_rates = 100
ens_neuron_type = nengo.neurons.RectifiedLinear()
ens_intercepts = nengo.dists.Choice([-0.5])
ens_max_rates = nengo.dists.Choice([max_firing_rates])

# Output connection parameters
conn_synapse = None
conn_eval_points = x_train
conn_function = train_targets
conn_solver = nengo.solvers.LstsqL2(reg=0.01)

# Visual input process parameters
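Example #20 is cut off before the network itself is assembled. A plausible continuation using the parameters defined above (a sketch; x_test and the presentation time are assumptions):

presentation_time = 0.1  # assumed value

with nengo.Network(seed=1) as net:
    # present each test image for a fixed interval
    input_node = nengo.Node(
        nengo.processes.PresentInput(x_test, presentation_time))

    # hidden ensemble using the Gabor encoders generated above
    ens = nengo.Ensemble(n_hid, n_vis,
                         encoders=encoders,
                         neuron_type=ens_neuron_type,
                         intercepts=ens_intercepts,
                         max_rates=ens_max_rates)

    # decoded output trained on the one-hot targets
    output_node = nengo.Node(size_in=n_out)
    nengo.Connection(input_node, ens, synapse=None)
    nengo.Connection(ens, output_node,
                     synapse=conn_synapse,
                     eval_points=conn_eval_points,
                     function=conn_function,
                     solver=conn_solver)

    probe = nengo.Probe(output_node, synapse=0.02)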
Example #21
0
def generate_gabors(load_gabors_svd=False, uncued=False, Ns=None, D=None):

    # global e_cued
    # global U_cued
    # global compressed_im_cued

    # global e_uncued
    # global U_uncued
    # global compressed_im_uncued

    #to speed things up, load previously generated ones
    if load_gabors_svd and os.path.isfile('Stimuli/gabors_svd_cued.npz'):
        #load stims if previously generated
        gabors_svd_cued = np.load('Stimuli/gabors_svd_cued.npz')
        e_cued = gabors_svd_cued['e_cued']
        U_cued = gabors_svd_cued['U_cued']
        compressed_im_cued = gabors_svd_cued['compressed_im_cued']
        print("SVD cued loaded")
        if not uncued:
            return e_cued, U_cued, compressed_im_cued

    else:  #or generate and save

        #cued module
        #for each neuron in the sensory layer, generate a Gabor of 1/3 of the image size
        # D: Each time the gabors are generated some of their properties are randomly sampled
        gabors_cued = Gabor().generate(
            Ns, (int(col / 3), int(row / 3)))  # DANIEL: Added casting to int
        #put gabors on image and make them the same shape as the stimuli
        gabors_cued = Mask((col, row)).populate(gabors_cued,
                                                flatten=True).reshape(Ns, -1)
        #normalize by the maximum absolute value
        gabors_cued = gabors_cued / np.abs(gabors_cued).max()
        #gabors are added to imagearr for SVD
        x_cued = np.vstack((imagearr, gabors_cued))

        #SVD
        print("SVD cued started...")
        U_cued, S_cued, V_cued = np.linalg.svd(x_cued.T)
        print("SVD cued done")

        #Use result of SVD to create encoders
        e_cued = np.dot(gabors_cued, U_cued[:, :D])  #encoders
        compressed_im_cued = np.dot(
            imagearr[:1800, :] / 100,
            U_cued[:, :D])  #D-dimensional vector reps of the images
        compressed_im_cued = np.vstack(
            (compressed_im_cued, np.dot(imagearr[-1, :] / 50, U_cued[:, :D])))

        np.savez('Stimuli/gabors_svd_cued.npz',
                 e_cued=e_cued,
                 U_cued=U_cued,
                 compressed_im_cued=compressed_im_cued)
        if not uncued:
            return e_cued, U_cued, compressed_im_cued

    #same for uncued module
    if uncued:

        if load_gabors_svd and os.path.isfile('Stimuli/gabors_svd_uncued.npz'):
            #load stims if previously generated
            gabors_svd_uncued = np.load('Stimuli/gabors_svd_uncued.npz')
            e_uncued = gabors_svd_uncued['e_uncued']
            U_uncued = gabors_svd_uncued['U_uncued']
            compressed_im_uncued = gabors_svd_uncued['compressed_im_uncued']
            print("SVD uncued loaded")
            return (e_cued, U_cued, compressed_im_cued, e_uncued, U_uncued,
                    compressed_im_uncued)
        else:
            gabors_uncued = Gabor().generate(Ns, (int(col / 3), int(
                row / 3)))  #.reshape(N, -1) # DANIEL: Added casting to ints
            gabors_uncued = Mask(
                (col, row)).populate(gabors_uncued,
                                     flatten=True).reshape(Ns, -1)
            gabors_uncued = gabors_uncued / np.abs(gabors_uncued).max()
            x_uncued = np.vstack((imagearr, gabors_uncued))

            print("SVD uncued started...")
            U_uncued, S_uncued, V_uncued = np.linalg.svd(x_uncued.T)
            print("SVD uncued done")
            e_uncued = np.dot(
                gabors_uncued, U_uncued[:, :D]
            )  # indexing up to D limits the representation to D dimensions; the model uses the same convention later
            compressed_im_uncued = np.dot(imagearr[:1800, :] / 100,
                                          U_uncued[:, :D])
            compressed_im_uncued = np.vstack((compressed_im_uncued,
                                              np.dot(imagearr[-1, :] / 50,
                                                     U_uncued[:, :D])))

            np.savez('Stimuli/gabors_svd_uncued.npz',
                     e_uncued=e_uncued,
                     U_uncued=U_uncued,
                     compressed_im_uncued=compressed_im_uncued)
            return (e_cued, U_cued, compressed_im_cued, e_uncued, U_uncued,
                    compressed_im_uncued)
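The encoders and compressed image representations returned by generate_gabors are meant to drive a D-dimensional sensory ensemble. A hedged usage sketch (the neuron count and dimensionality below are placeholders):

Ns, D = 1000, 24  # placeholder sizes
e_cued, U_cued, compressed_im_cued = generate_gabors(load_gabors_svd=True,
                                                     Ns=Ns, D=D)

with nengo.Network() as net:
    # sensory ensemble represents images in the D-dimensional SVD space,
    # using the Gabor-derived encoders computed above
    sensory = nengo.Ensemble(Ns, D, encoders=e_cued)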
Example #22
0
def initialize_vocabs():
    #global encoders
    global train_targets
    global vocab_concepts
    global vocab_goal
    global vocab_motor
    global vision_mapping
    global vocab_items
    global vocab_fingers
    global motor_mapping

    if extended_visual:
        #low level vision
        vocab_vision = nengo.spa.Vocabulary(Dmid, max_similarity=.5)
        for name in y_train_words:
            vocab_vision.parse(name)

        train_targets = vocab_vision.vectors

    #word concepts - should have all concepts, including new foils
    vocab_concepts = spa.Vocabulary(D, max_similarity=0.2)
    if extended_visual:
        for i in y_train_words:
            vocab_concepts.parse(i)
    else:
        for i in items:
            if i not in vocab_concepts.keys:
                vocab_concepts.parse(i)

    #vision-concept mapping
    if extended_visual:
        vision_mapping = np.zeros((D, Dmid))
        for word in y_train_words:
            vision_mapping += np.outer(
                vocab_vision.parse(word).v,
                vocab_concepts.parse(word).v).T

    #experimental items
    vocab_items = spa.Vocabulary(D, max_similarity=.2)
    for item1, item2 in pairs:
        vocab_items.parse(item1)
        vocab_items.parse(item2)

    print(vocab_concepts.keys)

    #experimental pairs
    vocab_pairs = spa.Vocabulary(D, max_similarity=.2)
    list_of_pairs = []
    for item1, item2 in pairs:
        vocab_pairs.parse('%s*ITEM1 + %s*ITEM2' % (item1, item2))
        vocab_pairs.add(
            '%s_%s' % (item1, item2),
            vocab_pairs.parse('%s*ITEM1 + %s*ITEM2' % (item1, item2)))
        vocab_concepts.add(
            '%s_%s' % (item1, item2),
            vocab_concepts.parse('%s*ITEM1 + %s*ITEM2' % (item1, item2)))
        list_of_pairs.append('%s_%s' % (item1, item2))

    #motor vocab, just for sim calcs
    vocab_motor = spa.Vocabulary(
        Dmid)  #different dimension to be sure, upper motor hierarchy
    vocab_motor.parse('LEFT+RIGHT+INDEX+MIDDLE')

    vocab_fingers = spa.Vocabulary(Dlow)  #direct finger activation
    vocab_fingers.parse('L1+L2+R1+R2')

    #map higher and lower motor
    motor_mapping = np.zeros((Dlow, Dmid))
    motor_mapping += np.outer(
        vocab_motor.parse('LEFT+INDEX').v,
        vocab_fingers.parse('L1').v).T
    motor_mapping += np.outer(
        vocab_motor.parse('LEFT+MIDDLE').v,
        vocab_fingers.parse('L2').v).T
    motor_mapping += np.outer(
        vocab_motor.parse('RIGHT+INDEX').v,
        vocab_fingers.parse('R1').v).T
    motor_mapping += np.outer(
        vocab_motor.parse('RIGHT+MIDDLE').v,
        vocab_fingers.parse('R2').v).T
    #mapping *= 0.5

    #goal vocab
    vocab_goal = spa.Vocabulary(Dlow)
    vocab_goal.parse('DO_TASK')
    vocab_goal.parse('RECOG')
    vocab_goal.parse('RESPOND')
    vocab_goal.parse('END')

    #attend vocab
    vocab_attend = spa.Vocabulary(D, max_similarity=.2)
    vocab_attend.parse('ITEM1')
    vocab_attend.parse('ITEM2')

    # --- set up network parameters
    if extended_visual:
        global n_vis
        global n_out
        global n_hid
        n_vis = X_train.shape[1]  #nr of pixels, dimensions of network
        n_out = train_targets.shape[1]  #nr of items
        n_hid = 1000  # nr of gabor encoders/neurons, one neuron per encoder

    if extended_visual:
        # random state to start
        rng = np.random.RandomState(9)
        global encoders
        encoders = Gabor().generate(
            n_hid, (11, 11),
            rng=rng)  # gabor encoders; 11x11 reportedly works better
        encoders = Mask((14, 90)).populate(
            encoders, rng=rng, flatten=True
        )  # place them on part of the 14x90 input image
Example #23
0
    def evaluate(self, p, plt):
        files = []
        sets = []
        for f in os.listdir(p.dataset_dir):
            if f.endswith('events'):
                files.append(os.path.join(p.dataset_dir, f))

        if p.test_set == 'one':
            test_file = random.sample(files, 1)[0]
            files.remove(test_file)

        if p.n_data != -1:
            files = random.sample(files, p.n_data)

        inputs = []
        targets = []
        for f in files:
            print(f)
            times, imgs, targs = davis_track.load_data(
                f,
                dt=p.dt,
                decay_time=p.decay_time,
                separate_channels=p.separate_channels,
                saturation=p.saturation,
                merge=p.merge)
            inputs.append(imgs)
            targets.append(targs[:, :2])

        inputs_all = np.vstack(inputs)
        targets_all = np.vstack(targets)

        if p.test_set == 'odd':
            inputs_train = inputs_all[::2]
            inputs_test = inputs_all[1::2]
            targets_train = targets_all[::2]
            targets_test = targets_all[1::2]
        elif p.test_set == 'one':
            times, imgs, targs = davis_track.load_data(
                test_file,
                dt=p.dt_test,
                decay_time=p.decay_time,
                separate_channels=p.separate_channels,
                saturation=p.saturation)
            inputs_test = imgs
            targets_test = targs[:, :2]
            inputs_train = inputs_all
            targets_train = targets_all

        if p.augment:
            inputs_train, targets_train = davis_track.augment(
                inputs_train,
                targets_train,
                separate_channels=p.separate_channels)

        if p.separate_channels:
            shape = (360 // p.merge, 240 // p.merge)
        else:
            shape = (180 // p.merge, 240 // p.merge)

        dimensions = shape[0] * shape[1]
        eval_points_train = inputs_train.reshape(-1, dimensions)
        eval_points_test = inputs_test.reshape(-1, dimensions)

        model = nengo.Network()
        with model:
            from nengo_extras.vision import Gabor, Mask
            encoders = Gabor().generate(p.n_neurons,
                                        (p.gabor_size, p.gabor_size))
            encoders = Mask(shape).populate(encoders, flatten=True)

            ens = nengo.Ensemble(
                n_neurons=p.n_neurons,
                dimensions=dimensions,
                encoders=encoders,
                neuron_type=nengo.RectifiedLinear(),
                intercepts=nengo.dists.CosineSimilarity(p.gabor_size**2 + 2))

            result = nengo.Node(None, size_in=targets_all.shape[1])

            c = nengo.Connection(
                ens,
                result,
                eval_points=eval_points_train,
                function=targets_train,
                solver=nengo.solvers.LstsqL2(reg=p.reg),
            )
        sim = nengo.Simulator(model)

        error_train = sim.data[c].solver_info['rmses']

        _, a_train = nengo.utils.ensemble.tuning_curves(
            ens, sim, inputs=eval_points_train)
        outputs_train = np.dot(a_train, sim.data[c].weights.T)
        rmse_train = np.sqrt(
            np.mean((targets_train - outputs_train)**2, axis=0))
        _, a_test = nengo.utils.ensemble.tuning_curves(ens,
                                                       sim,
                                                       inputs=eval_points_test)
        outputs_test = np.dot(a_test, sim.data[c].weights.T)
        filt = nengo.synapses.Lowpass(p.output_filter)
        outputs_test = filt.filt(outputs_test, dt=p.dt_test)
        targets_test = filt.filt(targets_test, dt=p.dt_test)
        rmse_test = np.sqrt(np.mean(
            (targets_test - outputs_test)**2, axis=0)) * p.merge

        if plt:
            plt.subplot(2, 1, 1)
            plt.plot(targets_train, ls='--')
            plt.plot(outputs_train)
            plt.title('train\nrmse=%1.4f,%1.4f' % tuple(rmse_train))

            plt.subplot(2, 1, 2)
            plt.plot(targets_test, ls='--')
            plt.plot(outputs_test)
            plt.title('test\nrmse=%1.4f,%1.4f' % tuple(rmse_test))

        return dict(
            rmse_train=rmse_train,
            rmse_test=rmse_test,
        )
Example #24
0
def create_model():

    #print trial_info
    print('---- INITIALIZING MODEL ----')
    global model

    model = spa.SPA()
    with model:

        #display current stimulus pair (not part of model)
        if nengo_gui_on:
            model.pair_input = nengo.Node(present_pair)
            model.pair_display = nengo.Node(
                display_func,
                size_in=model.pair_input.size_out)  # to show input
            nengo.Connection(model.pair_input,
                             model.pair_display,
                             synapse=None)

        # control
        model.control_net = nengo.Network()
        with model.control_net:
            #assuming the model knows which hand to use (which was blocked)
            model.hand_input = nengo.Node(get_hand)
            model.target_hand = spa.State(Dmid, vocab=vocab_motor, feedback=1)
            nengo.Connection(model.hand_input,
                             model.target_hand.input,
                             synapse=None)

            model.attend = spa.State(D, vocab=vocab_attend,
                                     feedback=.5)  # vocab_attend
            model.goal = spa.State(Dlow, vocab=vocab_goal,
                                   feedback=.7)  # current goal

        ### vision ###

        # set up network parameters
        n_vis = X_train.shape[1]  # nr of pixels, dimensions of network
        n_hid = 1000  # nr of gabor encoders/neurons

        # random state to start
        rng = np.random.RandomState(9)
        encoders = Gabor().generate(
            n_hid, (4, 4), rng=rng)  # Gabor encoders (4x4 here; 11x11 elsewhere)
        encoders = Mask(
            (14, 90)).populate(encoders, rng=rng,
                               flatten=True)  # use them on part of the image

        model.visual_net = nengo.Network()
        with model.visual_net:

            #represent currently attended item
            model.attended_item = nengo.Node(present_item2, size_in=D)
            nengo.Connection(model.attend.output, model.attended_item)

            model.vision_gabor = nengo.Ensemble(
                n_hid,
                n_vis,
                eval_points=X_train,
                neuron_type=nengo.LIF(),
                intercepts=nengo.dists.Uniform(-0.1, 0.1),
                #intercepts=nengo.dists.Choice([-0.5]), #should we comment this out? not sure what's happening
                #max_rates=nengo.dists.Choice([100]),
                encoders=encoders)
            #recurrent connection (time constant 500 ms)
            # strength = 1 - (100/500) = .8

            zeros = np.zeros_like(X_train)
            nengo.Connection(
                model.vision_gabor,
                model.vision_gabor,
                synapse=0.005,  #.1
                eval_points=np.vstack(
                    [X_train, zeros,
                     np.random.randn(*X_train.shape)]),
                transform=.5)

            model.visual_representation = nengo.Ensemble(n_hid,
                                                         dimensions=Dmid)

            model.visconn = nengo.Connection(
                model.vision_gabor,
                model.visual_representation,
                synapse=0.005,  #was .005
                eval_points=X_train,
                function=train_targets,
                solver=nengo.solvers.LstsqL2(reg=0.01))
            nengo.Connection(model.attended_item,
                             model.vision_gabor,
                             synapse=.02)  # was .03; synapse value uncertain

            # display attended item, only in gui
            if nengo_gui_on:
                # show what's being looked at
                model.display_attended = nengo.Node(
                    display_func,
                    size_in=model.attended_item.size_out)  # to show input
                nengo.Connection(model.attended_item,
                                 model.display_attended,
                                 synapse=None)
                #add node to plot total visual activity
                model.visual_activation = nengo.Node(None, size_in=1)
                nengo.Connection(model.vision_gabor.neurons,
                                 model.visual_activation,
                                 transform=np.ones((1, n_hid)),
                                 synapse=None)

        ### central cognition ###

        # concepts
        model.concepts = spa.AssociativeMemory(
            vocab_all_words,  #vocab_concepts,
            wta_output=True,
            wta_inhibit_scale=1,  #was 1
            #default_output_key='NONE', #what to say if input doesn't match
            threshold=0.3
        )  # how strong the input must be before it is recognized
        nengo.Connection(
            model.visual_representation,
            model.concepts.input,
            transform=.8 * vision_mapping
        )  #not too fast to concepts, might have to be increased to have model react faster to first word.

        #concepts accumulator
        model.concepts_evidence = spa.State(
            1, feedback=1, feedback_synapse=0.005
        )  #the lower the synapse, the faster it accumulates (was .1)
        concepts_evidence_scale = 2.5
        nengo.Connection(model.concepts.am.elem_output,
                         model.concepts_evidence.input,
                         transform=concepts_evidence_scale * np.ones(
                             (1, model.concepts.am.elem_output.size_out)),
                         synapse=0.005)

        #concepts switch
        model.do_concepts = spa.AssociativeMemory(vocab_reset,
                                                  default_output_key='CLEAR',
                                                  threshold=.2)
        nengo.Connection(
            model.do_concepts.am.ensembles[-1],
            model.concepts_evidence.all_ensembles[0].neurons,
            transform=np.ones(
                (model.concepts_evidence.all_ensembles[0].n_neurons, 1)) * -10,
            synapse=0.005)

        # pair representation
        model.vis_pair = spa.State(
            D, vocab=vocab_all_words, feedback=1.0, feedback_synapse=.05
        )  #was 2, 1.6 works ok, but everything gets activated.

        #learned words
        model.dm_learned_words = spa.AssociativeMemory(
            vocab_learned_words, threshold=.2
        )  #default_output_key='NONE' familiarity should be continuous over all items, so no wta
        nengo.Connection(model.dm_learned_words.output,
                         model.dm_learned_words.input,
                         transform=.4,
                         synapse=.02)

        # this stores the accumulated evidence for or against familiarity
        model.familiarity = spa.State(
            1, feedback=.9,
            feedback_synapse=0.1)  #fb syn influences speed of acc
        familiarity_scale = 0.2  #keep stable for negative fam
        #nengo.Connection(model.dm_learned_words.am.ensembles[-1], model.familiarity.input, transform=-(familiarity_scale+0.8)) #accumulate to -1
        nengo.Connection(
            model.dm_learned_words.am.elem_output,
            model.familiarity.input,  #am.element_output == all outputs, we sum
            transform=(familiarity_scale + .1) * np.ones(
                (1, model.dm_learned_words.am.elem_output.size_out))
        )  #accumulate to 1

        model.do_fam = spa.AssociativeMemory(vocab_reset,
                                             default_output_key='CLEAR',
                                             threshold=.2)
        nengo.Connection(
            model.do_fam.am.ensembles[-1],
            model.familiarity.all_ensembles[0].neurons,
            transform=np.ones(
                (model.familiarity.all_ensembles[0].n_neurons, 1)) * -10,
            synapse=0.005)
        #negative accumulator
        nengo.Connection(
            model.do_fam.am.elem_output,
            model.familiarity.input,
            transform=-familiarity_scale * np.ones(
                (1, model.do_fam.am.elem_output.size_out)))  #accumulate to -1

        #fam model.dm_pairs = spa.AssociativeMemory(vocab_learned_pairs, input_keys=list_of_pairs,wta_output=True)
        #fam nengo.Connection(model.dm_pairs.output,model.dm_pairs.input,transform=.5)

        #this works:
        #fam model.representation = spa.AssociativeMemory(vocab_learned_pairs, input_keys=list_of_pairs, wta_output=True)
        #fam nengo.Connection(model.representation.output, model.representation.input, transform=2)
        #fam model.rep_filled = spa.State(1,feedback_synapse=.005) #no fb syn specified
        #fam nengo.Connection(model.representation.am.elem_output,model.rep_filled.input, #am.element_output == all outputs, we sum
        #fam                  transform=.8*np.ones((1,model.representation.am.elem_output.size_out)),synapse=0)

        #this doesn't:
        #model.representation = spa.State(D,feedback=1)
        #model.rep_filled = spa.State(1,feedback_synapse=.005) #no fb syn specified
        #nengo.Connection(model.representation.output,model.rep_filled.input, #am.element_output == all outputs, we sum
        #                 transform=.8*np.ones((1,model.representation.output.size_out)),synapse=0)

        # this shouldn't really be fixed I think
        #fam model.comparison = spa.Compare(D, vocab=vocab_concepts)

        #motor
        model.motor_net = nengo.Network()
        with model.motor_net:

            #input multiplier
            model.motor_input = spa.State(Dmid, vocab=vocab_motor)

            #higher motor area (SMA?)
            model.motor = spa.State(Dmid, vocab=vocab_motor, feedback=.7)

            #connect input multiplier with higher motor area
            nengo.Connection(model.motor_input.output,
                             model.motor.input,
                             synapse=.1,
                             transform=2)

            #finger area
            model.fingers = spa.AssociativeMemory(
                vocab_fingers,
                input_keys=['L1', 'L2', 'R1', 'R2'],
                wta_output=True)
            nengo.Connection(model.fingers.output,
                             model.fingers.input,
                             synapse=0.1,
                             transform=0.3)  #feedback

            #connection from higher-order area (hand, finger) to lower area
            nengo.Connection(model.motor.output,
                             model.fingers.input,
                             transform=.25 * motor_mapping)  #was .2

            #finger position (spinal?)
            model.finger_pos = nengo.networks.EnsembleArray(n_neurons=50,
                                                            n_ensembles=4)
            nengo.Connection(model.finger_pos.output,
                             model.finger_pos.input,
                             synapse=0.1,
                             transform=0.8)  #feedback

            #connection between finger area and finger position
            nengo.Connection(model.fingers.am.elem_output,
                             model.finger_pos.input,
                             transform=1.0 *
                             np.diag([0.55, .54, .56, .55]))  # hand-tuned per-finger gains (TODO: calibrate)
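            # the diagonal transform gives each finger its own integrator in
            # the EnsembleArray; the slightly different gains look hand-tuned,
            # presumably to equalize the fingers' effective thresholds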

        model.bg = spa.BasalGanglia(
            spa.Actions(
                #wait & start
                a_aa_wait='dot(goal,WAIT) - .9 --> goal=0',
                a_attend_item1=
                'dot(goal,DO_TASK) - .1 --> goal=RECOG, attend=ITEM1, do_concepts=GO',

                #attend words
                b_attending_item1=
                'dot(goal,RECOG) + dot(attend,ITEM1) - concepts_evidence - .3 --> goal=RECOG, attend=ITEM1, do_concepts=GO',  # vis_pair=2.5*(ITEM1*concepts)',
                c_attend_item2=
                'dot(goal,RECOG) + dot(attend,ITEM1) + concepts_evidence - 1.6 --> goal=RECOG2, attend=ITEM2, vis_pair=3*(ITEM1*concepts)',
                d_attending_item2=
                'dot(goal,RECOG2+RECOG) + dot(attend,ITEM2) - concepts_evidence - .4 --> goal=RECOG2, attend=ITEM2, do_concepts=GO, dm_learned_words=1.0*(~ITEM1*vis_pair)',  #vis_pair=1.2*(ITEM2*concepts)
                e_judge_familiarity=
                'dot(goal,RECOG2) + dot(attend,ITEM2) + concepts_evidence - 1.8 --> goal=FAMILIARITY, do_fam=GO, vis_pair=1.9*(ITEM2*concepts), dm_learned_words=2.0*(~ITEM1*vis_pair+~ITEM2*vis_pair)',

                #judge familiarity
                f_judge_familiarity=
                'dot(goal,FAMILIARITY) - .1 --> goal=FAMILIARITY, do_fam=GO, dm_learned_words=.8*(~ITEM1*vis_pair+~ITEM2*vis_pair)',
                g_respond_unfamiliar=
                'dot(goal,FAMILIARITY) - familiarity - .5*dot(fingers,L1+L2+R1+R2) - .6 --> goal=RESPOND, do_fam=GO, motor_input=1.6*(target_hand+MIDDLE)',
                h_respond_familiar=
                'dot(goal,FAMILIARITY) + familiarity - .5*dot(fingers,L1+L2+R1+R2) - .6 --> goal=RESPOND, do_fam=GO, motor_input=1.6*(target_hand+INDEX)',
                x_response_done=
                '1.1*dot(goal,RESPOND) + 1.5*dot(fingers,L1+L2+R1+R2) - .7 --> goal=2*END',
                y_end='dot(goal,END)-.1 --> goal=END',
                z_threshold='.05 --> goal=0'

                #possible to match complete buffer, ie is representation filled?
                # motor_input=1.5*target_hand+MIDDLE,
            ))
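        # each named action maps a scalar utility (left of '-->') to a set of
        # effects (right of '-->'); the basal ganglia picks the rule with the
        # highest utility at every timestep. For example,
        #   'dot(goal,WAIT) - .9 --> goal=0'
        # has utility equal to the goal buffer's similarity to WAIT minus a
        # 0.9 bias, and its effect routes 0 into the goal buffer. The constant
        # z_threshold rule sets a utility floor of .05 that any other rule
        # must beat in order to fire.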

        # leftover rules from an earlier two-word attention draft, kept for reference:
        #'dot(attention, W1) - evidence - 0.8 --> motor=NO, attention=W1',
        #'dot(attention, W1) + evidence - 0.8 --> attention=W2, reset=EVIDENCE',
        #'dot(attention, W1) --> attention=W1',  # if we don't set attention it goes back to 0
        #'dot(attention, W2) - evidence - 0.8 --> motor=NO, attention=W2',
        #'dot(attention, W2) + evidence - 0.8 --> motor=YES, attention=W2',
        #'dot(attention, W2) --> attention=W2',  # alternative: feedback on attention, replacing rules 3/6 with a default rule

        model.thalamus = spa.Thalamus(model.bg)

        model.cortical = spa.Cortical(  # cortical connections: shorthand for fixed routing between SPA states
            spa.Actions(
                # all cortical actions are currently disabled:
                #  'motor_input = .04*target_hand',
                # 'dm_learned_words = .1*vis_pair',
                #'dm_pairs = 2*stimulus'
                #'vis_pair = 2*attend*concepts+concepts',
                #fam 'comparison_A = 2*vis_pair',
                #fam 'comparison_B = 2*representation*~attend',
            ))
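        # spa.Cortical would turn each of these into a fixed, always-on
        # connection (no basal-ganglia gating); with every entry commented
        # out it is currently just a placeholder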

        #probes
        model.pr_motor_pos = nengo.Probe(
            model.finger_pos.output,
            synapse=.01)  #raw vector (dimensions x time)
        model.pr_motor = nengo.Probe(model.fingers.output, synapse=.01)
        #model.pr_motor1 = nengo.Probe(model.motor.output, synapse=.01)

        if not nengo_gui_on:
            model.pr_vision_gabor = nengo.Probe(
                model.vision_gabor.neurons, synapse=.005
            )  # do we need a synapse here, or should we work with the raw spikes?
            model.pr_familiarity = nengo.Probe(
                model.dm_learned_words.am.elem_output,
                synapse=.01)  # element output only; excludes the default output
            model.pr_concepts = nengo.Probe(
                model.concepts.am.elem_output,
                synapse=.01)  # element output only; excludes the default output

        # (to decode these probes by hand, multiply the spikes by the connection weights)

        #input
        model.input = spa.Input(goal=goal_func)
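        # goal_func (assumed to be defined earlier in this file) returns the
        # goal buffer's input as a function of simulation time, e.g. DO_TASK
        # around stimulus onset and 0 otherwise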

        #print(sum(ens.n_neurons for ens in model.all_ensembles))

        #return model

        # to show the selected BG rules in the GUI ('and False' keeps this disabled)
        if nengo_gui_on and False:
            # get the rule names and build a vocabulary from them
            vocab_actions = spa.Vocabulary(model.bg.output.size_out)
            for i, action in enumerate(model.bg.actions.actions):
                vocab_actions.add(action.name.upper(),
                                  np.eye(model.bg.output.size_out)[i])
            model.actions = spa.State(model.bg.output.size_out,
                                      vocab=vocab_actions)
            nengo.Connection(model.thalamus.output, model.actions.input)
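            # sketch of the display trick: each rule name becomes a one-hot
            # semantic pointer, so this spa.State shows the name of whichever
            # rule the thalamus is currently selecting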

            # blank out 'channel' labels so the thalamus routing subnetworks stay hidden in the GUI
            for net in model.networks:
                if net.label is not None and net.label.startswith('channel'):
                    net.label = ''