Example #1
    def test_add_objects(self):
        network = Network(dt=1.0, learning=False)

        inpt = Input(100)
        network.add_layer(inpt, name="X")
        lif = LIFNodes(50)
        network.add_layer(lif, name="Y")

        assert inpt == network.layers["X"]
        assert lif == network.layers["Y"]

        conn = Connection(inpt, lif)
        network.add_connection(conn, source="X", target="Y")

        assert conn == network.connections[("X", "Y")]

        monitor = Monitor(lif, state_vars=["s", "v"])
        network.add_monitor(monitor, "Y")

        assert monitor == network.monitors["Y"]

        network.save("net.pt")
        _network = load("net.pt", learning=True)
        assert _network.learning
        assert "X" in _network.layers
        assert "Y" in _network.layers
        assert ("X", "Y") in _network.connections
        assert "Y" in _network.monitors
        del _network

        os.remove("net.pt")
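
These tests assume the imports below (a minimal sketch; the module paths follow BindsNET's public layout and may shift between releases):

import os

from bindsnet.network import Network, load
from bindsnet.network.monitors import Monitor
from bindsnet.network.nodes import Input, LIFNodes
from bindsnet.network.topology import Connection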
Example #3
	def test_add_objects(self):
		network = Network(dt=1.0)
		
		inpt = Input(100)
		network.add_layer(inpt, name='X')
		lif = LIFNodes(50)
		network.add_layer(lif, name='Y')
		
		assert inpt == network.layers['X']
		assert lif == network.layers['Y']
		
		conn = Connection(inpt, lif)
		network.add_connection(conn, source='X', target='Y')
		
		assert conn == network.connections[('X', 'Y')]
		
		monitor = Monitor(lif, state_vars=['s', 'v'])
		network.add_monitor(monitor, 'Y')
		
		assert monitor == network.monitors['Y']
Example #4
def main(n_input=1, n_output=10, time=1000):
    # Network building.
    network = Network(dt=1.0)
    input_layer = RealInput(n=n_input)
    output_layer = LIFNodes(n=n_output)
    connection = Connection(source=input_layer, target=output_layer)
    monitor = Monitor(obj=output_layer, state_vars=('v', ), time=time)

    # Adding network components.
    network.add_layer(input_layer, name='X')
    network.add_layer(output_layer, name='Y')
    network.add_connection(connection, source='X', target='Y')
    network.add_monitor(monitor, name='X_monitor')

    # Creating real-valued inputs and running simulation.
    inpts = {'X': torch.ones(time, n_input)}
    network.run(inpts=inpts, time=time)

    # Plot voltage activity.
    plt.plot(monitor.get('v').numpy().T)
    plt.show()
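
A sketch of what this snippet relies on: RealInput ships with older BindsNET releases, and the `inpts=` keyword matches the older Network.run() signature (newer releases use `inputs=`). An illustrative entry point:

import torch
import matplotlib.pyplot as plt

from bindsnet.network import Network
from bindsnet.network.monitors import Monitor
from bindsnet.network.nodes import LIFNodes, RealInput
from bindsnet.network.topology import Connection

if __name__ == '__main__':
    main()  # Defaults: 1 input, 10 LIF neurons, 1000 timesteps.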
Example #5
    def __init__(self, parameters: BenchmarkParameters):
        super(BindsNetModule, self).__init__()
        network = Network(batch_size=parameters.batch_size, dt=parameters.dt)
        lif_nodes = LIFNodes(n=parameters.features)
        monitor = Monitor(obj=lif_nodes,
                          state_vars=("s",),  # One-element tuple; ("s") is just the string "s".
                          time=parameters.sequence_length)
        network.add_layer(Input(n=parameters.features), name="Input")
        network.add_layer(lif_nodes, name="Neurons")
        network.add_connection(
            Connection(source=network.layers["Input"],
                       target=network.layers["Neurons"]),
            source="Input",
            target="Neurons",
        )
        network.add_monitor(monitor, "Monitor")
        network.to(parameters.device)

        self.parameters = parameters
        self.network = network
        self.monitor = monitor
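
BenchmarkParameters is defined elsewhere in the benchmark suite; a hypothetical stand-in inferred from the attribute accesses above:

from dataclasses import dataclass

import torch


@dataclass
class BenchmarkParameters:
    # Hypothetical container; field names inferred from the accesses above.
    batch_size: int = 32
    dt: float = 1.0
    features: int = 128
    sequence_length: int = 100
    device: str = "cuda" if torch.cuda.is_available() else "cpu"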
Example #6
w = w.view(n_filters * conv_size2[0] * conv_size2[1],
           n_filters * conv_size2[0] * conv_size2[1])
recurrent_conn2 = Connection(conv_layer2, conv_layer2, w=w)

network.add_layer(input_layer, name='X')
network.add_layer(conv_layer, name='Y')
network.add_layer(conv_layer_, name='Y_')
network.add_layer(conv_layer2, name='Z')
network.add_connection(conv_conn, source='X', target='Y')
network.add_connection(conv_conn_, source='X', target='Y_')
network.add_connection(recurrent_conn, source='Y', target='Y')
network.add_connection(conv_conn2, source='Y', target='Z')
network.add_connection(recurrent_conn2, source='Z', target='Z')

# Voltage recording for excitatory and inhibitory layers.
voltage_monitor = Monitor(network.layers['Y'], ['v'], time=time)
network.add_monitor(voltage_monitor, name='output_voltage')

# Load MNIST data.
dataset = MNIST(path=os.path.join('..', '..', 'data', 'MNIST'), download=True)

if train:
    images, labels = dataset.get_train()
else:
    images, labels = dataset.get_test()

images *= intensity

# Record spikes during the simulation.
spike_record = torch.zeros(update_interval, time, n_neurons)

# Neuron assignments and spike proportions.
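
For reference, this fragment reads several names bound earlier in its source file; illustrative stand-ins (assumptions, not the original settings):

# Illustrative stand-ins for names bound earlier in the source file.
train = True           # Use the training split.
intensity = 0.5        # Input scaling factor.
time = 250             # Simulation length per example, in timesteps.
update_interval = 250  # Examples between accuracy evaluations.
n_neurons = n_filters * conv_size2[0] * conv_size2[1]  # Output layer size.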
Example #7
network.add_connection(connection=recurrent_connection, source="B", target="B")

# Create and add input and output layer monitors.
source_monitor = Monitor(
    obj=source_layer,
    state_vars=("s", ),  # Record spikes and voltages.
    time=time,  # Length of simulation (if known ahead of time).
)
target_monitor = Monitor(
    obj=target_layer,
    state_vars=("s", "v"),  # Record spikes and voltages.
    time=time,  # Length of simulation (if known ahead of time).
)

network.add_monitor(monitor=source_monitor, name="A")
network.add_monitor(monitor=target_monitor, name="B")

# Create input spike data: every input neuron spikes at each timestep.
# (To sample spikes from Bernoulli(0.1) instead, use
# torch.bernoulli(0.1 * torch.ones(time, source_layer.n)).byte().)
input_data = torch.ones(time, source_layer.n).byte()
inputs = {"A": input_data}
spikes = {}
voltages = {}
# Simulate network on input data.
# network.run(inputs=inputs, time=time)
for i in range(epoch):
    print(network.connections[("A", "B")].w)
    network.run(inputs=inputs, time=time, input_time_dim=1)
    # Retrieve and plot simulation spike, voltage data from monitors.
    # voltages = {"B": target_monitor.get("v")}
Example #8
output_layer = IFNodes(n=10, sum_input=True)
bias = RealInput(n=1, sum_input=True)
network.add_layer(input_layer, name='X')
network.add_layer(output_layer, name='Y')
network.add_layer(bias, name='Y_b')

input_connection = Connection(source=input_layer, target=output_layer, norm=150, wmin=-1, wmax=1)
bias_connection = Connection(source=bias, target=output_layer)
network.add_connection(input_connection, source='X', target='Y')
network.add_connection(bias_connection, source='Y_b', target='Y')

# State variable monitoring.
time = 25
for l in network.layers:
    m = Monitor(network.layers[l], state_vars=['s'], time=time)
    network.add_monitor(m, name=l)

# Load Fashion-MNIST data.
images, labels = FashionMNIST(path='../../data/FashionMNIST', download=True).get_train()

# Run training.
grads = {}
lr, lr_decay = 1e-2, 0.95
criterion = torch.nn.CrossEntropyLoss()
spike_ims, spike_axes, weights_im = None, None, None
for i, (image, label) in enumerate(zip(images.view(-1, 784) / 255, labels)):
    label = torch.Tensor([label]).long()

    # Run simulation for single datum.
    inpts = {'X': image.repeat(time, 1), 'Y_b': torch.ones(time, 1)}
    network.run(inpts=inpts, time=time)
Example #9
def main(seed=0,
         n_neurons=100,
         n_train=60000,
         n_test=10000,
         inhib=250,
         time=50,
         lr=1e-2,
         lr_decay=0.99,
         dt=1,
         theta_plus=0.05,
         theta_decay=1e-7,
         progress_interval=10,
         update_interval=250,
         train=True,
         plot=False,
         gpu=False):

    assert n_train % update_interval == 0 and n_test % update_interval == 0, \
                            'No. examples must be divisible by update_interval'

    params = [
        seed, n_neurons, n_train, inhib, time, lr, lr_decay, theta_plus,
        theta_decay, progress_interval, update_interval
    ]

    test_params = [
        seed, n_neurons, n_train, n_test, inhib, time, lr, lr_decay,
        theta_plus, theta_decay, progress_interval, update_interval
    ]

    model_name = '_'.join([str(x) for x in params])

    np.random.seed(seed)

    if gpu:
        torch.set_default_tensor_type('torch.cuda.FloatTensor')
        torch.cuda.manual_seed_all(seed)
    else:
        torch.manual_seed(seed)

    if train:
        n_examples = n_train
    else:
        n_examples = n_test

    n_sqrt = int(np.ceil(np.sqrt(n_neurons)))
    n_classes = 10

    # Build network.
    if train:
        network = Network(dt=dt)

        input_layer = Input(n=784, traces=True, trace_tc=5e-2)
        network.add_layer(input_layer, name='X')

        output_layer = DiehlAndCookNodes(n=n_neurons,
                                         traces=True,
                                         rest=0,
                                         reset=0,
                                         thresh=1,
                                         refrac=0,
                                         decay=1e-2,
                                         trace_tc=5e-2,
                                         theta_plus=theta_plus,
                                         theta_decay=theta_decay)
        network.add_layer(output_layer, name='Y')

        w = 0.3 * torch.rand(784, n_neurons)
        input_connection = Connection(source=network.layers['X'],
                                      target=network.layers['Y'],
                                      w=w,
                                      update_rule=PostPre,
                                      nu=[0, lr],
                                      wmin=0,
                                      wmax=1,
                                      norm=78.4)
        network.add_connection(input_connection, source='X', target='Y')

        w = -inhib * (torch.ones(n_neurons, n_neurons) -
                      torch.diag(torch.ones(n_neurons)))
        recurrent_connection = Connection(source=network.layers['Y'],
                                          target=network.layers['Y'],
                                          w=w,
                                          wmin=-inhib,
                                          wmax=0)
        network.add_connection(recurrent_connection, source='Y', target='Y')

    else:
        path = os.path.join('..', '..', 'params', data, model)
        network = load_network(os.path.join(path, model_name + '.pt'))
        network.connections['X', 'Y'].update_rule = NoOp(
            connection=network.connections['X', 'Y'],
            nu=network.connections['X', 'Y'].nu)
        network.layers['Y'].theta_decay = 0
        network.layers['Y'].theta_plus = 0

    # Load Fashion-MNIST data.
    dataset = FashionMNIST(path=os.path.join('..', '..', 'data',
                                             'FashionMNIST'),
                           download=True)

    if train:
        images, labels = dataset.get_train()
    else:
        images, labels = dataset.get_test()

    images = images.view(-1, 784)
    images = images / 255

    # if train:
    #     for i in range(n_neurons):
    #         network.connections['X', 'Y'].w[:, i] = images[i] + images[i].mean() * torch.randn(784)

    # Record spikes during the simulation.
    spike_record = torch.zeros(update_interval, time, n_neurons)

    # Neuron assignments and spike proportions.
    if train:
        assignments = -torch.ones_like(torch.Tensor(n_neurons))
        proportions = torch.zeros_like(torch.Tensor(n_neurons, n_classes))
        rates = torch.zeros_like(torch.Tensor(n_neurons, n_classes))
        ngram_scores = {}
    else:
        path = os.path.join('..', '..', 'params', data, model)
        path = os.path.join(path, '_'.join(['auxiliary', model_name]) + '.pt')
        assignments, proportions, rates, ngram_scores = torch.load(
            open(path, 'rb'))

    # Sequence of accuracy estimates.
    curves = {'all': [], 'proportion': [], 'ngram': []}

    if train:
        best_accuracy = 0

    spikes = {}

    for layer in set(network.layers):
        spikes[layer] = Monitor(network.layers[layer],
                                state_vars=['s'],
                                time=time)
        network.add_monitor(spikes[layer], name='%s_spikes' % layer)

    # Train the network.
    if train:
        print('\nBegin training.\n')
    else:
        print('\nBegin test.\n')

    inpt_axes = None
    inpt_ims = None
    spike_ims = None
    spike_axes = None
    weights_im = None
    assigns_im = None
    perf_ax = None

    start = t()
    for i in range(n_examples):
        if i % progress_interval == 0 and train:
            network.connections['X', 'Y'].update_rule.nu[1] *= lr_decay

        if i % progress_interval == 0:
            print(f'Progress: {i} / {n_examples} ({t() - start:.4f} seconds)')
            start = t()

        if i % update_interval == 0 and i > 0:
            if i % len(labels) == 0:
                current_labels = labels[-update_interval:]
            else:
                current_labels = labels[i % len(images) - update_interval:i %
                                        len(images)]

            # Update and print accuracy evaluations.
            curves, predictions = update_curves(curves,
                                                current_labels,
                                                n_classes,
                                                spike_record=spike_record,
                                                assignments=assignments,
                                                proportions=proportions,
                                                ngram_scores=ngram_scores,
                                                n=2)
            print_results(curves)

            if train:
                if any([x[-1] > best_accuracy for x in curves.values()]):
                    print(
                        'New best accuracy! Saving network parameters to disk.'
                    )

                    # Save network to disk.
                    path = os.path.join('..', '..', 'params', data, model)
                    if not os.path.isdir(path):
                        os.makedirs(path)

                    network.save(os.path.join(path, model_name + '.pt'))
                    path = os.path.join(
                        path, '_'.join(['auxiliary', model_name]) + '.pt')
                    torch.save((assignments, proportions, rates, ngram_scores),
                               open(path, 'wb'))

                    best_accuracy = max([x[-1] for x in curves.values()])

                # Assign labels to excitatory layer neurons.
                assignments, proportions, rates = assign_labels(
                    spike_record, current_labels, n_classes, rates)

                # Compute ngram scores.
                ngram_scores = update_ngram_scores(spike_record,
                                                   current_labels, n_classes,
                                                   2, ngram_scores)

            print()

        # Get next input sample.
        image = images[i % n_examples]
        sample = rank_order(datum=image, time=time, dt=dt)
        inpts = {'X': sample}

        # Run the network on the input.
        network.run(inpts=inpts, time=time)

        retries = 0
        while spikes['Y'].get('s').sum() < 5 and retries < 3:
            retries += 1
            image *= 2
            sample = rank_order(datum=image, time=time, dt=dt)
            inpts = {'X': sample}
            network.run(inpts=inpts, time=time)

        # Add to spikes recording.
        spike_record[i % update_interval] = spikes['Y'].get('s').t()

        # Optionally plot various simulation information.
        if plot:
            _input = images[i % n_examples].view(28, 28)
            reconstruction = inpts['X'].view(time, 784).sum(0).view(28, 28)
            _spikes = {layer: spikes[layer].get('s') for layer in spikes}
            input_exc_weights = network.connections['X', 'Y'].w
            square_weights = get_square_weights(
                input_exc_weights.view(784, n_neurons), n_sqrt, 28)
            square_assignments = get_square_assignments(assignments, n_sqrt)

            # inpt_axes, inpt_ims = plot_input(_input, reconstruction, label=labels[i], axes=inpt_axes, ims=inpt_ims)
            spike_ims, spike_axes = plot_spikes(_spikes,
                                                ims=spike_ims,
                                                axes=spike_axes)
            weights_im = plot_weights(square_weights, im=weights_im, wmax=0.25)
            # assigns_im = plot_assignments(square_assignments, im=assigns_im)
            # perf_ax = plot_performance(curves, ax=perf_ax)

            plt.pause(1e-8)

        network.reset_()  # Reset state variables.

    print(f'Progress: {n_examples} / {n_examples} ({t() - start:.4f} seconds)')

    i += 1

    if i % len(labels) == 0:
        current_labels = labels[-update_interval:]
    else:
        current_labels = labels[i % len(images) - update_interval:i %
                                len(images)]

    # Update and print accuracy evaluations.
    curves, predictions = update_curves(curves,
                                        current_labels,
                                        n_classes,
                                        spike_record=spike_record,
                                        assignments=assignments,
                                        proportions=proportions,
                                        ngram_scores=ngram_scores,
                                        n=2)
    print_results(curves)

    if train:
        if any([x[-1] > best_accuracy for x in curves.values()]):
            print('New best accuracy! Saving network parameters to disk.')

            # Save network to disk.
            path = os.path.join('..', '..', 'params', data, model)
            if not os.path.isdir(path):
                os.makedirs(path)

            network.save(os.path.join(path, model_name + '.pt'))
            path = os.path.join(
                path, '_'.join(['auxiliary', model_name]) + '.pt')
            torch.save((assignments, proportions, rates, ngram_scores),
                       open(path, 'wb'))

    if train:
        print('\nTraining complete.\n')
    else:
        print('\nTest complete.\n')

    print('Average accuracies:\n')
    for scheme in curves.keys():
        print('\t%s: %.2f' % (scheme, float(np.mean(curves[scheme]))))

    # Save accuracy curves to disk.
    path = os.path.join('..', '..', 'curves', data, model)
    if not os.path.isdir(path):
        os.makedirs(path)

    if train:
        to_write = ['train'] + params
    else:
        to_write = ['test'] + params

    to_write = [str(x) for x in to_write]
    f = '_'.join(to_write) + '.pt'

    torch.save((curves, update_interval, n_examples),
               open(os.path.join(path, f), 'wb'))

    # Save results to disk.
    path = os.path.join('..', '..', 'results', data, model)
    if not os.path.isdir(path):
        os.makedirs(path)

    results = [
        np.mean(curves['all']),
        np.mean(curves['proportion']),
        np.mean(curves['ngram']),
        np.max(curves['all']),
        np.max(curves['proportion']),
        np.max(curves['ngram'])
    ]

    if train:
        to_write = params + results
    else:
        to_write = test_params + results

    to_write = [str(x) for x in to_write]

    if train:
        name = 'train.csv'
    else:
        name = 'test.csv'

    if not os.path.isfile(os.path.join(path, name)):
        with open(os.path.join(path, name), 'w') as f:
            if train:
                f.write(
                    'random_seed,n_neurons,n_train,inhib,time,lr,lr_decay,theta_plus,theta_decay,'
                    'progress_interval,update_interval,mean_all_activity,mean_proportion_weighting,'
                    'mean_ngram,max_all_activity,max_proportion_weighting,max_ngram\n'
                )
            else:
                f.write(
                    'random_seed,n_neurons,n_train,n_test,inhib,time,lr,lr_decay,theta_plus,theta_decay,'
                    'progress_interval,update_interval,mean_all_activity,mean_proportion_weighting,'
                    'mean_ngram,max_all_activity,max_proportion_weighting,max_ngram\n'
                )

    with open(os.path.join(path, name), 'a') as f:
        f.write(','.join(to_write) + '\n')
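
Note that `data` and `model` are read from module scope inside main(). An illustrative invocation (values are examples only, chosen to satisfy the divisibility assert above):

if __name__ == '__main__':
    main(seed=0, n_neurons=100, n_train=500, n_test=250,
         update_interval=250, train=True, plot=False, gpu=False)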
Example #10
def main(args):
    # Random seeding.
    torch.manual_seed(args.seed)

    # Device.
    device = torch.device("cuda" if args.gpu else "cpu")

    # No. workers.
    if args.n_workers == -1:
        args.n_workers = args.gpu * 4 * torch.cuda.device_count()

    # Build network.
    network = Network(batch_size=args.batch_size)
    network.add_layer(Input(shape=(1, 28, 28), traces=True), name="I")
    network.add_layer(LIFNodes(n=10,
                               traces=True,
                               rest=0,
                               reset=0,
                               thresh=1,
                               refrac=0),
                      name="O")
    network.add_connection(
        Connection(
            source=network.layers["I"],
            target=network.layers["O"],
            nu=(0.0, 0.01),
            update_rule=Hebbian,
            wmin=0.0,
            wmax=1.0,
            norm=100.0,
            reduction=torch.sum,
        ),
        source="I",
        target="O",
    )

    if args.plot:
        for l in network.layers:
            network.add_monitor(Monitor(network.layers[l],
                                        state_vars=("s", ),
                                        time=args.time),
                                name=l)

    network.to(device)

    # Load dataset.
    dataset = MNIST(
        image_encoder=PoissonEncoder(time=args.time, dt=1.0),
        label_encoder=None,
        root=os.path.join(ROOT_DIR, "data", "MNIST"),
        download=True,
        train=False,
        transform=transforms.Compose(
            [transforms.ToTensor(),
             transforms.Lambda(lambda x: x * 250)]),
    )

    # Create a dataloader to iterate and batch data
    dataloader = DataLoader(
        dataset,
        batch_size=args.batch_size,
        shuffle=True,
        num_workers=args.n_workers,
        pin_memory=args.gpu,
    )

    spike_ims = None
    spike_axes = None
    weights_im = None

    t0 = time()
    for step, batch in enumerate(tqdm(dataloader)):
        # Prep next input batch.
        inputs = batch["encoded_image"]

        inpts = {"I": inputs}
        if args.gpu:
            inpts = {k: v.cuda() for k, v in inpts.items()}

        # A boolean mask keeps `~` a logical (elementwise) negation.
        clamp = torch.nn.functional.one_hot(batch["label"],
                                            num_classes=10).bool()
        unclamp = ~clamp
        clamp = {"O": clamp}
        unclamp = {"O": unclamp}

        # Run the network on the input.
        network.run(
            inpts=inpts,
            time=args.time,
            one_step=args.one_step,
            clamp=clamp,
            unclamp=unclamp,
        )

        if args.plot:
            # Plot output spikes.
            spikes = {
                l: network.monitors[l].get("s")[:, 0]
                for l in network.monitors
            }
            spike_ims, spike_axes = plot_spikes(spikes=spikes,
                                                ims=spike_ims,
                                                axes=spike_axes)

            # Plot connection weights.
            weights = network.connections["I", "O"].w
            weights = get_square_weights(weights, n_sqrt=4, side=28)
            weights_im = plot_weights(weights,
                                      wmax=network.connections["I", "O"].wmax,
                                      im=weights_im)

            plt.pause(1e-2)

        # Reset state variables.
        network.reset_()

    network.learning = False

    for step, batch in enumerate(tqdm(dataloader)):
        # Prep next input batch.
        inputs = batch["encoded_image"]

        inpts = {"I": inputs}
        if args.gpu:
            inpts = {k: v.cuda() for k, v in inpts.items()}

        # Run the network on the input.
        network.run(inpts=inpts, time=args.time, one_step=args.one_step)

        if args.plot:
            # Plot output spikes.
            spikes = {
                l: network.monitors[l].get("s")[:, 0]
                for l in network.monitors
            }
            spike_ims, spike_axes = plot_spikes(spikes=spikes,
                                                ims=spike_ims,
                                                axes=spike_axes)

            # Plot connection weights.
            weights = network.connections["I", "O"].w
            weights = get_square_weights(weights, n_sqrt=4, side=28)
            weights_im = plot_weights(weights,
                                      wmax=network.connections["I", "O"].wmax,
                                      im=weights_im)

            plt.pause(1e-2)

    t1 = time() - t0

    print(f"Time: {t1}")
Example #11
def main(seed=0,
         n_train=60000,
         n_test=10000,
         kernel_size=(16, 16),
         stride=(4, 4),
         n_filters=25,
         padding=0,
         inhib=100,
         time=25,
         lr=1e-3,
         lr_decay=0.99,
         dt=1,
         intensity=1,
         progress_interval=10,
         update_interval=250,
         plot=False,
         train=True,
         gpu=False):

    assert n_train % update_interval == 0 and n_test % update_interval == 0, \
        'No. examples must be divisible by update_interval'

    params = [
        seed, n_train, kernel_size, stride, n_filters, padding, inhib, time,
        lr, lr_decay, dt, intensity, update_interval
    ]

    model_name = '_'.join([str(x) for x in params])

    if not train:
        test_params = [
            seed, n_train, n_test, kernel_size, stride, n_filters, padding,
            inhib, time, lr, lr_decay, dt, intensity, update_interval
        ]

    np.random.seed(seed)

    if gpu:
        torch.set_default_tensor_type('torch.cuda.FloatTensor')
        torch.cuda.manual_seed_all(seed)
    else:
        torch.manual_seed(seed)

    n_examples = n_train if train else n_test
    input_shape = [20, 20]

    if kernel_size == input_shape:
        conv_size = [1, 1]
    else:
        conv_size = (int((input_shape[0] - kernel_size[0]) / stride[0]) + 1,
                     int((input_shape[1] - kernel_size[1]) / stride[1]) + 1)

    n_classes = 10
    n_neurons = n_filters * np.prod(conv_size)
    total_kernel_size = int(np.prod(kernel_size))
    total_conv_size = int(np.prod(conv_size))

    # Build network.
    if train:
        network = Network()
        input_layer = Input(n=400, shape=(1, 1, 20, 20), traces=True)
        conv_layer = DiehlAndCookNodes(n=n_filters * total_conv_size,
                                       shape=(1, n_filters, *conv_size),
                                       thresh=-64.0,
                                       traces=True,
                                       theta_plus=0.05 * (kernel_size[0] / 20),
                                       refrac=0)
        conv_layer2 = LIFNodes(n=n_filters * total_conv_size,
                               shape=(1, n_filters, *conv_size),
                               refrac=0)
        conv_conn = Conv2dConnection(input_layer,
                                     conv_layer,
                                     kernel_size=kernel_size,
                                     stride=stride,
                                     update_rule=WeightDependentPostPre,
                                     norm=0.05 * total_kernel_size,
                                     nu=[0, lr],
                                     wmin=0,
                                     wmax=0.25)
        conv_conn2 = Conv2dConnection(input_layer,
                                      conv_layer2,
                                      w=conv_conn.w,
                                      kernel_size=kernel_size,
                                      stride=stride,
                                      update_rule=None,
                                      wmax=0.25)

        w = -inhib * torch.ones(n_filters, conv_size[0], conv_size[1],
                                n_filters, conv_size[0], conv_size[1])
        for f in range(n_filters):
            for f2 in range(n_filters):
                if f != f2:
                    w[f, :, :, f2, :, :] = 0

        w = w.view(n_filters * conv_size[0] * conv_size[1],
                   n_filters * conv_size[0] * conv_size[1])
        recurrent_conn = Connection(conv_layer, conv_layer, w=w)

        network.add_layer(input_layer, name='X')
        network.add_layer(conv_layer, name='Y')
        network.add_layer(conv_layer2, name='Y_')
        network.add_connection(conv_conn, source='X', target='Y')
        network.add_connection(conv_conn2, source='X', target='Y_')
        network.add_connection(recurrent_conn, source='Y', target='Y')

        # Voltage recording for excitatory and inhibitory layers.
        voltage_monitor = Monitor(network.layers['Y'], ['v'], time=time)
        network.add_monitor(voltage_monitor, name='output_voltage')
    else:
        network = load_network(os.path.join(params_path, model_name + '.pt'))
        network.connections['X', 'Y'].update_rule = NoOp(
            connection=network.connections['X', 'Y'],
            nu=network.connections['X', 'Y'].nu)
        network.layers['Y'].theta_decay = 0
        network.layers['Y'].theta_plus = 0

    # Load MNIST data.
    dataset = MNIST(data_path, download=True)

    if train:
        images, labels = dataset.get_train()
    else:
        images, labels = dataset.get_test()

    images *= intensity
    images = images[:, 4:-4, 4:-4].contiguous()

    # Record spikes during the simulation.
    spike_record = torch.zeros(update_interval, time, n_neurons)
    full_spike_record = torch.zeros(n_examples, n_neurons)

    # Neuron assignments and spike proportions.
    if train:
        logreg_model = LogisticRegression(warm_start=True,
                                          n_jobs=-1,
                                          solver='lbfgs',
                                          max_iter=1000,
                                          multi_class='multinomial')
        logreg_model.coef_ = np.zeros([n_classes, n_neurons])
        logreg_model.intercept_ = np.zeros(n_classes)
        logreg_model.classes_ = np.arange(n_classes)
    else:
        path = os.path.join(params_path,
                            '_'.join(['auxiliary', model_name]) + '.pt')
        logreg_coef, logreg_intercept = torch.load(open(path, 'rb'))
        logreg_model = LogisticRegression(warm_start=True,
                                          n_jobs=-1,
                                          solver='lbfgs',
                                          max_iter=1000,
                                          multi_class='multinomial')
        logreg_model.coef_ = logreg_coef
        logreg_model.intercept_ = logreg_intercept
        logreg_model.classes_ = np.arange(n_classes)

    # Sequence of accuracy estimates.
    curves = {'logreg': []}
    predictions = {scheme: torch.Tensor().long() for scheme in curves.keys()}

    if train:
        best_accuracy = 0

    spikes = {}
    for layer in set(network.layers):
        spikes[layer] = Monitor(network.layers[layer],
                                state_vars=['s'],
                                time=time)
        network.add_monitor(spikes[layer], name='%s_spikes' % layer)

    # Train the network.
    if train:
        print('\nBegin training.\n')
    else:
        print('\nBegin test.\n')

    inpt_ims = None
    inpt_axes = None
    spike_ims = None
    spike_axes = None
    weights_im = None

    plot_update_interval = 100

    start = t()
    for i in range(n_examples):
        if i % progress_interval == 0:
            print('Progress: %d / %d (%.4f seconds)' %
                  (i, n_examples, t() - start))
            start = t()

        if i % update_interval == 0 and i > 0:
            if train:
                network.connections['X', 'Y'].update_rule.nu[1] *= lr_decay

            if i % len(labels) == 0:
                current_labels = labels[-update_interval:]
                current_record = full_spike_record[-update_interval:]
            else:
                current_labels = labels[i % len(labels) - update_interval:i %
                                        len(labels)]
                current_record = full_spike_record[i % len(labels) -
                                                   update_interval:i %
                                                   len(labels)]

            # Update and print accuracy evaluations.
            curves, preds = update_curves(curves,
                                          current_labels,
                                          n_classes,
                                          full_spike_record=current_record,
                                          logreg=logreg_model)
            print_results(curves)

            for scheme in preds:
                predictions[scheme] = torch.cat(
                    [predictions[scheme], preds[scheme]], -1)

            # Save accuracy curves to disk.
            to_write = ['train'] + params if train else ['test'] + params
            f = '_'.join([str(x) for x in to_write]) + '.pt'
            torch.save((curves, update_interval, n_examples),
                       open(os.path.join(curves_path, f), 'wb'))

            if train:
                if any([x[-1] > best_accuracy for x in curves.values()]):
                    print(
                        'New best accuracy! Saving network parameters to disk.'
                    )

                    # Save network to disk.
                    network.save(os.path.join(params_path, model_name + '.pt'))
                    path = os.path.join(
                        params_path,
                        '_'.join(['auxiliary', model_name]) + '.pt')
                    torch.save((logreg_model.coef_, logreg_model.intercept_),
                               open(path, 'wb'))
                    best_accuracy = max([x[-1] for x in curves.values()])

                # Refit logistic regression model.
                logreg_model = logreg_fit(full_spike_record[:i], labels[:i],
                                          logreg_model)

            print()

        # Get next input sample.
        image = images[i % len(images)]
        sample = bernoulli(datum=image, time=time, dt=dt,
                           max_prob=1).unsqueeze(1).unsqueeze(1)
        inpts = {'X': sample}

        # Run the network on the input.
        network.run(inpts=inpts, time=time)

        network.connections['X', 'Y_'].w = network.connections['X', 'Y'].w

        # Add to spikes recording.
        spike_record[i % update_interval] = spikes['Y_'].get('s').view(
            time, -1)
        full_spike_record[i] = spikes['Y_'].get('s').view(time, -1).sum(0)

        # Optionally plot various simulation information.
        if plot and i % plot_update_interval == 0:
            _input = inpts['X'].view(time, 400).sum(0).view(20, 20)
            w = network.connections['X', 'Y'].w

            _spikes = {
                'X': spikes['X'].get('s').view(400, time),
                'Y': spikes['Y'].get('s').view(n_filters * total_conv_size,
                                               time),
                'Y_': spikes['Y_'].get('s').view(n_filters * total_conv_size,
                                                 time)
            }

            inpt_axes, inpt_ims = plot_input(image.view(20, 20),
                                             _input,
                                             label=labels[i % len(labels)],
                                             ims=inpt_ims,
                                             axes=inpt_axes)
            spike_ims, spike_axes = plot_spikes(spikes=_spikes,
                                                ims=spike_ims,
                                                axes=spike_axes)
            weights_im = plot_conv2d_weights(
                w, im=weights_im, wmax=network.connections['X', 'Y'].wmax)

            plt.pause(1e-2)

        network.reset_()  # Reset state variables.

    print(f'Progress: {n_examples} / {n_examples} ({t() - start:.4f} seconds)')

    i += 1

    if i % len(labels) == 0:
        current_labels = labels[-update_interval:]
        current_record = full_spike_record[-update_interval:]
    else:
        current_labels = labels[i % len(labels) - update_interval:i %
                                len(labels)]
        current_record = full_spike_record[i % len(labels) -
                                           update_interval:i % len(labels)]

    # Update and print accuracy evaluations.
    curves, preds = update_curves(curves,
                                  current_labels,
                                  n_classes,
                                  full_spike_record=current_record,
                                  logreg=logreg_model)
    print_results(curves)

    for scheme in preds:
        predictions[scheme] = torch.cat([predictions[scheme], preds[scheme]],
                                        -1)

    if train:
        if any([x[-1] > best_accuracy for x in curves.values()]):
            print('New best accuracy! Saving network parameters to disk.')

            # Save network to disk.
            network.save(os.path.join(params_path, model_name + '.pt'))
            path = os.path.join(params_path,
                                '_'.join(['auxiliary', model_name]) + '.pt')
            torch.save((logreg_model.coef_, logreg_model.intercept_),
                       open(path, 'wb'))

    if train:
        print('\nTraining complete.\n')
    else:
        print('\nTest complete.\n')

    print('Average accuracies:\n')
    for scheme in curves.keys():
        print('\t%s: %.2f' % (scheme, float(np.mean(curves[scheme]))))

    # Save accuracy curves to disk.
    to_write = ['train'] + params if train else ['test'] + params
    to_write = [str(x) for x in to_write]
    f = '_'.join(to_write) + '.pt'
    torch.save((curves, update_interval, n_examples),
               open(os.path.join(curves_path, f), 'wb'))

    # Save results to disk.
    results = [np.mean(curves['logreg']), np.std(curves['logreg'])]

    to_write = params + results if train else test_params + results
    to_write = [str(x) for x in to_write]
    name = 'train.csv' if train else 'test.csv'

    if not os.path.isfile(os.path.join(results_path, name)):
        with open(os.path.join(results_path, name), 'w') as f:
            if train:
                columns = [
                    'seed', 'n_train', 'kernel_size', 'stride', 'n_filters',
                    'padding', 'inhib', 'time', 'lr', 'lr_decay', 'dt',
                    'intensity', 'update_interval', 'mean_logreg', 'std_logreg'
                ]

                header = ','.join(columns) + '\n'
                f.write(header)
            else:
                columns = [
                    'seed', 'n_train', 'n_test', 'kernel_size', 'stride',
                    'n_filters', 'padding', 'inhib', 'time', 'lr', 'lr_decay',
                    'dt', 'intensity', 'update_interval', 'mean_logreg',
                    'std_logreg'
                ]

                header = ','.join(columns) + '\n'
                f.write(header)

    with open(os.path.join(results_path, name), 'a') as f:
        f.write(','.join(to_write) + '\n')

    if labels.numel() > n_examples:
        labels = labels[:n_examples]
    else:
        while labels.numel() < n_examples:
            if 2 * labels.numel() > n_examples:
                labels = torch.cat(
                    [labels, labels[:n_examples - labels.numel()]])
            else:
                labels = torch.cat([labels, labels])

    # Compute confusion matrices and save them to disk.
    confusions = {}
    for scheme in predictions:
        confusions[scheme] = confusion_matrix(labels, predictions[scheme])

    to_write = ['train'] + params if train else ['test'] + test_params
    f = '_'.join([str(x) for x in to_write]) + '.pt'
    torch.save(confusions, os.path.join(confusion_path, f))
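
update_curves, print_results, and logreg_fit come from the surrounding experiment code; a plausible stand-in for logreg_fit, which refits the warm-started scikit-learn model on the accumulated spike counts (an assumption, not the original implementation):

def logreg_fit(spike_record, labels, logreg):
    # Hypothetical helper: `spike_record` is (n_examples, n_neurons) spike
    # counts; refit the warm-started logistic regression on what has been
    # seen so far.
    logreg.fit(spike_record.numpy(), labels.numpy())
    return logreg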
Example #12
inpt = Input(784, shape=(28, 28))
network.add_layer(inpt, name='I')
output = LIFNodes(625, thresh=-52 + torch.randn(625))
network.add_layer(output, name='O')
C1 = Connection(source=inpt, target=output, w=torch.randn(inpt.n, output.n))
C2 = Connection(source=output,
                target=output,
                w=0.5 * torch.randn(output.n, output.n))

network.add_connection(C1, source='I', target='O')
network.add_connection(C2, source='O', target='O')

spikes = {}
for l in network.layers:
    spikes[l] = Monitor(network.layers[l], ['s'], time=250)
    network.add_monitor(spikes[l], name='%s_spikes' % l)

voltages = {'O': Monitor(network.layers['O'], ['v'], time=250)}
network.add_monitor(voltages['O'], name='O_voltages')

# Get MNIST training images and labels.
images, labels = MNIST(path='../../data/MNIST', download=True).get_train()
images *= 0.25

# Create lazily iterating Poisson-distributed data loader.
loader = zip(poisson_loader(images, time=250), iter(labels))

inpt_axes = None
inpt_ims = None
spike_axes = None
spike_ims = None
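
A minimal consumption sketch for the Poisson loader built above (the `inpts=` keyword matches the older Network.run() signature):

for datum, label in loader:
    network.run(inpts={'I': datum}, time=250)
    print(label, spikes['O'].get('s').sum())  # Total output spikes for this digit.
    network.reset_()
    break  # One sample is enough for illustration.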
Example #13
def main(args):
    if args.gpu:
        torch.cuda.manual_seed_all(args.seed)
    else:
        torch.manual_seed(args.seed)

    conv_size = int(
        (28 - args.kernel_size + 2 * args.padding) / args.stride) + 1

    # Build network.
    network = Network()
    input_layer = Input(n=784, shape=(1, 28, 28), traces=True)

    conv_layer = DiehlAndCookNodes(
        n=args.n_filters * conv_size * conv_size,
        shape=(args.n_filters, conv_size, conv_size),
        traces=True,
    )

    conv_conn = Conv2dConnection(
        input_layer,
        conv_layer,
        kernel_size=args.kernel_size,
        stride=args.stride,
        update_rule=PostPre,
        norm=0.4 * args.kernel_size**2,
        nu=[0, args.lr],
        reduction=max_without_indices,
        wmax=1.0,
    )

    w = torch.zeros(args.n_filters, conv_size, conv_size, args.n_filters,
                    conv_size, conv_size)
    for fltr1 in range(args.n_filters):
        for fltr2 in range(args.n_filters):
            if fltr1 != fltr2:
                for i in range(conv_size):
                    for j in range(conv_size):
                        w[fltr1, i, j, fltr2, i, j] = -100.0

    w = w.view(args.n_filters * conv_size * conv_size,
               args.n_filters * conv_size * conv_size)
    recurrent_conn = Connection(conv_layer, conv_layer, w=w)

    network.add_layer(input_layer, name="X")
    network.add_layer(conv_layer, name="Y")
    network.add_connection(conv_conn, source="X", target="Y")
    network.add_connection(recurrent_conn, source="Y", target="Y")

    # Voltage recording for excitatory and inhibitory layers.
    voltage_monitor = Monitor(network.layers["Y"], ["v"], time=args.time)
    network.add_monitor(voltage_monitor, name="output_voltage")

    if args.gpu:
        network.to("cuda")

    # Load MNIST data.
    train_dataset = MNIST(
        PoissonEncoder(time=args.time, dt=args.dt),
        None,
        os.path.join(ROOT_DIR, "data", "MNIST"),
        download=True,
        train=True,
        transform=transforms.Compose([
            transforms.ToTensor(),
            transforms.Lambda(lambda x: x * args.intensity)
        ]),
    )

    spikes = {}
    for layer in set(network.layers):
        spikes[layer] = Monitor(network.layers[layer],
                                state_vars=["s"],
                                time=args.time)
        network.add_monitor(spikes[layer], name="%s_spikes" % layer)

    voltages = {}
    for layer in set(network.layers) - {"X"}:
        voltages[layer] = Monitor(network.layers[layer],
                                  state_vars=["v"],
                                  time=args.time)
        network.add_monitor(voltages[layer], name="%s_voltages" % layer)

    # Train the network.
    print("Begin training.\n")
    start = time()

    weights_im = None

    for epoch in range(args.n_epochs):
        if epoch % args.progress_interval == 0:
            print("Progress: %d / %d (%.4f seconds)" %
                  (epoch, args.n_epochs, time() - start))
            start = time()

        train_dataloader = DataLoader(
            train_dataset,
            batch_size=args.batch_size,
            shuffle=True,
            num_workers=4,
            pin_memory=args.gpu,
        )

        for step, batch in enumerate(tqdm(train_dataloader)):
            # Get next input sample.
            inpts = {"X": batch["encoded_image"]}
            if args.gpu:
                inpts = {k: v.cuda() for k, v in inpts.items()}

            # Run the network on the input.
            network.run(inpts=inpts, time=args.time, input_time_dim=0)

            # Decay learning rate.
            network.connections["X", "Y"].nu[1] *= 0.99

            # Optionally plot various simulation information.
            if args.plot:
                weights = conv_conn.w
                weights_im = plot_conv2d_weights(weights, im=weights_im)

                plt.pause(1e-8)

            network.reset_()  # Reset state variables.

    print("Progress: %d / %d (%.4f seconds)\n" %
          (args.n_epochs, args.n_epochs, time() - start))
    print("Training complete.\n")
Example #14
def toLIF(network: Network):
    new_network = Network(dt=1, learning=True)
    input_layer = Input(n=network.X.n,
                        shape=network.X.shape,
                        traces=True,
                        tc_trace=network.X.tc_trace.item())
    exc_layer = LIFNodes(
        n=network.Ae.n,
        traces=True,
        rest=network.Ai.rest.item(),
        reset=network.Ai.reset.item(),
        thresh=network.Ai.thresh.item(),
        refrac=network.Ai.refrac.item(),
        tc_decay=network.Ai.tc_decay.item(),
    )
    inh_layer = LIFNodes(
        n=network.Ai.n,
        traces=False,
        rest=network.Ai.rest.item(),
        reset=network.Ai.reset.item(),
        thresh=network.Ai.thresh.item(),
        tc_decay=network.Ai.tc_decay.item(),
        refrac=network.Ai.refrac.item(),
    )

    # Connections
    w = network.X_to_Ae.w
    input_exc_conn = Connection(
        source=input_layer,
        target=exc_layer,
        w=w,
        update_rule=PostPre,
        nu=network.X_to_Ae.nu,
        reduction=network.X_to_Ae.reduction,
        wmin=network.X_to_Ae.wmin,
        wmax=network.X_to_Ae.wmax,
        norm=network.X_to_Ae.norm * 1,
    )
    w = network.Ae_to_Ai.w
    exc_inh_conn = Connection(source=exc_layer,
                              target=inh_layer,
                              w=w,
                              wmin=network.Ae_to_Ai.wmin,
                              wmax=network.Ae_to_Ai.wmax)
    w = network.Ai_to_Ae.w

    inh_exc_conn = Connection(source=inh_layer,
                              target=exc_layer,
                              w=w,
                              wmin=network.Ai_to_Ae.wmin,
                              wmax=network.Ai_to_Ae.wmax)

    # Add to network
    new_network.add_layer(input_layer, name="X")
    new_network.add_layer(exc_layer, name="Ae")
    new_network.add_layer(inh_layer, name="Ai")
    new_network.add_connection(input_exc_conn, source="X", target="Ae")
    new_network.add_connection(exc_inh_conn, source="Ae", target="Ai")
    new_network.add_connection(inh_exc_conn, source="Ai", target="Ae")

    exc_voltage_monitor = Monitor(new_network.layers["Ae"], ["v"], time=500)
    inh_voltage_monitor = Monitor(new_network.layers["Ai"], ["v"], time=500)
    new_network.add_monitor(exc_voltage_monitor, name="exc_voltage")
    new_network.add_monitor(inh_voltage_monitor, name="inh_voltage")

    spikes = {}
    for layer in set(network.layers):
        spikes[layer] = Monitor(new_network.layers[layer],
                                state_vars=["s"],
                                time=time)  # `time` is assumed to be bound at module scope.
        new_network.add_monitor(spikes[layer], name="%s_spikes" % layer)

    return new_network
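
Illustrative use, assuming a trained Diehl&Cook-style network that exposes layers X/Ae/Ai and connections X_to_Ae, Ae_to_Ai, and Ai_to_Ae as attributes, as accessed above:

lif_network = toLIF(trained_network)  # `trained_network` is hypothetical.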
Example #15
def main(seed=0, n_neurons=100, n_train=60000, n_test=10000, inhib=100, lr=0.01, lr_decay=1, time=350, dt=1,
         theta_plus=0.05, theta_decay=1e-7, progress_interval=10, update_interval=250, plot=False,
         train=True, gpu=False):

    assert n_train % update_interval == 0 and n_test % update_interval == 0, \
                            'No. examples must be divisible by update_interval'

    params = [
        seed, n_neurons, n_train, inhib, lr_decay, time, dt,
        theta_plus, theta_decay, progress_interval, update_interval
    ]

    model_name = '_'.join([str(x) for x in params])

    np.random.seed(seed)

    if gpu:
        torch.set_default_tensor_type('torch.cuda.FloatTensor')
        torch.cuda.manual_seed_all(seed)
    else:
        torch.manual_seed(seed)

    n_examples = n_train if train else n_test
    n_classes = 10

    # Build network.
    if train:
        network = Network(dt=dt)

        input_layer = Input(n=784, traces=True, trace_tc=5e-2)
        network.add_layer(input_layer, name='X')

        output_layer = DiehlAndCookNodes(
            n=n_classes, rest=0, reset=1, thresh=1, decay=1e-2,
            theta_plus=theta_plus, theta_decay=theta_decay, traces=True, trace_tc=5e-2
        )
        network.add_layer(output_layer, name='Y')

        w = torch.rand(784, n_classes)
        input_connection = Connection(
            source=input_layer, target=output_layer, w=w,
            update_rule=MSTDPET, nu=lr, wmin=0, wmax=1,
            norm=78.4, tc_e_trace=0.1
        )
        network.add_connection(input_connection, source='X', target='Y')

    else:
        network = load(os.path.join(params_path, model_name + '.pt'))
        network.connections['X', 'Y'].update_rule = NoOp(
            connection=network.connections['X', 'Y'], nu=network.connections['X', 'Y'].nu
        )
        network.layers['Y'].theta_decay = torch.IntTensor([0])
        network.layers['Y'].theta_plus = torch.IntTensor([0])

    # Load MNIST data.
    environment = MNISTEnvironment(
        dataset=MNIST(root=data_path, download=True), train=train, time=time
    )

    # Create pipeline.
    pipeline = Pipeline(
        network=network, environment=environment, encoding=repeat,
        action_function=select_spiked, output='Y', reward_delay=None
    )

    spikes = {}
    for layer in set(network.layers):
        spikes[layer] = Monitor(network.layers[layer], state_vars=('s',), time=time)
        network.add_monitor(spikes[layer], name='%s_spikes' % layer)

    if train:
        network.add_monitor(Monitor(
                network.connections['X', 'Y'].update_rule, state_vars=('tc_e_trace',), time=time
            ), 'X_Y_e_trace')

    # Train the network.
    if train:
        print('\nBegin training.\n')
    else:
        print('\nBegin test.\n')

    spike_ims = None
    spike_axes = None
    weights_im = None
    elig_axes = None
    elig_ims = None

    start = t()
    for i in range(n_examples):
        if i % progress_interval == 0:
            print(f'Progress: {i} / {n_examples} ({t() - start:.4f} seconds)')
            start = t()

            if i > 0 and train:
                network.connections['X', 'Y'].update_rule.nu[1] *= lr_decay

        # Run the network on the input.
        # print("Example",i,"Results:")
        # for j in range(time):
        #     result = pipeline.env_step()
        #     pipeline.step(result,a_plus=1, a_minus=0)
        # print(result)
        for j in range(time):
            pipeline.train()

        if not train:
            _spikes = {layer: spikes[layer].get('s') for layer in spikes}

        if plot:
            _spikes = {layer: spikes[layer].get('s') for layer in spikes}
            w = network.connections['X', 'Y'].w
            square_weights = get_square_weights(w.view(784, n_classes), 4, 28)

            spike_ims, spike_axes = plot_spikes(_spikes, ims=spike_ims, axes=spike_axes)
            weights_im = plot_weights(square_weights, im=weights_im)
            elig_ims, elig_axes = plot_voltages(
                {'Y': network.monitors['X_Y_e_trace'].get('tc_e_trace').view(-1, time)[1500:2000]},
                plot_type='line', ims=elig_ims, axes=elig_axes
            )

            plt.pause(1e-8)

        pipeline.reset_state_variables()  # Reset state variables.
        network.connections['X', 'Y'].update_rule.tc_e_trace = torch.zeros(784, n_classes)

    print(f'Progress: {n_examples} / {n_examples} ({t() - start:.4f} seconds)')

    if train:
        network.save(os.path.join(params_path, model_name + '.pt'))
        print('\nTraining complete.\n')
    else:
        print('\nTest complete.\n')
Example #16
# Determines number of workers to use
if n_workers == -1:
    n_workers = gpu * 4 * torch.cuda.device_count()
print(n_workers)
print(torch.cuda.device_count())

n_sqrt = int(np.ceil(np.sqrt(n_neurons)))
start_intensity = intensity
network = Network(dt=1.0)
network.to(device)
network.to("cuda")
# state_vars: Iterable of strings indicating names of state variables to record.
for l in network.layers:
    m = Monitor(network.layers[l], state_vars=['s'], time=time_arg)
    network.add_monitor(m, name=l)

print(n_neurons, batch_size, nu_single, nu_pair)

# Build network.
network = DiehlAndCook2015(
    n_inpt=784,
    n_neurons=n_neurons,
    exc=exc,
    inh=inh,
    dt=dt,
    norm=78.4,
    nu=(nu_single, nu_pair),
    theta_plus=theta_plus,
    inpt_shape=(1, 28, 28),
    #emax=emax,
)
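
The fragment assumes the imports below (a sketch; DiehlAndCook2015 ships with BindsNET in bindsnet.models):

import numpy as np
import torch

from bindsnet.models import DiehlAndCook2015
from bindsnet.network import Network
from bindsnet.network.monitors import Monitor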
Example #17
    def forward(self, x):
        return self.linear(x)


# Build a simple, two layer, "input-output" network.
network = Network(dt=1.0)
inpt = Input(784, shape=(28, 28))
network.add_layer(inpt, name='I')
output = LIFNodes(625, thresh=-52 + torch.randn(625))
network.add_layer(output, name='O')
network.add_connection(
    Connection(inpt, output, w=torch.randn(inpt.n, output.n)), 'I', 'O')
network.add_connection(
    Connection(output, output, w=0.5 * torch.randn(output.n, output.n)), 'O',
    'O')
network.add_monitor(Monitor(output, ['s'], time=250), name='output_spikes')

# Get MNIST training images and labels and create data loader.
images, labels = MNIST(path='../../data/MNIST').get_train()
loader = zip(poisson_loader(images * 0.25, time=250), iter(labels))

# Run training data on reservoir and store (spikes per neuron, label) pairs.
training_pairs = []
for i, (datum, label) in enumerate(loader):
    network.run(inpts={'I': datum}, time=250)
    training_pairs.append(
        [network.monitors['output_spikes'].get('s').sum(-1), label])
    network.reset_()

    if (i + 1) % 50 == 0: print('Train progress: (%d / 500)' % (i + 1))
    if (i + 1) == 500: break  # Stop after 500 reservoir samples.
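
The snippet is truncated here; one illustrative continuation (not the original code) fits a linear readout on the gathered (spike count, label) pairs:

# Illustrative readout training on the pairs gathered above.
features = torch.stack([s.float() for s, _ in training_pairs])
targets = torch.stack([torch.as_tensor(label) for _, label in training_pairs]).long()

readout = torch.nn.Linear(features.size(1), 10)
optimizer = torch.optim.Adam(readout.parameters(), lr=1e-3)
criterion = torch.nn.CrossEntropyLoss()

for epoch in range(10):
    optimizer.zero_grad()
    loss = criterion(readout(features), targets)
    loss.backward()
    optimizer.step()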
Example #18
    def __init__(
        self,
        network: Network,
        results_path: str,
        dataset_name: str = "MNIST",
        seed: int = 0,
        batch_size: int = 1,
        n_epochs: int = 1,
        n_workers: int = -1,
        update_interval: int = 250,
        gif: bool = False,
        gpu: bool = False,
        plot: bool = False,
        debug: bool = False,
    ) -> None:
        """
        Constructor for class Spiking.
        :param network:         Network model to use.
        :param results_path:    Path to save training & testing results.
        :param dataset_name:    Name of dataset to use.
        :param seed:            Seed for pseudorandom number generator (PRNG).
        :param batch_size:      Mini-batch size.
        :param n_epochs:        Number of epochs for training.
        :param n_workers:       Number of workers to use.
        :param update_interval: Interval to show network accuracy.
        :param gif:             Whether to create gif of weight maps.
        :param gpu:             Whether to use gpu.
        :param plot:            Whether to plot each timestep in real time
        :param debug:           Whether to save files for debugging purpose.
        """
        self.network = network
        self.results_path = results_path
        self.batch_size = batch_size
        self.n_epochs = n_epochs
        self.n_workers = n_workers
        self.update_interval = update_interval / batch_size
        self.gif = gif
        self.gpu = gpu
        self.plot = plot
        self.debug = debug

        self.n_outpt = network.layers["Z"].n
        self.profile = {
            'method': network.method,
            'dataset_name': dataset_name,
            'n_train': None,
            'n_test': None,
        }

        self.time = network.time
        self.dt = network.dt
        self.timestep = int(self.time / self.dt)

        self.start_intensity_scale = 4
        self.start_intensity = 256.0 / 8.0 * self.start_intensity_scale

        self.train_dataset = None
        self.validation_dataset = None
        self.test_dataset = None

        # Store results for checking purpose.
        self.sl_train_spike = []
        self.sl_test_spike = []
        self.right_pred = []
        self.wrong_pred = []

        # Store network accuracy.
        self.acc_history = {'train_acc': [], 'test_acc': []}
        self.store_pred = {}

        # Initialize plot class.
        self.visualize = Plot()

        # Save initial weights for plot.
        self.exc_init_weight = network.connections[("X", "Y")].w.detach().clone()
        self.sl_init_weight = network.connections[("Y", "Z")].w.detach().clone()

        # Determines number of workers to use.
        if n_workers == -1:
            self.n_workers = gpu * 4 * torch.cuda.device_count()

        # Sets max number of images to use for gif.
        if gif:
            self.n_gif_img = 35

        # Sets up Gpu use.
        if gpu:
            torch.cuda.manual_seed_all(seed)
        else:
            torch.manual_seed(seed)

        # Directs network to GPU.
        if gpu:
            network.to("cuda")

        # Load train & test data.
        encoder = PoissonEncoder(time=self.time, dt=self.dt)
        self.train_dataset = load_data(dataset_name, encoder, True, self.start_intensity)
        self.test_dataset = load_data(dataset_name, encoder, False, self.start_intensity)

        # Set up monitors for spikes and voltages.
        spikes = {}
        for layer in set(network.layers):
            l = network.layers[layer]
            spikes[layer] = Monitor(l, state_vars=["s"], time=self.timestep)
            network.add_monitor(spikes[layer], name="%s_spikes" % layer)

        voltages = {}
        for layer in set(network.layers) - {"X"}:
            l = network.layers[layer]
            voltages[layer] = Monitor(l, state_vars=["v"], time=self.timestep)
            network.add_monitor(voltages[layer], name="%s_voltages" % layer)

        #voltages['Y'] = Monitor(network.layers['Y'], state_vars=['theta'], time=self.timestep)
        #network.add_monitor(voltages['Y'], name="Y_voltages")
        self.spikes = spikes
        self.voltages = voltages
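# A minimal usage sketch for the constructor above (hypothetical paths and
# values; `network` must expose the .method, .time and .dt attributes used):
#
#   spiking = Spiking(network=network, results_path="./results",
#                     dataset_name="MNIST", batch_size=32, gpu=True)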
Example #19
def main(seed=0,
         n_neurons=100,
         n_train=60000,
         n_test=10000,
         c_low=1,
         c_high=25,
         p_low=0.5,
         time=250,
         dt=1,
         theta_plus=0.05,
         theta_decay=1e-7,
         intensity=1,
         progress_interval=10,
         update_interval=250,
         plot=False,
         train=True,
         gpu=False):

    assert n_train % update_interval == 0 and n_test % update_interval == 0,\
        'No. examples must be divisible by update_interval'

    params = [
        seed, n_neurons, n_train, c_low, c_high, p_low, time, dt, theta_plus,
        theta_decay, intensity, progress_interval, update_interval
    ]

    model_name = '_'.join([str(x) for x in params])

    if not train:
        test_params = [
            seed, n_neurons, n_train, n_test, c_low, c_high, p_low, time, dt,
            theta_plus, theta_decay, intensity, progress_interval,
            update_interval
        ]

    np.random.seed(seed)

    if gpu:
        torch.set_default_tensor_type('torch.cuda.FloatTensor')
        torch.cuda.manual_seed_all(seed)
    else:
        torch.manual_seed(seed)

    n_examples = n_train if train else n_test
    n_sqrt = int(np.ceil(np.sqrt(n_neurons)))
    n_classes = 10

    # Build network.
    if train:
        network = Network(dt=dt)
        input_layer = Input(n=784, traces=True)
        exc_layer = DiehlAndCookNodes(n=n_neurons, traces=True)

        w = torch.rand(input_layer.n, exc_layer.n)
        input_exc_conn = Connection(input_layer,
                                    exc_layer,
                                    w=w,
                                    update_rule=PostPre,
                                    norm=78.4,
                                    nu=(1e-4, 1e-2),
                                    wmax=1.0)

        w = torch.zeros(exc_layer.n, exc_layer.n)
        for k1 in range(n_neurons):
            for k2 in range(n_neurons):
                if k1 != k2:
                    x1, y1 = k1 // np.sqrt(n_neurons), k1 % np.sqrt(n_neurons)
                    x2, y2 = k2 // np.sqrt(n_neurons), k2 % np.sqrt(n_neurons)

                    w[k1, k2] = max(
                        -c_high,
                        -c_low * np.sqrt(euclidean([x1, y1], [x2, y2])))

        recurrent_conn = Connection(exc_layer, exc_layer, w=w)

        network.add_layer(input_layer, name='X')
        network.add_layer(exc_layer, name='Y')
        network.add_connection(input_exc_conn, source='X', target='Y')
        network.add_connection(recurrent_conn, source='Y', target='Y')
    else:
        network = load_network(os.path.join(params_path, model_name + '.pt'))
        network.connections['X', 'Y'].update_rule = NoOp(
            connection=network.connections['X', 'Y'],
            nu=network.connections['X', 'Y'].nu)
        network.layers['Y'].theta_decay = 0
        network.layers['Y'].theta_plus = 0

    # Load MNIST data.
    dataset = MNIST(data_path, download=True)

    if train:
        images, labels = dataset.get_train()
    else:
        images, labels = dataset.get_test()

    images = images.view(-1, 784)
    images *= intensity

    # Record spikes during the simulation.
    spike_record = torch.zeros(update_interval, int(time / dt), n_neurons)

    # Neuron assignments and spike proportions.
    if train:
        assignments = -torch.ones_like(torch.Tensor(n_neurons))
        proportions = torch.zeros_like(torch.Tensor(n_neurons, 10))
        rates = torch.zeros_like(torch.Tensor(n_neurons, 10))
        ngram_scores = {}
    else:
        path = os.path.join(params_path,
                            '_'.join(['auxiliary', model_name]) + '.pt')
        assignments, proportions, rates, ngram_scores = torch.load(
            open(path, 'rb'))

    # Sequence of accuracy estimates.
    curves = {'all': [], 'proportion': [], 'ngram': []}
    predictions = {scheme: torch.Tensor().long() for scheme in curves.keys()}

    if train:
        best_accuracy = 0

    spikes = {}
    for layer in set(network.layers) - {'X'}:
        spikes[layer] = Monitor(network.layers[layer],
                                state_vars=['s'],
                                time=int(time / dt))
        network.add_monitor(spikes[layer], name='%s_spikes' % layer)

    # Train the network.
    if train:
        print('\nBegin training.\n')
    else:
        print('\nBegin test.\n')

    inpt_axes = None
    inpt_ims = None
    spike_ims = None
    spike_axes = None
    weights_im = None
    assigns_im = None
    perf_ax = None

    # Calculate linear increase every update interval.
    if train:
        n_increase = int(p_low * n_examples / update_interval)
        increase = (c_high - c_low) / n_increase
        increases = 0
        inhib = c_low
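        # Worked example with the defaults above: n_examples = 60000,
        # p_low = 0.5 and update_interval = 250 give
        # n_increase = 30000 / 250 = 120 steps, so inhibition rises from
        # c_low = 1 toward c_high = 25 in increments of 24 / 120 = 0.2.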

    start = t()
    for i in range(n_examples):
        if train and i % update_interval == 0 and i > 0 and increases < n_increase:
            inhib = inhib + increase

            print(f'\nIncreasing inhibition to {inhib}.\n')

            w = torch.zeros(n_neurons, n_neurons)
            for k1 in range(n_neurons):
                for k2 in range(n_neurons):
                    if k1 != k2:
                        x1, y1 = k1 // np.sqrt(n_neurons), k1 % np.sqrt(
                            n_neurons)
                        x2, y2 = k2 // np.sqrt(n_neurons), k2 % np.sqrt(
                            n_neurons)

                        w[k1, k2] = max(
                            -c_high,
                            -inhib * np.sqrt(euclidean([x1, y1], [x2, y2])))

            network.connections['Y', 'Y'].w = w

            increases += 1

        if i % progress_interval == 0:
            print(f'Progress: {i} / {n_examples} ({t() - start:.4f} seconds)')
            start = t()

        if i % update_interval == 0 and i > 0:
            if i % len(labels) == 0:
                current_labels = labels[-update_interval:]
            else:
                current_labels = labels[i % len(images) - update_interval:i %
                                        len(images)]

            # Update and print accuracy evaluations.
            curves, preds = update_curves(curves,
                                          current_labels,
                                          n_classes,
                                          spike_record=spike_record,
                                          assignments=assignments,
                                          proportions=proportions,
                                          ngram_scores=ngram_scores,
                                          n=2)
            print_results(curves)

            for scheme in preds:
                predictions[scheme] = torch.cat(
                    [predictions[scheme], preds[scheme]], -1)

            # Save accuracy curves to disk.
            to_write = ['train'] + params if train else ['test'] + params
            f = '_'.join([str(x) for x in to_write]) + '.pt'
            torch.save((curves, update_interval, n_examples),
                       open(os.path.join(curves_path, f), 'wb'))

            if train:
                if any([x[-1] > best_accuracy for x in curves.values()]):
                    print(
                        'New best accuracy! Saving network parameters to disk.'
                    )

                    # Save network to disk.
                    network.save(os.path.join(params_path, model_name + '.pt'))
                    path = os.path.join(
                        params_path,
                        '_'.join(['auxiliary', model_name]) + '.pt')
                    torch.save((assignments, proportions, rates, ngram_scores),
                               open(path, 'wb'))

                    best_accuracy = max([x[-1] for x in curves.values()])

                # Assign labels to excitatory layer neurons.
                assignments, proportions, rates = assign_labels(
                    spike_record, labels[i - update_interval:i], 10, rates)

                # Compute ngram scores.
                ngram_scores = update_ngram_scores(
                    spike_record, labels[i - update_interval:i], 10, 2,
                    ngram_scores)

            print()

        # Get next input sample.
        image = images[i]
        sample = poisson(datum=image, time=int(time / dt))
        inpts = {'X': sample}

        # Run the network on the input.
        network.run(inpts=inpts, time=time)

        retries = 0
        while spikes['Y'].get('s').sum() < 5 and retries < 3:
            retries += 1
            image *= 2
            sample = poisson(datum=image, time=int(time / dt))
            inpts = {'X': sample}
            network.run(inpts=inpts, time=time)

        # Add to spikes recording.
        spike_record[i % update_interval] = spikes['Y'].get('s').t()

        # Optionally plot various simulation information.
        if plot:
            inpt = inpts['X'].view(time, 784).sum(0).view(28, 28)
            _spikes = {layer: spikes[layer].get('s') for layer in spikes}
            input_exc_weights = network.connections['X', 'Y'].w
            square_weights = get_square_weights(
                input_exc_weights.view(784, n_neurons), n_sqrt, 28)
            square_assignments = get_square_assignments(assignments, n_sqrt)

            # inpt_axes, inpt_ims = plot_input(images[i].view(28, 28), inpt, label=labels[i], axes=inpt_axes, ims=inpt_ims)
            spike_ims, spike_axes = plot_spikes(_spikes,
                                                ims=spike_ims,
                                                axes=spike_axes)
            weights_im = plot_weights(square_weights, im=weights_im)
            # assigns_im = plot_assignments(square_assignments, im=assigns_im)
            # perf_ax = plot_performance(curves, ax=perf_ax)

            plt.pause(1e-8)

        network.reset_()  # Reset state variables.

    print(f'Progress: {n_examples} / {n_examples} ({t() - start:.4f} seconds)')

    i += 1

    if i % len(labels) == 0:
        current_labels = labels[-update_interval:]
    else:
        current_labels = labels[i % len(images) - update_interval:i %
                                len(images)]

    # Update and print accuracy evaluations.
    curves, preds = update_curves(curves,
                                  current_labels,
                                  n_classes,
                                  spike_record=spike_record,
                                  assignments=assignments,
                                  proportions=proportions,
                                  ngram_scores=ngram_scores,
                                  n=2)
    print_results(curves)

    for scheme in preds:
        predictions[scheme] = torch.cat([predictions[scheme], preds[scheme]],
                                        -1)

    if train:
        if any([x[-1] > best_accuracy for x in curves.values()]):
            print('New best accuracy! Saving network parameters to disk.')

            # Save network to disk.
            network.save(os.path.join(params_path, model_name + '.pt'))
            path = os.path.join(params_path,
                                '_'.join(['auxiliary', model_name]) + '.pt')
            torch.save((assignments, proportions, rates, ngram_scores),
                       open(path, 'wb'))

    if train:
        print('\nTraining complete.\n')
    else:
        print('\nTest complete.\n')

    print('Average accuracies:\n')
    for scheme in curves.keys():
        print('\t%s: %.2f' % (scheme, float(np.mean(curves[scheme]))))

    # Save accuracy curves to disk.
    to_write = ['train'] + params if train else ['test'] + params
    to_write = [str(x) for x in to_write]
    f = '_'.join(to_write) + '.pt'
    torch.save((curves, update_interval, n_examples),
               open(os.path.join(curves_path, f), 'wb'))

    results = [
        np.mean(curves['all']),
        np.mean(curves['proportion']),
        np.mean(curves['ngram']),
        np.max(curves['all']),
        np.max(curves['proportion']),
        np.max(curves['ngram'])
    ]

    to_write = params + results if train else test_params + results
    to_write = [str(x) for x in to_write]
    name = 'train.csv' if train else 'test.csv'

    if not os.path.isfile(os.path.join(results_path, name)):
        with open(os.path.join(results_path, name), 'w') as f:
            if train:
                f.write(
                    'random_seed,n_neurons,n_train,c_low,c_high,p_low,time,timestep,theta_plus,theta_decay,'
                    'intensity,progress_interval,update_interval,mean_all_activity,mean_proportion_weighting,'
                    'mean_ngram,max_all_activity,max_proportion_weighting,max_ngram\n'
                )
            else:
                f.write(
                    'random_seed,n_neurons,n_train,n_test,c_low,c_high,p_low,time,timestep,theta_plus,theta_decay,'
                    'intensity,progress_interval,update_interval,mean_all_activity,mean_proportion_weighting,'
                    'mean_ngram,max_all_activity,max_proportion_weighting,max_ngram\n'
                )

    with open(os.path.join(results_path, name), 'a') as f:
        f.write(','.join(to_write) + '\n')

    if labels.numel() > n_examples:
        labels = labels[:n_examples]
    else:
        while labels.numel() < n_examples:
            if 2 * labels.numel() > n_examples:
                labels = torch.cat(
                    [labels, labels[:n_examples - labels.numel()]])
            else:
                labels = torch.cat([labels, labels])

    # Compute confusion matrices and save them to disk.
    confusions = {}
    for scheme in predictions:
        confusions[scheme] = confusion_matrix(labels, predictions[scheme])

    to_write = ['train'] + params if train else ['test'] + test_params
    f = '_'.join([str(x) for x in to_write]) + '.pt'
    torch.save(confusions, os.path.join(confusion_path, f))

    print()
Example #20
                       source="PK_Anti",
                       target="DCN_Anti")

GR_monitor = Monitor(obj=GR_Joint_layer, state_vars=("s",), time=time)
PK_monitor = Monitor(obj=PK, state_vars=("s", "v"), time=time)
PK_Anti_monitor = Monitor(obj=PK_Anti, state_vars=("s", "v"), time=time)

IO_monitor = Monitor(obj=IO, state_vars=("s",), time=time)
DCN_monitor = Monitor(
    obj=DCN,
    state_vars=("s", "v"),
    time=time,
)

DCN_Anti_monitor = Monitor(obj=DCN_Anti, state_vars=("s", "v"), time=time)
network.add_monitor(monitor=GR_monitor, name="GR")
network.add_monitor(monitor=PK_monitor, name="PK")
network.add_monitor(monitor=PK_Anti_monitor, name="PK_Anti")
network.add_monitor(monitor=IO_monitor, name="IO")
network.add_monitor(monitor=DCN_monitor, name="DCN")
network.add_monitor(monitor=DCN_Anti_monitor, name="DCN_Anti")
data_Joint = torch.bernoulli(0.01 * torch.rand(time, GR_Joint_layer.n)).byte()

# Randomly generate error (no time dimension).
error = 0.1 * torch.rand(IO.n)
Curr_list = Error2IO_Current(error)
print(Curr_list)
IO_Input = poisson(Curr_list[0], time=time)
print(IO_Input)
IO_Anti_Input = poisson(Curr_list[1], time=time)
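# poisson() yields a [time, n] binary spike tensor, so both encoded error
# currents can be fed directly as per-timestep network input below.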
inputs = {
    # Layer names below are assumptions; the source snippet truncates here.
    "GR_Joint_layer": data_Joint,
    "IO": IO_Input,
    "IO_Anti": IO_Anti_Input,
}
Example #21
# learning rule
KC_EN.update_rule = STDP(connection=KC_EN,
                         nu=(-A, -A),
                         tc_eligibility_trace=40.0,
                         tc_plus=15,
                         tc_minus=15,
                         tc_reward=20.0,
                         min_weight=min_weight)

# monitors
input_monitor = Monitor(obj=input_layer, state_vars=("s",))
PN_monitor = Monitor(obj=PN, state_vars=("s", "v"))
KC_monitor = Monitor(obj=KC, state_vars=("s", "v"))
EN_monitor = Monitor(obj=EN, state_vars=("s", "v"))
landmark_guidance.add_monitor(monitor=input_monitor, name="Input monitor")
landmark_guidance.add_monitor(monitor=PN_monitor, name="PN monitor")
landmark_guidance.add_monitor(monitor=KC_monitor, name="KC monitor")
landmark_guidance.add_monitor(monitor=EN_monitor, name="EN monitor")
print("Run")

for phase in [1, 2, 3]:

    print("Running phase", phase)

    landmark_guidance.reset_state_variables()

    if phase == 2:
        landmark_guidance.learning = True
    else:
        landmark_guidance.learning = False
Example #22
labels = torch.empty(1, dtype=torch.int)

# create a spike monitor for each layer in the network
# this allows us to read the spikes in order to assign labels to neurons and determine the predicted class
layer_monitors = {}
for layer in set(network.layers):

    # initialize spike monitor at the layer
    # do not record the voltage if at the input layer
    state_vars = ["s", "v"] if (layer != input_layer_name) else ["s"]
    layer_monitors[layer] = Monitor(network.layers[layer],
                                    state_vars=state_vars,
                                    time=int(time / dt))

    # connect the monitor to the network
    network.add_monitor(layer_monitors[layer], name="%s_spikes" % layer)
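# Sketch of the prediction step these monitors enable (assumes the output
# layer's name is held in a variable `output_layer_name`, like the input):
#
#   out_spikes = layer_monitors[output_layer_name].get("s")  # [time, n_out]
#   predicted_class = int(out_spikes.sum(0).argmax())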

weight_history = None
num_correct = 0.0

### DEBUG ###
### can be used to force the network to learn the inputs in a specific way
supervised = True
### used to determine if status messages are printed out at each sample
log_messages = False
### used to show weight changes
graph_weights = False
###############

# show current weights
#print("Current Weights:")
Example #23
# Voltage recordings for excitatory and readout layers.
voltages = {}
for layer in set(layers.keys()) - {'X'}:
    voltages[layer] = Monitor(layers[layer], ['v'], time=plot_interval)

# Add all layers and connections to the network.
for layer in layers:
    network.add_layer(layers[layer], name=layer)

network.add_connection(input_exc_conn, source='X', target='E')
network.add_connection(exc_readout_conn, source='E', target='R')

# Add all monitors to the network.
for layer in layers:
    network.add_monitor(spikes[layer], name='%s_spikes' % layer)
    
    if layer in voltages:
        network.add_monitor(voltages[layer], name='%s_voltages' % layer)

# Load Asteroids environment.
environment = GymEnvironment('Asteroids-v0')
environment.reset()

pipeline = Pipeline(network, environment, encoding=bernoulli, time=1, history=5, delta=10, plot_interval=plot_interval,
                    print_interval=print_interval, render_interval=render_interval, action_function=select_multinomial,
                    output='R')

total = 0
rewards = []
avg_rewards = []
Example #24
def main(seed=0,
         n_neurons=100,
         n_train=60000,
         n_test=10000,
         inhib=250,
         lr=1e-2,
         lr_decay=1,
         time=100,
         dt=1,
         theta_plus=0.05,
         theta_decay=1e-7,
         intensity=1,
         progress_interval=10,
         update_interval=100,
         plot=False,
         train=True,
         gpu=False,
         no_inhib=False,
         no_theta=False):

    assert n_train % update_interval == 0 and n_test % update_interval == 0, \
                            'No. examples must be divisible by update_interval'

    params = [
        seed, n_neurons, n_train, inhib, lr, lr_decay, time, dt, theta_plus,
        theta_decay, intensity, progress_interval, update_interval
    ]

    test_params = [
        seed, n_neurons, n_train, n_test, inhib, lr, lr_decay, time, dt,
        theta_plus, theta_decay, intensity, progress_interval, update_interval
    ]

    model_name = '_'.join([str(x) for x in params])

    np.random.seed(seed)

    if gpu:
        torch.set_default_tensor_type('torch.cuda.FloatTensor')
        torch.cuda.manual_seed_all(seed)
    else:
        torch.manual_seed(seed)

    n_examples = n_train if train else n_test
    n_sqrt = int(np.ceil(np.sqrt(n_neurons)))
    n_classes = 10

    # Build network.
    if train:
        network = Network()

        input_layer = Input(n=784, traces=True, trace_tc=5e-2)
        network.add_layer(input_layer, name='X')

        output_layer = DiehlAndCookNodes(n=n_neurons,
                                         traces=True,
                                         rest=0,
                                         reset=0,
                                         thresh=5,
                                         refrac=0,
                                         decay=1e-2,
                                         trace_tc=5e-2,
                                         theta_plus=theta_plus,
                                         theta_decay=theta_decay)
        network.add_layer(output_layer, name='Y')

        w = 0.3 * torch.rand(784, n_neurons)
        input_connection = Connection(source=network.layers['X'],
                                      target=network.layers['Y'],
                                      w=w,
                                      update_rule=WeightDependentPostPre,
                                      nu=[0, lr],
                                      wmin=0,
                                      wmax=1,
                                      norm=78.4)
        network.add_connection(input_connection, source='X', target='Y')

        w = -inhib * (torch.ones(n_neurons, n_neurons) -
                      torch.diag(torch.ones(n_neurons)))
        recurrent_connection = Connection(source=network.layers['Y'],
                                          target=network.layers['Y'],
                                          w=w,
                                          wmin=-inhib,
                                          wmax=0,
                                          update_rule=WeightDependentPostPre,
                                          nu=[0, -100 * lr],
                                          norm=inhib / 2 * n_neurons)
        network.add_connection(recurrent_connection, source='Y', target='Y')

        mask = network.connections['Y', 'Y'].w == 0
        masks = {('Y', 'Y'): mask}

    else:
        network = load_network(os.path.join(params_path, model_name + '.pt'))
        network.connections['X', 'Y'].update_rule = NoOp(
            connection=network.connections['X', 'Y'],
            nu=network.connections['X', 'Y'].nu)
        network.connections['Y', 'Y'].update_rule = NoOp(
            connection=network.connections['Y', 'Y'],
            nu=network.connections['Y', 'Y'].nu)
        network.layers['Y'].theta_decay = 0
        network.layers['Y'].theta_plus = 0

        if no_inhib:
            del network.connections['Y', 'Y']

        if no_theta:
            network.layers['Y'].theta = 0

    # Load MNIST data.
    dataset = MNIST(path=data_path, download=True)

    if train:
        images, labels = dataset.get_train()
    else:
        images, labels = dataset.get_test()

    images = images.view(-1, 784)
    images *= intensity
    labels = labels.long()

    monitors = {}
    for layer in set(network.layers):
        if 'v' in network.layers[layer].__dict__:
            monitors[layer] = Monitor(network.layers[layer],
                                      state_vars=['s', 'v'],
                                      time=time)
        else:
            monitors[layer] = Monitor(network.layers[layer],
                                      state_vars=['s'],
                                      time=time)

        network.add_monitor(monitors[layer], name=layer)

    # Train the network.
    if train:
        print('\nBegin training.\n')
    else:
        print('\nBegin test.\n')

    inpt_axes = None
    inpt_ims = None
    spike_ims = None
    spike_axes = None
    voltage_ims = None
    voltage_axes = None
    weights_im = None
    weights2_im = None

    unclamps = {}
    per_class = int(n_neurons / n_classes)
    for label in range(n_classes):
        unclamp = torch.ones(n_neurons).byte()
        unclamp[label * per_class:(label + 1) * per_class] = 0
        unclamps[label] = unclamp
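    # E.g. with n_neurons = 100 and n_classes = 10, the mask for label 3 is
    # zero on neurons 30..39 and one everywhere else; passed below as
    # unclamp={'Y': unclamps[label]}, it holds every other neuron silent so
    # only the label's block of neurons may spike during training.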

    predictions = torch.zeros(n_examples)
    corrects = torch.zeros(n_examples)

    start = t()
    for i in range(n_examples):
        if i % progress_interval == 0:
            print(f'Progress: {i} / {n_examples} ({t() - start:.4f} seconds)')
            start = t()

        if i % update_interval == 0 and i > 0 and train:
            network.save(os.path.join(params_path, model_name + '.pt'))
            network.connections['X', 'Y'].update_rule.nu[1] *= lr_decay

        # Get next input sample.
        image = images[i % len(images)]
        label = labels[i % len(images)].item()
        sample = poisson(datum=image, time=time, dt=dt)
        inpts = {'X': sample}

        # Run the network on the input.
        if train:
            network.run(inpts=inpts,
                        time=time,
                        unclamp={'Y': unclamps[label]},
                        masks=masks)
        else:
            network.run(inpts=inpts, time=time)

        if not train:
            retries = 0
            while monitors['Y'].get('s').sum() == 0 and retries < 3:
                retries += 1
                image *= 1.5
                sample = poisson(datum=image, time=time, dt=dt)
                inpts = {'X': sample}
                network.run(inpts=inpts, time=time)

        output = monitors['Y'].get('s')
        summed_neurons = output.sum(dim=1).view(n_classes, per_class)
        summed_classes = summed_neurons.sum(dim=1)
        prediction = torch.argmax(summed_classes).item()
        correct = prediction == label

        predictions[i] = prediction
        corrects[i] = int(correct)

        # Optionally plot various simulation information.
        if plot:
            # _input = image.view(28, 28)
            # reconstruction = inpts['X'].view(time, 784).sum(0).view(28, 28)
            # v = {'Y': monitors['Y'].get('v')}

            s = {layer: monitors[layer].get('s') for layer in monitors}
            input_exc_weights = network.connections['X', 'Y'].w
            square_weights = get_square_weights(
                input_exc_weights.view(784, n_neurons), n_sqrt, 28)
            recurrent_weights = network.connections['Y', 'Y'].w

            # inpt_axes, inpt_ims = plot_input(_input, reconstruction, label=labels[i], axes=inpt_axes, ims=inpt_ims)
            # voltage_ims, voltage_axes = plot_voltages(v, ims=voltage_ims, axes=voltage_axes)

            spike_ims, spike_axes = plot_spikes(s,
                                                ims=spike_ims,
                                                axes=spike_axes)
            weights_im = plot_weights(square_weights, im=weights_im)
            weights2_im = plot_weights(recurrent_weights,
                                       im=weights2_im,
                                       wmin=-inhib,
                                       wmax=0)

            plt.pause(1e-8)

        network.reset_()  # Reset state variables.

    print(f'Progress: {n_examples} / {n_examples} ({t() - start:.4f} seconds)')

    if train:
        network.save(os.path.join(params_path, model_name + '.pt'))

    if train:
        print('\nTraining complete.\n')
    else:
        print('\nTest complete.\n')

    accuracy = torch.mean(corrects).item() * 100

    print(f'\nAccuracy: {accuracy}\n')

    to_write = params + [accuracy] if train else test_params + [accuracy]
    to_write = [str(x) for x in to_write]
    name = 'train.csv' if train else 'test.csv'

    if not os.path.isfile(os.path.join(results_path, name)):
        with open(os.path.join(results_path, name), 'w') as f:
            if train:
                f.write(
                    'random_seed,n_neurons,n_train,inhib,lr,lr_decay,time,timestep,theta_plus,'
                    'theta_decay,intensity,progress_interval,update_interval,accuracy\n'
                )
            else:
                f.write(
                    'random_seed,n_neurons,n_train,n_test,inhib,lr,lr_decay,time,timestep,'
                    'theta_plus,theta_decay,intensity,progress_interval,update_interval,accuracy\n'
                )

    with open(os.path.join(results_path, name), 'a') as f:
        f.write(','.join(to_write) + '\n')

    if labels.numel() > n_examples:
        labels = labels[:n_examples]
    else:
        while labels.numel() < n_examples:
            if 2 * labels.numel() > n_examples:
                labels = torch.cat(
                    [labels, labels[:n_examples - labels.numel()]])
            else:
                labels = torch.cat([labels, labels])

    # Compute confusion matrices and save them to disk.
    confusion = confusion_matrix(labels, predictions)

    to_write = ['train'] + params if train else ['test'] + test_params
    f = '_'.join([str(x) for x in to_write]) + '.pt'
    torch.save(confusion, os.path.join(confusion_path, f))
Example #25
network = Network(dt=1.0)  # assumed: the snippet is truncated before this line
inpt = Input(784, shape=(28, 28))
network.add_layer(inpt, name="I")
output = LIFNodes(625, thresh=-52 + torch.randn(625))
network.add_layer(output, name="O")
C1 = Connection(source=inpt, target=output, w=torch.randn(inpt.n, output.n))
C2 = Connection(source=output,
                target=output,
                w=0.5 * torch.randn(output.n, output.n))

network.add_connection(C1, source="I", target="O")
network.add_connection(C2, source="O", target="O")

spikes = {}
for l in network.layers:
    spikes[l] = Monitor(network.layers[l], ["s"], time=250)
    network.add_monitor(spikes[l], name="%s_spikes" % l)

voltages = {"O": Monitor(network.layers["O"], ["v"], time=250)}
network.add_monitor(voltages["O"], name="O_voltages")

# Get MNIST training images and labels.
images, labels = MNIST(path="../../data/MNIST", download=True).get_train()
images *= 0.25

# Create lazily iterating Poisson-distributed data loader.
loader = zip(poisson_loader(images, time=250), iter(labels))

inpt_axes = None
inpt_ims = None
spike_axes = None
spike_ims = None
Example #26
network = Network(dt=1.0)

inpt = Input(n=784, traces=True)
exc = AdaptiveLIFNodes(n=n, traces=True)

ew = 0.3 * torch.rand(784, n)
econn = Connection(inpt, exc, w=ew, update_rule=MSTDPET, nu=0.1, wmin=0, wmax=1, norm=78.4)

network.add_layer(inpt, 'X')
network.add_layer(exc, 'Y')
network.add_connection(econn, source='X', target='Y')
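# MSTDPET is reward-modulated: a scalar reward supplied at run time gates the
# eligibility trace. A sketch (sample and reward values are assumptions):
#
#   network.run(inpts={'X': sample}, time=250, reward=1.0)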

spike_monitors = {layer: Monitor(network.layers[layer], ['s']) for layer in network.layers}
for layer in spike_monitors:
    network.add_monitor(spike_monitors[layer], '%s' % layer)

# Load MNIST data.
images, labels = MNIST(path=os.path.join('..', '..', 'data', 'MNIST'),
                       download=True).get_train()
images *= intensity
images /= 4

# Lazily encode data as Poisson spike trains.
ims = []
for j in range(0, iters, change_interval):
    ims.extend([images[j % 60000]] * change_interval)

lbls = []
for j in range(0, iters, change_interval):
    lbls.extend([int(labels[j % 60000])] * change_interval)
Example #27
network.add_connection(FF2a, source="TNN_1a", target="rTNN_1")
network.add_connection(FF2b, source="TNN_1b", target="rTNN_1")
# (Recurrences)
network.add_connection(rTNN_to_buf1, source="rTNN_1", target="BUF_1")
# network.add_connection(buf1_to_buf2, source="BUF_1", target="BUF_2")
network.add_connection(buf1_to_rTNN, source="BUF_1", target="rTNN_1")
# network.add_connection(buf2_to_rTNN, source="BUF_2", target="rTNN_1")


# End of network creation

# Monitors:
spikes = {}
for l in network.layers:
	spikes[l] = Monitor(network.layers[l], ["s"], time=num_timesteps)
	network.add_monitor(spikes[l], name="%s_spikes" % l)


# Data and initial encoding:
dataset = MNIST(
	RampNoLeakTNNEncoder(time=num_timesteps, dt=1),
	None,
	root=os.path.join("..", "..", "data", "MNIST"),
	download=True,
	transform=transforms.Compose(
		[transforms.ToTensor(), transforms.Lambda(lambda x: x * intensity)]
	),
)


# Create a dataloader to iterate and batch data
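# Sketch of the truncated step (standard torch.utils.data.DataLoader usage;
# batch size and worker count here are assumptions):
#
#   dataloader = torch.utils.data.DataLoader(
#       dataset, batch_size=1, shuffle=True, num_workers=0
#   )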
Example #28
)

mstdp_1 = Connection(
    source=inpt_mstdp, target=hiddn_mstdp, update_rule=MSTDP, wmin=w_min_1, wmax=w_max_1, nu=gamma_mstdp
)
mstdp_2 = Connection(
    source=hiddn_mstdp, target=outpt_mstdp, update_rule=MSTDP, wmin=w_min_2, wmax=w_max_2, nu=gamma_mstdp
)

network_mstdp = Network(dt=dt)
network_mstdp.add_layer(name='Input', layer=inpt_mstdp)
network_mstdp.add_layer(name='Hidden', layer=hiddn_mstdp)
network_mstdp.add_layer(name='Output', layer=outpt_mstdp)
network_mstdp.add_connection(source='Input', target='Hidden', connection=mstdp_1)
network_mstdp.add_connection(source='Hidden', target='Output', connection=mstdp_2)
network_mstdp.add_monitor(name='In', monitor=Monitor(obj=inpt_mstdp, state_vars=['s'], time=100))
network_mstdp.add_monitor(name='Hid', monitor=Monitor(obj=hiddn_mstdp, state_vars=['s', 'v'], time=100))

# MSTDPET
torch.manual_seed(seed)
np.random.seed(seed)

inpt_mstdpet = Input(n_in)
hiddn_mstdpet = LIFNodes(
    n_hidden, thresh=thresh_lif, rest=rest_lif, reset=reset_lif, tc_decay=tau_lif, refrac=refrac_lif
)
outpt_mstdpet = LIFNodes(
    n_out, thresh=thresh_lif, rest=rest_lif, reset=reset_lif, tc_decay=tau_lif, refrac=refrac_lif
)

mstdpet_1 = Connection(
    source=inpt_mstdpet, target=hiddn_mstdpet, update_rule=MSTDPET,
    wmin=w_min_1, wmax=w_max_1, nu=gamma_mstdp,  # assumed completion, mirroring mstdp_1; the source snippet truncates here
)
Example #29
            for i in range(conv_size):
                for j in range(conv_size):
                    w[fltr1, i, j, fltr2, i, j] = -100.0

w = w.view(n_filters * conv_size * conv_size,
           n_filters * conv_size * conv_size)
recurrent_conn = Connection(conv_layer, conv_layer, w=w)
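# The weights built above implement cross-filter competition: at each spatial
# position (i, j), distinct filters inhibit one another with weight -100 (the
# enclosing filter loops are truncated above), and view() flattens the weight
# tensor into the 2-D matrix the recurrent Connection expects.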

network.add_layer(input_layer, name="X")
network.add_layer(conv_layer, name="Y")
network.add_connection(conv_conn, source="X", target="Y")
network.add_connection(recurrent_conn, source="Y", target="Y")

# Voltage recording for excitatory and inhibitory layers.
voltage_monitor = Monitor(network.layers["Y"], ["v"], time=time)
network.add_monitor(voltage_monitor, name="output_voltage")

if gpu:
    network.to("cuda")

# Load MNIST data.
train_dataset = MNIST(
    PoissonEncoder(time=time, dt=dt),
    None,
    "../../data/MNIST",
    download=True,
    train=True,
    transform=transforms.Compose(
        [transforms.ToTensor(),
         transforms.Lambda(lambda x: x * intensity)]),
)
Example #30
network = Network()

X = Input(n=100)
Y = LIFNodes(n=100)

C = MeanFieldConnection(source=X, target=Y, norm=100.0)

M_X = Monitor(X, state_vars=['s'])
M_Y = Monitor(Y, state_vars=['s', 'v'])
M_C = Monitor(C, state_vars=['w'])
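# Note: a MeanFieldConnection transmits the mean activity of the source
# population through its weight, so M_C records the weight value at each of
# the 1000 timesteps of the run below.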

network.add_layer(X, name='X')
network.add_layer(Y, name='Y')
network.add_connection(C, source='X', target='Y')
network.add_monitor(M_X, 'M_X')
network.add_monitor(M_Y, 'M_Y')
network.add_monitor(M_C, 'M_C')

spikes = torch.bernoulli(torch.rand(1000, 100))
inpts = {'X': spikes}

network.run(inpts=inpts, time=1000)

spikes = {'X': M_X.get('s'), 'Y': M_Y.get('s')}
weights = M_C.get('w')

plt.ioff()
plot_spikes(spikes)
plot_weights(weights)
plt.show()