def lif_feed_forward_benchmark(parameters: BenchmarkParameters):
    T = parameters.dt * parameters.sequence_length
    network = Network(batch_size=parameters.batch_size, dt=parameters.dt)
    network.add_layer(Input(n=parameters.features), name="Input")
    network.add_layer(LIFNodes(n=parameters.features), name="Neurons")
    network.add_connection(
        Connection(source=network.layers["Input"], target=network.layers["Neurons"]),
        source="Input",
        target="Neurons",
    )

    input_spikes = (
        PoissonEncoder(time=T, dt=parameters.dt)(
            0.3 * torch.ones(parameters.batch_size, parameters.features)
        )
        .to(parameters.device)
        .float()
    )
    input_spikes.requires_grad = False
    input_data = {"Input": input_spikes}

    network.to(parameters.device)
    for param in network.parameters():
        param.requires_grad = False

    start = time.time()
    network.run(inputs=input_data, time=T)
    end = time.time()
    duration = end - start
    return duration
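# Usage sketch for the benchmark above. ``BenchmarkParameters`` is not shown in
# this excerpt; the container below is an assumption covering only the fields
# the function actually reads, and in the real module it would have to be
# defined before the function's type annotation is evaluated.
from dataclasses import dataclass

@dataclass
class BenchmarkParameters:
    batch_size: int = 32
    features: int = 128
    sequence_length: int = 100
    dt: float = 1.0
    device: str = "cpu"

if __name__ == "__main__":
    params = BenchmarkParameters()
    print(f"{lif_feed_forward_benchmark(params):.3f} s for {params.sequence_length} steps")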
def test_add_objects(self):
    network = Network(dt=1.0, learning=False)

    inpt = Input(100)
    network.add_layer(inpt, name="X")
    lif = LIFNodes(50)
    network.add_layer(lif, name="Y")

    assert inpt == network.layers["X"]
    assert lif == network.layers["Y"]

    conn = Connection(inpt, lif)
    network.add_connection(conn, source="X", target="Y")

    assert conn == network.connections[("X", "Y")]

    monitor = Monitor(lif, state_vars=["s", "v"])
    network.add_monitor(monitor, "Y")

    assert monitor == network.monitors["Y"]

    network.save("net.pt")
    _network = load("net.pt", learning=True)
    assert _network.learning
    assert "X" in _network.layers
    assert "Y" in _network.layers
    assert ("X", "Y") in _network.connections
    assert "Y" in _network.monitors
    del _network

    os.remove("net.pt")
def test_post_pre(self):
    # Connection test
    network = Network(dt=1.0)
    network.add_layer(Input(n=100, traces=True), name='input')
    network.add_layer(LIFNodes(n=100, traces=True), name='output')
    network.add_connection(
        Connection(source=network.layers['input'],
                   target=network.layers['output'],
                   nu=1e-2,
                   update_rule=PostPre),
        source='input',
        target='output')
    network.run(inpts={'input': torch.bernoulli(torch.rand(250, 100)).byte()},
                time=250)

    # Conv2dConnection test
    network = Network(dt=1.0)
    network.add_layer(Input(shape=[1, 1, 10, 10], traces=True), name='input')
    network.add_layer(LIFNodes(shape=[1, 32, 8, 8], traces=True), name='output')
    network.add_connection(
        Conv2dConnection(source=network.layers['input'],
                         target=network.layers['output'],
                         kernel_size=3,
                         stride=1,
                         nu=1e-2,
                         update_rule=PostPre),
        source='input',
        target='output')
    network.run(
        inpts={'input': torch.bernoulli(torch.rand(250, 1, 1, 10, 10)).byte()},
        time=250)
def create_bindsnet(input_width, input_height, action_num=3):
    from bindsnet.network import Network
    from bindsnet.learning import MSTDP
    from bindsnet.network.nodes import Input, LIFNodes
    from bindsnet.network.topology import Connection

    network = Network(dt=1.0)

    # Layers of neurons.
    inpt = Input(n=input_height * input_width,
                 shape=[input_height, input_width],
                 traces=True)
    middle = LIFNodes(n=100, traces=True)
    out = LIFNodes(n=action_num, refrac=0, traces=True)

    # Connections between layers.
    inpt_middle = Connection(source=inpt, target=middle, wmin=0, wmax=1e-1)
    middle_out = Connection(source=middle,
                            target=out,
                            wmin=0,
                            wmax=1,
                            update_rule=MSTDP,
                            nu=1e-1,
                            norm=0.5 * middle.n)

    # Add all layers and connections to the network.
    network.add_layer(inpt, name='Input Layer')
    network.add_layer(middle, name='Hidden Layer')
    network.add_layer(out, name='Output Layer')
    network.add_connection(inpt_middle,
                           source='Input Layer',
                           target='Hidden Layer')
    network.add_connection(middle_out,
                           source='Hidden Layer',
                           target='Output Layer')

    return network
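# Example usage (a sketch): drive the network built above with random Bernoulli
# spikes. The [time, n] input shape and the ``inpts``/``reward`` keywords follow
# the older BindsNET API used elsewhere in this file; newer releases spell the
# first keyword ``inputs``. MSTDP expects a ``reward`` value at run time.
if __name__ == "__main__":
    import torch

    net = create_bindsnet(input_width=8, input_height=8, action_num=3)
    spike_train = torch.bernoulli(0.1 * torch.ones(50, 8 * 8)).byte()
    net.run(inpts={'Input Layer': spike_train}, time=50, reward=0.0)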
def test_weight_dependent_post_pre(self):
    # Connection test
    network = Network(dt=1.0)
    network.add_layer(Input(n=100, traces=True), name="input")
    network.add_layer(LIFNodes(n=100, traces=True), name="output")
    network.add_connection(
        Connection(
            source=network.layers["input"],
            target=network.layers["output"],
            nu=1e-2,
            update_rule=WeightDependentPostPre,
            wmin=-1,
            wmax=1,
        ),
        source="input",
        target="output",
    )
    network.run(
        inputs={"input": torch.bernoulli(torch.rand(250, 100)).byte()},
        time=250,
    )

    # Conv2dConnection test
    network = Network(dt=1.0)
    network.add_layer(Input(shape=[1, 10, 10], traces=True), name="input")
    network.add_layer(LIFNodes(shape=[32, 8, 8], traces=True), name="output")
    network.add_connection(
        Conv2dConnection(
            source=network.layers["input"],
            target=network.layers["output"],
            kernel_size=3,
            stride=1,
            nu=1e-2,
            update_rule=WeightDependentPostPre,
            wmin=-1,
            wmax=1,
        ),
        source="input",
        target="output",
    )
    network.run(
        inputs={"input": torch.bernoulli(torch.rand(250, 1, 1, 10, 10)).byte()},
        time=250,
    )
def test_rmax(self):
    # Connection test
    network = Network(dt=1.0)
    network.add_layer(Input(n=100, traces=True, traces_additive=True),
                      name='input')
    network.add_layer(SRM0Nodes(n=100), name='output')
    network.add_connection(
        Connection(source=network.layers['input'],
                   target=network.layers['output'],
                   nu=1e-2,
                   update_rule=Rmax),
        source='input',
        target='output')
    network.run(inpts={'input': torch.bernoulli(torch.rand(250, 100)).byte()},
                time=250,
                reward=1.)
def test_add_objects(self):
    network = Network(dt=1.0)

    inpt = Input(100)
    network.add_layer(inpt, name='X')
    lif = LIFNodes(50)
    network.add_layer(lif, name='Y')

    assert inpt == network.layers['X']
    assert lif == network.layers['Y']

    conn = Connection(inpt, lif)
    network.add_connection(conn, source='X', target='Y')

    assert conn == network.connections[('X', 'Y')]

    monitor = Monitor(lif, state_vars=['s', 'v'])
    network.add_monitor(monitor, 'Y')

    assert monitor == network.monitors['Y']
def test_mstdpet(self):
    # Connection test
    network = Network(dt=1.0)
    network.add_layer(Input(n=100), name="input")
    network.add_layer(LIFNodes(n=100), name="output")
    network.add_connection(
        Connection(
            source=network.layers["input"],
            target=network.layers["output"],
            nu=1e-2,
            update_rule=MSTDPET,
        ),
        source="input",
        target="output",
    )
    network.run(
        inputs={"input": torch.bernoulli(torch.rand(250, 100)).byte()},
        time=250,
        reward=1.0,
    )

    # Conv2dConnection test
    network = Network(dt=1.0)
    network.add_layer(Input(shape=[1, 10, 10]), name="input")
    network.add_layer(LIFNodes(shape=[32, 8, 8]), name="output")
    network.add_connection(
        Conv2dConnection(
            source=network.layers["input"],
            target=network.layers["output"],
            kernel_size=3,
            stride=1,
            nu=1e-2,
            update_rule=MSTDPET,
        ),
        source="input",
        target="output",
    )
    network.run(
        inputs={"input": torch.bernoulli(torch.rand(250, 1, 1, 10, 10)).byte()},
        time=250,
        reward=1.0,
    )
def test_hebbian(self):
    # Connection test
    network = Network(dt=1.0)
    network.add_layer(Input(n=100, traces=True), name="input")
    network.add_layer(LIFNodes(n=100, traces=True), name="output")
    network.add_connection(
        Connection(
            source=network.layers["input"],
            target=network.layers["output"],
            nu=1e-2,
            update_rule=Hebbian,
        ),
        source="input",
        target="output",
    )
    network.run(
        inputs={"input": torch.bernoulli(torch.rand(250, 100)).byte()},
        time=250,
    )

    # Conv2dConnection test
    network = Network(dt=1.0)
    network.add_layer(Input(shape=[1, 10, 10], traces=True), name="input")
    network.add_layer(LIFNodes(shape=[32, 8, 8], traces=True), name="output")
    network.add_connection(
        Conv2dConnection(
            source=network.layers["input"],
            target=network.layers["output"],
            kernel_size=3,
            stride=1,
            nu=1e-2,
            update_rule=Hebbian,
        ),
        source="input",
        target="output",
    )
    # shape is [time, batch, channels, height, width]
    network.run(
        inputs={"input": torch.bernoulli(torch.rand(250, 1, 1, 10, 10)).byte()},
        time=250,
    )
def BindsNET_cpu(n_neurons, time):
    t0 = t()
    torch.set_default_tensor_type('torch.FloatTensor')

    t1 = t()
    network = Network()
    network.add_layer(Input(n=n_neurons), name='X')
    network.add_layer(LIFNodes(n=n_neurons), name='Y')
    network.add_connection(
        Connection(source=network.layers['X'], target=network.layers['Y']),
        source='X',
        target='Y')

    data = {'X': poisson(datum=torch.rand(n_neurons), time=time)}
    network.run(inpts=data, time=time)

    return t() - t0, t() - t1
def BindsNET_cpu(n_neurons, time):
    t0 = t()
    torch.set_default_tensor_type("torch.FloatTensor")

    t1 = t()
    network = Network()
    network.add_layer(Input(n=n_neurons), name="X")
    network.add_layer(LIFNodes(n=n_neurons), name="Y")
    network.add_connection(
        Connection(source=network.layers["X"], target=network.layers["Y"]),
        source="X",
        target="Y",
    )

    data = {"X": poisson(datum=torch.rand(n_neurons), time=time)}
    network.run(inputs=data, time=time)

    return t() - t0, t() - t1
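# Example sweep (a sketch): compare setup-inclusive and simulation-only timings
# across network sizes using the helper above.
if __name__ == "__main__":
    for n in (100, 1000, 10000):
        total, sim = BindsNET_cpu(n_neurons=n, time=1000)
        print("n=%d: %.2fs total, %.2fs after setup" % (n, total, sim))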
def __init__(self, parameters: BenchmarkParameters):
    super(BindsNetModule, self).__init__()
    network = Network(batch_size=parameters.batch_size, dt=parameters.dt)
    lif_nodes = LIFNodes(n=parameters.features)
    # Note the trailing comma: ("s") is just the string "s", not a tuple.
    monitor = Monitor(obj=lif_nodes,
                      state_vars=("s",),
                      time=parameters.sequence_length)
    network.add_layer(Input(n=parameters.features), name="Input")
    network.add_layer(lif_nodes, name="Neurons")
    network.add_connection(
        Connection(source=network.layers["Input"], target=network.layers["Neurons"]),
        source="Input",
        target="Neurons",
    )
    network.add_monitor(monitor, "Monitor")
    network.to(parameters.device)

    # Caution: this assignment shadows ``nn.Module.parameters()``.
    self.parameters = parameters
    self.network = network
    self.monitor = monitor
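# A possible run method for this module (an illustrative sketch, not taken from
# the source): feed a spike tensor into the wrapped network and return the
# spikes recorded by the monitor, resetting state between calls.
def forward(self, input_spikes: torch.Tensor) -> torch.Tensor:
    self.network.run(
        inputs={"Input": input_spikes},
        time=int(self.parameters.sequence_length * self.parameters.dt),
    )
    spikes = self.monitor.get("s")
    self.network.reset_state_variables()
    return spikes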
def main(n_input=1, n_output=10, time=1000):
    # Network building.
    network = Network(dt=1.0)

    input_layer = RealInput(n=n_input)
    output_layer = LIFNodes(n=n_output)
    connection = Connection(source=input_layer, target=output_layer)
    monitor = Monitor(obj=output_layer, state_vars=('v',), time=time)

    # Adding network components.
    network.add_layer(input_layer, name='X')
    network.add_layer(output_layer, name='Y')
    network.add_connection(connection, source='X', target='Y')
    network.add_monitor(monitor, name='Y_monitor')  # watches the output layer 'Y'

    # Creating real-valued inputs and running simulation.
    inpts = {'X': torch.ones(time, n_input)}
    network.run(inpts=inpts, time=time)

    # Plot voltage activity.
    plt.plot(monitor.get('v').numpy().T)
    plt.show()
import torch

from bindsnet.datasets import MNIST
from bindsnet.network import Network
from bindsnet.network.monitors import Monitor
from bindsnet.network.nodes import Input, LIFNodes
from bindsnet.network.topology import Connection
from bindsnet.utils import get_square_weights

network = Network(dt=1.0)

inpt = Input(784, shape=(28, 28))
network.add_layer(inpt, name="I")
output = LIFNodes(625, thresh=-52 + torch.randn(625))
network.add_layer(output, name="O")
C1 = Connection(source=inpt, target=output, w=torch.randn(inpt.n, output.n))
C2 = Connection(source=output, target=output, w=0.5 * torch.randn(output.n, output.n))
network.add_connection(C1, source="I", target="O")
network.add_connection(C2, source="O", target="O")

spikes = {}
for l in network.layers:
    spikes[l] = Monitor(network.layers[l], ["s"], time=250)
    network.add_monitor(spikes[l], name="%s_spikes" % l)

voltages = {"O": Monitor(network.layers["O"], ["v"], time=250)}
network.add_monitor(voltages["O"], name="O_voltages")

# Get MNIST training images and labels.
images, labels = MNIST(path="../../data/MNIST", download=True).get_train()
images *= 0.25

# Create lazily iterating Poisson-distributed data loader.
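# A sketch of the loader announced above (assumption: ``poisson`` from
# ``bindsnet.encoding`` produces a [time, n] spike train per image; the real
# script may use a different helper).
from bindsnet.encoding import poisson

def lazy_poisson_loader(data, time, dt=1.0):
    # Yield one Poisson spike train at a time instead of encoding up front.
    for datum in data:
        yield poisson(datum=datum, time=time, dt=dt)

loader = lazy_poisson_loader(images.view(-1, 784), time=250)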
def main(args):
    if args.gpu:
        torch.cuda.manual_seed_all(args.seed)
    else:
        torch.manual_seed(args.seed)

    conv_size = int((28 - args.kernel_size + 2 * args.padding) / args.stride) + 1

    # Build network.
    network = Network()
    input_layer = Input(n=784, shape=(1, 28, 28), traces=True)

    conv_layer = DiehlAndCookNodes(
        n=args.n_filters * conv_size * conv_size,
        shape=(args.n_filters, conv_size, conv_size),
        traces=True,
    )

    conv_conn = Conv2dConnection(
        input_layer,
        conv_layer,
        kernel_size=args.kernel_size,
        stride=args.stride,
        update_rule=PostPre,
        norm=0.4 * args.kernel_size ** 2,
        nu=[0, args.lr],
        reduction=max_without_indices,
        wmax=1.0,
    )

    # Inhibitory recurrent weights between distinct filters at the same
    # spatial location.
    w = torch.zeros(args.n_filters, conv_size, conv_size,
                    args.n_filters, conv_size, conv_size)
    for fltr1 in range(args.n_filters):
        for fltr2 in range(args.n_filters):
            if fltr1 != fltr2:
                for i in range(conv_size):
                    for j in range(conv_size):
                        w[fltr1, i, j, fltr2, i, j] = -100.0

    w = w.view(args.n_filters * conv_size * conv_size,
               args.n_filters * conv_size * conv_size)
    recurrent_conn = Connection(conv_layer, conv_layer, w=w)

    network.add_layer(input_layer, name="X")
    network.add_layer(conv_layer, name="Y")
    network.add_connection(conv_conn, source="X", target="Y")
    network.add_connection(recurrent_conn, source="Y", target="Y")

    # Voltage recording for excitatory and inhibitory layers.
    voltage_monitor = Monitor(network.layers["Y"], ["v"], time=args.time)
    network.add_monitor(voltage_monitor, name="output_voltage")

    if args.gpu:
        network.to("cuda")

    # Load MNIST data.
    train_dataset = MNIST(
        PoissonEncoder(time=args.time, dt=args.dt),
        None,
        os.path.join(ROOT_DIR, "data", "MNIST"),
        download=True,
        train=True,
        transform=transforms.Compose(
            [transforms.ToTensor(),
             transforms.Lambda(lambda x: x * args.intensity)]
        ),
    )

    spikes = {}
    for layer in set(network.layers):
        spikes[layer] = Monitor(network.layers[layer],
                                state_vars=["s"],
                                time=args.time)
        network.add_monitor(spikes[layer], name="%s_spikes" % layer)

    voltages = {}
    for layer in set(network.layers) - {"X"}:
        voltages[layer] = Monitor(network.layers[layer],
                                  state_vars=["v"],
                                  time=args.time)
        network.add_monitor(voltages[layer], name="%s_voltages" % layer)

    # Train the network.
    print("Begin training.\n")
    start = time()

    weights_im = None

    for epoch in range(args.n_epochs):
        if epoch % args.progress_interval == 0:
            print("Progress: %d / %d (%.4f seconds)" %
                  (epoch, args.n_epochs, time() - start))
            start = time()

        train_dataloader = DataLoader(
            train_dataset,
            batch_size=args.batch_size,
            shuffle=True,
            num_workers=4,
            pin_memory=args.gpu,
        )

        for step, batch in enumerate(tqdm(train_dataloader)):
            # Get next input sample.
            inpts = {"X": batch["encoded_image"]}
            if args.gpu:
                inpts = {k: v.cuda() for k, v in inpts.items()}

            # Run the network on the input.
            network.run(inpts=inpts, time=args.time, input_time_dim=0)

            # Decay learning rate.
            network.connections["X", "Y"].nu[1] *= 0.99

            # Optionally plot various simulation information.
            if args.plot:
                weights = conv_conn.w
                weights_im = plot_conv2d_weights(weights, im=weights_im)
                plt.pause(1e-8)

            network.reset_()  # Reset state variables.

    print("Progress: %d / %d (%.4f seconds)\n" %
          (args.n_epochs, args.n_epochs, time() - start))
    print("Training complete.\n")
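# ``max_without_indices`` is referenced above but not defined in this excerpt.
# A plausible definition (an assumption, not the original): a reduction that
# keeps only the values from ``torch.max``, which otherwise returns a
# (values, indices) pair.
def max_without_indices(x: torch.Tensor, dim: int = 0) -> torch.Tensor:
    return torch.max(x, dim=dim)[0]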
    target=tnn_layer_1,
    w=0 * torch.rand(tnn_layer_1.n, tnn_layer_1.n),
    update_rule=TNN_STDP,
    ucapture=10 / 128,
    uminus=10 / 128,
    usearch=2 / 128,
    ubackoff=96 / 128,
    umin=4 / 128,
    timesteps=num_timesteps,
    maxweight=max_weight)

network.add_layer(input_layer, name="I")
network.add_layer(tnn_layer_1, name="TNN_1")
network.add_layer(buffer_layer_1, name="BUF")

# network.add_connection(C2, source="TNN_1", target="TNN_1")
network.add_connection(buf_to_TNN, source="BUF", target="TNN_1")
network.add_connection(TNN_to_buf, source="TNN_1", target="BUF")
network.add_connection(C1, source="I", target="TNN_1")

spikes = {}
for l in network.layers:
    spikes[l] = Monitor(network.layers[l], ["s"], time=num_timesteps)
    network.add_monitor(spikes[l], name="%s_spikes" % l)

dataset = MNIST(
    RampNoLeakTNNEncoder(time=num_timesteps, dt=1),
    None,
    root=os.path.join("..", "..", "data", "MNIST"),
    download=True,
    transform=transforms.Compose(
        [transforms.ToTensor(),
# Spike recordings for all layers.
spikes = {}
for layer in layers:
    spikes[layer] = Monitor(layers[layer], ['s'], time=plot_interval)

# Voltage recordings for excitatory and readout layers.
voltages = {}
for layer in set(layers.keys()) - {'X'}:
    voltages[layer] = Monitor(layers[layer], ['v'], time=plot_interval)

# Add all layers and connections to the network.
for layer in layers:
    network.add_layer(layers[layer], name=layer)

network.add_connection(input_exc_conn, source='X', target='E')
network.add_connection(exc_readout_conn, source='E', target='R')

# Add all monitors to the network.
for layer in layers:
    network.add_monitor(spikes[layer], name='%s_spikes' % layer)

    if layer in voltages:
        network.add_monitor(voltages[layer], name='%s_voltages' % layer)

# Load the Asteroids environment.
environment = GymEnvironment('Asteroids-v0')
environment.reset()

pipeline = Pipeline(network,
                    environment,
                    encoding=bernoulli,
                    time=1,
                    history=5,
                    delta=10,
                    plot_interval=plot_interval,
                    print_interval=print_interval,
                    render_interval=render_interval,
                    action_function=select_multinomial,
def toLIF(network: Network):
    new_network = Network(dt=1, learning=True)

    input_layer = Input(n=network.X.n,
                        shape=network.X.shape,
                        traces=True,
                        tc_trace=network.X.tc_trace.item())

    # Note: the excitatory layer is sized from ``Ae`` but its dynamics are
    # copied from ``Ai``, mirroring the original code.
    exc_layer = LIFNodes(
        n=network.Ae.n,
        traces=True,
        rest=network.Ai.rest.item(),
        reset=network.Ai.reset.item(),
        thresh=network.Ai.thresh.item(),
        refrac=network.Ai.refrac.item(),
        tc_decay=network.Ai.tc_decay.item(),
    )
    inh_layer = LIFNodes(
        n=network.Ai.n,
        traces=False,
        rest=network.Ai.rest.item(),
        reset=network.Ai.reset.item(),
        thresh=network.Ai.thresh.item(),
        tc_decay=network.Ai.tc_decay.item(),
        refrac=network.Ai.refrac.item(),
    )

    # Connections
    w = network.X_to_Ae.w
    input_exc_conn = Connection(
        source=input_layer,
        target=exc_layer,
        w=w,
        update_rule=PostPre,
        nu=network.X_to_Ae.nu,
        reduction=network.X_to_Ae.reduction,
        wmin=network.X_to_Ae.wmin,
        wmax=network.X_to_Ae.wmax,
        norm=network.X_to_Ae.norm * 1,
    )

    w = network.Ae_to_Ai.w
    exc_inh_conn = Connection(source=exc_layer,
                              target=inh_layer,
                              w=w,
                              wmin=network.Ae_to_Ai.wmin,
                              wmax=network.Ae_to_Ai.wmax)

    w = network.Ai_to_Ae.w
    inh_exc_conn = Connection(source=inh_layer,
                              target=exc_layer,
                              w=w,
                              wmin=network.Ai_to_Ae.wmin,
                              wmax=network.Ai_to_Ae.wmax)

    # Add to network
    new_network.add_layer(input_layer, name="X")
    new_network.add_layer(exc_layer, name="Ae")
    new_network.add_layer(inh_layer, name="Ai")
    new_network.add_connection(input_exc_conn, source="X", target="Ae")
    new_network.add_connection(exc_inh_conn, source="Ae", target="Ai")
    new_network.add_connection(inh_exc_conn, source="Ai", target="Ae")

    exc_voltage_monitor = Monitor(new_network.layers["Ae"], ["v"], time=500)
    inh_voltage_monitor = Monitor(new_network.layers["Ai"], ["v"], time=500)
    new_network.add_monitor(exc_voltage_monitor, name="exc_voltage")
    new_network.add_monitor(inh_voltage_monitor, name="inh_voltage")

    spikes = {}
    for layer in set(network.layers):
        spikes[layer] = Monitor(new_network.layers[layer],
                                state_vars=["s"],
                                time=time)
        new_network.add_monitor(spikes[layer], name="%s_spikes" % layer)

    return new_network
out = LIFNodes(n=60, refrac=0, traces=True, thresh=-40.0)

# Connections between layers.
inpt_middle = Connection(source=inpt, target=middle, wmax=1e-2)
middle_out = Connection(source=middle,
                        target=out,
                        wmax=0.5,
                        update_rule=m_stdp_et,
                        nu=2e-2,
                        norm=0.15 * middle.n)

# Add all layers and connections to the network.
network.add_layer(inpt, name='X')
network.add_layer(middle, name='Y')
network.add_layer(out, name='Z')
network.add_connection(inpt_middle, source='X', target='Y')
network.add_connection(middle_out, source='Y', target='Z')

# Load SpaceInvaders environment.
environment = GymEnvironment('SpaceInvaders-v0')
environment.reset()

# Build pipeline from specified components.
pipeline = Pipeline(network,
                    environment,
                    encoding=bernoulli,
                    feedback=select_multinomial,
                    output='Z',
                    time=1,
                    history_length=2,
                    delta=4,
def ann_to_snn(ann: Union[nn.Module, str],
               input_shape: Sequence[int],
               data: Optional[torch.Tensor] = None,
               percentile: float = 99.9,
               node_type: Optional[nodes.Nodes] = SubtractiveResetIFNodes,
               **kwargs) -> Network:
    # language=rst
    """
    Converts an artificial neural network (ANN) written as a ``torch.nn.Module``
    into a near-equivalent spiking neural network.

    :param ann: Artificial neural network implemented in PyTorch. Accepts either
        ``torch.nn.Module`` or path to network saved using ``torch.save()``.
    :param input_shape: Shape of input data.
    :param data: Data to use to perform data-based weight normalization of shape
        ``[n_examples, ...]``.
    :param percentile: Percentile (in ``[0, 100]``) of activations to scale by in
        data-based normalization scheme.
    :return: Spiking neural network implemented in PyTorch.
    """
    if isinstance(ann, str):
        ann = torch.load(ann)
    else:
        ann = deepcopy(ann)

    assert isinstance(ann, nn.Module)

    if data is None:
        import warnings

        warnings.warn("Data is None. Weights will not be scaled.", RuntimeWarning)
    else:
        ann = data_based_normalization(ann=ann,
                                       data=data.detach(),
                                       percentile=percentile)

    snn = Network()

    input_layer = nodes.RealInput(shape=input_shape)
    snn.add_layer(input_layer, name="Input")

    children = []
    for c in ann.children():
        if isinstance(c, nn.Sequential):
            for c2 in list(c.children()):
                children.append(c2)
        else:
            children.append(c)

    i = 0
    prev = input_layer
    while i < len(children) - 1:
        current, nxt = children[i:i + 2]
        layer, connection = _ann_to_snn_helper(prev, current, node_type, **kwargs)

        i += 1

        if layer is None or connection is None:
            continue

        snn.add_layer(layer, name=str(i))
        snn.add_connection(connection, source=str(i - 1), target=str(i))

        prev = layer

    current = children[-1]
    layer, connection = _ann_to_snn_helper(prev, current, node_type, **kwargs)

    i += 1

    # Both pieces must exist before they can be added to the network.
    if layer is not None and connection is not None:
        snn.add_layer(layer, name=str(i))
        snn.add_connection(connection, source=str(i - 1), target=str(i))

    return snn
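# Usage sketch: convert a tiny fully connected ANN and inspect the generated
# layers. Layer names follow the scheme above ("Input", then "1", "2", ...);
# the network and data here are illustrative, not from the source.
if __name__ == "__main__":
    ann = nn.Sequential(nn.Linear(784, 256), nn.ReLU(), nn.Linear(256, 10))
    snn = ann_to_snn(ann, input_shape=(784,))
    print(list(snn.layers.keys()))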
# w_recur[torch.rand(rtnn_layer_1.n, rtnn_layer_1.n) < 0.90] = 0
# buf1_to_rTNN = Connection(
#     source=buffer_layer_1,
#     target=rtnn_layer_1,
#     w=w_recur,
#     timesteps=num_timesteps,
#     update_rule=None)

# Add all nodes to network:
network.add_layer(input_layer_a, name="I_a")
network.add_layer(rtnn_layer_1, name="rTNN_1")
# network.add_layer(buffer_layer_1, name="BUF_1")

# Add connections to network:
# (feedforward)
network.add_connection(FF1a, source="I_a", target="rTNN_1")
# (Recurrences)
# network.add_connection(rTNN_to_buf1, source="rTNN_1", target="BUF_1")
# network.add_connection(buf1_to_rTNN, source="BUF_1", target="rTNN_1")
# End of network creation

# Monitors:
spikes = {}
for l in network.layers:
    spikes[l] = Monitor(network.layers[l], ["s"], time=num_timesteps)
    network.add_monitor(spikes[l], name="%s_spikes" % l)

# Data and initial encoding:
dataset = MNIST(
    RampNoLeakTNNEncoder(time=num_timesteps, dt=1),
                   w=-0.1 * torch.ones(PK.n, DCN.n))
# ``wmax`` is the Connection keyword; a stray ``w_max`` kwarg would be
# silently ignored.
PK_DCN_Anti = Connection(source=PK_Anti,
                         target=DCN_Anti,
                         wmax=0,
                         w=-0.1 * torch.ones(PK_Anti.n, DCN_Anti.n))

network.add_layer(layer=GR_Joint_layer, name="GR_Joint_layer")
network.add_layer(layer=PK, name="PK")
network.add_layer(layer=PK_Anti, name="PK_Anti")
network.add_layer(layer=IO, name="IO")
network.add_layer(layer=IO_Anti, name="IO_Anti")
network.add_layer(layer=DCN, name="DCN")
network.add_layer(layer=DCN_Anti, name="DCN_Anti")

network.add_connection(connection=Parallelfiber,
                       source="GR_Joint_layer",
                       target="PK")
network.add_connection(connection=Parallelfiber_Anti,
                       source="GR_Joint_layer",
                       target="PK_Anti")
network.add_connection(connection=Climbingfiber, source="IO", target="PK")
network.add_connection(connection=Climbingfiber_Anti,
                       source="IO_Anti",
                       target="PK_Anti")
network.add_connection(connection=PK_DCN, source="PK", target="DCN")
network.add_connection(connection=PK_DCN_Anti,
                       source="PK_Anti",
                       target="DCN_Anti")

GR_monitor = Monitor(obj=GR_Joint_layer, state_vars=("s",), time=time)
PK_monitor = Monitor(obj=PK, state_vars=("s", "v"), time=time)
    w=0.5 * max_weight * torch.rand(rtnn_layer_1.n, rtnn_layer_1.n),
    timesteps=num_timesteps,
    update_rule=TNN_STDP,
    **stdp_rtnn_params)

# Add all nodes to network:
network.add_layer(input_layer_a, name="I_a")
network.add_layer(input_layer_b, name="I_b")
network.add_layer(tnn_layer_1a, name="TNN_1a")
network.add_layer(tnn_layer_1b, name="TNN_1b")
network.add_layer(buffer_layer_1, name="BUF_1")
# network.add_layer(buffer_layer_2, name="BUF_2")
network.add_layer(rtnn_layer_1, name="rTNN_1")

# Add connections to network:
# (feedforward)
network.add_connection(FF1a, source="I_a", target="TNN_1a")
network.add_connection(FF1b, source="I_b", target="TNN_1b")
network.add_connection(FF2a, source="TNN_1a", target="rTNN_1")
network.add_connection(FF2b, source="TNN_1b", target="rTNN_1")
# (Recurrences)
network.add_connection(rTNN_to_buf1, source="rTNN_1", target="BUF_1")
# network.add_connection(buf1_to_buf2, source="BUF_1", target="BUF_2")
network.add_connection(buf1_to_rTNN, source="BUF_1", target="rTNN_1")
# network.add_connection(buf2_to_rTNN, source="BUF_2", target="rTNN_1")
# End of network creation

# Monitors:
spikes = {}
for l in network.layers:
inpt_middle = Connection(source=inpt, target=middle, wmin=0, wmax=1e-1)
middle_out = Connection(
    source=middle,
    target=out,
    wmin=0,
    wmax=1,
    update_rule=MSTDP,
    nu=1e-1,
    norm=0.5 * middle.n,
)

# Add all layers and connections to the network.
network.add_layer(inpt, name="Input Layer")
network.add_layer(middle, name="Hidden Layer")
network.add_layer(out, name="Output Layer")
network.add_connection(inpt_middle, source="Input Layer", target="Hidden Layer")
network.add_connection(middle_out, source="Hidden Layer", target="Output Layer")

# Load the Breakout environment.
environment = GymEnvironment("BreakoutDeterministic-v4")
environment.reset()

# Build pipeline from specified components.
environment_pipeline = EnvironmentPipeline(
    network,
    environment,
    encoding=bernoulli,
    action_function=select_softmax,
    output="Output Layer",
    time=100,
    history_length=1,
w = torch.zeros(n_filters, conv_size, conv_size, n_filters, conv_size, conv_size)
for fltr1 in range(n_filters):
    for fltr2 in range(n_filters):
        if fltr1 != fltr2:
            for i in range(conv_size):
                for j in range(conv_size):
                    w[fltr1, i, j, fltr2, i, j] = -100.0

w = w.view(n_filters * conv_size * conv_size, n_filters * conv_size * conv_size)
recurrent_conn = Connection(conv_layer, conv_layer, w=w)

network.add_layer(input_layer, name="X")
network.add_layer(conv_layer, name="Y")
network.add_connection(conv_conn, source="X", target="Y")
network.add_connection(recurrent_conn, source="Y", target="Y")

# Voltage recording for excitatory and inhibitory layers.
voltage_monitor = Monitor(network.layers["Y"], ["v"], time=time)
network.add_monitor(voltage_monitor, name="output_voltage")

if gpu:
    network.to("cuda")

# Load MNIST data.
train_dataset = MNIST(
    PoissonEncoder(time=time, dt=dt),
    None,
    "../../data/MNIST",
    download=True,
def main(seed=0, n_train=60000, n_test=10000, kernel_size=(16,), stride=(4,),
         n_filters=25, padding=0, inhib=100, time=25, lr=1e-3, lr_decay=0.99,
         dt=1, intensity=1, progress_interval=10, update_interval=250,
         plot=False, train=True, gpu=False):

    assert n_train % update_interval == 0 and n_test % update_interval == 0, \
        'No. examples must be divisible by update_interval'

    params = [
        seed, n_train, kernel_size, stride, n_filters, padding, inhib, time,
        lr, lr_decay, dt, intensity, update_interval
    ]

    model_name = '_'.join([str(x) for x in params])

    if not train:
        test_params = [
            seed, n_train, n_test, kernel_size, stride, n_filters, padding,
            inhib, time, lr, lr_decay, dt, intensity, update_interval
        ]

    np.random.seed(seed)

    if gpu:
        torch.set_default_tensor_type('torch.cuda.FloatTensor')
        torch.cuda.manual_seed_all(seed)
    else:
        torch.manual_seed(seed)

    n_examples = n_train if train else n_test
    input_shape = [20, 20]

    if kernel_size == input_shape:
        conv_size = [1, 1]
    else:
        conv_size = (int((input_shape[0] - kernel_size[0]) / stride[0]) + 1,
                     int((input_shape[1] - kernel_size[1]) / stride[1]) + 1)

    n_classes = 10
    n_neurons = n_filters * np.prod(conv_size)
    total_kernel_size = int(np.prod(kernel_size))
    total_conv_size = int(np.prod(conv_size))

    # Build network.
    if train:
        network = Network()

        input_layer = Input(n=400, shape=(1, 1, 20, 20), traces=True)

        conv_layer = DiehlAndCookNodes(n=n_filters * total_conv_size,
                                       shape=(1, n_filters, *conv_size),
                                       thresh=-64.0,
                                       traces=True,
                                       theta_plus=0.05 * (kernel_size[0] / 20),
                                       refrac=0)

        conv_layer2 = LIFNodes(n=n_filters * total_conv_size,
                               shape=(1, n_filters, *conv_size),
                               refrac=0)

        conv_conn = Conv2dConnection(input_layer,
                                     conv_layer,
                                     kernel_size=kernel_size,
                                     stride=stride,
                                     update_rule=WeightDependentPostPre,
                                     norm=0.05 * total_kernel_size,
                                     nu=[0, lr],
                                     wmin=0,
                                     wmax=0.25)

        conv_conn2 = Conv2dConnection(input_layer,
                                      conv_layer2,
                                      w=conv_conn.w,
                                      kernel_size=kernel_size,
                                      stride=stride,
                                      update_rule=None,
                                      wmax=0.25)

        w = -inhib * torch.ones(n_filters, conv_size[0], conv_size[1],
                                n_filters, conv_size[0], conv_size[1])
        for f in range(n_filters):
            for f2 in range(n_filters):
                if f != f2:
                    # Zero out inhibition between distinct filters; each filter
                    # inhibits only its own activity across locations.
                    w[f, :, :, f2, :, :] = 0

        w = w.view(n_filters * conv_size[0] * conv_size[1],
                   n_filters * conv_size[0] * conv_size[1])
        recurrent_conn = Connection(conv_layer, conv_layer, w=w)

        network.add_layer(input_layer, name='X')
        network.add_layer(conv_layer, name='Y')
        network.add_layer(conv_layer2, name='Y_')
        network.add_connection(conv_conn, source='X', target='Y')
        network.add_connection(conv_conn2, source='X', target='Y_')
        network.add_connection(recurrent_conn, source='Y', target='Y')

        # Voltage recording for excitatory and inhibitory layers.
        voltage_monitor = Monitor(network.layers['Y'], ['v'], time=time)
        network.add_monitor(voltage_monitor, name='output_voltage')
    else:
        network = load_network(os.path.join(params_path, model_name + '.pt'))
        network.connections['X', 'Y'].update_rule = NoOp(
            connection=network.connections['X', 'Y'],
            nu=network.connections['X', 'Y'].nu)
        network.layers['Y'].theta_decay = 0
        network.layers['Y'].theta_plus = 0

    # Load MNIST data.
    dataset = MNIST(data_path, download=True)

    if train:
        images, labels = dataset.get_train()
    else:
        images, labels = dataset.get_test()

    images *= intensity
    images = images[:, 4:-4, 4:-4].contiguous()

    # Record spikes during the simulation.
    spike_record = torch.zeros(update_interval, time, n_neurons)
    full_spike_record = torch.zeros(n_examples, n_neurons)

    # Neuron assignments and spike proportions.
    if train:
        logreg_model = LogisticRegression(warm_start=True,
                                          n_jobs=-1,
                                          solver='lbfgs',
                                          max_iter=1000,
                                          multi_class='multinomial')
        logreg_model.coef_ = np.zeros([n_classes, n_neurons])
        logreg_model.intercept_ = np.zeros(n_classes)
        logreg_model.classes_ = np.arange(n_classes)
    else:
        path = os.path.join(params_path,
                            '_'.join(['auxiliary', model_name]) + '.pt')
        logreg_coef, logreg_intercept = torch.load(open(path, 'rb'))
        logreg_model = LogisticRegression(warm_start=True,
                                          n_jobs=-1,
                                          solver='lbfgs',
                                          max_iter=1000,
                                          multi_class='multinomial')
        logreg_model.coef_ = logreg_coef
        logreg_model.intercept_ = logreg_intercept
        logreg_model.classes_ = np.arange(n_classes)

    # Sequence of accuracy estimates.
    curves = {'logreg': []}
    predictions = {scheme: torch.Tensor().long() for scheme in curves.keys()}

    if train:
        best_accuracy = 0

    spikes = {}
    for layer in set(network.layers):
        spikes[layer] = Monitor(network.layers[layer],
                                state_vars=['s'],
                                time=time)
        network.add_monitor(spikes[layer], name='%s_spikes' % layer)

    # Train the network.
    if train:
        print('\nBegin training.\n')
    else:
        print('\nBegin test.\n')

    inpt_ims = None
    inpt_axes = None
    spike_ims = None
    spike_axes = None
    weights_im = None

    plot_update_interval = 100

    start = t()
    for i in range(n_examples):
        if i % progress_interval == 0:
            print('Progress: %d / %d (%.4f seconds)' %
                  (i, n_examples, t() - start))
            start = t()

        if i % update_interval == 0 and i > 0:
            if train:
                network.connections['X', 'Y'].update_rule.nu[1] *= lr_decay

            if i % len(labels) == 0:
                current_labels = labels[-update_interval:]
                current_record = full_spike_record[-update_interval:]
            else:
                current_labels = labels[i % len(labels) - update_interval:
                                        i % len(labels)]
                current_record = full_spike_record[
                    i % len(labels) - update_interval:i % len(labels)]

            # Update and print accuracy evaluations.
            curves, preds = update_curves(curves,
                                          current_labels,
                                          n_classes,
                                          full_spike_record=current_record,
                                          logreg=logreg_model)
            print_results(curves)

            for scheme in preds:
                predictions[scheme] = torch.cat(
                    [predictions[scheme], preds[scheme]], -1)

            # Save accuracy curves to disk.
            to_write = ['train'] + params if train else ['test'] + params
            f = '_'.join([str(x) for x in to_write]) + '.pt'
            torch.save((curves, update_interval, n_examples),
                       open(os.path.join(curves_path, f), 'wb'))

            if train:
                if any([x[-1] > best_accuracy for x in curves.values()]):
                    print('New best accuracy! Saving network parameters to disk.')

                    # Save network to disk.
                    network.save(os.path.join(params_path, model_name + '.pt'))
                    path = os.path.join(
                        params_path,
                        '_'.join(['auxiliary', model_name]) + '.pt')
                    torch.save((logreg_model.coef_, logreg_model.intercept_),
                               open(path, 'wb'))
                    best_accuracy = max([x[-1] for x in curves.values()])

                # Refit logistic regression model.
                logreg_model = logreg_fit(full_spike_record[:i], labels[:i],
                                          logreg_model)

            print()

        # Get next input sample.
        image = images[i % len(images)]
        sample = bernoulli(datum=image, time=time, dt=dt,
                           max_prob=1).unsqueeze(1).unsqueeze(1)
        inpts = {'X': sample}

        # Run the network on the input.
        network.run(inpts=inpts, time=time)
        network.connections['X', 'Y_'].w = network.connections['X', 'Y'].w

        # Add to spikes recording.
        spike_record[i % update_interval] = spikes['Y_'].get('s').view(time, -1)
        full_spike_record[i] = spikes['Y_'].get('s').view(time, -1).sum(0)

        # Optionally plot various simulation information.
        if plot and i % plot_update_interval == 0:
            _input = inpts['X'].view(time, 400).sum(0).view(20, 20)
            w = network.connections['X', 'Y'].w
            _spikes = {
                'X': spikes['X'].get('s').view(400, time),
                'Y': spikes['Y'].get('s').view(n_filters * total_conv_size, time),
                'Y_': spikes['Y_'].get('s').view(n_filters * total_conv_size, time)
            }

            inpt_axes, inpt_ims = plot_input(image.view(20, 20),
                                             _input,
                                             label=labels[i % len(labels)],
                                             ims=inpt_ims,
                                             axes=inpt_axes)
            spike_ims, spike_axes = plot_spikes(spikes=_spikes,
                                                ims=spike_ims,
                                                axes=spike_axes)
            weights_im = plot_conv2d_weights(
                w, im=weights_im, wmax=network.connections['X', 'Y'].wmax)

            plt.pause(1e-2)

        network.reset_()  # Reset state variables.

    print(f'Progress: {n_examples} / {n_examples} ({t() - start:.4f} seconds)')

    i += 1

    if i % len(labels) == 0:
        current_labels = labels[-update_interval:]
        current_record = full_spike_record[-update_interval:]
    else:
        current_labels = labels[i % len(labels) - update_interval:
                                i % len(labels)]
        current_record = full_spike_record[i % len(labels) - update_interval:
                                           i % len(labels)]

    # Update and print accuracy evaluations.
    curves, preds = update_curves(curves,
                                  current_labels,
                                  n_classes,
                                  full_spike_record=current_record,
                                  logreg=logreg_model)
    print_results(curves)

    for scheme in preds:
        predictions[scheme] = torch.cat([predictions[scheme], preds[scheme]], -1)

    if train:
        if any([x[-1] > best_accuracy for x in curves.values()]):
            print('New best accuracy! Saving network parameters to disk.')

            # Save network to disk.
            network.save(os.path.join(params_path, model_name + '.pt'))
            path = os.path.join(params_path,
                                '_'.join(['auxiliary', model_name]) + '.pt')
            torch.save((logreg_model.coef_, logreg_model.intercept_),
                       open(path, 'wb'))

    if train:
        print('\nTraining complete.\n')
    else:
        print('\nTest complete.\n')

    print('Average accuracies:\n')
    for scheme in curves.keys():
        print('\t%s: %.2f' % (scheme, float(np.mean(curves[scheme]))))

    # Save accuracy curves to disk.
    to_write = ['train'] + params if train else ['test'] + params
    to_write = [str(x) for x in to_write]
    f = '_'.join(to_write) + '.pt'
    torch.save((curves, update_interval, n_examples),
               open(os.path.join(curves_path, f), 'wb'))

    # Save results to disk.
    results = [np.mean(curves['logreg']), np.std(curves['logreg'])]
    to_write = params + results if train else test_params + results
    to_write = [str(x) for x in to_write]
    name = 'train.csv' if train else 'test.csv'

    if not os.path.isfile(os.path.join(results_path, name)):
        with open(os.path.join(results_path, name), 'w') as f:
            if train:
                columns = [
                    'seed', 'n_train', 'kernel_size', 'stride', 'n_filters',
                    'padding', 'inhib', 'time', 'lr', 'lr_decay', 'dt',
                    'intensity', 'update_interval', 'mean_logreg', 'std_logreg'
                ]
                header = ','.join(columns) + '\n'
                f.write(header)
            else:
                columns = [
                    'seed', 'n_train', 'n_test', 'kernel_size', 'stride',
                    'n_filters', 'padding', 'inhib', 'time', 'lr', 'lr_decay',
                    'dt', 'intensity', 'update_interval', 'mean_logreg',
                    'std_logreg'
                ]
                header = ','.join(columns) + '\n'
                f.write(header)

    with open(os.path.join(results_path, name), 'a') as f:
        f.write(','.join(to_write) + '\n')

    if labels.numel() > n_examples:
        labels = labels[:n_examples]
    else:
        while labels.numel() < n_examples:
            if 2 * labels.numel() > n_examples:
                labels = torch.cat([labels, labels[:n_examples - labels.numel()]])
            else:
                labels = torch.cat([labels, labels])

    # Compute confusion matrices and save them to disk.
    confusions = {}
    for scheme in predictions:
        confusions[scheme] = confusion_matrix(labels, predictions[scheme])

    to_write = ['train'] + params if train else ['test'] + test_params
    f = '_'.join([str(x) for x in to_write]) + '.pt'
    torch.save(confusions, os.path.join(confusion_path, f))
def Translate_Into_Networks(input_N, Shape, Output_N, Weight):
    network_list = []

    path = "gene/"
    file_list = os.listdir(path)
    gene_file_check = [file for file in file_list if file.endswith(".txt")]
    if len(gene_file_check) == 0:
        import startup

    Gene_List = Genetic.Read_Gene()

    for i in range(len(Gene_List)):
        network = Network()
        Decoded_List = []
        Decoded_DNA_List = []

        # Decode each gene string of the form "<source>-<type>-<target>-<flag>".
        for j in range(len(Gene_List[i])):
            Decoded_Gene = Gene_List[i][j].split('-')
            if Decoded_Gene[3] == 'F':
                pass
            else:
                if Decoded_Gene[1] == '~':
                    Decoded_List.append(
                        [int(Decoded_Gene[0]), int(Decoded_Gene[2]), 0])
                elif Decoded_Gene[1] == '!':
                    Decoded_List.append(
                        [int(Decoded_Gene[0]), int(Decoded_Gene[2]), 1])
                elif Decoded_Gene[1] == '@':
                    Decoded_List.append(
                        [int(Decoded_Gene[0]), int(Decoded_Gene[2]), 2])
                elif Decoded_Gene[1] == '#':
                    Decoded_List.append(
                        [int(Decoded_Gene[0]), int(Decoded_Gene[2]), 3])
                elif Decoded_Gene[1] == '$':
                    Decoded_List.append(
                        [int(Decoded_Gene[0]), int(Decoded_Gene[2]), 4])
                else:
                    print("THE GENOTYPE VALUE IS INVALID")
                    raise ValueError

            Decoded_DNA_List.append(Decoded_List)

        Decoded_RNA_List: list = Decoded_DNA_List.copy()

        # Resolve conflicts between genes by dominance of the type code.
        # Note: ``a and b == 1`` parses as ``a and (b == 1)``; since ``b`` is a
        # list, the comparison is always False, so this pruning branch never
        # fires as written.
        for decoded_dna_idx, decoded_dna in enumerate(Decoded_DNA_List):
            Gene_NUM = len(decoded_dna)
            for k in range(Gene_NUM):
                a = Decoded_DNA_List[decoded_dna_idx][k]
                for l in range(k, Gene_NUM):
                    b = Decoded_DNA_List[decoded_dna_idx][l]
                    if a and b == 1:
                        if decoded_dna[k][2] == 0:
                            Decoded_RNA_List[decoded_dna_idx].remove(
                                decoded_dna[l])
                        elif decoded_dna[k][2] == 1:
                            if decoded_dna[l][2] < 1:
                                Decoded_RNA_List[decoded_dna_idx].remove(
                                    decoded_dna[k])
                            else:
                                Decoded_RNA_List[decoded_dna_idx].remove(
                                    decoded_dna[l])
                        elif decoded_dna[k][2] == 2:
                            if decoded_dna[l][2] < 2:
                                Decoded_RNA_List[decoded_dna_idx].remove(
                                    decoded_dna[k])
                            else:
                                Decoded_RNA_List[decoded_dna_idx].remove(
                                    decoded_dna[l])
                        elif decoded_dna[k][2] == 3:
                            if decoded_dna[l][2] < 3:
                                Decoded_RNA_List[decoded_dna_idx].remove(
                                    decoded_dna[k])
                            else:
                                Decoded_RNA_List[decoded_dna_idx].remove(
                                    decoded_dna[l])
                        elif decoded_dna[k][2] == 4:
                            if decoded_dna[l][2] >= 4:
                                Decoded_RNA_List[decoded_dna_idx].remove(
                                    decoded_dna[l])
                            else:
                                Decoded_RNA_List[decoded_dna_idx].remove(
                                    decoded_dna[k])
                        else:
                            pass
                    else:
                        pass

        # Build one node per gene endpoint, choosing the node model from the
        # gene's type code.
        for Decoded_RNA in Decoded_RNA_List:
            layer_list = {}
            for m in range(len(Decoded_RNA)):
                for n in range(m, len(Decoded_RNA)):
                    if Decoded_RNA[m][1] == Decoded_RNA[n][0]:
                        if Decoded_RNA[n][2] == 0:
                            layer_list[Decoded_RNA[m][0]] = nodes.IFNodes(
                                n=1, traces=True)
                        elif Decoded_RNA[n][2] == 1:
                            layer_list[Decoded_RNA[m][0]] = nodes.LIFNodes(
                                n=1, traces=True)
                        elif Decoded_RNA[n][2] == 2:
                            layer_list[Decoded_RNA[m][0]] = nodes.McCullochPitts(
                                n=1, traces=True)
                        elif Decoded_RNA[n][2] == 3:
                            layer_list[Decoded_RNA[m][0]] = nodes.IzhikevichNodes(
                                n=1, traces=True)
                        elif Decoded_RNA[n][2] == 4:
                            layer_list[Decoded_RNA[m][0]] = nodes.SRM0Nodes(
                                n=1, traces=True)
                        else:
                            print("INVALID GENO_NEURON CODE")
                            raise ValueError
                    elif n == len(Decoded_List) - 1:
                        layer_list[Decoded_RNA[m][1]] = nodes.LIFNodes(n=1)

            for l in range(len(Decoded_RNA)):
                if not Decoded_RNA[l][0] in layer_list:
                    if Decoded_RNA[l][2] == 0:
                        layer_list[Decoded_RNA[l][0]] = nodes.IFNodes(
                            n=1, traces=True)
                    elif Decoded_RNA[l][2] == 1:
                        layer_list[Decoded_RNA[l][0]] = nodes.LIFNodes(
                            n=1, traces=True)
                    elif Decoded_RNA[l][2] == 2:
                        layer_list[Decoded_RNA[l][0]] = nodes.McCullochPitts(
                            n=1, traces=True)
                    elif Decoded_RNA[l][2] == 3:
                        layer_list[Decoded_RNA[l][0]] = nodes.IzhikevichNodes(
                            n=1, traces=True)
                    elif Decoded_RNA[l][2] == 4:
                        layer_list[Decoded_RNA[l][0]] = nodes.SRM0Nodes(
                            n=1, traces=True)
            Input_Layer = nodes.Input(n=input_N, shape=Shape, traces=True)
            out = nodes.LIFNodes(n=Output_N, refrac=0, traces=True)

            network.add_layer(layer=Input_Layer, name="Input Layer")
            for key_l in list(layer_list.keys()):
                network.add_layer(layer=layer_list[key_l], name=str(key_l))
            network.add_layer(layer=out, name="Output Layer")

            if len(layer_list.keys()) == 0:
                layer = nodes.LIFNodes(n=1, traces=True)
                network.add_layer(layer=layer, name="mid layer")
                inpt_connection = Connection(source=Input_Layer,
                                             target=layer,
                                             w=Weight * torch.ones(input_N))
                opt_connection = Connection(source=layer,
                                            target=out,
                                            w=Weight * torch.ones(1))
                # The layer was registered as "Input Layer" above, so the
                # connection must reference the same name.
                network.add_connection(inpt_connection,
                                       source="Input Layer",
                                       target="mid layer")
                network.add_connection(opt_connection,
                                       source="mid layer",
                                       target="Output Layer")
            else:
                for key_ic in list(layer_list.keys()):
                    inpt_connection = Connection(source=Input_Layer,
                                                 target=layer_list[key_ic],
                                                 w=Weight * torch.ones(input_N))
                    network.add_connection(inpt_connection,
                                           source="Input Layer",
                                           target=str(key_ic))
                for key_op in list(layer_list.keys()):
                    output_connection = Connection(source=layer_list[key_op],
                                                   target=out,
                                                   w=Weight * torch.ones(1),
                                                   update_rule=MSTDP)
                    network.add_connection(output_connection,
                                           source=str(key_op),
                                           target="Output Layer")
                for generating_protein in Decoded_RNA:
                    mid_connection = Connection(
                        source=layer_list[generating_protein[0]],
                        target=layer_list[generating_protein[1]],
                        w=Weight * torch.ones(1),
                        update_rule=MSTDP)
                    network.add_connection(mid_connection,
                                           source=str(generating_protein[0]),
                                           target=str(generating_protein[1]))

        network_list.append(network)
        network.save('Network/' + str(i) + '.pt')

    return network_list
# Connections between layers.
inpt_middle = Connection(source=inpt, target=middle, wmax=1e-2)
middle_out = Connection(
    source=middle,
    target=out,
    wmax=0.5,
    update_rule=MSTDPET,
    nu=2e-2,
    norm=0.15 * middle.n,
)

# Add all layers and connections to the network.
network.add_layer(inpt, name="X")
network.add_layer(middle, name="Y")
network.add_layer(out, name="Z")
network.add_connection(inpt_middle, source="X", target="Y")
network.add_connection(middle_out, source="Y", target="Z")

# Load SpaceInvaders environment.
environment = GymEnvironment(
    "SpaceInvaders-v0",
    BernoulliEncoder(time=int(network.dt), dt=network.dt),
    history_length=2,
    delta=4,
)
environment.reset()

# Plotting configuration.
plot_config = {
    "data_step": 1,
    "data_length": 10,
# Spike recordings for all layers.
spikes = {}
for layer in layers:
    spikes[layer] = Monitor(layers[layer], ["s"], time=plot_interval)

# Voltage recordings for excitatory and readout layers.
voltages = {}
for layer in set(layers.keys()) - {"X"}:
    voltages[layer] = Monitor(layers[layer], ["v"], time=plot_interval)

# Add all layers and connections to the network.
for layer in layers:
    network.add_layer(layers[layer], name=layer)

network.add_connection(input_exc_conn, source="X", target="E")
network.add_connection(exc_readout_conn, source="E", target="R")

# Add all monitors to the network.
for layer in layers:
    network.add_monitor(spikes[layer], name="%s_spikes" % layer)

    if layer in voltages:
        network.add_monitor(voltages[layer], name="%s_voltages" % layer)

# Load the Breakout environment.
environment = GymEnvironment("BreakoutDeterministic-v4")
environment.reset()

pipeline = EnvironmentPipeline(
    network,