# Imports needed to run this snippet on its own (here ``t`` aliases ``time.time``).
from time import time as t

import torch

from bindsnet.encoding import poisson
from bindsnet.network import Network
from bindsnet.network.nodes import Input, LIFNodes
from bindsnet.network.topology import Connection


def BindsNET_cpu(n_neurons, time):
    # t0 measures total wall-clock time; t1 measures time excluding tensor-type setup.
    t0 = t()

    torch.set_default_tensor_type("torch.FloatTensor")

    t1 = t()

    # Two-layer network: an input layer fully connected to a layer of LIF neurons.
    network = Network()
    network.add_layer(Input(n=n_neurons), name="X")
    network.add_layer(LIFNodes(n=n_neurons), name="Y")
    network.add_connection(
        Connection(source=network.layers["X"], target=network.layers["Y"]),
        source="X",
        target="Y",
    )

    # Poisson-encode a random input vector and simulate for `time` timesteps.
    data = {"X": poisson(datum=torch.rand(n_neurons), time=time)}
    network.run(inputs=data, time=time)

    return t() - t0, t() - t1
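# Usage sketch (not part of the original benchmark; the sizes and simulation
# length below are illustrative assumptions): time the build-and-run loop for a
# few network sizes and print both measurements returned above.
if __name__ == "__main__":
    for n in (100, 250, 500):
        total_time, sim_time = BindsNET_cpu(n_neurons=n, time=1000)
        print(f"n_neurons={n}: total {total_time:.3f}s, post-setup {sim_time:.3f}s")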
seed = args.seed
n_neurons = args.n_neurons
dt = args.dt
plot_interval = args.plot_interval
render_interval = args.render_interval
print_interval = args.print_interval
gpu = args.gpu

if gpu:
    torch.set_default_tensor_type("torch.cuda.FloatTensor")
    torch.cuda.manual_seed_all(seed)
else:
    torch.manual_seed(seed)

# Build network.
network = Network(dt=dt)

# Layers of neurons.
inpt = Input(shape=(80, 80), traces=True)  # Input layer
exc = LIFNodes(n=n_neurons, refrac=0, traces=True)  # Excitatory layer
readout = LIFNodes(n=16, refrac=0, traces=True)  # Readout layer
layers = {"X": inpt, "E": exc, "R": readout}

# Connections between layers.
# Input -> excitatory.
w = 0.01 * torch.rand(layers["X"].n, layers["E"].n)
input_exc_conn = Connection(
    source=layers["X"],
    target=layers["E"],
    w=w,
    wmax=0.02,
)

train_dataloader = torch.utils.data.DataLoader(
    train_dataset, batch_size=1, shuffle=True, num_workers=0
)

# Grab the shape of a single sample (not including batch), i.e. T x C x H x W.
sample_shape = train_dataset[0]["encoded_image"].shape
print(args.dataset, " has shape ", sample_shape)

conv_size = int((sample_shape[-1] - kernel_size + 2 * padding) / stride) + 1
per_class = int((n_filters * conv_size * conv_size) / 10)

# Build a small convolutional network.
network = Network()

# Make sure to include the batch dimension but not time.
input_layer = Input(shape=(1, *sample_shape[1:]), traces=True)
conv_layer = LIFNodes(
    n=n_filters * conv_size * conv_size,
    shape=(1, n_filters, conv_size, conv_size),
    traces=True,
)
conv_conn = Conv2dConnection(
    input_layer,
    conv_layer,
    kernel_size=kernel_size,
    stride=stride,
plot = args.plot
gpu = args.gpu

if gpu:
    torch.cuda.manual_seed_all(seed)
else:
    torch.manual_seed(seed)

if not train:
    update_interval = n_test

conv_size = int((28 - kernel_size + 2 * padding) / stride) + 1
per_class = int((n_filters * conv_size * conv_size) / 10)

# Build network.
network = Network()
input_layer = Input(n=784, shape=(1, 28, 28), traces=True)
conv_layer = DiehlAndCookNodes(
    n=n_filters * conv_size * conv_size,
    shape=(n_filters, conv_size, conv_size),
    traces=True,
)
conv_conn = Conv2dConnection(
    input_layer,
    conv_layer,
    kernel_size=kernel_size,
    stride=stride,
    update_rule=PostPre,
    norm=0.4 * kernel_size**2,
from bindsnet_qa.encoding import bernoulli
from bindsnet_qa.environment import GymEnvironment
from bindsnet_qa.network import Network
from bindsnet_qa.network.nodes import Input, LIFNodes
from bindsnet_qa.network.topology import Connection
from bindsnet_qa.pipeline import EnvironmentPipeline
from bindsnet_qa.pipeline.action import select_softmax

# Build network.
network = Network(dt=1.0)

# Layers of neurons.
inpt = Input(n=80 * 80, shape=[80, 80], traces=True)
middle = LIFNodes(n=100, traces=True)
out = LIFNodes(n=4, refrac=0, traces=True)

# Connections between layers.
inpt_middle = Connection(source=inpt, target=middle, wmin=0, wmax=1e-1)
middle_out = Connection(source=middle, target=out, wmin=0, wmax=1)

# Add all layers and connections to the network.
network.add_layer(inpt, name="Input Layer")
network.add_layer(middle, name="Hidden Layer")
network.add_layer(out, name="Output Layer")
network.add_connection(inpt_middle, source="Input Layer", target="Hidden Layer")
network.add_connection(middle_out, source="Hidden Layer", target="Output Layer")
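# The imports above suggest this network is meant to drive an OpenAI Gym game
# through an EnvironmentPipeline. The sketch below shows that wiring, modeled on
# the upstream BindsNET Breakout example; the environment name, keyword
# arguments, and episode loop are assumptions (including that the bindsnet_qa
# fork keeps the same pipeline API), not part of this excerpt.
environment = GymEnvironment("BreakoutDeterministic-v4")
environment.reset()

pipeline = EnvironmentPipeline(
    network,
    environment,
    encoding=bernoulli,              # Bernoulli-encode observations into spikes.
    action_function=select_softmax,  # Sample actions from "Output Layer" activity.
    output="Output Layer",
    time=100,
    history_length=1,
    delta=1,
)

# Run a handful of episodes: step the environment and feed each result to the network.
for episode in range(10):
    total_reward = 0.0
    pipeline.reset_state_variables()
    is_done = False
    while not is_done:
        result = pipeline.env_step()
        pipeline.step(result)
        total_reward += result[1]
        is_done = result[2]
    print(f"Episode {episode} total reward: {total_reward}")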
def ann_to_snn(
    ann: Union[nn.Module, str],
    input_shape: Sequence[int],
    data: Optional[torch.Tensor] = None,
    percentile: float = 99.9,
    node_type: Optional[nodes.Nodes] = SubtractiveResetIFNodes,
    **kwargs,
) -> Network:
    # language=rst
    """
    Converts an artificial neural network (ANN) written as a ``torch.nn.Module``
    into a near-equivalent spiking neural network.

    :param ann: Artificial neural network implemented in PyTorch. Accepts either
        ``torch.nn.Module`` or path to network saved using ``torch.save()``.
    :param input_shape: Shape of input data.
    :param data: Data to use to perform data-based weight normalization of shape
        ``[n_examples, ...]``.
    :param percentile: Percentile (in ``[0, 100]``) of activations to scale by in
        data-based normalization scheme.
    :param node_type: Class of ``Nodes`` to use in replacing ``torch.nn.Linear``
        layers in original ANN.
    :return: Spiking neural network implemented in PyTorch.
    """
    if isinstance(ann, str):
        ann = torch.load(ann)
    else:
        ann = deepcopy(ann)

    assert isinstance(ann, nn.Module)

    if data is None:
        import warnings

        warnings.warn("Data is None. Weights will not be scaled.", RuntimeWarning)
    else:
        ann = data_based_normalization(
            ann=ann, data=data.detach(), percentile=percentile
        )

    snn = Network()

    input_layer = nodes.Input(shape=input_shape)
    snn.add_layer(input_layer, name="Input")

    # Flatten any nn.Sequential containers into a single list of child modules.
    children = []
    for c in ann.children():
        if isinstance(c, nn.Sequential):
            for c2 in list(c.children()):
                children.append(c2)
        else:
            children.append(c)

    i = 0
    prev = input_layer
    while i < len(children) - 1:
        current, nxt = children[i : i + 2]
        layer, connection = _ann_to_snn_helper(prev, current, node_type, **kwargs)

        i += 1

        # Skip modules (e.g. activations) that do not map to a spiking layer.
        if layer is None or connection is None:
            continue

        snn.add_layer(layer, name=str(i))
        snn.add_connection(connection, source=str(i - 1), target=str(i))

        prev = layer

    current = children[-1]
    layer, connection = _ann_to_snn_helper(
        prev, current, node_type, last=True, **kwargs
    )

    i += 1

    # Only add the final layer if the helper produced both a layer and a connection.
    if layer is not None and connection is not None:
        snn.add_layer(layer, name=str(i))
        snn.add_connection(connection, source=str(i - 1), target=str(i))

    return snn
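# Usage sketch: the architecture and normalization data below are illustrative
# assumptions, not taken from the source; they only demonstrate the call signature.
import torch
import torch.nn as nn

ann = nn.Sequential(nn.Linear(784, 256), nn.ReLU(), nn.Linear(256, 10))
snn = ann_to_snn(ann, input_shape=(784,), data=torch.rand(128, 784))

# The result is an ordinary BindsNET ``Network``; it can then be simulated, e.g.
# with ``snn.run(inputs={"Input": ...}, time=...)``, like any other network.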
plot = args.plot
gpu = args.gpu
device_id = args.device_id

np.random.seed(seed)
torch.cuda.manual_seed_all(seed)
torch.manual_seed(seed)

# Sets up GPU use.
if gpu and torch.cuda.is_available():
    torch.cuda.set_device(device_id)
    # torch.set_default_tensor_type('torch.cuda.FloatTensor')
else:
    torch.manual_seed(seed)

network = Network(dt=dt)

inpt = Input(784, shape=(1, 28, 28))
network.add_layer(inpt, name="I")
output = LIFNodes(n_neurons, thresh=-52 + np.random.randn(n_neurons).astype(float))
network.add_layer(output, name="O")

C1 = Connection(source=inpt, target=output, w=0.5 * torch.randn(inpt.n, output.n))
C2 = Connection(source=output, target=output, w=0.5 * torch.randn(output.n, output.n))

network.add_connection(C1, source="I", target="O")
network.add_connection(C2, source="O", target="O")
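# A hedged sketch of driving the recurrent network above: the encoder choice,
# simulation length, and monitor name are assumptions (and dt is taken to be 1.0),
# not part of this excerpt.
from bindsnet.encoding import poisson
from bindsnet.network.monitors import Monitor

sim_time = 250  # simulation duration in timesteps (assumed)

# Record spikes and voltages of the recurrent "O" layer.
network.add_monitor(
    Monitor(obj=output, state_vars=["s", "v"], time=sim_time), name="O_monitor"
)

# Poisson-encode one stand-in sample shaped like an MNIST image and run the network.
datum = torch.rand(1, 28, 28)
spike_input = poisson(datum=datum, time=sim_time).unsqueeze(1)  # [time, batch, 1, 28, 28]
network.run(inputs={"I": spike_input}, time=sim_time)

spikes = network.monitors["O_monitor"].get("s")
voltages = network.monitors["O_monitor"].get("v")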