def __init__(self, T: int, channels: int, use_cupy=False):
    super().__init__()
    self.T = T

    self.conv_fc = nn.Sequential(
        layer.Conv2d(1, channels, kernel_size=3, padding=1, bias=False),
        layer.BatchNorm2d(channels),
        neuron.IFNode(surrogate_function=surrogate.ATan()),
        layer.MaxPool2d(2, 2),  # 28 x 28 -> 14 x 14
        layer.Conv2d(channels, channels, kernel_size=3, padding=1, bias=False),
        layer.BatchNorm2d(channels),
        neuron.IFNode(surrogate_function=surrogate.ATan()),
        layer.MaxPool2d(2, 2),  # 14 x 14 -> 7 x 7
        layer.Flatten(),
        layer.Linear(channels * 7 * 7, channels * 4 * 4, bias=False),
        neuron.IFNode(surrogate_function=surrogate.ATan()),
        layer.Linear(channels * 4 * 4, 10, bias=False),
        neuron.IFNode(surrogate_function=surrogate.ATan()),
    )

    # Multi-step mode: every layer processes a whole [T, N, ...] sequence at once
    functional.set_step_mode(self, step_mode='m')

    if use_cupy:
        functional.set_backend(self, backend='cupy')
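Because `functional.set_step_mode(self, step_mode='m')` switches every layer to multi-step mode, the network expects an input sequence of shape `[T, N, C, H, W]`. A minimal usage sketch, assuming this `__init__` belongs to an `nn.Module` subclass (hypothetically named `CSNN` here) whose forward pass applies `self.conv_fc`:

import torch
from spikingjelly.activation_based import functional

net = CSNN(T=4, channels=128)  # hypothetical class name for the module above
x = torch.rand(8, 1, 28, 28)   # a batch of MNIST-sized images
# Repeat the static image T times to form a [T, N, 1, 28, 28] sequence
x_seq = x.unsqueeze(0).repeat(net.T, 1, 1, 1, 1)
out_fr = net.conv_fc(x_seq).mean(0)  # mean firing rate over T steps, shape [N, 10]
functional.reset_net(net)            # clear all neuron states before the next batch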
Example #2

def __init__(self):
    super().__init__()
    self.fc = nn.Sequential(
        layer.Linear(28, 32),
        neuron.IFNode(surrogate_function=surrogate.ATan()),
        layer.Linear(32, 10),
        neuron.IFNode(surrogate_function=surrogate.ATan())
    )
Example #3

def __init__(self):
    super().__init__()
    self.fc = nn.Sequential(
        layer.Linear(28, 32),
        neuron.IFNode(surrogate_function=surrogate.ATan()),
        layer.SynapseFilter(tau=2., learnable=True),
        layer.Linear(32, 10),
        neuron.IFNode(surrogate_function=surrogate.ATan())
    )
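`layer.SynapseFilter` inserted between the spiking layer and the readout low-pass filters the spike train: each incoming spike charges an internal state that then decays with time constant `tau` (made learnable above). A small sketch of the impulse response, under the default single-step mode:

import torch
from spikingjelly.activation_based import layer, functional

lp = layer.SynapseFilter(tau=2.)
for t in range(5):
    spike = torch.ones(1) if t == 0 else torch.zeros(1)
    out = lp(spike)  # jumps at the input spike, then decays exponentially
    print(f't={t}: {out.item():.3f}')
functional.reset_net(lp)  # the filter is stateful, so reset it between sequences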
Example #4

def __init__(self):
    super().__init__()

    self.fc = nn.Sequential(
        layer.Linear(28, 32),
        layer.LinearRecurrentContainer(
            neuron.IFNode(surrogate_function=surrogate.ATan(), detach_reset=True),
            in_features=32, out_features=32, bias=True
        ),
        layer.Linear(32, 10),
        neuron.IFNode(surrogate_function=surrogate.ATan())
    )
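`layer.LinearRecurrentContainer` makes the wrapped `IFNode` recurrent: the spikes it emitted at step t-1 are combined with the current input through a trainable linear map before reaching the neuron. A minimal stateful stepping sketch:

import torch
from spikingjelly.activation_based import neuron, surrogate, layer, functional

rec = layer.LinearRecurrentContainer(
    neuron.IFNode(surrogate_function=surrogate.ATan(), detach_reset=True),
    in_features=32, out_features=32, bias=True
)
x = torch.rand(4, 32)  # [N, in_features]
for t in range(8):
    s = rec(x)         # output depends on both x and the spikes from step t-1
functional.reset_net(rec)  # clear the neuron state and the recurrent feedback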
Example #5
def __init__(self, num_inputs, num_outputs, hidden_size, T=16):
    super(ActorCritic, self).__init__()

    # Hidden layers spike; the final NonSpikingLIFNode integrates without firing,
    # so its membrane potential serves as a continuous readout.
    self.critic = nn.Sequential(nn.Linear(num_inputs, hidden_size),
                                neuron.IFNode(),
                                nn.Linear(hidden_size, 1),
                                NonSpikingLIFNode(tau=2.0))

    self.actor = nn.Sequential(nn.Linear(num_inputs, hidden_size),
                               neuron.IFNode(),
                               nn.Linear(hidden_size, num_outputs),
                               NonSpikingLIFNode(tau=2.0))

    self.T = T
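The forward pass this design implies (following the pattern used in SpikingJelly's reinforcement-learning examples, so treat this as a sketch rather than the original author's code): run both heads for `T` steps, then read the membrane potential `v` of the final `NonSpikingLIFNode` as the continuous output.

def forward(self, x):
    for t in range(self.T):
        self.critic(x)
        self.actor(x)
    value = self.critic[-1].v         # continuous value estimate
    action_scores = self.actor[-1].v  # typically passed through a softmax for a policy
    return action_scores, value

Neuron states should be cleared between independent forward passes, e.g. with `functional.reset_net`.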
Example #6
def __init__(self, hidden_num):
    super().__init__()
    self.fc = nn.Sequential(
        nn.Linear(4, hidden_num),
        neuron.IFNode(),
        nn.Linear(hidden_num, 2),
        NonSpikingLIFNode(tau=2.0)
    )
    self.T = 16
Example #7
def __init__(self, num_inputs, num_outputs, hidden_size, T=16, std=0.0):
    super(ActorCritic, self).__init__()

    self.critic = nn.Sequential(nn.Linear(num_inputs, hidden_size),
                                neuron.IFNode(),
                                nn.Linear(hidden_size, 1),
                                NonSpikingLIFNode(tau=2.0))

    self.actor = nn.Sequential(nn.Linear(num_inputs, hidden_size),
                               neuron.IFNode(),
                               nn.Linear(hidden_size, num_outputs),
                               NonSpikingLIFNode(tau=2.0))

    # Learnable log standard deviation for a Gaussian policy over continuous actions
    self.log_std = nn.Parameter(torch.ones(1, num_outputs) * std)

    self.T = T
Example #8
def replace_by_ifnode(model):
    for name, module in model._modules.items():
        if hasattr(module, "_modules"):
            # Recurse into child modules first
            model._modules[name] = Converter.replace_by_ifnode(module)
            # A (ReLU, VoltageHook) pair marks an activation whose scale s was
            # recorded during calibration; replace it with a scaled IF neuron.
            if module.__class__.__name__ == 'Sequential' and len(module) == 2 and \
                    module[0].__class__.__name__ == 'ReLU' and \
                    module[1].__class__.__name__ == 'VoltageHook':
                s = module[1].scale.item()
                model._modules[name] = nn.Sequential(
                    VoltageScaler(1.0 / s),
                    neuron.IFNode(v_threshold=1., v_reset=None),
                    VoltageScaler(s))
    return model
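This pass is the final step of SpikingJelly's ANN-to-SNN conversion: calibration first wraps each `ReLU` in a `Sequential` with a `VoltageHook` that records the activation scale `s`, and `replace_by_ifnode` then rewrites each pair as `VoltageScaler(1/s) -> IFNode -> VoltageScaler(s)`, so the IF neuron's firing rate approximates the normalized ReLU output. A hedged sketch of the surrounding converter API (names follow the SpikingJelly tutorial; `ann` and `train_loader` are assumed to exist):

from spikingjelly.activation_based import ann2snn

# ann: a trained torch.nn.Module built from ReLU activations
# train_loader: data used to calibrate the per-layer voltage scales
converter = ann2snn.Converter(mode='max', dataloader=train_loader)
snn = converter(ann)  # inserts VoltageHooks, calibrates, then applies replace_by_ifnode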
Example #9
from matplotlib import pyplot as plt
import torch
from spikingjelly.activation_based import neuron
from spikingjelly import visualizing
import numpy as np
import matplotlib

with torch.no_grad():
    # Requires SciencePlots package: https://github.com/garrettj403/SciencePlots
    plt.style.use(['science'])

    # IFNode in activation_based does not take a monitor_state argument;
    # record spikes and membrane potentials manually instead.
    if_node = neuron.IFNode(v_reset=None)
    T = 25
    x = torch.arange(0, 1.04, 0.04)
    s_list, v_list = [], []
    for t in range(T):
        s_list.append(if_node(x))
        v_list.append(if_node.v)

    s_t_array = torch.stack(s_list).numpy().T  # s_t_array[i][j]: spike of neuron i at step j, 0 or 1
    v_t_array = torch.stack(v_list).numpy().T  # v_t_array[i][j]: membrane potential of neuron i at step j

    fig = plt.figure(dpi=125, tight_layout=True)
    gs = matplotlib.gridspec.GridSpec(2, 5)

    # plot_1d_spikes
    spikes_map = fig.add_subplot(gs[0, :4])
    firing_rate_map = fig.add_subplot(gs[0, 4])

    spikes_map.set_title('Spike Events')
    spikes_map.set_xlabel('t')