Code example #1
    def __init__(self, tau, T, v_threshold=1.0, v_reset=0.0):
        super().__init__()
        self.T = T

        self.static_conv = nn.Sequential(
            nn.Conv2d(1, 128, kernel_size=3, padding=1, bias=False),
            nn.BatchNorm2d(128),
        )

        self.conv = nn.Sequential(
            neuron.IFNode(v_threshold=v_threshold, v_reset=v_reset, surrogate_function=surrogate.ATan()),
            nn.MaxPool2d(2, 2),  # 14 * 14

            nn.Conv2d(128, 128, kernel_size=3, padding=1, bias=False),
            nn.BatchNorm2d(128),
            neuron.IFNode(v_threshold=v_threshold, v_reset=v_reset, surrogate_function=surrogate.ATan()),
            nn.MaxPool2d(2, 2)  # 7 * 7

        )
        self.fc = nn.Sequential(
            nn.Flatten(),
            layer.Dropout(0.7),
            nn.Linear(128 * 7 * 7, 128 * 3 * 3, bias=False),
            neuron.LIFNode(tau=tau, v_threshold=v_threshold, v_reset=v_reset, surrogate_function=surrogate.ATan()),
            layer.Dropout(0.7),
            nn.Linear(128 * 3 * 3, 128, bias=False),
            neuron.LIFNode(tau=tau, v_threshold=v_threshold, v_reset=v_reset, surrogate_function=surrogate.ATan()),
            nn.Linear(128, 10, bias=False),
            neuron.LIFNode(tau=tau, v_threshold=v_threshold, v_reset=v_reset, surrogate_function=surrogate.ATan()),
        )
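
The excerpt shows only the constructor. A plausible rate-coded forward pass, modeled on SpikingJelly's CSNN tutorials, repeats the stateful spiking layers for self.T time steps over the shared static_conv output (a hedged sketch; the method body below is an assumption, not the project's confirmed code):

    def forward(self, x):
        x = self.static_conv(x)  # analog stem, computed once per sample
        out_spikes = self.fc(self.conv(x))  # t = 0
        for t in range(1, self.T):
            out_spikes = out_spikes + self.fc(self.conv(x))
        # Firing rates of the 10 output neurons; callers should reset the
        # network state between batches, e.g. with functional.reset_net(net).
        return out_spikes / self.T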
Code example #2
    def __init__(self,
                 T=8,
                 v_threshold=1.0,
                 v_reset=0.0,
                 tau=2.0,
                 surrogate_function=surrogate.ATan()):
        super().__init__()

        self.train_times = 0
        self.epochs = 0
        self.max_test_accuracy = 0
        self.T = T

        self.static_fc = nn.Sequential(
            nn.Flatten(),
            nn.Linear(784, 800, bias=False),
        )
        self.fc = nn.Sequential(
            neuron.LIFNode(v_threshold=v_threshold,
                           v_reset=v_reset,
                           tau=tau,
                           surrogate_function=surrogate_function,
                           detach_reset=True), nn.Linear(800, 10, bias=False),
            neuron.LIFNode(v_threshold=v_threshold,
                           v_reset=v_reset,
                           tau=tau,
                           surrogate_function=surrogate_function,
                           detach_reset=True))
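
A minimal usage sketch for this fully connected variant, assuming the constructor belongs to a class named Net(nn.Module) (the class name, the rate-coded loop, and the import of spikingjelly.clock_driven.functional are all assumptions):

    net = Net(T=8)
    x = torch.rand(4, 1, 28, 28)  # dummy MNIST-shaped batch
    stim = net.static_fc(x)       # analog input layer, computed once
    rates = sum(net.fc(stim) for _ in range(net.T)) / net.T  # [4, 10] firing rates
    functional.reset_net(net)     # clear membrane potentials between batches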
Code example #3
def cmp_speed():
    def forward_backward(lif, x, T):
        spikes = 0
        for t in range(T):
            spikes += lif(x)
        spikes.sum().backward()
        x.grad.zero_()
        lif.reset()

    lif_c = wrapper.neuron.LIFNode(tau=100.0)
    lif_p = sj_neuron.LIFNode(tau=100.0,
                              surrogate_function=sj_surrogate.ATan(alpha=2))
    print(lif_c, lif_p)
    device = 'cuda:0'
    lif_c.to(device)
    lif_p.to(device)

    x = torch.rand([64, 1024], device=device) * 2
    print(x)
    x.requires_grad_(True)

    t_p = wrapper.cal_fun_t(1024, device, forward_backward, lif_p, x, 16)
    x.grad.zero_()
    t_c = wrapper.cal_fun_t(1024, device, forward_backward, lif_c, x, 16)
    x.grad.zero_()

    print(t_c, t_p, 'CUDA speed up =', t_p / t_c)
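
wrapper.cal_fun_t is not shown in the excerpt. Judging from the call sites, it measures the mean wall-clock time of f(*args) over n repeats; a minimal stand-in with proper CUDA synchronization might look like the following (the real helper may differ):

import time

def cal_fun_t(n, device, f, *args):
    f(*args)                        # warm-up call, so lazy CUDA init is not timed
    torch.cuda.synchronize(device)
    start = time.perf_counter()
    for _ in range(n):
        f(*args)
    torch.cuda.synchronize(device)
    return (time.perf_counter() - start) / n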
Code example #4
File: models.py, Project: Thvnvtos/DVSGesture-CFSL
 def __init__(self, channels: int = 128):
     super().__init__()
     conv = []
     conv.extend(SJSNN.conv3x3(2, channels))
     conv.append(nn.MaxPool2d(2, 2))
     for i in range(4):
         conv.extend(SJSNN.conv3x3(channels, channels))
         conv.append(nn.MaxPool2d(2, 2))
     # Assemble the model once, after the loop has collected all conv blocks
     self.conv = nn.Sequential(*conv)
     self.fc = nn.Sequential(
         nn.Flatten(), layer.Dropout(0.5),
         nn.Linear(channels * 4 * 4, channels * 2 * 2, bias=False),
         neuron.LIFNode(tau=2.0,
                        surrogate_function=surrogate.ATan(),
                        detach_reset=True), layer.Dropout(0.5),
         nn.Linear(channels * 2 * 2, 110, bias=False),
         neuron.LIFNode(tau=2.0,
                        surrogate_function=surrogate.ATan(),
                        detach_reset=True))
     self.vote = VotingLayer(10)
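
VotingLayer is not defined in the excerpt. DVS Gesture has 11 classes, and the 110-unit output acts as a population code: each group of 10 neurons votes for one class by average pooling. A hedged sketch of such a layer, assuming the constructor argument is the group size (the project's own implementation may differ):

import torch.nn.functional as F

class VotingLayer(nn.Module):
    def __init__(self, voting_size: int = 10):
        super().__init__()
        self.voting_size = voting_size

    def forward(self, x: torch.Tensor):
        # [N, 110] -> [N, 11]: average every group of voting_size neurons
        return F.avg_pool1d(x.unsqueeze(1), self.voting_size).squeeze(1)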
Code example #5
File: models.py, Project: Thvnvtos/DVSGesture-CFSL
 def conv3x3(in_channels: int, out_channels):
     return [
         nn.Conv2d(in_channels,
                   out_channels,
                   kernel_size=3,
                   padding=1,
                   bias=False),
         nn.BatchNorm2d(out_channels),
         neuron.LIFNode(tau=2.0,
                        surrogate_function=surrogate.ATan(),
                        detach_reset=True)
     ]
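
A quick shape check for this helper, assuming the usual imports (torch, torch.nn as nn, and neuron/surrogate from spikingjelly.clock_driven) and that conv3x3 is a static method of the SJSNN class, as code example #4 suggests:

block = nn.Sequential(*SJSNN.conv3x3(2, 128), nn.MaxPool2d(2, 2))
y = block(torch.rand(1, 2, 128, 128))  # DVS frames have 2 polarity channels
print(y.shape)  # torch.Size([1, 128, 64, 64]): conv keeps size, pooling halves it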
Code example #6
def cmp_voltage():
    lif_c = wrapper.neuron.LIFNode(tau=100.0)
    lif_p = sj_neuron.LIFNode(tau=100.0,
                              surrogate_function=sj_surrogate.ATan(alpha=2))
    print(lif_c, lif_p)
    device = 'cuda:0'
    lif_c.to(device)
    lif_p.to(device)
    lif_c = lif_c.half()
    lif_p = lif_p.half()
    T = 100
    neuron_num = 1024
    x = torch.rand([T, neuron_num], device=device) * 5
    x = x.half()
    # Compare the membrane potentials of the CUDA and Python LIF neurons step by step
    with torch.no_grad():
        for t in range(T):
            lif_c(x[t])
            lif_p(x[t])
            print((lif_c.v - lif_p.v).abs_().max().item())
        lif_c.reset()
        lif_p.reset()

    s_c = 0
    s_p = 0
    x_c = x.clone()
    x_c.requires_grad_(True)
    x_p = x.clone()
    x_p.requires_grad_(True)

    for t in range(T):
        s_c += lif_c(x_c[t])
        s_p += lif_p(x_p[t])
    print(s_c)
    print(s_p)
    lif_ctt = wrapper.neuron.MultiStepLIFNode(tau=100.0)
    lif_ctt.to(device)
    lif_ctt = lif_ctt.half()
    x_ctt = x.clone()
    x_ctt.requires_grad_(True)
    s_ctt = lif_ctt(x_ctt)
    with torch.no_grad():
        print(s_ctt.sum(0))
        print((lif_c.v - lif_ctt.v).abs_().max().item())
    s_ctt.sum().backward()
    print('CTT grad', x_ctt.grad)

    s_p.sum().backward()
    print('Python grad', x_p.grad)
    s_c.sum().backward()
    print('CUDA grad', x_c.grad)
Code example #7
File: models.py, Project: Thvnvtos/SNN_vs_ANN
	def __init__(self, in_c=1, nf=[16, 32], ks=[5, 5], T=10, v_threshold=1.0, v_reset=0.0, dropout=0.5, use_softmax=False, tau=2.0, lif=False):
		super().__init__()
		self.T = T
		self.use_softmax = use_softmax

		self.static_conv = nn.Sequential(
			nn.Conv2d(in_c, nf[0], kernel_size=ks[0], bias=False),
			nn.BatchNorm2d(nf[0])
			)

		self.dim1 = (28 - ks[0] + 1) // 2
		self.conv = nn.Sequential(
			neuron.IFNode(v_threshold=v_threshold, v_reset=v_reset, surrogate_function=surrogate.ATan(), detach_reset=True),
			nn.MaxPool2d(2, 2),

			nn.Conv2d(nf[0], nf[1], kernel_size=ks[1], bias=False),
			nn.BatchNorm2d(nf[1]),
			neuron.IFNode(v_threshold=v_threshold, v_reset=v_reset, surrogate_function=surrogate.ATan(), detach_reset=True),
			nn.MaxPool2d(2, 2)
		)
		if lif:
			self.conv[0] = neuron.LIFNode(tau=tau, v_threshold=v_threshold, v_reset=v_reset, surrogate_function=surrogate.ATan())
			self.conv[4] = neuron.LIFNode(tau=tau, v_threshold=v_threshold, v_reset=v_reset, surrogate_function=surrogate.ATan())

		self.dim2 = (self.dim1 - ks[1] + 1) // 2
		self.fc = nn.Sequential(
			nn.Flatten(),
			layer.Dropout(dropout),
			nn.Linear(nf[1]*self.dim2**2, 10, bias=False),
			neuron.IFNode(v_threshold=v_threshold, v_reset=v_reset, surrogate_function=surrogate.ATan(), detach_reset=True)
		)
			
		if lif:
			self.fc[-1] = neuron.LIFNode(tau=tau, v_threshold=v_threshold, v_reset=v_reset, surrogate_function=surrogate.ATan())
			
		if self.use_softmax:
			self.softmax = nn.Softmax(dim=1)
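
With the default kernel sizes ks=[5, 5], the tracked feature-map sizes work out as follows, which is why the classifier sees nf[1] * self.dim2 ** 2 input features:

# 28x28 input, 5x5 valid convolution, then 2x2 max pooling:
# dim1 = (28 - 5 + 1) // 2 = 24 // 2 = 12
# dim2 = (12 - 5 + 1) // 2 =  8 // 2 = 4
# flattened classifier input: nf[1] * 4 * 4 = 32 * 16 = 512 features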
Code example #8
    def __init__(self,
                 T=8,
                 v_threshold=1.0,
                 v_reset=0.0,
                 tau=2.0,
                 surrogate_function=surrogate.ATan()):
        super().__init__()

        self.train_times = 0
        self.epochs = 0
        self.max_test_accuracy = 0
        self.T = T

        self.static_conv = nn.Sequential(
            nn.Conv2d(3, 256, kernel_size=3, padding=1, bias=False),
            nn.BatchNorm2d(256),
        )

        self.conv = nn.Sequential(
            neuron.LIFNode(v_threshold=v_threshold,
                           v_reset=v_reset,
                           tau=tau,
                           surrogate_function=surrogate_function,
                           detach_reset=True),
            nn.Conv2d(256, 256, kernel_size=3, padding=1, bias=False),
            nn.BatchNorm2d(256),
            neuron.LIFNode(v_threshold=v_threshold,
                           v_reset=v_reset,
                           tau=tau,
                           surrogate_function=surrogate_function,
                           detach_reset=True),
            nn.Conv2d(256, 256, kernel_size=3, padding=1, bias=False),
            nn.BatchNorm2d(256),
            neuron.LIFNode(v_threshold=v_threshold,
                           v_reset=v_reset,
                           tau=tau,
                           surrogate_function=surrogate_function,
                           detach_reset=True),
            nn.MaxPool2d(2, 2),  # 16 * 16
            nn.Conv2d(256, 256, kernel_size=3, padding=1, bias=False),
            nn.BatchNorm2d(256),
            neuron.LIFNode(v_threshold=v_threshold,
                           v_reset=v_reset,
                           tau=tau,
                           surrogate_function=surrogate_function,
                           detach_reset=True),
            nn.Conv2d(256, 256, kernel_size=3, padding=1, bias=False),
            nn.BatchNorm2d(256),
            neuron.LIFNode(v_threshold=v_threshold,
                           v_reset=v_reset,
                           tau=tau,
                           surrogate_function=surrogate_function,
                           detach_reset=True),
            nn.Conv2d(256, 256, kernel_size=3, padding=1, bias=False),
            nn.BatchNorm2d(256),
            neuron.LIFNode(v_threshold=v_threshold,
                           v_reset=v_reset,
                           tau=tau,
                           surrogate_function=surrogate_function,
                           detach_reset=True),
            nn.MaxPool2d(2, 2)  # 8 * 8
        )
        self.fc = nn.Sequential(
            nn.Flatten(), layer.Dropout(0.5),
            nn.Linear(256 * 8 * 8, 128 * 4 * 4, bias=False),
            neuron.LIFNode(v_threshold=v_threshold,
                           v_reset=v_reset,
                           tau=tau,
                           surrogate_function=surrogate_function,
                           detach_reset=True),
            nn.Linear(128 * 4 * 4, 100, bias=False),
            neuron.LIFNode(v_threshold=v_threshold,
                           v_reset=v_reset,
                           tau=tau,
                           surrogate_function=surrogate_function,
                           detach_reset=True))
        self.boost = nn.AvgPool1d(10, 10)
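
Here the 100 LIF outputs form a population code for 10 classes: self.boost (AvgPool1d with kernel and stride 10) averages every group of 10 neurons into one class score. A hedged forward-pass sketch in the style of SpikingJelly's CIFAR-10 examples (the method body is an assumption):

    def forward(self, x):
        x = self.static_conv(x)
        out = self.boost(self.fc(self.conv(x)).unsqueeze(1)).squeeze(1)  # t = 0
        for t in range(1, self.T):
            out = out + self.boost(self.fc(self.conv(x)).unsqueeze(1)).squeeze(1)
        return out / self.T  # [N, 10] class firing rates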
Code example #9
def main():
    '''
    :return: None

    A network with a fully-connected-LIF structure for classifying MNIST. This function initializes the network,
    trains it, and reports the accuracy on the test dataset.
    '''
    device = input('input device, e.g., "cpu" or "cuda:0": ')
    dataset_dir = input('input root directory for saving the MNIST dataset, e.g., "./": ')
    batch_size = int(input('input batch_size, e.g., "64": '))
    learning_rate = float(input('input learning rate, e.g., "1e-3": '))
    T = int(input('input number of simulation time steps, e.g., "100": '))
    tau = float(input('input membrane time constant tau for the LIF neurons, e.g., "100.0": '))
    train_epoch = int(input('input number of training epochs, e.g., "100": '))
    log_dir = input('input root directory for saving tensorboard logs, e.g., "./": ')

    writer = SummaryWriter(log_dir)

    # Initialize the data loaders
    train_dataset = torchvision.datasets.MNIST(
        root=dataset_dir,
        train=True,
        transform=torchvision.transforms.ToTensor(),
        download=True)
    test_dataset = torchvision.datasets.MNIST(
        root=dataset_dir,
        train=False,
        transform=torchvision.transforms.ToTensor(),
        download=True)

    train_data_loader = torch.utils.data.DataLoader(dataset=train_dataset,
                                                    batch_size=batch_size,
                                                    shuffle=True,
                                                    drop_last=True)
    test_data_loader = torch.utils.data.DataLoader(dataset=test_dataset,
                                                   batch_size=batch_size,
                                                   shuffle=False,
                                                   drop_last=False)

    # Define and initialize the network
    net = nn.Sequential(nn.Flatten(), nn.Linear(28 * 28, 10, bias=False),
                        neuron.LIFNode(tau=tau))
    net = net.to(device)
    # Use the Adam optimizer
    optimizer = torch.optim.Adam(net.parameters(), lr=learning_rate)
    # Use a Poisson encoder
    encoder = encoding.PoissonEncoder()
    train_times = 0
    max_test_accuracy = 0

    test_accs = []
    train_accs = []

    for epoch in range(train_epoch):
        net.train()
        for img, label in tqdm(train_data_loader):
            img = img.to(device)
            label = label.to(device)
            label_one_hot = F.one_hot(label, 10).float()

            optimizer.zero_grad()

            # Run the network for T time steps; out_spikes_counter is a tensor of shape [batch_size, 10]
            # that accumulates the number of spikes fired by each of the 10 output neurons over the simulation
            for t in range(T):
                if t == 0:
                    out_spikes_counter = net(encoder(img).float())
                else:
                    out_spikes_counter += net(encoder(img).float())

            # out_spikes_counter / T is the firing rate of the 10 output neurons over the simulation duration
            out_spikes_counter_frequency = out_spikes_counter / T

            # The loss is the MSE between the output firing rates and the one-hot encoding of the true label,
            # so that for an input of class i, the firing rate of output neuron i is pushed towards 1 and the others towards 0
            loss = F.mse_loss(out_spikes_counter_frequency, label_one_hot)
            loss.backward()
            optimizer.step()
            # The network state must be reset after each optimization step, because SNN neurons have "memory"
            functional.reset_net(net)

            # Accuracy: the predicted class is the index of the output neuron with the highest firing rate
            accuracy = (out_spikes_counter_frequency.max(1)[1] == label).float().mean().item()

            writer.add_scalar('train_accuracy', accuracy, train_times)
            train_accs.append(accuracy)

            train_times += 1
        net.eval()
        with torch.no_grad():
            # Evaluate on the test set after each full pass over the training set
            test_sum = 0
            correct_sum = 0
            for img, label in test_data_loader:
                img = img.to(device)
                for t in range(T):
                    if t == 0:
                        out_spikes_counter = net(encoder(img).float())
                    else:
                        out_spikes_counter += net(encoder(img).float())

                correct_sum += (out_spikes_counter.max(1)[1] == label.to(
                    device)).float().sum().item()
                test_sum += label.numel()
                functional.reset_net(net)
            test_accuracy = correct_sum / test_sum
            writer.add_scalar('test_accuracy', test_accuracy, epoch)
            test_accs.append(test_accuracy)
            max_test_accuracy = max(max_test_accuracy, test_accuracy)
        print(
            f'Epoch {epoch}: device={device}, dataset_dir={dataset_dir}, batch_size={batch_size}, learning_rate={learning_rate}, T={T}, log_dir={log_dir}, max_test_accuracy={max_test_accuracy}, train_times={train_times}'
        )

    # Save data for plotting
    net.eval()
    functional.set_monitor(net, True)
    with torch.no_grad():
        img, label = test_dataset[0]
        img = img.to(device)
        for t in range(T):
            if t == 0:
                out_spikes_counter = net(encoder(img).float())
            else:
                out_spikes_counter += net(encoder(img).float())
        out_spikes_counter_frequency = (out_spikes_counter / T).cpu().numpy()
        print(f'Firing rate: {out_spikes_counter_frequency}')
        output_layer = net[-1]  # the output LIF layer
        # v_t_array[i][j] is the membrane potential of neuron i at time step j
        v_t_array = np.asarray(output_layer.monitor['v']).squeeze().T
        np.save("v_t_array.npy", v_t_array)
        # s_t_array[i][j] is the spike (0 or 1) emitted by neuron i at time step j
        s_t_array = np.asarray(output_layer.monitor['s']).squeeze().T
        np.save("s_t_array.npy", s_t_array)

    train_accs = np.array(train_accs)
    np.save('train_accs.npy', train_accs)
    test_accs = np.array(test_accs)
    np.save('test_accs.npy', test_accs)
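
The saved arrays can then be inspected offline, e.g. with a minimal matplotlib sketch (not part of the original script):

import matplotlib.pyplot as plt

v_t_array = np.load('v_t_array.npy')  # [10, T] membrane potentials
plt.imshow(v_t_array, aspect='auto', cmap='viridis')
plt.xlabel('time step')
plt.ylabel('output neuron')
plt.colorbar(label='membrane potential')
plt.show()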
Code example #10
def forward_backward(multi_step_neuron, x):
    multi_step_neuron(x).sum().backward()
    multi_step_neuron.reset()
    x.grad.zero_()


def cal_forward_backward_t(multi_step_neuron, x, repeat_times):
    x.requires_grad_(True)
    used_t = cext.cal_fun_t(repeat_times, x.device, forward_backward,
                            multi_step_neuron, x)
    return used_t
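
# `cal_forward_t` is called below but not defined in this excerpt; a plausible
# forward-only counterpart of `cal_forward_backward_t` (an assumption; neuron
# state is left to carry over between repeats, which does not affect the
# measured time):
def cal_forward_t(multi_step_neuron, x, repeat_times):
    with torch.no_grad():
        return cext.cal_fun_t(repeat_times, x.device, multi_step_neuron, x)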


device = 'cuda:0'
lif = layer.MultiStepContainer(
    neuron.LIFNode(surrogate_function=surrogate.ATan(alpha=2.0)))
lif_cuda = layer.MultiStepContainer(
    cext_neuron.LIFNode(surrogate_function='ATan', alpha=2.0))
lif_cuda_tt = cext_neuron.MultiStepLIFNode(surrogate_function='ATan',
                                           alpha=2.0)
lif.to(device)
lif_cuda.to(device)
lif_cuda_tt.to(device)
N = 128 * 16 * 16
T = 64
x = torch.rand(T, N, device=device)
print(cal_forward_t(lif, x, 1024))
print(cal_forward_t(lif_cuda, x, 1024))
print(cal_forward_t(lif_cuda_tt, x, 1024))

print(cal_forward_backward_t(lif, x, 1024))