Example #1
    def __init__(self):
        super(MozafariMNIST2018, self).__init__()

        # first convolutional layer: 6 input channels -> 30 feature maps, 5x5 kernel
        # (weight mean 0.8, weight std 0.05)
        self.conv1 = snn.Convolution(6, 30, 5, 0.8, 0.05)
        self.conv1_t = 15  # firing threshold of conv1
        self.k1 = 5        # number of winners (k-WTA) after conv1
        self.r1 = 3        # lateral inhibition radius after conv1

        # second convolutional layer: 30 -> 250 feature maps, 3x3 kernel
        self.conv2 = snn.Convolution(30, 250, 3, 0.8, 0.05)
        self.conv2_t = 10  # firing threshold of conv2
        self.k2 = 8        # number of winners (k-WTA) after conv2
        self.r2 = 2        # lateral inhibition radius after conv2

        # decision layer: 250 -> 200 feature maps (20 per class, see decision_map below)
        self.conv3 = snn.Convolution(250, 200, 5, 0.8, 0.05)

        self.stdp1 = snn.STDP(self.conv1, (0.004, -0.003))
        self.stdp2 = snn.STDP(self.conv2, (0.004, -0.003))
        # the decision layer is trained with reward-modulated plasticity:
        # STDP on reward, anti-STDP on punishment, both without the
        # stabilizer and with weights bounded to [0.2, 0.8]
        self.stdp3 = snn.STDP(self.conv3, (0.004, -0.003), False, 0.2, 0.8)
        self.anti_stdp3 = snn.STDP(self.conv3, (-0.004, 0.0005), False, 0.2,
                                   0.8)
        self.max_ap = Parameter(torch.Tensor([0.15]))  # cap for the adaptive learning rate

        # map each of the 200 output feature maps to its digit class (20 maps per class)
        self.decision_map = []
        for i in range(10):
            self.decision_map.extend([i] * 20)

        # context filled by the forward pass and consumed by the STDP updates
        self.ctx = {
            "input_spikes": None,
            "potentials": None,
            "output_spikes": None,
            "winners": None
        }
        # spike counters used to adapt the learning rates of conv1/conv2
        self.spk_cnt1 = 0
        self.spk_cnt2 = 0
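Note: these __init__ snippets are shown without their surrounding imports and class declaration. A minimal skeleton they assume (a sketch, not part of the original examples) is:

import torch
from torch.nn.parameter import Parameter
import SpykeTorch.snn as snn

class MozafariMNIST2018(torch.nn.Module):
    def __init__(self):
        super(MozafariMNIST2018, self).__init__()
        # ... rest of the body as shown above ...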
Example #2
    def __init__(self):
        super(KheradpishehMNIST, self).__init__()

        self.conv1 = snn.Convolution(2, 32, 5, 0.8, 0.05)
        self.conv1_t = 10
        self.k1 = 5
        self.r1 = 2

        self.conv2 = snn.Convolution(32, 150, 2, 0.8, 0.05)
        self.conv2_t = 1
        self.k2 = 8
        self.r2 = 1

        self.stdp1 = snn.STDP(self.conv1, (0.004, -0.003))
        self.stdp2 = snn.STDP(self.conv2, (0.004, -0.003))
        self.max_ap = Parameter(torch.Tensor([0.15]))

        self.ctx = {
            "input_spikes": None,
            "potentials": None,
            "output_spikes": None,
            "winners": None
        }
        self.spk_cnt1 = 0
        self.spk_cnt2 = 0
Example #3
    def __init__(self):
        super(CTNN, self).__init__()
        self.conv1 = snn.Convolution(2, 30, 5, 0.8, 0.02)  # (in_channels, out_channels, kernel_size, weight_mean=0.8, weight_std=0.02)
        self.conv2 = snn.Convolution(30, 100, 5, 0.8, 0.02)
        #self.conv3 = snn.Convolution(250, 200, 5, 0.8, 0.05)

        self.stdp1 = snn.STDP(self.conv1, (0.004, -0.003))
        self.stdp2 = snn.STDP(self.conv2, (0.004, -0.003))
        self.ctx = {"input_spikes": None, "potentials": None, "output_spikes":None, "winners":None}
Example #4
    def __init__(self):
        super(CTNN, self).__init__()
        self.conv1 = snn.Convolution(2, 30, 7, 0.8, 0.02)  # (in_channels, out_channels, kernel_size, weight_mean=0.8, weight_std=0.02)
        self.conv2 = snn.Convolution(30, 100, 5, 0.8, 0.05)
        self.conv3 = snn.Convolution(2, 30, 7, 0.8, 0.02)
        #self.conv4 = snn.Convolution(100, 200, 3, 0.8, 0.05)

        self.stdp1 = snn.STDP(self.conv1, (0.004, -0.003))
        self.stdp2 = snn.STDP(self.conv2, (0.004, -0.003))
        self.stdp3 = snn.STDP(self.conv3, (0.004, -0.003))
        self.ctx = {"input_spikes": None, "potentials": None, "output_spikes":None, "winners":None}

        self.pool = torch.nn.AdaptiveMaxPool3d((30,18,18))
Example #5
    def __init__(self,
                 input_channels,
                 features_per_class,
                 number_of_classes,
                 s2_kernel_size,
                 threshold,
                 stdp_lr,
                 anti_stdp_lr,
                 dropout=0.):
        super(Mozafari2018, self).__init__()
        self.features_per_class = features_per_class
        self.number_of_classes = number_of_classes
        self.number_of_features = features_per_class * number_of_classes
        self.kernel_size = s2_kernel_size
        self.threshold = threshold
        self.stdp_lr = stdp_lr
        self.anti_stdp_lr = anti_stdp_lr
        # per-feature dropout mask: a Bernoulli draw selects which feature maps to drop
        self.dropout = torch.ones(self.number_of_features) * dropout
        self.to_be_dropped = torch.bernoulli(self.dropout).nonzero()

        self.s2 = snn.Convolution(input_channels, self.number_of_features,
                                  self.kernel_size, 0.8, 0.05)
        self.stdp = snn.STDP(self.s2, stdp_lr)
        self.anti_stdp = snn.STDP(self.s2, anti_stdp_lr)
        self.decision_map = []
        for i in range(number_of_classes):
            self.decision_map.extend([i] * features_per_class)

        self.ctx = {
            "input_spikes": None,
            "potentials": None,
            "output_spikes": None,
            "winners": None
        }
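A possible instantiation of this constructor (the argument values below are illustrative assumptions, not taken from the example):

net = Mozafari2018(input_channels=6,
                   features_per_class=10,
                   number_of_classes=10,
                   s2_kernel_size=(17, 17),
                   threshold=42,
                   stdp_lr=(0.004, -0.003),
                   anti_stdp_lr=(-0.004, 0.0005),
                   dropout=0.5)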
Example #6
# "indices" (a shuffled list of dataset indices) and "dataset" are assumed to be defined earlier in the script
split_point = int(0.75 * len(indices))
train_indices = indices[:split_point]
test_indices = indices[split_point:]
print("Size of the training set:", len(train_indices))
print("Size of the  testing set:", len(test_indices))
from torch.utils.data import DataLoader, SubsetRandomSampler

import SpykeTorch.utils as utils

dataset = utils.CacheDataset(dataset)  # cache processed samples so later iterations skip the transform
train_loader = DataLoader(dataset, sampler=SubsetRandomSampler(train_indices))
test_loader = DataLoader(dataset, sampler=SubsetRandomSampler(test_indices))

import SpykeTorch.snn as snn
import SpykeTorch.functional as sf

pool = snn.Pooling(kernel_size=3, stride=2)
conv = snn.Convolution(in_channels=4, out_channels=20, kernel_size=30)
stdp = snn.STDP(conv_layer=conv, learning_rate=(0.05, -0.015))
for iteration in range(300):
    print('\rIteration:', iteration, end="")
    for data, _ in train_loader:
        for x in data:
            x = pool(x)  # pool the input spike-wave
            p = conv(x)  # membrane potentials of the convolutional layer
            # spikes and thresholded potentials of the layer (threshold = 20)
            o, p = sf.fire(p, 20, return_thresholded_potentials=True)
            # pick the winner neuron for the STDP update (k-WTA with k=1)
            winners = sf.get_k_winners(p,
                                       kwta=1,
                                       inhibition_radius=0,
                                       spikes=o)
            stdp(x, p, o, winners)  # apply the unsupervised STDP update to the winner
print()
print("Unsupervised Training is Done.")