Code example #1 (score: 0)
    def setUp(self):
        """Load the network config and prepare the dataset and upsampler fixtures."""
        params = snn.params('../data/mnist/network.yaml')
        self.netParams = params

        # Dataset parameters come straight from the YAML spec:
        # sampling step and total sample window define the time binning.
        self.dataset = SMNIST(
            datasetPath=params['training']['path']['in'],
            samplingTime=params['simulation']['Ts'],
            sampleLength=params['simulation']['tSample'],
        )
        # 2x nearest-neighbour upsampling module under test.
        self.m = UpSampling2D(scale_factor=2, mode='nearest')
Code example #2 (score: 0)
File: train_snn_mlp.py — Project: m990130/AML_SNN
from datetime import datetime
import numpy as np
import matplotlib.pyplot as plt
import torch
from torch.utils.data import Dataset, DataLoader
import lib.snn as snn
import lib.spikeFileIO as io
import zipfile
import os

# CONSTANTS

# Prefer the GPU when one is available; fall back to CPU otherwise.
USE_CUDA = torch.cuda.is_available()
#

# Load SNN/simulation hyper-parameters from the YAML network description.
# NOTE(review): path is relative to the working directory — presumably the
# script is run from the repository root; confirm.
netParams = snn.params('data/mnistMLP/network.yaml')
print(netParams)


# Dataset definition
class nmnistDataset(Dataset):
    """Spiking N-MNIST dataset backed by an on-disk sample index file."""

    def __init__(self, datasetPath, sampleFile, samplingTime, sampleLength):
        # Root directory holding the per-sample spike files.
        self.path = datasetPath
        # Each row of sampleFile is an integer pair; columns are used below as
        # (sample/file index, class label).
        self.samples = np.loadtxt(sampleFile).astype('int')
        # Simulation time step — units presumably ms (matches 'Ts' in the
        # YAML configs elsewhere in this project); confirm.
        self.samplingTime = samplingTime
        # Number of discrete time bins spanning one sample window.
        self.nTimeBins = int(sampleLength / samplingTime)

    def __getitem__(self, index):
        # Column 0: sample/file id; column 1: ground-truth class label.
        inputIndex = self.samples[index, 0]
        classLabel = self.samples[index, 1]
        # NOTE(review): this excerpt is truncated here — the remainder of
        # __getitem__ (loading the spike data and returning it) is not visible.
Code example #3 (score: 0)
File: plot_pca.py — Project: m990130/AML_SNN
from tqdm import tqdm

import lib.snn as snn
from lib.datasets import SMNIST
from models.Classifiers import SlayerVgg16, SlayerSNN, SlayerSNN2
from lib.process.Training import Training
from itertools import combinations

# CONSTANTS:
# NOTE(review): `torch` is used below but its import is not visible in this
# excerpt — presumably imported above the shown lines.
USE_CUDA = torch.cuda.is_available()  # prefer GPU when available
EPOCHs = 6                            # number of training epochs
SMALL = False                         # presumably selects a reduced dataset — confirm in SMNIST
DATASETMODE = 'classification'        # dataset mode passed to SMNIST below
TRAIN = True                          # set False to skip the training branch

# Load network/simulation hyper-parameters from the YAML spec.
netParams = snn.params('network_specs/slayer_snn.yaml')
print(netParams)

if TRAIN:
    device = torch.device("cuda" if USE_CUDA else "cpu")

    # Create network instance.
    model = SlayerSNN2(netParams, input_channels=1).to(device)

    # Learning stats instance.
    stats = snn.learningStats()

    # Build the training split; `mode` presumably controls what __getitem__
    # returns (here: classification input/label pairs) — confirm in SMNIST.
    trainingSet = SMNIST(datasetPath=netParams['training']['path']['in'],
                         samplingTime=netParams['simulation']['Ts'],
                         sampleLength=netParams['simulation']['tSample'],
                         mode=DATASETMODE,
    # NOTE(review): excerpt truncated — the remaining keyword arguments and
    # the closing parenthesis of this SMNIST(...) call are not visible.
Code example #4 (score: 0)
File: Visualization.py — Project: m990130/AML_SNN
    def visualize(self, layer, kernel, size=56, lr=0.1, upscaling_steps=12, upscaling_factor=1.2,
                  optimization_steps=20):
        """Activation-maximization visualization for one kernel of one layer.

        Starts from a uniform-random RGB image, encodes it into the spiking
        domain, and runs Adam on the encoded voltage to maximize the mean
        activation of channel `kernel` captured from layer `layer` of
        self.model. Between rounds the image is upscaled and blurred so finer
        structure can emerge at each new resolution.

        Args:
            layer: index into list(self.model.children()) whose output is hooked.
            kernel: activation channel whose mean response is maximized.
            size: initial square image side length in pixels.
            lr: Adam learning rate for the image optimization.
            upscaling_steps: number of optimize-then-upscale rounds.
            upscaling_factor: per-round multiplicative growth of `size`.
            optimization_steps: Adam iterations per round.

        Side effects: displays intermediate/final images via matplotlib and
        writes "layer_<layer>_filter_<kernel>.jpg" to the working directory.
        """
        # generate random image
        img = np.random.uniform(0, 255, (size, size, 3)) / 255
        # register hook
        # SaveFeatures presumably records the hooked layer's forward output in
        # .features (read below) — confirm against its definition.
        activations = SaveFeatures(list(self.model.children())[layer])
        # create tranformers;

        # means = [0.485, 0.456, 0.406]
        # stds = [0.229, 0.224, 0.225]
        # normalize = transforms.Normalize(mean=means,
        #                                  std=stds)
        tranformation = transforms.Compose([
            # transforms.ToPILImage(),
            # transforms.Resize(size),
            transforms.ToTensor(),
            # normalize,
        ])
        # Simulation parameters; only 'tSample' (time window) is read below.
        netParams = snn.params('network_specs/slayer_cifar.yaml')
        encoder_psp = encoderpsp(netParams)
        encoder_spike = encoderSpikes(netParams)
        encoder_psp.to(self.device)
        encoder_spike.to(self.device)

        for i in range(upscaling_steps):

            # HWC float image -> tensor with a leading batch dimension.
            img_tensor = tranformation(img)
            img_tensor = img_tensor.unsqueeze(0).float()
            # Encode the static image as spikes over tSample time steps.
            img_spike = uniform_spike(img_tensor, netParams['simulation']['tSample'])
            img_spike = img_spike.to(self.device)
            # Spikes -> post-synaptic potential; this voltage is the quantity
            # actually optimized below.
            img_voltage = encoder_psp(img_spike)
            img_voltage = img_voltage.to(self.device)
            # NOTE(review): Variable is deprecated in modern PyTorch;
            # img_voltage.requires_grad_() is the current idiom.
            img_opt = Variable(img_voltage, requires_grad=True)

            optimizer = torch.optim.Adam([img_opt], lr=lr, weight_decay=1e-6)

            for ii in range(optimization_steps):
                optimizer.zero_grad()
                spikes = encoder_spike(img_opt)
                # Forward pass populates activations.features via the hook.
                self.model(spikes)
                # Negative mean activation: minimizing it maximizes the response.
                loss = -activations.features[0, kernel].mean()
                # loss = -img_opt.mean()
                loss.backward()
                optimizer.step()
                print('Current loss: opt_step ', ii, 'upscaling step', i, loss.item())

            # denormalization
            img = img_opt.data.cpu().numpy()[0]
            # Collapse the trailing axis (presumably the time dimension of the
            # spiking representation — confirm) back to a static image.
            img = img.sum(axis=-1)
            # Min-max normalize into [0, 1].
            img = img - img.min()
            img = img / img.max()
            # for c, (m, s) in enumerate(zip(means, stds)):
            #     img[c] = s * img[c] + m

            # CHW -> HWC for plotting and resizing.
            img = img.transpose(1, 2, 0)

            output = img.copy()
            plt.imshow(output), plt.show()

            # Grow the canvas and resample (order=3: cubic) for the next round.
            size = int(size * upscaling_factor)
            img = resize(img, (size, size, 3), order=3)

            # img = cv2.resize(img, (size, size), interpolation = cv2.INTER_CUBIC)  # scale image up
            img = cv2.blur(img, (3, 3))  # blur image to reduce high frequency patterns
            # img = uniform_filter(img, 3)
            # img = gaussian_filter(img, 0.08)
            # blur = 5
            # img = cv2.blur(img, (blur, blur))
            # img[:, :, 0] = gaussian_filter(img[:, :, 0], 0.5)
            # img[:, :, 1] = gaussian_filter(img[:, :, 1], 0.5)
            # img[:, :, 2] = gaussian_filter(img[:, :, 2], 0.5)
            # skimage.filters.gaussian_filter(im, 2, multichannel=True, mode='reflect', 'truncate=2')

        # `output` holds the last pre-resize image; clip and persist it.
        cropped_output = np.clip(output, 0, 1)
        fig = plt.figure()
        plt.imshow(cropped_output)
        plt.show()
        plt.imsave("layer_" + str(layer) + "_filter_" + str(kernel) + ".jpg", cropped_output)
        plt.close()
        # Detach the forward hook so it does not outlive this call.
        activations.close()
Code example #5 (score: 0)
from lib.datasets.mnistdataset import SMNIST
from lib.utils import spikeTensorToProb, save_model, load_model

from lib.process.Training import Training
from lib.process.Evaluation import Evaluation
from models.Autoencoder import VAE

# CONSTANTS:
# NOTE(review): `torch` and `snn` are used below but their imports are not
# visible in this excerpt — presumably imported above the shown lines.
USE_CUDA = torch.cuda.is_available()  # prefer GPU when available
EPOCHs = 500                          # number of training epochs
SMALL = True                          # presumably selects a reduced dataset — confirm
DATASETMODE = 'autoencoderSpike'      # dataset mode; presumably spike-in/spike-out pairs
MODEL_PTH = 'vae'                     # checkpoint name passed to load_model
LR = 0.0001                           # Adam learning rate

# Load network/simulation hyper-parameters from the YAML spec.
netParams = snn.params('network_specs/vae.yaml')
print(netParams)

device = torch.device("cuda" if USE_CUDA else "cpu")

# Create network instance.
model = VAE(netParams, hidden_size=100, latent_size=2).to(device)

# Load model
load_model(MODEL_PTH, model)
# Define optimizer module.
# amsgrad=True enables the AMSGrad variant of Adam.
optimizer = torch.optim.Adam(model.parameters(), lr=LR, amsgrad=True)

# Learning stats instance.
stats = snn.learningStats()
Code example #6 (score: 0)
from lib.datasets.mnistdataset import SMNIST
from lib.utils import spikeTensorToProb, save_model, load_model

from lib.process.Training import Training
from lib.process.Evaluation import Evaluation
from models.Autoencoder import UpsamplingAutoencoder, SimpleAutoencoder

# CONSTANTS:
# NOTE(review): `torch` and `snn` are used below but their imports are not
# visible in this excerpt — presumably imported above the shown lines.
USE_CUDA = torch.cuda.is_available()  # prefer GPU when available
EPOCHs = 200                          # number of training epochs
SMALL = False                         # presumably selects a reduced dataset — confirm
DATASETMODE = 'autoencoderSpike'      # dataset mode; presumably spike-in/spike-out pairs
MODEL_PTH = 'simple_autoencoder'      # checkpoint name passed to load_model
LR = 0.001                            # Adam learning rate

# Load network/simulation hyper-parameters from the YAML spec.
netParams = snn.params('network_specs/simple_autoencoder.yaml')
print(netParams)

device = torch.device("cuda" if USE_CUDA else "cpu")

# Create network instance.
model = SimpleAutoencoder(netParams, hidden_size=100,
                          latent_size=50).to(device)

# Load model
load_model(MODEL_PTH, model)
# Define optimizer module.
# amsgrad=True enables the AMSGrad variant of Adam.
optimizer = torch.optim.Adam(model.parameters(), lr=LR, amsgrad=True)

# Learning stats instance.
stats = snn.learningStats()