def calculate_metrics(data, target, receptive_field_length, threshold,
                      parameters=None, num_data=2000, isForced=False,
                      isSorted=False, isRSTDP=False):
    # Structure of the TNN
    num_outputs = 10

    # threshold indicates the highest filter spike time that can be considered
    layer1 = firstlayer.FirstLayer(layer_id=1,
                                   training_raw_data=data[0],
                                   threshold=8,
                                   receptive_field_length=receptive_field_length)
    receptive_field = (int(14 - receptive_field_length / 2),
                       int(14 - receptive_field_length / 2))

    # threshold indicates the max neuron sum before firing
    layer2 = layer.Layer(layer_id=2, num_neurons=num_outputs,
                         prev_layer=layer1, threshold=threshold)
    #layer3 = layer.Layer(layer_id=3, num_neurons=num_outputs, prev_layer=layer2, threshold=threshold)
    #layer4 = layer.Layer(layer_id=4, num_neurons=num_outputs, prev_layer=layer3, threshold=threshold)
    #layer5 = layer.Layer(layer_id=5, num_neurons=num_outputs, prev_layer=layer4, threshold=threshold)

    hidden_layers = []
    hidden_layers.append(layer2)
    #hidden_layers.append(layer3)
    #hidden_layers.append(layer4)
    #hidden_layers.append(layer5)

    # selects num_data random images: half for training, half for testing
    permutation = np.random.permutation(len(data))
    training = permutation[int(num_data / 2):num_data]
    test = permutation[:int(num_data / 2)]
    if isSorted:
        training = np.sort(training)

    # Training phase: generates spikes for layer 1 using 2 different filters
    training_results, assignments = evaluate(layer1, hidden_layers,
                                             data[training], target[training],
                                             receptive_field, parameters,
                                             True, None, isForced, isRSTDP)
    print(assignments)

    # Testing phase: reuse the neuron-to-label assignments learned above
    test_results = evaluate(layer1, hidden_layers, data[test], target[test],
                            receptive_field, parameters, False, assignments)
    return [training_results, test_results]
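# A minimal usage sketch (assumptions: `mnist.square_data` and `mnist.target`
# are loaded and reshaped as in the loader scripts below; the parameter
# values here are illustrative, not tuned).
training_results, test_results = calculate_metrics(
    data=mnist.square_data,     # (N, 28, 28) images
    target=mnist.target,        # (N,) digit labels
    receptive_field_length=10,  # hypothetical receptive field size
    threshold=15,               # hypothetical layer-2 firing threshold
    num_data=2000)              # 1000 training + 1000 test images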
def apply_filter(image, filter_type, cell_dict, image_width, image_height,
                 max_pixel_value):
    """Apply a filter to a normalized image.

    Args:
        image (np.arr): image to apply the filter to
        filter_type (str): which filter to apply (e.g. 'sobel')
        cell_dict (dict): contains keys for each surrounding pixel
        image_width (int): number of pixels wide
        image_height (int): number of pixels high
        max_pixel_value (int): the max value a spike position can have

    Returns:
        pixel_vals (np.arr of int): spike positions for the image
    """
    # create pixel values
    pixel_vals = my_filter.run(cell_dict, image, image_width, image_height)
    if filter_type == 'sobel':
        # rescale Sobel output into the [0, max_pixel_value] spike-time range
        sobel_data = firstlayer.FirstLayer(1, pixel_vals, 0)
        pixel_vals = sobel_data.scale_data(pixel_vals, 0, max_pixel_value + 1)
    return pixel_vals
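# Hypothetical usage sketch: `cell_dict` is assumed to be built elsewhere in
# the repo (it maps each pixel to its surrounding cells for `my_filter.run`).
spike_positions = apply_filter(image=mnist.square_data[0],
                               filter_type='sobel',
                               cell_dict=cell_dict,
                               image_width=28,
                               image_height=28,
                               max_pixel_value=8)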
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import fetch_mldata

import firstlayer as firstlayer

mnist = fetch_mldata('MNIST original')
N, _ = mnist.data.shape

# Reshape the data to be square
mnist.square_data = mnist.data.reshape(N, 28, 28)

layer1 = firstlayer.FirstLayer(1, mnist.square_data, mnist.target)

# 3. On Center Off Center Filtering
def off_center_filter(data):
    ret = np.zeros(data.shape)
    for n in range(len(data)):
        pic = data[n]
        for x in range(1, 27):
            for y in range(1, 27):
                # Sum and average of the 8 surrounding pixels
                s = pic[x-1][y-1] + pic[x-1][y] + pic[x-1][y+1] + \
                    pic[x][y-1] + pic[x][y+1] + \
                    pic[x+1][y-1] + pic[x+1][y] + pic[x+1][y+1]
                avg = s // 8
                # Brightest pixel in the 3x3 neighborhood (center included)
                s_max = max([pic[x-1][y-1], pic[x-1][y], pic[x-1][y+1],
                             pic[x][y-1], pic[x][y], pic[x][y+1],
                             pic[x+1][y-1], pic[x+1][y], pic[x+1][y+1]])
                # Off-center response: strong when the surround is bright
                # relative to the center pixel
                off_center = s_max - avg - data[n][x][y]
                ret[n][x][y] = off_center
    return ret
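# For symmetry, a sketch of the complementary on-center filter under the same
# 3x3-neighborhood conventions. This helper is illustrative (an assumption),
# not part of the original source.
def on_center_filter(data):
    ret = np.zeros(data.shape)
    for n in range(len(data)):
        pic = data[n]
        for x in range(1, 27):
            for y in range(1, 27):
                # Sum of the 8 surrounding pixels
                s = pic[x-1][y-1] + pic[x-1][y] + pic[x-1][y+1] + \
                    pic[x][y-1] + pic[x][y+1] + \
                    pic[x+1][y-1] + pic[x+1][y] + pic[x+1][y+1]
                # On-center response: strong when the center pixel is bright
                # relative to its surround average
                ret[n][x][y] = pic[x][y] - s // 8
    return ret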
def calculate_metrics(data, target, receptive_field):
    # Structure of the TNN
    # threshold indicates the highest filter spike time that can be considered
    layer1 = firstlayer.FirstLayer(layer_id=1, training_raw_data=data[0],
                                   threshold=8)
    # threshold indicates the max neuron sum before firing
    layer2 = layer.Layer(layer_id=2, num_neurons=16, prev_layer=layer1,
                         threshold=15)
    # number of time steps for each image
    num_iterations = 9
    results = np.zeros(shape=(16, 10))

    # selects 10000 random images each for training and testing
    permutation = np.random.permutation(len(data))
    training = permutation[:10000]
    test = permutation[10000:20000]

    start_time = time.time()
    # Training phase: generates spikes for layer 1 using 2 different filters
    for itr in range(len(training)):
        i = training[itr]
        layer1.raw_data = data[i]
        layer1.generate_spikes(OnCenterFilter, OffCenterFilter, receptive_field)
        # for each image, go through all time steps
        for j in range(num_iterations):
            # feedforward inhibition with max 3 spikes
            layer1.feedforward_inhibition(3)
            layer2.generate_spikes()
            # only select one of the 8 spikes
            layer2.wta(1, 8)
            layer2.stdp_update_rule()
            layer1.increment_time()
            layer2.increment_time()
        layer1.reset()
        layer2.reset()
        print("\rComplete: ", itr + 1, end="")
    end_time = time.time()
    print("Training time: ", end_time - start_time, "s")

    start_time = time.time()
    # testing phase (indexes the held-out test split, not the training split)
    for itr in range(len(test)):
        i = test[itr]
        layer1.raw_data = data[i]
        layer1.generate_spikes(OnCenterFilter, OffCenterFilter, receptive_field)
        # for each image, go through all the time steps
        for j in range(num_iterations):
            # max 3 spikes in first layer
            layer1.feedforward_inhibition(3)
            layer2.generate_spikes()
            layer2.wta(1, 8)
            # results is num_patterns x num_labels; each cell counts how often
            # a neuron spiked for images of a given label
            for k in range(layer2.spikes.shape[0]):
                if layer2.spikes[k] == 0:
                    results[k, int(target[i])] += 1
            layer1.increment_time()
            layer2.increment_time()
        layer1.reset()
        layer2.reset()
        print("\rComplete: ", itr + 1, end="")
    end_time = time.time()
    print("Test time: ", end_time - start_time, "s")

    return results
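# Sketch of interpreting the returned 16x10 count matrix: assign each layer-2
# neuron the label it co-fired with most often, plus a rough purity score.
# `summarize_results` is a hypothetical helper, not part of the original code.
def summarize_results(results):
    assignments = results.argmax(axis=1)  # most frequent label per neuron
    purity = results.max(axis=1).sum() / max(results.sum(), 1)
    return assignments, purity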
mnist = fetch_mldata('MNIST original')
N, _ = mnist.data.shape

# Shuffle data with a fixed seed for reproducibility
np.random.seed(2)
shuffledindex = np.random.permutation(N)
mnistdata = mnist.data[shuffledindex]
mnisttarget = mnist.target[shuffledindex]

# Reshape the data to be square
mnistdata = mnistdata.reshape(N, 28, 28)

layer1 = firstlayer.FirstLayer(1)

# one band correlator per band
bc = []
for r in range(0, 4):
    bc.append(BandCorelator(rf_size, layer1, rf_size * rf_size * 2))

num_training_imgs = 60000
num_testing_imgs = 10000
batch_size = 1
num_batch = num_training_imgs // batch_size

test_data = mnistdata[-num_testing_imgs:]
test_label = mnisttarget[-num_testing_imgs:]

offset = 0
train_data = mnistdata[offset:num_training_imgs + offset]

final_bc = BandCorelator(rf_size, layer1, 16)
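# Sketch of iterating the batches set up above and feeding each of the four
# band correlators before the final one. The `.train` method name is a
# hypothetical stand-in for whatever update BandCorelator actually exposes.
for b in range(num_batch):
    batch = train_data[b * batch_size:(b + 1) * batch_size]
    for img in batch:
        for corr in bc:
            corr.train(img)  # hypothetical API
        final_bc.train(img)  # hypothetical API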
import csv

import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import fetch_mldata

import firstlayer as firstlayer
import layer as layer
from filters import OffCenterFilter, OnCenterFilter

mnist = fetch_mldata('MNIST original')
N, _ = mnist.data.shape

# Reshape the data to be square
mnist.square_data = mnist.data.reshape(N, 28, 28)

layer1 = firstlayer.FirstLayer(layer_id=1,
                               training_raw_data=mnist.square_data[0],
                               threshold=8, inhibit_k=3)
layer2 = layer.Layer(layer_id=2, num_neurons=10, prev_layer=layer1,
                     threshold=2.5)

# number of time steps for each image
num_iterations = 9

with open('spiketimes.csv', 'w', newline='') as csvfile:
    writer = csv.writer(csvfile)
    accuracy = 0
    for i in range(len(mnist.square_data)):
        # set the data to the new image
        layer1.raw_data = mnist.square_data[i]
        # Generates spikes for layer 1 using 2 different filters
        layer1.generate_spikes(OnCenterFilter, OffCenterFilter)
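        # The original snippet is truncated here. A sketch of the per-image
        # bookkeeping it appears to build toward, following the loop shape of
        # the other scripts in this repo; the exact row format written to
        # spiketimes.csv is an assumption.
        for j in range(num_iterations):
            layer2.generate_spikes()
            layer1.increment_time()
            layer2.increment_time()
        writer.writerow([i, int(mnist.target[i])] + list(layer2.spikes))
        layer1.reset()
        layer2.reset()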