def testSegment(self):
    """Learn a one-level HTM on the pictures subset, then infer and
    segment a single clean test image.

    TODO: unfinished -- it computes segmentation weights but asserts
    nothing yet.
    """
    # Skip explicitly instead of silently "passing" via an early
    # ``return True`` (returning non-None from a test method also
    # triggers a DeprecationWarning under unittest on Python 3.11+).
    self.skipTest("TODO: testSegment is not finished")

    sensor_params = {"width": 32, "height": 32, "background": 1, "mode": "bw"}
    net_params = [
        {
            "nodeCloning": True,
            "size": [2, 2],
            "overlap": [0, 0],
            # Spatial pooler
            "maxCoincidenceCount": 128,
            "spatialPoolerAlgorithm": "gaussian",
            "sigma": 1,
            "maxDistance": 0.1,
            # Temporal pooler
            "requestedGroupsCount": 20,
            "temporalPoolerAlgorithm": "maxProp",
            "transitionMemory": 4,
        }
    ]
    learning_params = [
        # Level 0
        {
            # Spatial pooler
            "sp": {
                "explorer": ["RandomSweep", {"sweepOffObject": False, "sweepDirections": "all"}],
                "numIterations": 100,
            },
            # Temporal pooler
            "tp": {
                "explorer": ["RandomSweep", {"sweepOffObject": False, "sweepDirections": "all"}],
                "numIterations": 1000,
            },
        }
    ]
    train_data = "data/pictures-subset/train"

    # Build the sensor and the one-level network, then train it.
    sensor = ImageSensor(
        width=sensor_params["width"],
        height=sensor_params["height"],
        background=sensor_params["background"],
        mode=sensor_params["mode"],
    )
    net = HTM(sensor, verbose=False)
    net.create_network(net_params)
    sensor.loadMultipleImages(train_data)
    net.learn(learning_params)

    # Feed-forward inference on one clean image, then segmentation.
    sensor.clearImageList()
    sensor.loadSingleImage("data/test_clean.png")
    data = sensor.compute()
    im_clean, cat = data["data"], data["category"]
    patch_size = net.patch_size
    im_clean_fw = net.infer(utils.extract_patches(im_clean, patch_size), merge_output=True)
    weights = np.asarray(net.segment(im_clean_fw))
def testMultipleLevelInference(self):
    """Train a three-level HTM on the pictures subset and plot the
    top-level inference output for test images (blue for category 0,
    red otherwise).

    Manual/visual inspection test: asserts nothing and opens a
    matplotlib window.
    """
    # Skip explicitly instead of silently "passing" via a bare ``return``.
    self.skipTest("manual/visual test -- requires a matplotlib display")

    from datasets import DatasetConfig

    dataset = DatasetConfig().load("pictures-subset")
    # NOTE(review): train_data points at the *test* set here (see the
    # original inline comment), and both paths are overwritten with
    # hard-coded ones further down -- confirm the intended data source
    # before re-enabling this test.
    train_data = dataset["test_data_path"]  # dataset['train_data_path']
    test_data = dataset["test_data_path"]
    sensor_params = {
        "width": dataset["image_width"],
        "height": dataset["image_height"],
        "background": dataset["image_background"],
        "mode": dataset["image_mode"],
    }
    net_params = [
        # Level 0
        {
            "nodeCloning": True,
            "size": [4, 4],
            "overlap": [0, 0],
            # Spatial pooler
            "maxCoincidenceCount": 64,
            "spatialPoolerAlgorithm": "gaussian",
            "sigma": 1,
            "maxDistance": 0.1,
            # Temporal pooler
            "requestedGroupsCount": 20,
            "temporalPoolerAlgorithm": "sumProp",
            "transitionMemory": 4,
        },
        # Level 1
        {
            "nodeCloning": True,
            "size": [2, 2],
            "overlap": [0, 0],
            # Spatial pooler
            "maxCoincidenceCount": 128,
            "spatialPoolerAlgorithm": "product",
            "sigma": 1,
            "maxDistance": 0.1,
            # Temporal pooler
            "requestedGroupsCount": 10,
            "temporalPoolerAlgorithm": "sumProp",
            "transitionMemory": 8,
        },
        # Level 2
        {
            "nodeCloning": True,
            "size": [1],
            "overlap": [0, 0],
            # Spatial pooler
            "maxCoincidenceCount": 128,
            "spatialPoolerAlgorithm": "product",
            "sigma": 1,
            "maxDistance": 0.1,
            # Temporal pooler
            "requestedGroupsCount": 20,
            "temporalPoolerAlgorithm": "sumProp",
            "transitionMemory": 10,
        },
        # Level 3
        # {
        #     'nodeCloning': True,
        #     'size': [1],
        #     'overlap': [0, 0],
        #     # Spatial pooler
        #     'maxCoincidenceCount': 128,
        #     'spatialPoolerAlgorithm': 'gaussian',
        #     'sigma': 1,
        #     'maxDistance': 0.1,
        #     # Temporal pooler
        #     'requestedGroupsCount': 30,
        #     'temporalPoolerAlgorithm': 'maxProp',
        #     'transitionMemory': 10,
        # }
    ]
    # NOTE(review): learning_params still lists four levels while
    # net_params defines only three (Level 3 is commented out above) --
    # verify HTM.learn tolerates the extra entry.
    learning_params = [
        # Level 0
        {
            # Spatial pooler
            "sp": {
                "explorer": ["RandomSweep", {"sweepOffObject": False, "sweepDirections": "all"}],
                "numIterations": 300,
            },
            # Temporal pooler
            "tp": {
                "explorer": ["RandomSweep", {"sweepOffObject": False, "sweepDirections": "all"}],
                "numIterations": 10000,
            },
        },
        # Level 1
        {
            "sp": {
                "explorer": ["RandomSweep", {"sweepOffObject": False, "sweepDirections": "all"}],
                "numIterations": 200,
            },
            "tp": {
                "explorer": ["RandomSweep", {"sweepOffObject": False, "sweepDirections": "all"}],
                "numIterations": 5000,
            },
        },
        # Level 2
        {
            "sp": {
                "explorer": ["RandomSweep", {"sweepOffObject": False, "sweepDirections": "all"}],
                "numIterations": 200,
            },
            "tp": {
                "explorer": ["RandomSweep", {"sweepOffObject": False, "sweepDirections": "all"}],
                "numIterations": 5000,
            },
        },
        # Level 3
        {
            "sp": {
                "explorer": ["RandomSweep", {"sweepOffObject": False, "sweepDirections": "all"}],
                "numIterations": 200,
            },
            "tp": {
                "explorer": ["RandomSweep", {"sweepOffObject": False, "sweepDirections": "all"}],
                "numIterations": 5000,
            },
        },
    ]
    train_data = "data/pictures-subset/train"
    test_data = "data/pictures-subset/test"

    sensor = ImageSensor(
        width=sensor_params["width"],
        height=sensor_params["height"],
        background=sensor_params["background"],
        mode=sensor_params["mode"],
    )
    net = HTM(sensor, verbose=False)
    net.create_network(net_params)
    sensor.loadMultipleImages(train_data)
    net.learn(learning_params)

    # Getting testing data.
    sensor.clearImageList()
    sensor.loadMultipleImages(test_data)
    sensor.setParameter("explorer", "Flash")

    import matplotlib.pyplot as plt

    # Plot the inferred output for 40 test presentations; blue curves
    # for category 0, red for everything else.
    for _ in range(40):
        data = sensor.compute()
        im, cat = data["data"], data["category"]
        print(cat)
        patterns = utils.extract_patches(im, net.patch_size)
        if cat == 0:
            plt.plot(net.infer(patterns), color="b")
        else:
            plt.plot(net.infer(patterns), color="r")
    plt.show()
def testSegment(self):
    """Train a single-level HTM on the pictures subset, run inference on
    one clean image, and compute its segmentation weights.

    TODO: no assertions yet -- the segmentation result is never checked.
    """
    # Disabled test: report it as skipped rather than letting the bare
    # ``return True`` make it look green (non-None returns from unittest
    # tests are deprecated as of Python 3.11).
    self.skipTest('TODO: testSegment is not finished')

    sensor_params = {
        'width': 32,
        'height': 32,
        'background': 1,
        'mode': 'bw'
    }
    net_params = [{
        'nodeCloning': True,
        'size': [2, 2],
        'overlap': [0, 0],
        # Spatial pooler
        'maxCoincidenceCount': 128,
        'spatialPoolerAlgorithm': 'gaussian',
        'sigma': 1,
        'maxDistance': 0.1,
        # Temporal pooler
        'requestedGroupsCount': 20,
        'temporalPoolerAlgorithm': 'maxProp',
        'transitionMemory': 4,
    }]
    learning_params = [
        # Level 0
        {
            # Spatial pooler
            'sp': {
                'explorer': [
                    'RandomSweep',
                    {'sweepOffObject': False, 'sweepDirections': 'all'}
                ],
                'numIterations': 100
            },
            # Temporal pooler
            'tp': {
                'explorer': [
                    'RandomSweep',
                    {'sweepOffObject': False, 'sweepDirections': 'all'}
                ],
                'numIterations': 1000
            },
        },
    ]
    train_data = 'data/pictures-subset/train'

    # Train a one-level network on the training images.
    sensor = ImageSensor(width=sensor_params['width'],
                         height=sensor_params['height'],
                         background=sensor_params['background'],
                         mode=sensor_params['mode'])
    net = HTM(sensor, verbose=False)
    net.create_network(net_params)
    sensor.loadMultipleImages(train_data)
    net.learn(learning_params)

    # Infer on a single clean image and segment the merged output.
    sensor.clearImageList()
    sensor.loadSingleImage('data/test_clean.png')
    data = sensor.compute()
    im_clean, cat = data['data'], data['category']
    patch_size = net.patch_size
    im_clean_fw = net.infer(utils.extract_patches(im_clean, patch_size),
                            merge_output=True)
    weights = np.asarray(net.segment(im_clean_fw))
# Present every loaded training image through the trained network and
# collect the inferred output vectors alongside the raw images and labels.
# ("Flash" explorer -- presumably one presentation per image; TODO confirm
# against the ImageSensor documentation.)
sensor.setParameter('explorer', 'Flash')
print(' Num of train images: %d' % sensor.getNumIterations())
train = []  # inferred output of HTM for each of the original images
train_set_orig = []  # original training images, flattened to 1-D vectors
train_labels = []  # labels of the original training images
for i in range(sensor.getNumIterations()):
    data = sensor.compute()
    im, cat = data['data'], data['category']
    # Flatten the image to a 1-D vector before storing it.
    train_set_orig.append(im.reshape(np.prod(im.shape), ))
    train_labels.append(cat)
    out = htmNet.infer(extract_patches(im, patch_size), merge_output=True)
    train.append(np.asarray(out))

# Getting testing data: same procedure as above for the test images.
sensor.clearImageList()
sensor.loadMultipleImages(test_data)
sensor.setParameter('explorer', 'Flash')
print(' Num of test images: %d' % sensor.getNumIterations())
test = []  # inferred output of HTM for each of the original images
test_set_orig = []  # original testing images, flattened to 1-D vectors
test_labels = []  # labels of the original testing images
for i in range(sensor.getNumIterations()):
    data = sensor.compute()
    im, cat = data['data'], data['category']
    test_set_orig.append(im.reshape(np.prod(im.shape), ))
    test_labels.append(cat)
    out = htmNet.infer(extract_patches(im, patch_size), merge_output=True)
    test.append(np.asarray(out))
def testMultipleLevelInference(self):
    """Train a three-level HTM on the pictures subset and visually
    compare top-level inference outputs per category (blue = category 0,
    red = others).

    Visual-inspection test only: no assertions, opens a plot window.
    """
    # Report as skipped instead of silently "passing" via a bare ``return``.
    self.skipTest('manual/visual test -- requires a matplotlib display')

    from datasets import DatasetConfig

    dataset = DatasetConfig().load('pictures-subset')
    # NOTE(review): train_data is taken from the *test* path (see the
    # inline comment), and both variables are overwritten with hard-coded
    # paths below -- confirm the intended source before re-enabling.
    train_data = dataset['test_data_path']  # dataset['train_data_path']
    test_data = dataset['test_data_path']
    sensor_params = {
        'width': dataset['image_width'],
        'height': dataset['image_height'],
        'background': dataset['image_background'],
        'mode': dataset['image_mode']
    }
    net_params = [
        # Level 0
        {
            'nodeCloning': True,
            'size': [4, 4],
            'overlap': [0, 0],
            # Spatial pooler
            'maxCoincidenceCount': 64,
            'spatialPoolerAlgorithm': 'gaussian',
            'sigma': 1,
            'maxDistance': 0.1,
            # Temporal pooler
            'requestedGroupsCount': 20,
            'temporalPoolerAlgorithm': 'sumProp',
            'transitionMemory': 4,
        },
        # Level 1
        {
            'nodeCloning': True,
            'size': [2, 2],
            'overlap': [0, 0],
            # Spatial pooler
            'maxCoincidenceCount': 128,
            'spatialPoolerAlgorithm': 'product',
            'sigma': 1,
            'maxDistance': 0.1,
            # Temporal pooler
            'requestedGroupsCount': 10,
            'temporalPoolerAlgorithm': 'sumProp',
            'transitionMemory': 8,
        },
        # Level 2
        {
            'nodeCloning': True,
            'size': [1],
            'overlap': [0, 0],
            # Spatial pooler
            'maxCoincidenceCount': 128,
            'spatialPoolerAlgorithm': 'product',
            'sigma': 1,
            'maxDistance': 0.1,
            # Temporal pooler
            'requestedGroupsCount': 20,
            'temporalPoolerAlgorithm': 'sumProp',
            'transitionMemory': 10,
        },
        # Level 3
        # {
        #     'nodeCloning': True,
        #     'size': [1],
        #     'overlap': [0, 0],
        #     # Spatial pooler
        #     'maxCoincidenceCount': 128,
        #     'spatialPoolerAlgorithm': 'gaussian',
        #     'sigma': 1,
        #     'maxDistance': 0.1,
        #     # Temporal pooler
        #     'requestedGroupsCount': 30,
        #     'temporalPoolerAlgorithm': 'maxProp',
        #     'transitionMemory': 10,
        # }
    ]
    # NOTE(review): four learning levels vs. three network levels
    # (Level 3 is commented out above) -- verify HTM.learn tolerates
    # the extra entry.
    learning_params = [
        # Level 0
        {
            # Spatial pooler
            'sp': {
                'explorer': [
                    'RandomSweep',
                    {'sweepOffObject': False, 'sweepDirections': 'all'}
                ],
                'numIterations': 300
            },
            # Temporal pooler
            'tp': {
                'explorer': [
                    'RandomSweep',
                    {'sweepOffObject': False, 'sweepDirections': 'all'}
                ],
                'numIterations': 10000
            },
        },
        # Level 1
        {
            'sp': {
                'explorer': [
                    'RandomSweep',
                    {'sweepOffObject': False, 'sweepDirections': 'all'}
                ],
                'numIterations': 200
            },
            'tp': {
                'explorer': [
                    'RandomSweep',
                    {'sweepOffObject': False, 'sweepDirections': 'all'}
                ],
                'numIterations': 5000
            },
        },
        # Level 2
        {
            'sp': {
                'explorer': [
                    'RandomSweep',
                    {'sweepOffObject': False, 'sweepDirections': 'all'}
                ],
                'numIterations': 200
            },
            'tp': {
                'explorer': [
                    'RandomSweep',
                    {'sweepOffObject': False, 'sweepDirections': 'all'}
                ],
                'numIterations': 5000
            },
        },
        # Level 3
        {
            'sp': {
                'explorer': [
                    'RandomSweep',
                    {'sweepOffObject': False, 'sweepDirections': 'all'}
                ],
                'numIterations': 200
            },
            'tp': {
                'explorer': [
                    'RandomSweep',
                    {'sweepOffObject': False, 'sweepDirections': 'all'}
                ],
                'numIterations': 5000
            },
        }
    ]
    train_data = 'data/pictures-subset/train'
    test_data = 'data/pictures-subset/test'

    sensor = ImageSensor(width=sensor_params['width'],
                         height=sensor_params['height'],
                         background=sensor_params['background'],
                         mode=sensor_params['mode'])
    net = HTM(sensor, verbose=False)
    net.create_network(net_params)
    sensor.loadMultipleImages(train_data)
    net.learn(learning_params)

    # Getting testing data.
    sensor.clearImageList()
    sensor.loadMultipleImages(test_data)
    sensor.setParameter('explorer', 'Flash')

    import matplotlib.pyplot as plt

    # Overlay the inferred outputs of 40 test presentations, coloured
    # by category, then show the figure.
    for _ in range(40):
        data = sensor.compute()
        im, cat = data['data'], data['category']
        print(cat)
        patterns = utils.extract_patches(im, net.patch_size)
        if cat == 0:
            plt.plot(net.infer(patterns), color='b')
        else:
            plt.plot(net.infer(patterns), color='r')
    plt.show()