def template_segmentation_image(image_file, parameters, steps, time, ccore_flag=True):
    image = read_image(image_file)
    stimulus = rgb2gray(image)

    # Binarize the grayscale image: dark pixels become active stimulus (1), bright pixels inactive (0).
    for pixel_index in range(len(stimulus)):
        if stimulus[pixel_index] < 235:
            stimulus[pixel_index] = 1
        else:
            stimulus[pixel_index] = 0

    if parameters is None:
        parameters = legion_parameters()

    net = legion_network(len(stimulus), parameters, conn_type.GRID_FOUR, ccore=ccore_flag)
    output_dynamic = net.simulate(steps, time, stimulus)

    ensembles = output_dynamic.allocate_sync_ensembles()
    draw_image_mask_segments(image_file, ensembles)
    # draw_dynamics(output_dynamic.time, output_dynamic.output, x_title="Time", y_title="x(t)", separate=ensembles)

    # just for checking correctness of results - let's use classical algorithm
    dbscan_instance = dbscan(image, 3, 4, True)
    dbscan_instance.process()
    trustable_clusters = dbscan_instance.get_clusters()

    draw_dynamics(output_dynamic.time, output_dynamic.output, x_title="Time", y_title="x(t)", separate=trustable_clusters)
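# A minimal usage sketch for the LEGION segmentation template above, assuming the IMAGE_SIMPLE_SAMPLES
# constants used elsewhere in these examples are available. Passing None for the parameters falls back to
# the default legion_parameters(); the step count (2000) and integration time (1000) are illustrative
# assumptions, not values prescribed by the library.
def segmentation_image_simple10_legion():
    template_segmentation_image(IMAGE_SIMPLE_SAMPLES.IMAGE_SIMPLE10, None, 2000, 1000, ccore_flag=True)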
def train(self):
    samples = []

    print("Digit images preprocessing...")

    for index_digit in range(0, 10, 1):
        list_file_digit_sample = IMAGE_DIGIT_SAMPLES.GET_LIST_IMAGE_SAMPLES(index_digit)

        for file_name in list_file_digit_sample:
            data = read_image(file_name)

            # Convert to grayscale and binarize: dark pixels -> 1, bright pixels -> 0.
            image_pattern = rgb2gray(data)
            for index_pixel in range(len(image_pattern)):
                if image_pattern[index_pixel] < 128:
                    image_pattern[index_pixel] = 1
                else:
                    image_pattern[index_pixel] = 0

            samples += [image_pattern]

    print("SOM initialization...")
    self.__network = som(2, 5, type_conn.grid_four, None, True)

    print("SOM training...")
    self.__network.train(samples, 300)

    print("SOM is ready...")
def click_image_load(self):
    self.__user_pattern = [0 for i in range(32 * 32)]
    Canvas.delete(self.__widget, "all")

    index_digit = int(math.floor(random.random() * 10))
    list_file_digit_sample = IMAGE_DIGIT_SAMPLES.GET_LIST_IMAGE_SAMPLES(index_digit)

    index_image = int(math.floor(random.random() * len(list_file_digit_sample)))
    file_name = list_file_digit_sample[index_image]

    data = read_image(file_name)
    image_pattern = rgb2gray(data)

    for y in range(32):
        for x in range(32):
            linear_index = y * 32 + x
            if image_pattern[linear_index] < 128:
                self.__user_pattern[linear_index] = 1
                self.__widget.create_rectangle(x * 10, y * 10, x * 10 + 10, y * 10 + 10, fill=self.__color, width=0)
def testRgbToGray(self):
    rgb_pixels = [[127, 127, 127], [255, 255, 255], [0, 0, 0]]
    result = rgb2gray(rgb_pixels)

    assert 3 == len(result)
    assert 127 == round(result[0])
    assert 255 == round(result[1])
    assert 0 == round(result[2])
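# The test above only constrains gray inputs (R == G == B), for which any normalized channel weighting
# returns the channel value unchanged. A minimal sketch of such a conversion, assuming the common
# ITU-R BT.601 luma weights (an assumption about the formula, not taken from the library source):
def rgb2gray_sketch(rgb_pixels):
    # 0.299 + 0.587 + 0.114 == 1.0, so gray pixels map to their own intensity.
    return [0.299 * r + 0.587 * g + 0.114 * b for (r, g, b) in rgb_pixels]

# Example: rgb2gray_sketch([[127, 127, 127], [255, 255, 255], [0, 0, 0]]) -> [127.0, 255.0, 0.0]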
def template_segmentation_image(image, parameters, simulation_time, brightness, scale_color=True, fastlinking=False, show_spikes=False, ccore_flag=True):
    image_source = Image.open(image)
    image_size = image_source.size

    width = image_size[0]
    height = image_size[1]

    stimulus = read_image(image)
    stimulus = rgb2gray(stimulus)

    if brightness is not None:
        # Binarize using the provided brightness threshold: dark pixels -> 1, bright pixels -> 0.
        for pixel_index in range(len(stimulus)):
            if stimulus[pixel_index] < brightness:
                stimulus[pixel_index] = 1
            else:
                stimulus[pixel_index] = 0
    else:
        # Otherwise map intensities to [0, 1], either rescaled to the image's own range or divided by 255.
        maximum_stimulus = float(max(stimulus))
        minimum_stimulus = float(min(stimulus))
        delta = maximum_stimulus - minimum_stimulus

        for pixel_index in range(len(stimulus)):
            if scale_color is True:
                stimulus[pixel_index] = 1.0 - ((float(stimulus[pixel_index]) - minimum_stimulus) / delta)
            else:
                stimulus[pixel_index] = float(stimulus[pixel_index]) / 255

    if parameters is None:
        parameters = pcnn_parameters()

        parameters.AF = 0.1
        parameters.AL = 0.1
        parameters.AT = 0.8
        parameters.VF = 1.0
        parameters.VL = 1.0
        parameters.VT = 30.0
        parameters.W = 1.0
        parameters.M = 1.0

        parameters.FAST_LINKING = fastlinking

    net = pcnn_network(len(stimulus), parameters, conn_type.GRID_EIGHT, height=height, width=width, ccore=ccore_flag)
    output_dynamic = net.simulate(simulation_time, stimulus)

    pcnn_visualizer.show_output_dynamic(output_dynamic)

    ensembles = output_dynamic.allocate_sync_ensembles()
    draw_image_mask_segments(image, ensembles)

    pcnn_visualizer.show_time_signal(output_dynamic)

    if show_spikes is True:
        spikes = output_dynamic.allocate_spike_ensembles()
        draw_image_mask_segments(image, spikes)

        pcnn_visualizer.animate_spike_ensembles(output_dynamic, image_size)
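# A minimal usage sketch for the PCNN segmentation template above, assuming the IMAGE_SIMPLE_SAMPLES
# constants used elsewhere in these examples are available. The simulation time (47) and brightness
# threshold (128) are illustrative assumptions; passing None for the parameters falls back to the
# defaults set inside the template.
def segmentation_image_simple10_pcnn():
    template_segmentation_image(IMAGE_SIMPLE_SAMPLES.IMAGE_SIMPLE10, None, 47, 128, show_spikes=True)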
def template_recognition_image(images, steps, time, corruption=0.1):
    samples = []

    for file_name in images:
        data = read_image(file_name)

        # Convert to grayscale and map to bipolar patterns: dark pixels -> 1, bright pixels -> -1.
        image_pattern = rgb2gray(data)
        for index_pixel in range(len(image_pattern)):
            if image_pattern[index_pixel] < 128:
                image_pattern[index_pixel] = 1
            else:
                image_pattern[index_pixel] = -1

        samples += [image_pattern]

    net = syncpr(len(samples[0]), 0.3, 0.3, ccore=True)
    net.train(samples)

    # Recognize each learned pattern.
    for i in range(len(samples)):
        sync_output_dynamic = net.simulate(steps, time, samples[i], solve_type.RK4, True)

        syncpr_visualizer.show_output_dynamic(sync_output_dynamic)
        syncpr_visualizer.show_pattern(sync_output_dynamic, 10, 10)

        # Corrupt the pattern a little with random black and white pixels.
        for _ in range(math.floor(len(samples[i]) * corruption)):
            random.seed()

            random_pixel = math.floor(random.random() * len(samples[i]))
            samples[i][random_pixel] = 1

            random_pixel = math.floor(random.random() * len(samples[i]))
            samples[i][random_pixel] = -1

        sync_output_dynamic = net.simulate(steps, time, samples[i], solve_type.RK4, True)

        syncpr_visualizer.show_output_dynamic(sync_output_dynamic)
        syncpr_visualizer.show_pattern(sync_output_dynamic, 10, 10)

        syncpr_visualizer.animate_pattern_recognition(sync_output_dynamic, 10, 10, title="Pattern Recognition")
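# A minimal usage sketch for the pattern-recognition template above. The template hard-codes a 10x10
# pattern size for its visualizers, so the input images are assumed to be 10x10 pixels; the file names
# below are hypothetical placeholders, not paths shipped with these examples, and the step count,
# integration time, and corruption level are illustrative assumptions.
def recognition_two_patterns_syncpr():
    images = ["pattern_a_10x10.png", "pattern_b_10x10.png"]  # hypothetical 10x10 sample images
    template_recognition_image(images, 10, 20, corruption=0.1)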
def segmentation_double_t():
    image = read_image(IMAGE_SIMPLE_SAMPLES.IMAGE_SIMPLE10)
    image = rgb2gray(image)

    for pixel_index in range(len(image)):
        if image[pixel_index] < 128:
            image[pixel_index] = 1
        else:
            image[pixel_index] = 0

    params = pcnn_parameters()

    params.AF = 0.1
    params.AL = 0.1
    params.AT = 0.8
    params.VF = 1.0
    params.VL = 1.0
    params.VT = 20.0
    params.W = 1.0
    params.M = 1.0

    ensembles = template_dynamic_pcnn(32 * 32, 28, image, params, conn_type.GRID_EIGHT, False)
    draw_image_mask_segments(IMAGE_SIMPLE_SAMPLES.IMAGE_SIMPLE10, ensembles)
def template_image_segmentation(image_file, steps, time, dynamic_file_prefix):
    image = read_image(image_file)
    stimulus = rgb2gray(image)

    params = hhn_parameters()
    params.deltah = 650
    params.w1 = 0.1
    params.w2 = 9.0
    params.w3 = 5.0
    params.threshold = -10

    # Invert intensities and quantize them to integer stimulus levels in [0, 50].
    stimulus = [255.0 - pixel for pixel in stimulus]
    divider = max(stimulus) / 50.0
    stimulus = [int(pixel / divider) for pixel in stimulus]

    t, dyn_peripheral, dyn_central = None, None, None

    if (not os.path.exists(dynamic_file_prefix + 'dynamic_time.txt')
            or not os.path.exists(dynamic_file_prefix + 'dynamic_peripheral.txt')
            or not os.path.exists(dynamic_file_prefix + 'dynamic_dyn_central.txt')):

        print("File with output dynamic is not found - simulation will be performed, it may take some time, be patient.")

        net = hhn_network(len(stimulus), stimulus, params, ccore=True)
        (t, dyn_peripheral, dyn_central) = net.simulate(steps, time)

        print("Store dynamic to save simulation time next run.")

        with open(dynamic_file_prefix + 'dynamic_time.txt', 'wb') as file_descriptor:
            pickle.dump(t, file_descriptor)

        with open(dynamic_file_prefix + 'dynamic_peripheral.txt', 'wb') as file_descriptor:
            pickle.dump(dyn_peripheral, file_descriptor)

        with open(dynamic_file_prefix + 'dynamic_dyn_central.txt', 'wb') as file_descriptor:
            pickle.dump(dyn_central, file_descriptor)

    else:
        print("Load output dynamic from file.")

        with open(dynamic_file_prefix + 'dynamic_time.txt', 'rb') as file_descriptor:
            t = pickle.load(file_descriptor)

        with open(dynamic_file_prefix + 'dynamic_peripheral.txt', 'rb') as file_descriptor:
            dyn_peripheral = pickle.load(file_descriptor)

        with open(dynamic_file_prefix + 'dynamic_dyn_central.txt', 'rb') as file_descriptor:
            dyn_central = pickle.load(file_descriptor)

    animate_segmentation(t, dyn_peripheral, image_file, 200)

    # just for checking correctness of results - let's use classical algorithm (disabled by default)
    if False:
        dbscan_instance = dbscan(image, 3, 4, True)
        dbscan_instance.process()
        trustable_clusters = dbscan_instance.get_clusters()

        amount_canvases = len(trustable_clusters) + 2
        visualizer = dynamic_visualizer(amount_canvases, x_title="Time", y_title="V", y_labels=False)
        visualizer.append_dynamics(t, dyn_peripheral, 0, trustable_clusters)
        visualizer.append_dynamics(t, dyn_central, amount_canvases - 2, True)
        visualizer.show()
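# A minimal usage sketch for the Hodgkin-Huxley segmentation template above, assuming the
# IMAGE_SIMPLE_SAMPLES constants used elsewhere in these examples are available. The step count (1200),
# simulation time (600), and cache-file prefix are illustrative assumptions; cached dynamics are written
# with that prefix on the first run and reloaded afterwards.
def segmentation_image_simple10_hhn():
    template_image_segmentation(IMAGE_SIMPLE_SAMPLES.IMAGE_SIMPLE10, 1200, 600, "image_simple10_")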