Example 1
def train(self):
    samples = []

    print("Digit images preprocessing...")

    for index_digit in range(10):
        list_file_digit_sample = IMAGE_DIGIT_SAMPLES.GET_LIST_IMAGE_SAMPLES(index_digit)

        for file_name in list_file_digit_sample:
            data = read_image(file_name)

            image_pattern = rgb2gray(data)

            # binarize: dark pixels (below 128) become 1, light pixels become 0
            for index_pixel in range(len(image_pattern)):
                if image_pattern[index_pixel] < 128:
                    image_pattern[index_pixel] = 1
                else:
                    image_pattern[index_pixel] = 0

            samples.append(image_pattern)

    print("SOM initialization...")
    self.__network = som(2, 5, type_conn.grid_four, None, True)

    print("SOM training...")
    self.__network.train(samples, 300)

    print("SOM is ready...")
Example 2
    def click_image_load(self):
        self.__user_pattern = [0 for i in range(32 * 32)]
        Canvas.delete(self.__widget, "all")

        index_digit = int(math.floor(random.random() * 10))
        list_file_digit_sample = IMAGE_DIGIT_SAMPLES.GET_LIST_IMAGE_SAMPLES(
            index_digit)

        index_image = int(
            math.floor(random.random() * len(list_file_digit_sample)))
        file_name = list_file_digit_sample[index_image]
        data = read_image(file_name)

        image_pattern = rgb2gray(data)
        for y in range(32):
            for x in range(32):
                linear_index = y * 32 + x
                if (image_pattern[linear_index] < 128):
                    self.__user_pattern[linear_index] = 1
                    self.__widget.create_rectangle(x * 10,
                                                   y * 10,
                                                   x * 10 + 10,
                                                   y * 10 + 10,
                                                   fill=self.__color,
                                                   width=0)
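Because each pattern cell is rendered as a 10x10 rectangle, mapping a canvas click back to the pattern is plain integer division. A hypothetical handler (click_canvas is not part of the original code) might look like this:

def click_canvas(self, event):
    # Hypothetical inverse of the drawing loop above: convert a canvas click
    # at (event.x, event.y) back to the linear index of the 32x32 pattern.
    x = event.x // 10    # each cell is 10 pixels wide
    y = event.y // 10    # and 10 pixels tall
    if 0 <= x < 32 and 0 <= y < 32:
        linear_index = y * 32 + x
        self.__user_pattern[linear_index] = 1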
Example 3
    def process(self,
                image_source,
                collect_dynamic=False,
                order_color=0.9995,
                order_object=0.999):
        """!
        @brief Performs image segmentation.
        
        @param[in] image_source (string): Path to image file that should be processed.
        @param[in] collect_dynamic (bool): If 'True' then whole dynamic of each layer of the network is collected.
        @param[in] order_color (double): Local synchronization order for the first layer - coloring segmentation.
        @param[in] order_object (double): Local synchronization order for the second layer - object segmentation.
        
        @return (syncsegm_analyser) Analyser of segmentation results by the network.
        
        """

        self.__order_color = order_color
        self.__order_object = order_object

        data = read_image(image_source)
        color_analyser = self.__analyse_colors(data, collect_dynamic)

        if self.__object_radius is None:
            return syncsegm_analyser(color_analyser, None)

        object_segment_analysers = self.__analyse_objects(
            image_source, color_analyser, collect_dynamic)
        return syncsegm_analyser(color_analyser, object_segment_analysers)
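A hedged usage sketch for the method above, assuming the syncsegm constructor takes the color radius, object radius and noise size (per pyclustering's documented interface, with syncsegm under pyclustering.nnet); the sample image and radii are illustrative only:

# Illustrative only: construct the two-layer segmenter and run it on an image
# from the pyclustering sample collection.
from pyclustering.nnet.syncsegm import syncsegm
from pyclustering.samples.definitions import IMAGE_SIMPLE_SAMPLES

segmenter = syncsegm(color_radius=16, object_radius=10, noise_size=4)
analyser = segmenter.process(IMAGE_SIMPLE_SAMPLES.IMAGE_SIMPLE01,
                             collect_dynamic=False)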
Example 4
def template_segmentation_image(source, color_radius, color_neighbors, object_radius, object_neighbors, noise_size):
    data = read_image(source)

    dbscan_instance = dbscan(data, color_radius, color_neighbors, True)
    print("Segmentation: '", source, "', Dimensions:", len(data[0]))
    dbscan_instance.process()

    clusters = dbscan_instance.get_clusters()

    real_clusters = [cluster for cluster in clusters if len(cluster) > noise_size]

    print("Draw allocated color segments (back mask representation)...")
    draw_image_mask_segments(source, real_clusters)

    print("Draw allocated color segments (color segment representation)...")
    draw_image_color_segments(source, real_clusters)

    if object_radius is None:
        return

    # continue analysis: split each color segment into spatially connected objects
    pointer_image = Image.open(source)
    image_size = pointer_image.size

    object_colored_clusters = []

    for cluster in clusters:
        # decode linear pixel indices into (x, y) coordinates (row-major order)
        coordinates = []
        for index in cluster:
            y = int(floor(index / image_size[0]))
            x = index - y * image_size[0]

            coordinates.append([x, y])

        # perform clustering analysis of the colored objects
        if len(coordinates) < noise_size:
            continue

        dbscan_instance = dbscan(coordinates, object_radius, object_neighbors, True)
        dbscan_instance.process()

        object_clusters = dbscan_instance.get_clusters()

        # decode object clusters back to original pixel indices
        real_description_clusters = []
        for object_cluster in object_clusters:
            real_description = []
            for index_object in object_cluster:
                real_description.append(cluster[index_object])

            real_description_clusters.append(real_description)

            if len(real_description) > noise_size:
                object_colored_clusters.append(real_description)

    print("Draw allocated object segments (back mask representation)...")
    draw_image_mask_segments(source, object_colored_clusters)

    print("Draw allocated object segments (color segment representation)...")
    draw_image_color_segments(source, object_colored_clusters)
Example 6
def template_segmentation_image(image_file, parameters, steps, time, ccore_flag=True):
    image = read_image(image_file)
    stimulus = rgb2gray(image)

    # binarize: dark pixels become active stimulus (1), light pixels inactive (0)
    for pixel_index in range(len(stimulus)):
        if stimulus[pixel_index] < 235:
            stimulus[pixel_index] = 1
        else:
            stimulus[pixel_index] = 0

    if parameters is None:
        parameters = legion_parameters()

    net = legion_network(len(stimulus), parameters, conn_type.GRID_FOUR, ccore=ccore_flag)
    output_dynamic = net.simulate(steps, time, stimulus)

    ensembles = output_dynamic.allocate_sync_ensembles()

    draw_image_mask_segments(image_file, ensembles)
    # draw_dynamics(output_dynamic.time, output_dynamic.output, x_title="Time", y_title="x(t)", separate=ensembles)

    # cross-check the results with a classical algorithm
    dbscan_instance = dbscan(image, 3, 4, True)
    dbscan_instance.process()
    trustable_clusters = dbscan_instance.get_clusters()

    draw_dynamics(output_dynamic.time, output_dynamic.output, x_title="Time", y_title="x(t)", separate=trustable_clusters)
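A hypothetical call of the LEGION template above, using default parameters; the step and time values are illustrative only:

# Illustrative call only: run LEGION segmentation with default parameters
# on a small sample image (2000 simulation steps over 2000 time units).
from pyclustering.samples.definitions import IMAGE_SIMPLE_SAMPLES

template_segmentation_image(IMAGE_SIMPLE_SAMPLES.IMAGE_SIMPLE01,
                            parameters=None, steps=2000, time=2000)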
Example 9
def template_segmentation_image(source, start_centers):
    data = read_image(source)

    kmeans_instance = kmeans(data, start_centers)
    kmeans_instance.process()

    clusters = kmeans_instance.get_clusters()
    draw_image_mask_segments(source, clusters)
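A hypothetical invocation; the starting centers are illustrative RGB values (pyclustering's read_image yields one RGB triple per pixel for RGB images):

# Illustrative call only: three starting centers roughly corresponding to
# red, blue and white regions of the image.
from pyclustering.samples.definitions import IMAGE_SIMPLE_SAMPLES

template_segmentation_image(IMAGE_SIMPLE_SAMPLES.IMAGE_SIMPLE01,
                            start_centers=[[255, 0, 0], [0, 0, 255], [255, 255, 255]])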
Example 11
def template_segmentation(source, levels, threshold):
    data = read_image(source)

    bang_instance = bang(data, levels, threshold)
    bang_instance.process()

    clusters = bang_instance.get_clusters()

    draw_image_mask_segments(source, clusters)
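A hypothetical call of the BANG template above; the grid depth and density threshold are illustrative values, not tuned ones:

# Illustrative call only: 8 levels of grid subdivision, zero density
# threshold (every non-empty block may contribute to a cluster).
from pyclustering.samples.definitions import IMAGE_SIMPLE_SAMPLES

template_segmentation(IMAGE_SIMPLE_SAMPLES.IMAGE_SIMPLE01, levels=8, threshold=0.0)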
Example 12
def testDrawSegmentationResultNoFailure(self):
    data = utils.read_image(IMAGE_SIMPLE_SAMPLES.IMAGE_SIMPLE01)

    kmeans_instance = kmeans(data, [[255, 0, 0], [0, 0, 255], [180, 136, 0], [255, 255, 255]])
    kmeans_instance.process()

    clusters = kmeans_instance.get_clusters()
    utils.draw_image_mask_segments(IMAGE_SIMPLE_SAMPLES.IMAGE_SIMPLE01, clusters)
    utils.draw_image_color_segments(IMAGE_SIMPLE_SAMPLES.IMAGE_SIMPLE01, clusters)
Example 14
def template_segmentation_image(image, parameters, simulation_time, brightness, scale_color=True, fastlinking=False, show_spikes=False, ccore_flag=True):
    image_source = Image.open(image)
    image_size = image_source.size

    width = image_size[0]
    height = image_size[1]

    stimulus = read_image(image)
    stimulus = rgb2gray(stimulus)

    if brightness is not None:
        # binarize using the brightness threshold
        for pixel_index in range(len(stimulus)):
            if stimulus[pixel_index] < brightness:
                stimulus[pixel_index] = 1
            else:
                stimulus[pixel_index] = 0
    else:
        # normalize stimulus to [0, 1], optionally rescaled to the image's own range
        maximum_stimulus = float(max(stimulus))
        minimum_stimulus = float(min(stimulus))
        delta = maximum_stimulus - minimum_stimulus

        for pixel_index in range(len(stimulus)):
            if scale_color is True:
                stimulus[pixel_index] = 1.0 - ((float(stimulus[pixel_index]) - minimum_stimulus) / delta)
            else:
                stimulus[pixel_index] = float(stimulus[pixel_index]) / 255

    if parameters is None:
        parameters = pcnn_parameters()

        parameters.AF = 0.1
        parameters.AL = 0.1
        parameters.AT = 0.8
        parameters.VF = 1.0
        parameters.VL = 1.0
        parameters.VT = 30.0
        parameters.W = 1.0
        parameters.M = 1.0

        parameters.FAST_LINKING = fastlinking

    net = pcnn_network(len(stimulus), parameters, conn_type.GRID_EIGHT, height=height, width=width, ccore=ccore_flag)
    output_dynamic = net.simulate(simulation_time, stimulus)

    pcnn_visualizer.show_output_dynamic(output_dynamic)

    ensembles = output_dynamic.allocate_sync_ensembles()
    draw_image_mask_segments(image, ensembles)

    pcnn_visualizer.show_time_signal(output_dynamic)

    if show_spikes is True:
        spikes = output_dynamic.allocate_spike_ensembles()
        draw_image_mask_segments(image, spikes)

        pcnn_visualizer.animate_spike_ensembles(output_dynamic, image_size)
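A hypothetical call of the PCNN template above; threshold and iteration count are illustrative, not tuned values:

# Illustrative call only: binarize at brightness 128 and simulate 47
# iterations of the pulse-coupled network.
from pyclustering.samples.definitions import IMAGE_SIMPLE_SAMPLES

template_segmentation_image(IMAGE_SIMPLE_SAMPLES.IMAGE_SIMPLE01,
                            parameters=None, simulation_time=47, brightness=128)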
Example 16
def template_segmentation_image_amount_colors(source, amount):
    data = read_image(source)

    centers = kmeans_plusplus_initializer(
        data, amount,
        kmeans_plusplus_initializer.FARTHEST_CENTER_CANDIDATE).initialize()
    kmeans_instance = kmeans(data, centers)
    kmeans_instance.process()

    clusters = kmeans_instance.get_clusters()
    draw_image_mask_segments(source, clusters)
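A hypothetical invocation of the template above; the number of colors is illustrative:

# Illustrative call only: quantize the image into four dominant colors.
from pyclustering.samples.definitions import IMAGE_SIMPLE_SAMPLES

template_segmentation_image_amount_colors(IMAGE_SIMPLE_SAMPLES.IMAGE_SIMPLE01, amount=4)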
Example 17
def template_segmentation_image(source, map_som_size=[5, 5], radius=128.0, sync_order=0.998, show_dyn=False, show_som_map=False):
    data = read_image(source)

    network = syncsom(data, map_som_size[0], map_som_size[1], 1.0)
    (ticks, (dyn_time, dyn_phase)) = timedcall(network.process, show_dyn, sync_order)
    print("Sample: ", source, "\t\tExecution time: ", ticks, "\t\tWinners: ", network.som_layer.get_winner_number(), "\n")

    if show_dyn is True:
        draw_dynamics(dyn_time, dyn_phase)

    clusters = network.get_clusters()
    draw_image_mask_segments(source, clusters)
Example 18
def template_segmentation_image(source, map_som_size=[5, 5], average_neighbors=5, sync_order=0.998, show_dyn=False, show_som_map=False):
    data = read_image(source)

    network = syncsom(data, map_som_size[0], map_som_size[1])
    (ticks, (dyn_time, dyn_phase)) = timedcall(network.process, average_neighbors, show_dyn, sync_order)
    print("Sample: ", source, "\t\tExecution time: ", ticks, "\t\tWinners: ", network.som_layer.get_winner_number(), "\n")

    if show_dyn is True:
        draw_dynamics(dyn_time, dyn_phase)

    clusters = network.get_clusters()
    draw_image_mask_segments(source, clusters)
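A hypothetical invocation of the SYNC-SOM template above; map size and neighbor count are illustrative:

# Illustrative call only: a 5x5 SOM first layer, averaging over 5 neighbors
# in the sync layer; dynamics collection disabled to keep the run fast.
from pyclustering.samples.definitions import IMAGE_SIMPLE_SAMPLES

template_segmentation_image(IMAGE_SIMPLE_SAMPLES.IMAGE_SIMPLE01,
                            map_som_size=[5, 5], average_neighbors=5,
                            sync_order=0.998, show_dyn=False)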
Example 20
def template_recognition_image(images, steps, time, corruption=0.1):
    samples = []

    for file_name in images:
        data = read_image(file_name)

        image_pattern = rgb2gray(data)

        for index_pixel in range(len(image_pattern)):
            if (image_pattern[index_pixel] < 128):
                image_pattern[index_pixel] = 1
            else:
                image_pattern[index_pixel] = -1

        samples += [image_pattern]

    net = syncpr(len(samples[0]), 0.3, 0.3, ccore=True)
    net.train(samples)

    # Recognize each learned pattern
    for i in range(len(samples)):
        sync_output_dynamic = net.simulate(steps, time, samples[i],
                                           solve_type.RK4, True)
        syncpr_visualizer.show_output_dynamic(sync_output_dynamic)
        syncpr_visualizer.show_pattern(sync_output_dynamic, 10, 10)

        # corrupt the pattern slightly with random black and white pixels
        for _ in range(math.floor(len(samples[i]) * corruption)):
            random.seed()
            random_pixel = math.floor(random.random() * len(samples[i]))
            samples[i][random_pixel] = 1

            random_pixel = math.floor(random.random() * len(samples[i]))
            samples[i][random_pixel] = -1

        sync_output_dynamic = net.simulate(steps, time, samples[i],
                                           solve_type.RK4, True)
        syncpr_visualizer.show_output_dynamic(sync_output_dynamic)
        syncpr_visualizer.show_pattern(sync_output_dynamic, 10, 10)

        syncpr_visualizer.animate_pattern_recognition(
            sync_output_dynamic, 10, 10, title="Pattern Recognition")
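A hypothetical invocation; the file names are placeholders for 10x10 black-and-white pattern images (show_pattern above is called with a 10x10 layout, so the images are assumed to be that size):

# Illustrative call only: hypothetical paths to 10x10 binary patterns;
# 10 simulation steps over 5 time units, 20% pixel corruption.
images = ["pattern_a.png", "pattern_b.png"]
template_recognition_image(images, steps=10, time=5, corruption=0.2)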
Example 22
def segmentation_double_t():
    image = read_image(IMAGE_SIMPLE_SAMPLES.IMAGE_SIMPLE10)
    image = rgb2gray(image)

    for pixel_index in range(len(image)):
        if image[pixel_index] < 128:
            image[pixel_index] = 1
        else:
            image[pixel_index] = 0

    params = pcnn_parameters()

    params.AF = 0.1
    params.AL = 0.1
    params.AT = 0.8
    params.VF = 1.0
    params.VL = 1.0
    params.VT = 20.0
    params.W = 1.0
    params.M = 1.0

    ensembles = template_dynamic_pcnn(32 * 32, 28, image, params, conn_type.GRID_EIGHT, False)
    draw_image_mask_segments(IMAGE_SIMPLE_SAMPLES.IMAGE_SIMPLE10, ensembles)
Example 25
def template_image_segmentation(image_file, steps, time, dynamic_file_prefix):
    image = read_image(image_file)
    stimulus = rgb2gray(image)

    params = hhn_parameters()
    params.deltah = 650
    params.w1 = 0.1
    params.w2 = 9.0
    params.w3 = 5.0
    params.threshold = -10

    stimulus = [255.0 - pixel for pixel in stimulus]
    divider = max(stimulus) / 50.0
    stimulus = [int(pixel / divider) for pixel in stimulus]

    t, dyn_peripheral, dyn_central = None, None, None

    if (not os.path.exists(dynamic_file_prefix + 'dynamic_time.txt') or
            not os.path.exists(dynamic_file_prefix + 'dynamic_peripheral.txt')
            or not os.path.exists(dynamic_file_prefix +
                                  'dynamic_dyn_central.txt')):

        print("File with output dynamic is not found - simulation will be "
              "performed; this may take some time.")

        net = hhn_network(len(stimulus), stimulus, params, ccore=True)

        (t, dyn_peripheral, dyn_central) = net.simulate(steps, time)

        print("Store dynamic to save time for simulation next time.")

        with open(dynamic_file_prefix + 'dynamic_time.txt',
                  'wb') as file_descriptor:
            pickle.dump(t, file_descriptor)

        with open(dynamic_file_prefix + 'dynamic_peripheral.txt',
                  'wb') as file_descriptor:
            pickle.dump(dyn_peripheral, file_descriptor)

        with open(dynamic_file_prefix + 'dynamic_dyn_central.txt',
                  'wb') as file_descriptor:
            pickle.dump(dyn_central, file_descriptor)
    else:
        print("Load output dynamic from file.")

        with open(dynamic_file_prefix + 'dynamic_time.txt',
                  'rb') as file_descriptor:
            t = pickle.load(file_descriptor)

        with open(dynamic_file_prefix + 'dynamic_peripheral.txt',
                  'rb') as file_descriptor:
            dyn_peripheral = pickle.load(file_descriptor)

        with open(dynamic_file_prefix + 'dynamic_dyn_central.txt',
                  'rb') as file_descriptor:
            dyn_central = pickle.load(file_descriptor)

    animate_segmentation(t, dyn_peripheral, image_file, 200)

    # cross-check the results with a classical algorithm (disabled by default)
    if False:
        dbscan_instance = dbscan(image, 3, 4, True)
        dbscan_instance.process()
        trustable_clusters = dbscan_instance.get_clusters()

        amount_canvases = len(trustable_clusters) + 2
        visualizer = dynamic_visualizer(amount_canvases,
                                        x_title="Time",
                                        y_title="V",
                                        y_labels=False)
        visualizer.append_dynamics(t, dyn_peripheral, 0, trustable_clusters)
        visualizer.append_dynamics(t, dyn_central, amount_canvases - 2, True)
        visualizer.show()
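The cache-or-compute pattern above (simulate once, pickle the dynamic, reload on later runs) can be factored into a small helper. A generic sketch; load_or_compute is not part of pyclustering:

import os
import pickle

def load_or_compute(path, compute):
    # Return the pickled value at path if it exists; otherwise call
    # compute(), pickle its result to path, and return it.
    if os.path.exists(path):
        with open(path, 'rb') as file_descriptor:
            return pickle.load(file_descriptor)
    value = compute()
    with open(path, 'wb') as file_descriptor:
        pickle.dump(value, file_descriptor)
    return value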
Example 27
def template_segmentation_image(source, color_radius, object_radius, noise_size, show_dyn):
    data = read_image(source)
    print("Pixel dimension: ", len(data[0]))

    network = syncnet(data, color_radius, ccore=True)
    print("Network has been created")

    (ticks, analyser) = timedcall(network.process, 0.9995, solve_type.FAST, show_dyn)

    print("Sample: ", source, "\t\tExecution time: ", ticks, "\n")

    if show_dyn is True:
        sync_visualizer.show_output_dynamic(analyser)

    clusters = analyser.allocate_clusters()
    real_clusters = [cluster for cluster in clusters if len(cluster) > noise_size]

    draw_image_mask_segments(source, real_clusters)

    if object_radius is None:
        return

    # continue analysis: split each color segment into spatially connected objects
    pointer_image = Image.open(source)
    image_size = pointer_image.size

    object_colored_clusters = []
    object_colored_dynamics = []

    for cluster in clusters:
        # decode linear pixel indices into (x, y) coordinates (row-major order)
        coordinates = []
        for index in cluster:
            y = floor(index / image_size[0])
            x = index - y * image_size[0]

            coordinates.append([x, y])

        # release the previous network before creating a new one
        if network is not None:
            del network
            network = None

        if len(coordinates) < noise_size:
            continue

        network = syncnet(coordinates, object_radius, ccore=True)
        analyser = network.process(0.999, solve_type.FAST, show_dyn)

        if show_dyn is True:
            object_colored_dynamics.append((analyser.time, analyser.output))

        object_clusters = analyser.allocate_clusters()

        # decode object clusters back to original pixel indices
        real_description_clusters = []
        for object_cluster in object_clusters:
            real_description = []
            for index_object in object_cluster:
                real_description.append(cluster[index_object])

            real_description_clusters.append(real_description)

            if len(real_description) > noise_size:
                object_colored_clusters.append(real_description)

        # draw_image_mask_segments(source, [cluster])
        # draw_image_mask_segments(source, real_description_clusters)

    draw_image_mask_segments(source, object_colored_clusters)

    if show_dyn is True:
        draw_dynamics_set(object_colored_dynamics, None, None, None, [0, 2 * 3.14], False, False)
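A hypothetical invocation of the SyncNet template above; the radii and noise threshold are illustrative, not tuned values:

# Illustrative call only: color segmentation with radius 128, object
# segmentation with radius 4, clusters below 10 pixels treated as noise.
from pyclustering.samples.definitions import IMAGE_SIMPLE_SAMPLES

template_segmentation_image(IMAGE_SIMPLE_SAMPLES.IMAGE_SIMPLE01,
                            color_radius=128, object_radius=4,
                            noise_size=10, show_dyn=False)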
Example 28
# an example of image color segmentation.
from pyclustering.utils import draw_image_mask_segments, read_image

from pyclustering.samples.definitions import IMAGE_SIMPLE_SAMPLES

from pyclustering.cluster.kmeans import kmeans

from pyclustering.cluster.center_initializer import kmeans_plusplus_initializer

# load image from the pyclustering collection.
data = read_image(IMAGE_SIMPLE_SAMPLES.IMAGE_SIMPLE_BEACH)

# set initial centers for K-Means algorithm.
amount_initial_centers = 3
initial_centers = kmeans_plusplus_initializer(
    data, amount_initial_centers).initialize()
#start_centers = [ [153, 217, 234, 128], [0, 162, 232, 128], [34, 177, 76, 128], [255, 242, 0, 128] ]

# create K-Means algorithm instance.
kmeans_instance = kmeans(data, initial_centers)

# start processing.
kmeans_instance.process()

# obtain clusters that are considered as segments.
segments = kmeans_instance.get_clusters()

# show image segmentation results.
draw_image_mask_segments(IMAGE_SIMPLE_SAMPLES.IMAGE_SIMPLE_BEACH, segments)
Example 29
# an example of image color segmentation.
from pyclustering.utils import draw_image_mask_segments, read_image

from pyclustering.samples.definitions import IMAGE_SIMPLE_SAMPLES

from pyclustering.cluster.kmeans import kmeans
from pyclustering.cluster.center_initializer import kmeans_plusplus_initializer

# load image from the pyclustering collection.
data = read_image("4_projeta/inner_img_rgb.tif")

# set initial centers for K-Means algorithm.
amount_initial_centers = 2
initial_centers = kmeans_plusplus_initializer(
    data, amount_initial_centers).initialize()

# create K-Means algorithm instance.
kmeans_instance = kmeans(data, initial_centers)

# start processing.
kmeans_instance.process()

# obtain clusters that are considered as segments.
segments = kmeans_instance.get_clusters()

print(len(segments[1]))

# show image segmentation results. NOTE: draw_image_mask_segments renders the
# result with matplotlib and is not documented to return an image object, so
# the save() call below likely fails; saving the matplotlib figure instead
# would be one way to persist the result.
clusterized = draw_image_mask_segments("4_projeta/inner_img_rgb.tif", segments)

clusterized.save('5_extrai_linhas/grupos.tif')