def template_segmentation_image(source, color_radius, color_neighbors, object_radius, object_neighbors, noise_size):    
    data = read_image(source);

    dbscan_instance = dbscan(data, color_radius, color_neighbors, True);
    print("Segmentation: '", source, "', Dimensions:", len(data[0]));
    dbscan_instance.process();
    
    clusters = dbscan_instance.get_clusters();
    
    real_clusters = [cluster for cluster in clusters if len(cluster) > noise_size];
    
    print("Draw allocated color segments (back mask representation)...");
    draw_image_mask_segments(source, real_clusters);
    
    print("Draw allocated color segments (color segment representation)...");
    draw_image_color_segments(source, real_clusters);
    
    if (object_radius is None):
        return;
    
    # continue analysis
    pointer_image = Image.open(source);
    image_size = pointer_image.size;
    
    object_colored_clusters = [];
    
    for cluster in clusters:
        coordinates = [];
        for index in cluster:
            y = floor(index / image_size[0]);
            x = index - y * image_size[0];
            
            coordinates.append([x, y]);
        
        # perform clustering analysis of the colored objects
        if (len(coordinates) < noise_size):
            continue;
        
        dbscan_instance = dbscan(coordinates, object_radius, object_neighbors, True);
        dbscan_instance.process();
    
        object_clusters = dbscan_instance.get_clusters();
        
        # decode it
        real_description_clusters = [];
        for object_cluster in object_clusters:
            real_description = [];
            for index_object in object_cluster:
                real_description.append(cluster[index_object]);
            
            real_description_clusters.append(real_description);
            
            if (len(real_description) > noise_size):
                object_colored_clusters.append(real_description);
    
    print("Draw allocated object segments (back mask representation)...");
    draw_image_mask_segments(source, object_colored_clusters);
    
    print("Draw allocated object segments (color segment representation)...");
    draw_image_color_segments(source, object_colored_clusters);
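
# For reference, a minimal self-contained sketch of the color-level step above, assuming the
# helpers come from pyclustering (pyclustering.utils, pyclustering.cluster.dbscan); the image
# path, radius, neighbor count and noise threshold below are illustrative values only.
from pyclustering.cluster.dbscan import dbscan
from pyclustering.utils import read_image, draw_image_mask_segments

def color_segmentation_sketch(path="building.png", color_radius=16.0, color_neighbors=4, noise_size=16):
    data = read_image(path)                                       # one [R, G, B] triple per pixel
    dbscan_instance = dbscan(data, color_radius, color_neighbors, True)
    dbscan_instance.process()
    segments = [c for c in dbscan_instance.get_clusters() if len(c) > noise_size]
    draw_image_mask_segments(path, segments)                      # render each color segment as a mask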
Example #2
    def testCreationDeletionByCore(self):
        # Crash occurs in case of memory leak
        data = read_image(IMAGE_MAP_SAMPLES.IMAGE_WHITE_SEA_SMALL)

        for iteration in range(0, 15):
            network = hsyncnet(data, 2, ccore=True)
            del network
Example #3
    def testCreationDeletionByCore(self):
        # Crash occurs in case of memory leak
        data = read_image(IMAGE_MAP_SAMPLES.IMAGE_WHITE_SEA_SMALL);

        for iteration in range(0, 15):
            network = hsyncnet(data, 2, ccore = True);
            del network;
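
# The two testCreationDeletionByCore() fragments above presuppose imports roughly like these
# (module paths follow the pyclustering layout and are an assumption, not part of the tests):
from pyclustering.cluster.hsyncnet import hsyncnet
from pyclustering.samples.definitions import IMAGE_MAP_SAMPLES
from pyclustering.utils import read_image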
Example #4
def template_segmentation_image(image_file, parameters, steps, time, ccore_flag = True):
    image = read_image(image_file);
    stimulus = rgb2gray(image);
    
    for pixel_index in range(len(stimulus)):
        if (stimulus[pixel_index] < 235): stimulus[pixel_index] = 1;
        else: stimulus[pixel_index] = 0;
    
    if (parameters is None):
        parameters = legion_parameters();
    
    net = legion_network(len(stimulus), parameters, conn_type.GRID_FOUR, ccore = ccore_flag);
    output_dynamic = net.simulate(steps, time, stimulus);
    
    ensembles = output_dynamic.allocate_sync_ensembles();
    
    draw_image_mask_segments(image_file, ensembles);
    # draw_dynamics(output_dynamic.time, output_dynamic.output, x_title = "Time", y_title = "x(t)", separate = ensembles);
    
    # just for checking correctness of results - let's use classical algorithm
    dbscan_instance = dbscan(image, 3, 4, True);
    dbscan_instance.process();
    trustable_clusters = dbscan_instance.get_clusters();
    
    draw_dynamics(output_dynamic.time, output_dynamic.output, x_title = "Time", y_title = "x(t)", separate = trustable_clusters);
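
# A hedged usage sketch for the LEGION-based template above. The imports reflect the
# pyclustering layout this snippet appears to target, and the step/time arguments are
# illustrative guesses rather than values from the original.
from pyclustering.nnet import conn_type
from pyclustering.nnet.legion import legion_network, legion_parameters
from pyclustering.cluster.dbscan import dbscan
from pyclustering.utils import read_image, rgb2gray, draw_image_mask_segments, draw_dynamics
from pyclustering.samples.definitions import IMAGE_SIMPLE_SAMPLES

template_segmentation_image(IMAGE_SIMPLE_SAMPLES.IMAGE_SIMPLE10, None, 2000, 1000)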
Example #5
    def train(self):
        samples = [];

        print("Digit images preprocessing...");

        for index_digit in range(0, 10, 1):
            list_file_digit_sample = IMAGE_DIGIT_SAMPLES.GET_LIST_IMAGE_SAMPLES(index_digit);

            for file_name in list_file_digit_sample:
                data = read_image(file_name);

                image_pattern = rgb2gray(data);

                for index_pixel in range(len(image_pattern)):
                    if (image_pattern[index_pixel] < 128):
                        image_pattern[index_pixel] = 1;
                    else:
                        image_pattern[index_pixel] = 0;

                samples += [ image_pattern ];

        print("SOM initialization...");
        self.__network = som(2, 5, samples, 300, type_conn.grid_four, None, True);

        print("SOM training...");
        self.__network.train();

        print("SOM is ready...");
Example #6
def template_segmentation_image(source, start_centers):
    data = read_image(source);

    kmeans_instance = kmeans(data, start_centers);
    kmeans_instance.process();
    
    clusters = kmeans_instance.get_clusters();
    draw_image_mask_segments(source, clusters);
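
# A minimal usage sketch for the k-means template above: start centers are points in RGB
# space, so two centers give a dark/light split. The sample constant comes from the other
# examples on this page; the center values themselves are illustrative.
from pyclustering.cluster.kmeans import kmeans
from pyclustering.utils import read_image, draw_image_mask_segments
from pyclustering.samples.definitions import IMAGE_SIMPLE_SAMPLES

template_segmentation_image(IMAGE_SIMPLE_SAMPLES.IMAGE_SIMPLE10, [[0, 0, 0], [255, 255, 255]])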
Example #7
def template_segmentation_image(source, start_centers):
    data = read_image(source)

    kmeans_instance = kmeans(data, start_centers)
    kmeans_instance.process()

    clusters = kmeans_instance.get_clusters()
    draw_image_mask_segments(source, clusters)
Example #8
def template_segmentation_image(source, map_som_size = [5, 5], average_neighbors = 5, sync_order = 0.998, show_dyn = False, show_som_map = False):
    data = read_image(source);
    
    network = syncsom(data, map_som_size[0], map_som_size[1]);
    (ticks, (dyn_time, dyn_phase)) = timedcall(network.process, average_neighbors, show_dyn, sync_order);
    print("Sample: ", source, "\t\tExecution time: ", ticks, "\t\tWinners: ", network.som_layer.get_winner_number(), "\n");
    
    if (show_dyn is True):
        draw_dynamics(dyn_time, dyn_phase);
    
    clusters = network.get_clusters();
    draw_image_mask_segments(source, clusters);
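
# A hedged usage sketch for the SYNC-SOM template above; module paths and the sample image
# are assumptions based on the pyclustering layout, and the map size is an illustrative value.
from pyclustering.cluster.syncsom import syncsom
from pyclustering.utils import read_image, draw_image_mask_segments, draw_dynamics, timedcall
from pyclustering.samples.definitions import IMAGE_SIMPLE_SAMPLES

template_segmentation_image(IMAGE_SIMPLE_SAMPLES.IMAGE_SIMPLE10, map_som_size=[4, 4])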
Example #9
def template_segmentation_image(image, parameters, simulation_time, brightness, scale_color = True, fastlinking = False, show_spikes = False, ccore_flag = True):
    stimulus = read_image(image);
    stimulus = rgb2gray(stimulus);
    
    if (brightness is not None):
        for pixel_index in range(len(stimulus)):
            if (stimulus[pixel_index] < brightness): stimulus[pixel_index] = 1;
            else: stimulus[pixel_index] = 0;
    else:
        maximum_stimulus = float(max(stimulus));
        minimum_stimulus = float(min(stimulus));
        delta = maximum_stimulus - minimum_stimulus;
        
        for pixel_index in range(len(stimulus)):
            if (scale_color is True):
                stimulus[pixel_index] = 1.0 - ((float(stimulus[pixel_index]) - minimum_stimulus) / delta);
            else:
                stimulus[pixel_index] = float(stimulus[pixel_index]) / 255;
    
    if (parameters is None):
        parameters = pcnn_parameters();
    
        parameters.AF = 0.1;
        parameters.AL = 0.1;
        parameters.AT = 0.8;
        parameters.VF = 1.0;
        parameters.VL = 1.0;
        parameters.VT = 30.0;
        parameters.W = 1.0;
        parameters.M = 1.0;
        
        parameters.FAST_LINKING = fastlinking;
    
    net = pcnn_network(len(stimulus), parameters, conn_type.GRID_EIGHT, ccore = ccore_flag);
    output_dynamic = net.simulate(simulation_time, stimulus);
    
    pcnn_visualizer.show_output_dynamic(output_dynamic);
    
    ensembles = output_dynamic.allocate_sync_ensembles();
    draw_image_mask_segments(image, ensembles);
    
    pcnn_visualizer.show_time_signal(output_dynamic);
    
    if (show_spikes is True):
        spikes = output_dynamic.allocate_spike_ensembles();
        draw_image_mask_segments(image, spikes);
    
        image_source = Image.open(image);
        image_size = image_source.size;
        
        pcnn_visualizer.animate_spike_ensembles(output_dynamic, image_size);
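
# A hedged usage sketch for the PCNN template above. The simulation length and brightness
# threshold are illustrative values; PIL's Image is needed by the spike-animation branch,
# and the remaining imports follow the pyclustering layout (an assumption).
from PIL import Image
from pyclustering.nnet import conn_type
from pyclustering.nnet.pcnn import pcnn_network, pcnn_parameters, pcnn_visualizer
from pyclustering.utils import read_image, rgb2gray, draw_image_mask_segments
from pyclustering.samples.definitions import IMAGE_SIMPLE_SAMPLES

template_segmentation_image(IMAGE_SIMPLE_SAMPLES.IMAGE_SIMPLE10, None, 47, 235)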
Example #10
    def click_image_load(self):
        self.__user_pattern = [ 0 for i in range(32 * 32) ];
        Canvas.delete(self.__widget, "all");

        index_digit = int(math.floor(random.random() * 10));
        list_file_digit_sample = IMAGE_DIGIT_SAMPLES.GET_LIST_IMAGE_SAMPLES(index_digit);

        index_image = int(math.floor( random.random() * len(list_file_digit_sample) ));
        file_name = list_file_digit_sample[index_image];
        data = read_image(file_name);

        image_pattern = rgb2gray(data);
        for y in range(32):
            for x in range(32):
                linear_index = y * 32 + x;
                if (image_pattern[linear_index] < 128):
                    self.__user_pattern[linear_index] = 1;
                    self.__widget.create_rectangle(x * 10, y * 10, x * 10 + 10, y * 10 + 10, fill = self.__color, width = 0);
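
# The Tkinter handler above presupposes roughly these imports, plus __widget and __color
# attributes created in the class constructor; module paths are an assumption.
import math
import random
from tkinter import Canvas
from pyclustering.samples.definitions import IMAGE_DIGIT_SAMPLES
from pyclustering.utils import read_image, rgb2gray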
Example #11
def template_segmentation_image(source,
                                map_som_size=[5, 5],
                                average_neighbors=5,
                                sync_order=0.998,
                                show_dyn=False,
                                show_som_map=False):
    data = read_image(source)

    network = syncsom(data, map_som_size[0], map_som_size[1])
    (ticks, (dyn_time, dyn_phase)) = timedcall(network.process,
                                               average_neighbors, show_dyn,
                                               sync_order)
    print("Sample: ", source, "\t\tExecution time: ", ticks, "\t\tWinners: ",
          network.som_layer.get_winner_number(), "\n")

    if (show_dyn is True):
        draw_dynamics(dyn_time, dyn_phase)

    clusters = network.get_clusters()
    draw_image_mask_segments(source, clusters)
Example #12
def segmentation_double_t():
    image = read_image(IMAGE_SIMPLE_SAMPLES.IMAGE_SIMPLE10);
    image = rgb2gray(image);

    for pixel_index in range(len(image)):
        if (image[pixel_index] < 128):
            image[pixel_index] = 1;
        else:
            image[pixel_index] = 0;

    params = pcnn_parameters();
    
    params.AF = 0.1;
    params.AL = 0.1;
    params.AT = 0.8;
    params.VF = 1.0;
    params.VL = 1.0;
    params.VT = 20.0;
    params.W = 1.0;
    params.M = 1.0;
    
    ensembles = template_dynamic_pcnn(32 * 32, 28,  image, params, conn_type.GRID_EIGHT, False);
    draw_image_mask_segments(IMAGE_SIMPLE_SAMPLES.IMAGE_SIMPLE10, ensembles);
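
# template_dynamic_pcnn() is not shown on this page. A self-contained equivalent of the call
# above, driving pcnn_network directly with the same parameters, might look like this sketch
# (it is an assumption about the helper, not the original implementation):
from pyclustering.nnet import conn_type
from pyclustering.nnet.pcnn import pcnn_network

def run_pcnn(stimulus, params, steps=28):
    net = pcnn_network(32 * 32, params, conn_type.GRID_EIGHT, ccore=False)
    output_dynamic = net.simulate(steps, stimulus)           # binary stimulus, one value per pixel
    return output_dynamic.allocate_sync_ensembles()          # pixels that pulse together form a segment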
Example #13
def segmentation_double_t():
    image = read_image(IMAGE_SIMPLE_SAMPLES.IMAGE_SIMPLE10)
    image = rgb2gray(image)

    for pixel_index in range(len(image)):
        if (image[pixel_index] < 128):
            image[pixel_index] = 1
        else:
            image[pixel_index] = 0

    params = pcnn_parameters()

    params.AF = 0.1
    params.AL = 0.1
    params.AT = 0.8
    params.VF = 1.0
    params.VL = 1.0
    params.VT = 20.0
    params.W = 1.0
    params.M = 1.0

    ensembles = template_dynamic_pcnn(32 * 32, 28, image, params,
                                      conn_type.GRID_EIGHT, False)
    draw_image_mask_segments(IMAGE_SIMPLE_SAMPLES.IMAGE_SIMPLE10, ensembles)
def template_segmentation_image(source, color_radius, object_radius, noise_size, show_dyn):    
    data = read_image(source);
    print("Pixel dimension: ", len(data[0]));

    network = syncnet(data, color_radius, ccore = True);
    print("Network has been created");
    
    (ticks, (t, dyn)) = timedcall(network.process, 0.9995, solve_type.FAST, show_dyn);
    # (t, dyn) = network.process(0.998, solve_type.FAST, show_dyn);
    
    print("Sample: ", source, "\t\tExecution time: ", ticks, "\n");
    
    if (show_dyn is True):
        draw_dynamics(t, dyn);
    
    clusters = network.get_clusters();
    real_clusters = [cluster for cluster in clusters if len(cluster) > noise_size];
    
    draw_image_mask_segments(source, real_clusters);
    
    if (object_radius is None):
        return;
    
    # continue analysis
    pointer_image = Image.open(source);
    image_size = pointer_image.size;
    
    object_colored_clusters = [];
    object_colored_dynamics = [];
    total_dyn = [];
    
    for cluster in clusters:
        coordinates = [];
        for index in cluster:
            y = floor(index / image_size[0]);
            x = index - y * image_size[0];
            
            coordinates.append([x, y]);
        
        print(coordinates);
        
        # perform clustering analysis of the colored objects
        if (network is not None):
            del network;
            network = None;
        
        if (len(coordinates) < noise_size):
            continue;
        
        network = syncnet(coordinates, object_radius, ccore = True);
        (t, dyn) = network.process(0.999, solve_type.FAST, show_dyn);
        
        if (show_dyn is True):
            object_colored_dynamics.append( (t, dyn) );
        
        object_clusters = network.get_clusters();
        
        # decode it
        real_description_clusters = [];
        for object_cluster in object_clusters:
            real_description = [];
            for index_object in object_cluster:
                real_description.append(cluster[index_object]);
            
            real_description_clusters.append(real_description);
            
            if (len(real_description) > noise_size):
                object_colored_clusters.append(real_description);
            
        # draw_image_mask_segments(source, [ cluster ]);
        # draw_image_mask_segments(source, real_description_clusters);
    
    draw_image_mask_segments(source, object_colored_clusters);
    
    if (show_dyn is True):
        draw_dynamics_set(object_colored_dynamics, None, None, None, [0, 2 * 3.14], False, False);
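
# A hedged usage sketch for the SYNC-based template above. Module paths follow the
# pyclustering layout, the radius/noise values are illustrative, and the tuple returned by
# network.process(...) in the snippet matches an older pyclustering API.
from math import floor
from PIL import Image
from pyclustering.cluster.syncnet import syncnet
from pyclustering.nnet import solve_type
from pyclustering.utils import read_image, draw_image_mask_segments, draw_dynamics, draw_dynamics_set, timedcall
from pyclustering.samples.definitions import IMAGE_SIMPLE_SAMPLES

template_segmentation_image(IMAGE_SIMPLE_SAMPLES.IMAGE_SIMPLE10, 128, 4, 10, show_dyn=False)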
def template_segmentation_image(source, color_radius, color_neighbors,
                                object_radius, object_neighbors, noise_size):
    data = read_image(source)

    dbscan_instance = dbscan(data, color_radius, color_neighbors, True)
    print("Segmentation: '", source, "', Dimensions:", len(data[0]))
    dbscan_instance.process()

    clusters = dbscan_instance.get_clusters()

    real_clusters = [
        cluster for cluster in clusters if len(cluster) > noise_size
    ]

    print("Draw allocated color segments (back mask representation)...")
    draw_image_mask_segments(source, real_clusters)

    print("Draw allocated color segments (color segment representation)...")
    draw_image_color_segments(source, real_clusters)

    if (object_radius is None):
        return

    # continue analysis
    pointer_image = Image.open(source)
    image_size = pointer_image.size

    object_colored_clusters = []

    for cluster in clusters:
        coordinates = []
        for index in cluster:
            y = floor(index / image_size[0])
            x = index - y * image_size[0]

            coordinates.append([x, y])

        # perform clustering analysis of the colored objects
        if (len(coordinates) < noise_size):
            continue

        dbscan_instance = dbscan(coordinates, object_radius, object_neighbors,
                                 True)
        dbscan_instance.process()

        object_clusters = dbscan_instance.get_clusters()

        # decode it
        real_description_clusters = []
        for object_cluster in object_clusters:
            real_description = []
            for index_object in object_cluster:
                real_description.append(cluster[index_object])

            real_description_clusters.append(real_description)

            if (len(real_description) > noise_size):
                object_colored_clusters.append(real_description)

    print("Draw allocated object segments (back mask representation)...")
    draw_image_mask_segments(source, object_colored_clusters)

    print("Draw allocated object segments (color segment representation)...")
    draw_image_color_segments(source, object_colored_clusters)
def template_segmentation_image(source, color_radius, object_radius,
                                noise_size, show_dyn):
    data = read_image(source)
    print("Pixel dimension: ", len(data[0]))

    network = syncnet(data, color_radius, ccore=True)
    print("Network has been created")

    (ticks, (t, dyn)) = timedcall(network.process, 0.9995, solve_type.FAST,
                                  show_dyn)
    # (t, dyn) = network.process(0.998, solve_type.FAST, show_dyn);

    print("Sample: ", source, "\t\tExecution time: ", ticks, "\n")

    if (show_dyn is True):
        draw_dynamics(t, dyn)

    clusters = network.get_clusters()
    real_clusters = [
        cluster for cluster in clusters if len(cluster) > noise_size
    ]

    draw_image_mask_segments(source, real_clusters)

    if (object_radius is None):
        return

    # continue analysis
    pointer_image = Image.open(source)
    image_size = pointer_image.size

    object_colored_clusters = []
    object_colored_dynamics = []
    total_dyn = []

    for cluster in clusters:
        coordinates = []
        for index in cluster:
            y = floor(index / image_size[0])
            x = index - y * image_size[0]

            coordinates.append([x, y])

        print(coordinates)

        # perform clustering analysis of the colored objects
        if (network is not None):
            del network
            network = None

        if (len(coordinates) < noise_size):
            continue

        network = syncnet(coordinates, object_radius, ccore=True)
        (t, dyn) = network.process(0.999, solve_type.FAST, show_dyn)

        if (show_dyn is True):
            object_colored_dynamics.append((t, dyn))

        object_clusters = network.get_clusters()

        # decode it
        real_description_clusters = []
        for object_cluster in object_clusters:
            real_description = []
            for index_object in object_cluster:
                real_description.append(cluster[index_object])

            real_description_clusters.append(real_description)

            if (len(real_description) > noise_size):
                object_colored_clusters.append(real_description)

        # draw_image_mask_segments(source, [ cluster ]);
        # draw_image_mask_segments(source, real_description_clusters);

    draw_image_mask_segments(source, object_colored_clusters)

    if (show_dyn is True):
        draw_dynamics_set(object_colored_dynamics, None, None, None,
                          [0, 2 * 3.14], False, False)