# Imports assumed for this excerpt (dbscan and the drawing helpers come from pyclustering).
from math import floor

from PIL import Image

from pyclustering.cluster.dbscan import dbscan
from pyclustering.utils import read_image, draw_image_mask_segments, draw_image_color_segments


def template_segmentation_image(source, color_radius, color_neighbors, object_radius, object_neighbors, noise_size):
    data = read_image(source)

    dbscan_instance = dbscan(data, color_radius, color_neighbors, True)
    print("Segmentation: '", source, "', Dimensions:", len(data[0]))
    dbscan_instance.process()

    clusters = dbscan_instance.get_clusters()

    real_clusters = [cluster for cluster in clusters if len(cluster) > noise_size]

    print("Draw allocated color segments (back mask representation)...")
    draw_image_mask_segments(source, real_clusters)

    print("Draw allocated color segments (color segment representation)...")
    draw_image_color_segments(source, real_clusters)

    if object_radius is None:
        return

    # continue analysis: cluster each color segment spatially
    pointer_image = Image.open(source)
    image_size = pointer_image.size

    object_colored_clusters = []

    for cluster in clusters:
        coordinates = []
        for index in cluster:
            # recover (x, y) pixel coordinates from the flat pixel index
            y = floor(index / image_size[0])
            x = index - y * image_size[0]

            coordinates.append([x, y])

        # perform clustering analysis of the colored objects
        if len(coordinates) < noise_size:
            continue

        dbscan_instance = dbscan(coordinates, object_radius, object_neighbors, True)
        dbscan_instance.process()

        object_clusters = dbscan_instance.get_clusters()

        # decode object clusters back to pixel indexes in the source image
        real_description_clusters = []
        for object_cluster in object_clusters:
            real_description = [cluster[index_object] for index_object in object_cluster]
            real_description_clusters.append(real_description)

            if len(real_description) > noise_size:
                object_colored_clusters.append(real_description)

    print("Draw allocated object segments (back mask representation)...")
    draw_image_mask_segments(source, object_colored_clusters)

    print("Draw allocated object segments (color segment representation)...")
    draw_image_color_segments(source, object_colored_clusters)
Example 2
# Imports assumed for this excerpt: random from the stdlib, dbscan and timedcall from pyclustering.
import random

from pyclustering.cluster.dbscan import dbscan
from pyclustering.utils import timedcall


def clustering_random_points(amount, ccore):
    sample = [ [ random.random(), random.random() ] for _ in range(amount) ]
    
    dbscan_instance = dbscan(sample, 0.05, 20, ccore)
    (ticks, _) = timedcall(dbscan_instance.process)
    
    print("Execution time ("+ str(amount) +" 2D-points):", ticks)
Example 3
    def templateClusteringWithAnswers(sample_path, answer_path, radius, neighbors, ccore, **kwargs):
        random_order = kwargs.get('random_order', False)
        repeat = kwargs.get('repeat', 1)

        for _ in range(repeat):
            sample = read_sample(sample_path)

            sample_index_map = [ i for i in range(len(sample)) ]
            if random_order:
                shuffle(sample_index_map)

            sample_shuffled = [ sample[i] for i in sample_index_map ]

            dbscan_instance = dbscan(sample_shuffled, radius, neighbors, ccore)
            dbscan_instance.process()

            clusters = dbscan_instance.get_clusters()
            noise = dbscan_instance.get_noise()

            for cluster in clusters:
                for i in range(len(cluster)):
                    cluster[i] = sample_index_map[cluster[i]]

            for i in range(len(noise)):
                noise[i] = sample_index_map[noise[i]]
            noise = sorted(noise)

            reader = answer_reader(answer_path)
            expected_noise = sorted(reader.get_noise())
            expected_length_clusters = reader.get_cluster_lengths()

            assertion.eq(len(sample), sum([len(cluster) for cluster in clusters]) + len(noise))
            assertion.eq(sum(expected_length_clusters), sum([len(cluster) for cluster in clusters]))
            assertion.eq(expected_length_clusters, sorted([len(cluster) for cluster in clusters]))
            assertion.eq(expected_noise, noise)
Example 4
def template_segmentation_image(image_file, parameters, steps, time, ccore_flag = True):
    image = read_image(image_file)
    stimulus = rgb2gray(image)

    # binarize: dark pixels (below 235) become stimulus 1, light pixels 0
    for pixel_index in range(len(stimulus)):
        if stimulus[pixel_index] < 235:
            stimulus[pixel_index] = 1
        else:
            stimulus[pixel_index] = 0

    if parameters is None:
        parameters = legion_parameters()

    net = legion_network(len(stimulus), parameters, conn_type.GRID_FOUR, ccore = ccore_flag)
    output_dynamic = net.simulate(steps, time, stimulus)

    ensembles = output_dynamic.allocate_sync_ensembles()

    draw_image_mask_segments(image_file, ensembles)
    # draw_dynamics(output_dynamic.time, output_dynamic.output, x_title = "Time", y_title = "x(t)", separate = ensembles)

    # just for checking correctness of results - let's use the classical algorithm
    dbscan_instance = dbscan(image, 3, 4, True)
    dbscan_instance.process()
    trustable_clusters = dbscan_instance.get_clusters()

    draw_dynamics(output_dynamic.time, output_dynamic.output, x_title = "Time", y_title = "x(t)", separate = trustable_clusters)
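A hedged invocation sketch; the image path and simulation length are assumptions (None falls back to default legion_parameters):

# Hypothetical call: default LEGION parameters, illustrative step count and time.
template_segmentation_image("building.png", None, 2000, 2000)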
Example 5
    def testVisualize3DClustersOneCanvas(self):
        sample = read_sample(FCPS_SAMPLES.SAMPLE_HEPTA)

        dbscan_instance = dbscan(sample, 0.5, 3, True)
        dbscan_instance.process()
        clusters = dbscan_instance.get_clusters()

        visualizer = cluster_visualizer()
        visualizer.append_clusters(clusters, sample, markersize=30)
        visualizer.show()
Example 6
    def testVisualize1DClustersOneCanvas(self):
        sample = read_sample(SIMPLE_SAMPLES.SAMPLE_SIMPLE8)

        dbscan_instance = dbscan(sample, 1.0, 3, True)
        dbscan_instance.process()
        clusters = dbscan_instance.get_clusters()

        visualizer = cluster_visualizer()
        visualizer.append_clusters(clusters, sample, markersize=5)
        visualizer.show()
Example 7
    def testVisualize2DClustersOneCanvas(self):
        sample = read_sample(SIMPLE_SAMPLES.SAMPLE_SIMPLE4)

        dbscan_instance = dbscan(sample, 0.7, 3, False)
        dbscan_instance.process()
        clusters = dbscan_instance.get_clusters()

        visualizer = cluster_visualizer()
        visualizer.append_clusters(clusters, sample, markersize=5)
        visualizer.show()
Example 8
    def testVisualize3DClustersOneCanvas(self):
        sample = read_sample(FCPS_SAMPLES.SAMPLE_HEPTA)

        dbscan_instance = dbscan(sample, 0.5, 3, False)
        dbscan_instance.process()
        clusters = dbscan_instance.get_clusters()

        visualizer = cluster_visualizer()
        visualizer.append_clusters(clusters, sample, markersize=30)
        visualizer.show()
Example 10
    def get_modelo(self, algoritmo, eps, neig):
        print(algoritmo + ' ' + str(eps) + ' - ' + str(neig))
        instance = None

        if algoritmo == 'AGNES':
            instance = agglomerative(self.amostras,
                                     self.numero_clusters,
                                     link=None)
        elif algoritmo == 'BIRCH':
            instance = birch(self.amostras,
                             self.numero_clusters,
                             entry_size_limit=10000)
        elif algoritmo == 'CLARANS':
            instance = clarans(self.amostras,
                               self.numero_clusters,
                               numlocal=100,
                               maxneighbor=1)
        elif algoritmo == 'CURE':
            instance = cure(self.amostras,
                            self.numero_clusters,
                            number_represent_points=5,
                            compression=0.5)
        elif algoritmo == 'DBSCAN':
            instance = dbscan(self.amostras, eps=eps, neighbors=neig)
        elif algoritmo == 'FCM':
            initial_centers = kmeans_plusplus_initializer(
                self.amostras, self.numero_clusters).initialize()
            instance = fcm(self.amostras, initial_centers)
        elif algoritmo == 'KMEANS':
            initial_centers = kmeans_plusplus_initializer(
                self.amostras, self.numero_clusters).initialize()
            instance = kmeans(self.amostras, initial_centers, tolerance=0.001)
        elif algoritmo == 'KMEDOIDS':
            instance = kmedoids(self.amostras,
                                initial_index_medoids=[0, 0, 0, 0, 0, 0, 0],
                                tolerance=0.0001)  # adjust to the number of clusters
        elif algoritmo == 'OPTICS':
            instance = optics(self.amostras, eps=eps, minpts=neig)
        elif algoritmo == 'ROCK':
            instance = rock(self.amostras,
                            eps=eps,
                            number_clusters=self.numero_clusters,
                            threshold=0.5)
        else:
            raise ValueError("Unknown algorithm '%s'" % algoritmo)

        instance.process()
        lista_agrupada = self.get_lista_agrupada(instance.get_clusters())
        lista_agrupada = np.array(lista_agrupada)

        if (neig != 0):
            n_grupos = len(np.unique(lista_agrupada))
            if n_grupos > self.numero_clusters:
                lista_agrupada = self.get_modelo(algoritmo, eps, neig + 1)
        return lista_agrupada
Example 11
    def templateClusterAllocationOneDimensionData(self, ccore_flag):
        input_data = [[random()] for _ in range(10)] + [[random() + 3] for _ in range(10)] + [[random() + 5] for _ in range(10)] + [[random() + 8] for _ in range(10)]

        dbscan_instance = dbscan(input_data, 0.5, 2, ccore_flag)
        dbscan_instance.process()

        clusters = dbscan_instance.get_clusters()

        assert len(clusters) == 4
        for cluster in clusters:
            assert len(cluster) == 10
Example 12
    def templateClusterAllocationOneDimensionData(ccore_flag):
        for _ in range(50):
            input_data = [[random()] for _ in range(10)] + [[random() + 3] for _ in range(10)] + [[random() + 6] for _ in range(10)] + [[random() + 9] for _ in range(10)]

            dbscan_instance = dbscan(input_data, 1.0, 2, ccore_flag)
            dbscan_instance.process()

            clusters = dbscan_instance.get_clusters()

            assert len(clusters) == 4
            for cluster in clusters:
                assert len(cluster) == 10
Example 13
def template_clustering(radius, neighb, path, invisible_axes = False, ccore = True):
    sample = read_sample(path)

    dbscan_instance = dbscan(sample, radius, neighb, ccore)
    (ticks, result) = timedcall(dbscan_instance.process)

    clusters = dbscan_instance.get_clusters()
    noise = dbscan_instance.get_noise()

    print("Sample: ", path, "\t\tExecution time: ", ticks, "\n")

    draw_clusters(sample, clusters, [], '.', hide_axes = invisible_axes)
Example 14
    def templateClusteringResults(self, path, radius, neighbors, expected_length_clusters, ccore = False):
        sample = read_sample(path)

        dbscan_instance = dbscan(sample, radius, neighbors, ccore)
        dbscan_instance.process()

        clusters = dbscan_instance.get_clusters()
        noise = dbscan_instance.get_noise()

        assert sum([len(cluster) for cluster in clusters]) + len(noise) == len(sample)
        assert sum([len(cluster) for cluster in clusters]) == sum(expected_length_clusters)
        assert sorted([len(cluster) for cluster in clusters]) == expected_length_clusters
Example 15
    def templateClusteringResults(path, radius, neighbors, expected_length_clusters, ccore):
        sample = read_sample(path)

        dbscan_instance = dbscan(sample, radius, neighbors, ccore)
        dbscan_instance.process()

        clusters = dbscan_instance.get_clusters()
        noise = dbscan_instance.get_noise()

        assert sum([len(cluster) for cluster in clusters]) + len(noise) == len(sample)
        assert sum([len(cluster) for cluster in clusters]) == sum(expected_length_clusters)
        assert sorted([len(cluster) for cluster in clusters]) == expected_length_clusters
Example 16
    def templateClusteringDistanceMatrix(path_to_file, radius, neighbors, expected_length_clusters, ccore):
        sample = read_sample(path_to_file)
        distance_matrix = calculate_distance_matrix(sample)

        dbscan_instance = dbscan(distance_matrix, radius, neighbors, ccore, data_type='distance_matrix')
        dbscan_instance.process()

        clusters = dbscan_instance.get_clusters()
        noise = dbscan_instance.get_noise()

        assertion.eq(len(sample), sum([len(cluster) for cluster in clusters]) + len(noise))
        assertion.eq(sum(expected_length_clusters), sum([len(cluster) for cluster in clusters]))
        assertion.eq(expected_length_clusters, sorted([len(cluster) for cluster in clusters]))
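For context, a minimal self-contained sketch of the distance-matrix mode this template exercises; the toy points and parameters are illustrative:

from pyclustering.cluster.dbscan import dbscan
from pyclustering.utils import calculate_distance_matrix

# Two well-separated groups of 1-D points; DBSCAN sees only pairwise distances.
points = [[0.0], [0.1], [0.2], [5.0], [5.1], [5.2]]
matrix = calculate_distance_matrix(points)

instance = dbscan(matrix, 0.5, 2, False, data_type='distance_matrix')
instance.process()
print(instance.get_clusters())   # expected: two clusters of three point indices each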
Example 18
    def testVisualize2DClustersThreeCanvases(self):
        sample_simple1 = read_sample(SIMPLE_SAMPLES.SAMPLE_SIMPLE1)
        sample_simple2 = read_sample(SIMPLE_SAMPLES.SAMPLE_SIMPLE2)
        sample_simple3 = read_sample(SIMPLE_SAMPLES.SAMPLE_SIMPLE3)

        dbscan_instance = dbscan(sample_simple1, 0.4, 2, False)
        dbscan_instance.process()
        clusters_sample1 = dbscan_instance.get_clusters()

        dbscan_instance = dbscan(sample_simple2, 1, 2, False)
        dbscan_instance.process()
        clusters_sample2 = dbscan_instance.get_clusters()

        dbscan_instance = dbscan(sample_simple3, 0.7, 3, False)
        dbscan_instance.process()
        clusters_sample3 = dbscan_instance.get_clusters()

        visualizer = cluster_visualizer(3)
        visualizer.append_clusters(clusters_sample1, sample_simple1, 0, markersize=5)
        visualizer.append_clusters(clusters_sample2, sample_simple2, 1, markersize=5)
        visualizer.append_clusters(clusters_sample3, sample_simple3, 2, markersize=5)
        visualizer.show()
Example 20
    def templateLengthProcessData(path_to_file, radius, min_number_neighbors, max_number_neighbors, ccore):
        for number_neighbors in range(min_number_neighbors, max_number_neighbors, 1):
            sample = read_sample(path_to_file)

            # vary the neighbor count across the tested range
            dbscan_instance = dbscan(sample, radius, number_neighbors, ccore)
            dbscan_instance.process()

            clusters = dbscan_instance.get_clusters()
            noise = dbscan_instance.get_noise()

            length = len(noise)
            length += sum([len(cluster) for cluster in clusters])

            assert len(sample) == length
Example 22
    def templateLengthProcessData(self, path_to_file, radius, min_number_neighbors, max_number_neighbors, ccore = False):
        for number_neighbors in range(min_number_neighbors, max_number_neighbors, 1):
            sample = read_sample(path_to_file)

            # vary the neighbor count across the tested range
            dbscan_instance = dbscan(sample, radius, number_neighbors, ccore)
            dbscan_instance.process()

            clusters = dbscan_instance.get_clusters()
            noise = dbscan_instance.get_noise()

            length = len(noise)
            length += sum([len(cluster) for cluster in clusters])

            assert len(sample) == length
Example 23
def template_clustering(radius, neighb, path, invisible_axes = False, ccore = True):
    sample = read_sample(path)

    dbscan_instance = dbscan(sample, radius, neighb, ccore)
    (ticks, result) = timedcall(dbscan_instance.process)

    clusters = dbscan_instance.get_clusters()
    noise = dbscan_instance.get_noise()

    visualizer = cluster_visualizer()
    visualizer.append_clusters(clusters, sample)
    visualizer.append_cluster(noise, sample, marker = 'x')
    visualizer.show()

    print("Sample: ", path, "\t\tExecution time: ", ticks, "\n")
Example 24
    def templateClusteringResults(path, radius, neighbors, expected_length_clusters, ccore, **kwargs):
        random_order = kwargs.get('random_order', False)

        sample = read_sample(path)
        if random_order:
            shuffle(sample)
         
        dbscan_instance = dbscan(sample, radius, neighbors, ccore)
        dbscan_instance.process()
         
        clusters = dbscan_instance.get_clusters()
        noise = dbscan_instance.get_noise()

        assertion.eq(len(sample), sum([len(cluster) for cluster in clusters]) + len(noise))
        assertion.eq(sum(expected_length_clusters), sum([len(cluster) for cluster in clusters]))
        assertion.eq(expected_length_clusters, sorted([len(cluster) for cluster in clusters]))
Example 27
    def cluster(self, dataset):
        clustering = dbscan.dbscan(dataset.tolist(), self.radius, self.k, True)

        # time.clock() was removed in Python 3.8; perf_counter() is the portable timer
        start = time.perf_counter()
        clustering.process()
        computation_time = time.perf_counter() - start

        return {
            'clustering': clustering.get_clusters(),
            'n_clusters': len(clustering.get_clusters()),
            'outliers': clustering.get_noise(),
            'outlier_ratio': len(clustering.get_noise()) / len(dataset),
            'computation_time': computation_time,
            'radius': self.radius,
            'n': self.k
        }
Example 28
    def on_run_dbscan(self):
        if self.ui.matrix_file_path.text() == '':
            self.ui.status_label.setText(
                "<font color='red'>No data specified</font>")
            return None
        self.ui.status_label.setText("Loading data...")
        self.udm = load_dist_matrix(self.ui.matrix_file_path.text())
        self.ui.status_label.setText("Running dbscan for :{}...".format(
            self.ui.matrix_file_path.text()))
        dbs = dbscan.dbscan(self.udm,
                            float(self.ui.eps_edit.text()),
                            int(self.ui.min_points_edit.text()),
                            data_type="distance_matrix")
        dbs.process()

        self.ui.status_label.setText("Done.")
        print(dbs.get_clusters())
Example 29
    def templateClusterAllocationOneDimensionDataSpecificData(data_type, ccore_flag):
        for _ in range(50):
            sample = [[random()] for _ in range(10)] + [[random() + 3] for _ in range(10)] + [[random() + 6] for _ in range(10)] + [[random() + 9] for _ in range(10)]

            if data_type == 'distance_matrix':
                input_data = calculate_distance_matrix(sample)
            elif data_type == 'points':
                input_data = sample
            else:
                raise ValueError("Incorrect data type '%s' is specified" % data_type)

            dbscan_instance = dbscan(input_data, 1.0, 2, ccore_flag, data_type=data_type)
            dbscan_instance.process()

            clusters = dbscan_instance.get_clusters()

            assertion.eq(4, len(clusters))
            for cluster in clusters:
                assertion.eq(10, len(cluster))
Example 30
def dbscan_cluster(df, eps, neighbours, hover_text):
    datadf = df.loc[:, df.columns != hover_text]
    data_list = datadf.to_numpy(dtype="int64").tolist()
    dbscan_instance = dbscan(data_list, eps, neighbours)
    dbscan_instance.process()
    clusters = dbscan_instance.get_clusters()
    reps = dbscan_instance.get_cluster_encoding()

    encoder = cluster_encoder(reps, clusters, data_list)
    encoder.set_encoding(type_encoding.CLUSTER_INDEX_LABELING)
    label = np.array(encoder.get_clusters(), dtype='int32')
    data_array = np.array(data_list)
    col_len = len(datadf.columns)
    if col_len == 2:
        return scat2d(data_array, label, hover_text, df)
    else:
        return scat3d(data_array, label, hover_text, df)
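The label conversion above relies on pyclustering's encoder; a minimal standalone sketch of that step, with toy data and illustrative parameters:

from pyclustering.cluster.dbscan import dbscan
from pyclustering.cluster.encoder import cluster_encoder, type_encoding

data = [[1.0], [1.1], [1.2], [9.0], [9.1], [9.2]]
instance = dbscan(data, 0.5, 2)
instance.process()

# Convert index lists (one list per cluster) into one label per point.
encoder = cluster_encoder(instance.get_cluster_encoding(), instance.get_clusters(), data)
encoder.set_encoding(type_encoding.CLUSTER_INDEX_LABELING)
print(encoder.get_clusters())   # e.g. [0, 0, 0, 1, 1, 1]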
Example 33
def template_clustering(radius, neighb, path, invisible_axes = False, ccore = True, show = True):
    sample = read_sample(path)
    
    dbscan_instance = dbscan(sample, radius, neighb, ccore)
    (ticks, _) = timedcall(dbscan_instance.process)
    
    clusters = dbscan_instance.get_clusters()
    noise = dbscan_instance.get_noise()
    
    print([len(cluster) for cluster in clusters])
    
    if show:
        visualizer = cluster_visualizer()
        visualizer.append_clusters(clusters, sample)
        visualizer.append_cluster(noise, sample, marker = 'x')
        visualizer.show()
    
    print("Sample: ", path, "\t\tExecution time: ", ticks, "\n")
    
    return sample, clusters, noise
Example 34
    def pickle_dump_load(ccore):
        sample = read_sample(SIMPLE_SAMPLES.SAMPLE_SIMPLE3)
        dbscan_instance = dbscan(sample, 0.7, 3, ccore)
        dbscan_instance.process()

        expected_clusters = dbscan_instance.get_clusters()
        expected_noise = dbscan_instance.get_noise()
        expected_encoding = dbscan_instance.get_cluster_encoding()

        dbscan_dump_file = open('test_dbscan_file.pkl', 'wb')
        pickle.dump(dbscan_instance, dbscan_dump_file)
        dbscan_dump_file.close()

        dbscan_dump_file = open('test_dbscan_file.pkl', 'rb')
        dbscan_instance = pickle.load(dbscan_dump_file)
        dbscan_dump_file.close()

        assertion.eq(expected_clusters, dbscan_instance.get_clusters())
        assertion.eq(expected_noise, dbscan_instance.get_noise())
        assertion.eq(expected_encoding, dbscan_instance.get_cluster_encoding())
Example 35
def template_clustering(radius, neighb, path, invisible_axes = False, ccore = True, show = True):
    sample = read_sample(path)

    dbscan_instance = dbscan(sample, radius, neighb, ccore)
    (ticks, _) = timedcall(dbscan_instance.process)

    clusters = dbscan_instance.get_clusters()
    noise = dbscan_instance.get_noise()

    print([len(cluster) for cluster in clusters])

    if False:  # visualization disabled in this variant
        visualizer = cluster_visualizer()
        visualizer.append_clusters(clusters, sample)
        visualizer.append_cluster(noise, sample, marker = 'x')
        visualizer.show()

    print("Sample: ", path, "\t\tExecution time: ", ticks, "\n")

    return (sample, clusters, noise)
Example 36
    def templateLengthProcessSpecificData(data_type, path_to_file, radius, min_number_neighbors, max_number_neighbors, ccore):
        for number_neighbors in range(min_number_neighbors, max_number_neighbors, 1):
            sample = read_sample(path_to_file)

            if data_type == 'distance_matrix':
                input_data = calculate_distance_matrix(sample)
            elif data_type == 'points':
                input_data = sample
            else:
                raise ValueError("Incorrect data type '%s' is specified" % data_type)

            dbscan_instance = dbscan(input_data, radius, number_neighbors, ccore, data_type=data_type)
            dbscan_instance.process()

            clusters = dbscan_instance.get_clusters()
            noise = dbscan_instance.get_noise()

            length = len(noise)
            length += sum([len(cluster) for cluster in clusters])

            assertion.eq(len(sample), length)
Example 39
def template_clustering(radius, neighb, path, invisible_axes = False, ccore = False, show = True, tempos = tempos_dbscan):
    sample = read_sample(path)
    
    dbscan_instance = dbscan(sample, radius, neighb, ccore)
    (ticks, _) = timedcall(dbscan_instance.process)
    
    clusters = dbscan_instance.get_clusters()
    noise = dbscan_instance.get_noise()
    
    print([len(cluster) for cluster in clusters])
    
    if show:
        visualizer = cluster_visualizer()
        visualizer.append_clusters(clusters, sample)
        visualizer.append_cluster(noise, sample, marker = 'x')
        visualizer.show()
    
    print("Sample: ", path, "\t\tExecution time: ", ticks, "\n")

    tempos.append(ticks)

    return sample, clusters, noise
Example 40
    def pickle_dump_load(ccore):
        dump_file_name = tempfile.gettempdir() + os.sep + 'test_dbscan_file.pkl'

        sample = read_sample(SIMPLE_SAMPLES.SAMPLE_SIMPLE3)
        dbscan_instance = dbscan(sample, 0.7, 3, ccore)
        dbscan_instance.process()

        expected_clusters = dbscan_instance.get_clusters()
        expected_noise = dbscan_instance.get_noise()
        expected_encoding = dbscan_instance.get_cluster_encoding()

        dbscan_dump_file = open(dump_file_name, 'wb')
        pickle.dump(dbscan_instance, dbscan_dump_file)
        dbscan_dump_file.close()

        dbscan_dump_file = open(dump_file_name, 'rb')
        dbscan_instance = pickle.load(dbscan_dump_file)
        dbscan_dump_file.close()

        os.remove(dump_file_name)

        assertion.eq(expected_clusters, dbscan_instance.get_clusters())
        assertion.eq(expected_noise, dbscan_instance.get_noise())
        assertion.eq(expected_encoding, dbscan_instance.get_cluster_encoding())
Example 42
    def testCoreInterfaceIntInputData(self):
        dbscan_instance = dbscan([[1], [2], [3], [20], [21], [22]], 3, 2, True)
        dbscan_instance.process()
        assert len(dbscan_instance.get_clusters()) == 2
Example 43
def template_image_segmentation(image_file, steps, time, dynamic_file_prefix):
    image = read_image(image_file)
    stimulus = rgb2gray(image)

    params = hhn_parameters()
    params.deltah = 650
    params.w1 = 0.1
    params.w2 = 9.0
    params.w3 = 5.0
    params.threshold = -10

    stimulus = [255.0 - pixel for pixel in stimulus]
    divider = max(stimulus) / 50.0
    stimulus = [int(pixel / divider) for pixel in stimulus]

    t, dyn_peripheral, dyn_central = None, None, None

    if (not os.path.exists(dynamic_file_prefix + 'dynamic_time.txt') or
            not os.path.exists(dynamic_file_prefix + 'dynamic_peripheral.txt')
            or not os.path.exists(dynamic_file_prefix +
                                  'dynamic_dyn_central.txt')):

        print(
            "File with output dynamic is not found - simulation will be performed - it may take some time, be patient."
        )

        net = hhn_network(len(stimulus), stimulus, params, ccore=True)

        (t, dyn_peripheral, dyn_central) = net.simulate(steps, time)

        print("Store dynamic to save time for simulation next time.")

        with open(dynamic_file_prefix + 'dynamic_time.txt',
                  'wb') as file_descriptor:
            pickle.dump(t, file_descriptor)

        with open(dynamic_file_prefix + 'dynamic_peripheral.txt',
                  'wb') as file_descriptor:
            pickle.dump(dyn_peripheral, file_descriptor)

        with open(dynamic_file_prefix + 'dynamic_dyn_central.txt',
                  'wb') as file_descriptor:
            pickle.dump(dyn_central, file_descriptor)
    else:
        print("Load output dynamic from file.")

        with open(dynamic_file_prefix + 'dynamic_time.txt',
                  'rb') as file_descriptor:
            t = pickle.load(file_descriptor)

        with open(dynamic_file_prefix + 'dynamic_peripheral.txt',
                  'rb') as file_descriptor:
            dyn_peripheral = pickle.load(file_descriptor)

        with open(dynamic_file_prefix + 'dynamic_dyn_central.txt',
                  'rb') as file_descriptor:
            dyn_central = pickle.load(file_descriptor)

    animate_segmentation(t, dyn_peripheral, image_file, 200)

    # just for checking correctness of results - let's use classical algorithm
    if (False):
        dbscan_instance = dbscan(image, 3, 4, True)
        dbscan_instance.process()
        trustable_clusters = dbscan_instance.get_clusters()

        amount_canvases = len(trustable_clusters) + 2
        visualizer = dynamic_visualizer(amount_canvases,
                                        x_title="Time",
                                        y_title="V",
                                        y_labels=False)
        visualizer.append_dynamics(t, dyn_peripheral, 0, trustable_clusters)
        visualizer.append_dynamics(t, dyn_central, amount_canvases - 2, True)
        visualizer.show()
Example 44
# Imports assumed for this excerpt (the definitions import appears in the original).
from pyclustering.cluster import cluster_visualizer_multidim
from pyclustering.cluster.dbscan import dbscan
from pyclustering.samples.definitions import FCPS_SAMPLES, FAMOUS_SAMPLES
from pyclustering.utils import read_sample

# Sample for cluster analysis.
# sample = read_sample(FCPS_SAMPLES.SAMPLE_CHAINLINK)
sample = read_sample(FAMOUS_SAMPLES.SAMPLE_IRIS)

# Load the t4.8k point set (two floats per line).
inp = []
with open("t4.8k", "r") as lines:
    for line in lines:
        cords = line.split()
        if len(cords) != 2:
            continue
        inp.append([float(cords[0]), float(cords[1])])
# print(inp)

# Create DBSCAN algorithm.
dbscan_instance = dbscan(inp, 5, 3)
# dbscan_instance = dbscan(sample, 5, 3)

# Start processing by DBSCAN.
dbscan_instance.process()

# Obtain results of clustering.
clusters = dbscan_instance.get_clusters()
# noise = dbscan_instance.get_noise()

# Visualize clustering results.
visualizer = cluster_visualizer_multidim()
# visualizer = cluster_visualizer()
visualizer.append_clusters(clusters, inp, marker='o')
# visualizer.append_clusters(clusters, sample, marker='o')
# visualizer.append_cluster(noise, inp, marker='x')
visualizer.show()
Example 46
def process_dbscan(sample):
    instance = dbscan(sample, 1.0, 2)
    (ticks, _) = timedcall(instance.process)
    return ticks
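A sketch of driving this helper, assuming random 2-D input; the sample size is illustrative:

import random

# Hypothetical input: 1000 random 2-D points in the unit square.
sample = [[random.random(), random.random()] for _ in range(1000)]
print("DBSCAN time:", process_dbscan(sample))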
Example 48
import numpy as np
import matplotlib.pyplot as plt
from pyclustering.cluster.dbscan import dbscan
from pyclustering.cluster import cluster_visualizer

from features import features

cmap = plt.get_cmap('tab10')

# [0, 1, 2,  3,  4,  5,  6]
# [x, y, r, vx, vy, vr, th]
data = np.c_[features[:, 2], features[:, 6], features[:, 5]]  #, features[:, 6]

# Create DBSCAN algorithm.
dbscan_instance = dbscan(data, 0.7, 3)

# Start processing by DBSCAN.
dbscan_instance.process()

# Obtain results of clustering.
clusters = dbscan_instance.get_clusters()
noise = dbscan_instance.get_noise()

labels = np.full_like(features[:, 0], -1).astype('int')
for i, indices in enumerate(clusters):
    labels[indices] = i
# labels += 1
print(labels)
print(len(clusters))
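The tab10 colormap loaded above is never used in this excerpt; a hedged sketch of how the labels might be plotted with it (the axis pairing is an assumption):

# Hypothetical scatter of the first two feature columns, colored by cluster
# label; noise points carry label -1.
plt.scatter(data[:, 0], data[:, 1], c=[cmap(int(l) % 10) for l in labels], s=8)
plt.show()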
Example 50
def work():
    now = time.time()
    try:
        tracks = next(data_generator)
        #[h,t,f,l,v,s] = track[0]

        # for t in tracks:
        #     escena.draw(t)

        #X = np.array([t[5] for t in tracks])
        # X = np.array([(t[1]-t[0])/2 for t in tracks])

        # hdb = hdbscan.HDBSCAN(min_cluster_size=3)
        # hdb.fit(X)
        # print("Second stage HDBScan num clusters: ", len(set(hdb.labels_)))
        # # create a list of lists
        # clusters = [[] for i in range(len(set(hdb.labels_)))]
        # for i,l in enumerate(tracks):
        #     clusters[hdb.labels_[i]].append(l)

        # create instance of Elbow method using K value from 1 to 10.
        # kmin, kmax = 1, 5
        # #X = [(t[1]-t[0])/2 for t in tracks]
        # X = [t[5] for t in tracks]
        # elbow_instance = elbow(X, kmin, kmax)
        # # process input data and obtain results of analysis
        # elbow_instance.process()
        # amount_clusters = elbow_instance.get_amount()  # most probable amount of clusters
        # wce = elbow_instance.get_wce()  # total within-cluster errors for each K
        # # perform cluster analysis using K-Means algorithm
        # centers = kmeans_plusplus_initializer(X, amount_clusters, amount_candidates=kmeans_plusplus_initializer.FARTHEST_CENTER_CANDIDATE).initialize()
        # kmeans_instance = kmeans(X, centers)
        # kmeans_instance.process()
        # # obtain clustering results and visualize them
        # clusters = kmeans_instance.get_clusters()
        # centers = kmeans_instance.get_centers()

        # Create DBSCAN algorithm.
        XD = [[] for i in range(len(tracks))]
        for i, t1 in enumerate(tracks):
            for j, t2 in enumerate(tracks):
                XD[i].append(
                    np.linalg.norm(t1[0] - t2[1]) +
                    np.linalg.norm(t1[1] - t2[0]))

        dbscan_instance = dbscan(XD, 700, 2, data_type='distance_matrix')
        # Start processing by DBSCAN.
        dbscan_instance.process()
        # Obtain results of clustering.
        clusters = dbscan_instance.get_clusters()
        noise = dbscan_instance.get_noise()

        # create a list of lists
        print(len(clusters), clusters)
        clouds = [[] for i in range(len(clusters))]
        for i, c in enumerate(clusters):
            for x in c:
                clouds[i].append(tracks[x])

        #kmeans_visualizer.show_clusters(X, clusters, centers)
        # print("Tracks ", len(set(hdb.labels_)))

        # escena.drawTrack(clusters)
        #print("real elapsed", (time.time() - now)*1000, " computed: " , s[-1]['timestamp']-s[0]['timestamp'])

        for c in clouds:
            escena.drawTrack(c)
    #   escena.drawTrack(clusters)

    except StopIteration:
        print("End iterator")
        timer.stop()