Example #1
File: gaac.py Project: DrDub/nltk
def cluster(self, vectors, assign_clusters=False, trace=False):
    # stores the merge order
    self._dendrogram = Dendrogram(
        [numpy.array(vector, numpy.float64) for vector in vectors])
    return VectorSpaceClusterer.cluster(self, vectors, assign_clusters, trace)
Example #2
File: gaac.py Project: DrDub/nltk
import numpy

# VectorSpaceClusterer, Dendrogram and cosine_distance live in nltk.cluster.util
from nltk.cluster.util import Dendrogram, VectorSpaceClusterer, cosine_distance


class GAAClusterer(VectorSpaceClusterer):
    """
    The Group Average Agglomerative Clusterer starts with each of the N vectors as singleton
    clusters. It then iteratively merges pairs of clusters which have the
    closest centroids.  This continues until there is only one cluster. The
    order of merges gives rise to a dendrogram: a tree with the earlier merges
    lower than later merges. The membership of a given number of clusters c, 1
    <= c <= N, can be found by cutting the dendrogram at depth c.

    This clusterer uses the cosine similarity metric only, which allows for
    efficient speed-up in the clustering process.
    """

    def __init__(self, num_clusters=1, normalise=True, svd_dimensions=None):
        VectorSpaceClusterer.__init__(self, normalise, svd_dimensions)
        self._num_clusters = num_clusters
        self._dendrogram = None
        self._groups_values = None

    def cluster(self, vectors, assign_clusters=False, trace=False):
        # stores the merge order
        self._dendrogram = Dendrogram(
            [numpy.array(vector, numpy.float64) for vector in vectors])
        return VectorSpaceClusterer.cluster(self, vectors, assign_clusters, trace)

    def cluster_vectorspace(self, vectors, trace=False):
        # variables describing the initial situation
        N = len(vectors)
        cluster_len = [1]*N
        cluster_count = N
        index_map = numpy.arange(N)

        # construct the similarity matrix
        dims = (N, N)
        dist = numpy.ones(dims, dtype=numpy.float64)*numpy.inf
        for i in range(N):
            for j in range(i+1, N):
                dist[i, j] = cosine_distance(vectors[i], vectors[j])

        while cluster_count > max(self._num_clusters, 1):
            i, j = numpy.unravel_index(dist.argmin(), dims)
            if trace:
                print("merging %d and %d" % (i, j))

            # update similarities for merging i and j
            self._merge_similarities(dist, cluster_len, i, j)

            # remove j
            dist[:, j] = numpy.inf
            dist[j, :] = numpy.inf

            # merge the clusters
            cluster_len[i] = cluster_len[i]+cluster_len[j]
            self._dendrogram.merge(index_map[i], index_map[j])
            cluster_count -= 1

            # update the index map to reflect the indices as if j
            # had been removed
            index_map[j+1:] -= 1
            index_map[j] = N

        self.update_clusters(self._num_clusters)

    def _merge_similarities(self, dist, cluster_len, i, j):
        # the new cluster i merged from i and j adopts the average of
        # i and j's similarity to each other cluster, weighted by the
        # number of points in the clusters i and j
        i_weight = cluster_len[i]
        j_weight = cluster_len[j]
        weight_sum = i_weight+j_weight

        # update for x<i
        dist[:i, i] = dist[:i, i]*i_weight + dist[:i, j]*j_weight
        dist[:i, i] /= weight_sum
        # update for i<x<j
        dist[i, i+1:j] = dist[i, i+1:j]*i_weight + dist[i+1:j, j]*j_weight
        # update for i<j<x
        dist[i, j+1:] = dist[i, j+1:]*i_weight + dist[j, j+1:]*j_weight
        dist[i, i+1:] /= weight_sum

    def update_clusters(self, num_clusters):
        clusters = self._dendrogram.groups(num_clusters)
        self._centroids = []
        for cluster in clusters:
            assert len(cluster) > 0
            if self._should_normalise:
                centroid = self._normalise(cluster[0])
            else:
                centroid = numpy.array(cluster[0])
            for vector in cluster[1:]:
                if self._should_normalise:
                    centroid += self._normalise(vector)
                else:
                    centroid += vector
            centroid /= len(cluster)
            self._centroids.append(centroid)
        self._num_clusters = len(self._centroids)

    def classify_vectorspace(self, vector):
        best = None
        for i in range(self._num_clusters):
            centroid = self._centroids[i]
            dist = cosine_distance(vector, centroid)
            if not best or dist < best[0]:
                best = (dist, i)
        return best[1]

    def dendrogram(self):
        """
        :return: The dendrogram representing the current clustering
        :rtype:  Dendrogram
        """
        return self._dendrogram

    def num_clusters(self):
        return self._num_clusters

    def __repr__(self):
        return '<GroupAverageAgglomerative Clusterer n=%d>' % self._num_clusters
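
A minimal usage sketch for the class above, assuming the standard NLTK package
layout (GAAClusterer importable from nltk.cluster) and an installed numpy:

import numpy
from nltk.cluster import GAAClusterer

# four 2-D points that fall into two visually obvious groups
vectors = [numpy.array(f) for f in ([1, 2], [1, 3], [5, 5], [6, 4])]

# agglomerate down to two clusters and assign each input vector to one of them
clusterer = GAAClusterer(num_clusters=2)
assignments = clusterer.cluster(vectors, assign_clusters=True)

print(assignments)                              # e.g. [0, 0, 1, 1]
print(clusterer.num_clusters())                 # 2
print(clusterer.classify(numpy.array([1, 2])))  # index of the nearest centroid by cosine distance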
Example #3
def cluster(self, vectors, assign_clusters=False, trace=False):
    # stores the merge order
    self._dendrogram = Dendrogram(
        [numpy.array(vector, numpy.float64) for vector in vectors]
    )
    return VectorSpaceClusterer.cluster(self, vectors, assign_clusters, trace)
Example #4
import numpy

# VectorSpaceClusterer, Dendrogram and cosine_distance live in nltk.cluster.util
from nltk.cluster.util import Dendrogram, VectorSpaceClusterer, cosine_distance


class GAAClusterer(VectorSpaceClusterer):
    """
    The Group Average Agglomerative Clusterer starts with each of the N vectors as singleton
    clusters. It then iteratively merges pairs of clusters which have the
    closest centroids.  This continues until there is only one cluster. The
    order of merges gives rise to a dendrogram: a tree with the earlier merges
    lower than later merges. The membership of a given number of clusters c, 1
    <= c <= N, can be found by cutting the dendrogram at depth c.

    This clusterer uses the cosine similarity metric only, which allows for
    efficient speed-up in the clustering process.
    """

    def __init__(self, num_clusters=1, normalise=True, svd_dimensions=None):
        VectorSpaceClusterer.__init__(self, normalise, svd_dimensions)
        self._num_clusters = num_clusters
        self._dendrogram = None
        self._groups_values = None

    def cluster(self, vectors, assign_clusters=False, trace=False):
        # stores the merge order
        self._dendrogram = Dendrogram(
            [numpy.array(vector, numpy.float64) for vector in vectors]
        )
        return VectorSpaceClusterer.cluster(self, vectors, assign_clusters, trace)

    def cluster_vectorspace(self, vectors, trace=False):
        # variables describing the initial situation
        N = len(vectors)
        cluster_len = [1] * N
        cluster_count = N
        index_map = numpy.arange(N)

        # construct the similarity matrix
        dims = (N, N)
        dist = numpy.ones(dims, dtype=numpy.float64) * numpy.inf
        for i in range(N):
            for j in range(i + 1, N):
                dist[i, j] = cosine_distance(vectors[i], vectors[j])

        while cluster_count > max(self._num_clusters, 1):
            i, j = numpy.unravel_index(dist.argmin(), dims)
            if trace:
                print("merging %d and %d" % (i, j))

            # update similarities for merging i and j
            self._merge_similarities(dist, cluster_len, i, j)

            # remove j
            dist[:, j] = numpy.inf
            dist[j, :] = numpy.inf

            # merge the clusters
            cluster_len[i] = cluster_len[i] + cluster_len[j]
            self._dendrogram.merge(index_map[i], index_map[j])
            cluster_count -= 1

            # update the index map to reflect the indices as if j
            # had been removed
            index_map[j + 1 :] -= 1
            index_map[j] = N

        self.update_clusters(self._num_clusters)

    def _merge_similarities(self, dist, cluster_len, i, j):
        # the new cluster i merged from i and j adopts the average of
        # i and j's similarity to each other cluster, weighted by the
        # number of points in the clusters i and j
        i_weight = cluster_len[i]
        j_weight = cluster_len[j]
        weight_sum = i_weight + j_weight

        # update for x<i
        dist[:i, i] = dist[:i, i] * i_weight + dist[:i, j] * j_weight
        dist[:i, i] /= weight_sum
        # update for i<x<j
        dist[i, i + 1 : j] = (
            dist[i, i + 1 : j] * i_weight + dist[i + 1 : j, j] * j_weight
        )
        # update for i<j<x
        dist[i, j + 1 :] = dist[i, j + 1 :] * i_weight + dist[j, j + 1 :] * j_weight
        dist[i, i + 1 :] /= weight_sum

    def update_clusters(self, num_clusters):
        clusters = self._dendrogram.groups(num_clusters)
        self._centroids = []
        for cluster in clusters:
            assert len(cluster) > 0
            if self._should_normalise:
                centroid = self._normalise(cluster[0])
            else:
                centroid = numpy.array(cluster[0])
            for vector in cluster[1:]:
                if self._should_normalise:
                    centroid += self._normalise(vector)
                else:
                    centroid += vector
            centroid /= len(cluster)
            self._centroids.append(centroid)
        self._num_clusters = len(self._centroids)

    def classify_vectorspace(self, vector):
        best = None
        for i in range(self._num_clusters):
            centroid = self._centroids[i]
            dist = cosine_distance(vector, centroid)
            if not best or dist < best[0]:
                best = (dist, i)
        return best[1]

    def dendrogram(self):
        """
        :return: The dendrogram representing the current clustering
        :rtype:  Dendrogram
        """
        return self._dendrogram

    def num_clusters(self):
        return self._num_clusters

    def __repr__(self):
        return "<GroupAverageAgglomerative Clusterer n=%d>" % self._num_clusters
Example #5
import copy

import numpy

# VectorSpaceClusterer and Dendrogram live in nltk.cluster.util
from nltk.cluster.util import Dendrogram, VectorSpaceClusterer


class GAAClusterer(VectorSpaceClusterer):
    """
    The Group Average Agglomerative Clusterer starts with each of the N vectors as singleton
    clusters. It then iteratively merges pairs of clusters which have the
    closest centroids.  This continues until there is only one cluster. The
    order of merges gives rise to a dendrogram: a tree with the earlier merges
    lower than later merges. The membership of a given number of clusters c, 1
    <= c <= N, can be found by cutting the dendrogram at depth c.

    This clusterer uses the cosine similarity metric only, which allows for
    efficient speed-up in the clustering process.
    """
    def __init__(self, num_clusters=1, normalise=True, svd_dimensions=None):
        VectorSpaceClusterer.__init__(self, normalise, svd_dimensions)
        self._num_clusters = num_clusters
        self._dendrogram = None
        self._groups_values = None

    def cluster(self, vectors, assign_clusters=False, trace=False):
        # stores the merge order
        self._dendrogram = Dendrogram(
            [numpy.array(vector, numpy.float64) for vector in vectors])
        return VectorSpaceClusterer.cluster(self, vectors, assign_clusters,
                                            trace)

    def cluster_vectorspace(self, vectors, trace=False):
        # create a cluster for each vector
        clusters = [[vector] for vector in vectors]

        # the sum vectors
        vector_sum = copy.copy(vectors)

        while len(clusters) > max(self._num_clusters, 1):
            # find the two best candidate clusters to merge, based on their
            # S(union c_i, c_j)
            best = None
            for i in range(len(clusters)):
                for j in range(i + 1, len(clusters)):
                    sim = self._average_similarity(vector_sum[i],
                                                   len(clusters[i]),
                                                   vector_sum[j],
                                                   len(clusters[j]))
                    if not best or sim > best[0]:
                        best = (sim, i, j)

            # merge them and replace in cluster list
            i, j = best[1:]
            sum = clusters[i] + clusters[j]
            if trace:
                print('merging %d and %d' % (i, j))

            clusters[i] = sum
            del clusters[j]
            vector_sum[i] = vector_sum[i] + vector_sum[j]
            del vector_sum[j]

            self._dendrogram.merge(i, j)

        self.update_clusters(self._num_clusters)

    def update_clusters(self, num_clusters):
        clusters = self._dendrogram.groups(num_clusters)
        self._centroids = []
        for cluster in clusters:
            assert len(cluster) > 0
            if self._should_normalise:
                centroid = self._normalise(cluster[0])
            else:
                centroid = numpy.array(cluster[0])
            for vector in cluster[1:]:
                if self._should_normalise:
                    centroid += self._normalise(vector)
                else:
                    centroid += vector
            centroid /= float(len(cluster))
            self._centroids.append(centroid)
        self._num_clusters = len(self._centroids)

    def classify_vectorspace(self, vector):
        best = None
        for i in range(self._num_clusters):
            centroid = self._centroids[i]
            sim = self._average_similarity(vector, 1, centroid, 1)
            if not best or sim > best[0]:
                best = (sim, i)
        return best[1]

    def dendrogram(self):
        """
        :return: The dendrogram representing the current clustering
        :rtype:  Dendrogram
        """
        return self._dendrogram

    def num_clusters(self):
        return self._num_clusters

    def _average_similarity(self, v1, l1, v2, l2):
        sum = v1 + v2
        length = l1 + l2
        return (numpy.dot(sum, sum) - length) / (length * (length - 1))

    def __repr__(self):
        return '<GroupAverageAgglomerative Clusterer n=%d>' % self._num_clusters
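
In the variant above, _average_similarity relies on the fact that for unit-length
vectors (the normalise=True case) dot(sum, sum) equals length plus the sum of
cosine similarities over all ordered pairs of distinct vectors, so
(dot(sum, sum) - length) / (length * (length - 1)) is the mean pairwise
similarity of the would-be merged cluster, computed from the cached sum vectors
alone. A quick numeric check of that identity on hypothetical unit vectors:

import numpy
from itertools import combinations

# a few unit-length 2-D vectors (hypothetical data)
angles = [0.1, 0.7, 1.3, 2.0]
vs = [numpy.array([numpy.cos(a), numpy.sin(a)]) for a in angles]

s, n = sum(vs), len(vs)
via_sum = (numpy.dot(s, s) - n) / (n * (n - 1))

# brute force: mean cosine similarity over all unordered pairs
via_pairs = numpy.mean([numpy.dot(u, v) for u, v in combinations(vs, 2)])

print(numpy.isclose(via_sum, via_pairs))  # True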
Example #6
import copy

import numpy

# VectorSpaceClusterer and Dendrogram live in nltk.cluster.util
from nltk.cluster.util import Dendrogram, VectorSpaceClusterer


class GAAClusterer(VectorSpaceClusterer):
    """
    The Group Average Agglomerative Clusterer starts with each of the N vectors as singleton
    clusters. It then iteratively merges pairs of clusters which have the
    closest centroids.  This continues until there is only one cluster. The
    order of merges gives rise to a dendrogram: a tree with the earlier merges
    lower than later merges. The membership of a given number of clusters c, 1
    <= c <= N, can be found by cutting the dendrogram at depth c.

    This clusterer uses the cosine similarity metric only, which allows for
    efficient speed-up in the clustering process.
    """

    def __init__(self, num_clusters=1, normalise=True, svd_dimensions=None):
        VectorSpaceClusterer.__init__(self, normalise, svd_dimensions)
        self._num_clusters = num_clusters
        self._dendrogram = None
        self._groups_values = None

    def cluster(self, vectors, assign_clusters=False, trace=False):
        # stores the merge order
        self._dendrogram = Dendrogram(
            [numpy.array(vector, numpy.float64) for vector in vectors])
        return VectorSpaceClusterer.cluster(self, vectors, assign_clusters, trace)

    def cluster_vectorspace(self, vectors, trace=False):
        # create a cluster for each vector
        clusters = [[vector] for vector in vectors]

        # the sum vectors
        vector_sum = copy.copy(vectors)

        while len(clusters) > max(self._num_clusters, 1):
            # find the two best candidate clusters to merge, based on their
            # S(union c_i, c_j)
            best = None
            for i in range(len(clusters)):
                for j in range(i + 1, len(clusters)):
                    sim = self._average_similarity(
                                vector_sum[i], len(clusters[i]),
                                vector_sum[j], len(clusters[j]))
                    if not best or sim > best[0]:
                        best = (sim, i, j)

            # merge them and replace in cluster list
            i, j = best[1:]
            sum = clusters[i] + clusters[j]
            if trace:
                print('merging %d and %d' % (i, j))

            clusters[i] = sum
            del clusters[j]
            vector_sum[i] = vector_sum[i] + vector_sum[j]
            del vector_sum[j]

            self._dendrogram.merge(i, j)

        self.update_clusters(self._num_clusters)

    def update_clusters(self, num_clusters):
        clusters = self._dendrogram.groups(num_clusters)
        self._centroids = []
        for cluster in clusters:
            assert len(cluster) > 0
            if self._should_normalise:
                centroid = self._normalise(cluster[0])
            else:
                centroid = numpy.array(cluster[0])
            for vector in cluster[1:]:
                if self._should_normalise:
                    centroid += self._normalise(vector)
                else:
                    centroid += vector
            centroid /= float(len(cluster))
            self._centroids.append(centroid)
        self._num_clusters = len(self._centroids)

    def classify_vectorspace(self, vector):
        best = None
        for i in range(self._num_clusters):
            centroid = self._centroids[i]
            sim = self._average_similarity(vector, 1, centroid, 1)
            if not best or sim > best[0]:
                best = (sim, i)
        return best[1]

    def dendrogram(self):
        """
        :return: The dendrogram representing the current clustering
        :rtype:  Dendrogram
        """
        return self._dendrogram

    def num_clusters(self):
        return self._num_clusters

    def _average_similarity(self, v1, l1, v2, l2):
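        # for unit-length vectors, dot(sum, sum) equals `length` plus the sum
        # of cosine similarities over all ordered pairs of distinct vectors,
        # so this is the mean pairwise similarity of the merged cluster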
        sum = v1 + v2
        length = l1 + l2
        return (numpy.dot(sum, sum) - length) / (length * (length - 1))

    def __repr__(self):
        return '<GroupAverageAgglomerative Clusterer n=%d>' % self._num_clusters
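
Every variant above records the merge order in a Dendrogram, which can be
inspected after clustering. A short sketch, assuming the standard nltk.cluster
imports and that Dendrogram.show() renders the merge tree as ASCII art, as in
the NLTK demos:

import numpy
from nltk.cluster import GAAClusterer

vectors = [numpy.array(f) for f in ([3, 3], [1, 2], [4, 2], [4, 0], [2, 3], [3, 1])]
clusterer = GAAClusterer()  # default num_clusters=1: merge all the way to one cluster
clusterer.cluster(vectors)

# the full tree of merges; earlier merges sit lower in the tree
clusterer.dendrogram().show()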