def test_clusterer(self):
    """Cluster 10 'government' and 10 'mystery' brown-corpus documents.

    Each corpus article is turned into a Plone Document and its UID is
    recorded.  K-means starts from random initial conditions, so a
    single run can misassign documents; the clustering is therefore
    repeated 50 times, and the test accepts the outcome as long as at
    most 2 of the 10 documents of one category end up in the wrong
    cluster.
    """
    clusterer = KMeans()
    gov_fileids = brown.fileids(categories='government')[:10]
    mys_fileids = brown.fileids(categories='mystery')[:10]

    gov_uids = []
    for fileid in gov_fileids:
        body = " ".join(brown.words(fileid))
        self.folder.invokeFactory('Document', fileid, text=body)
        gov_uids.append(self.folder[fileid].UID())

    mys_uids = []
    for fileid in mys_fileids:
        body = " ".join(brown.words(fileid))
        self.folder.invokeFactory('Document', fileid, text=body)
        mys_uids.append(self.folder[fileid].UID())

    result = clusterer.clusterize(2, 50, repeats=50)
    first_cluster = set(result[0])
    # Whichever category the first cluster represents, count how many
    # of its members do NOT belong to that category.
    misplaced = min(len(first_cluster - set(gov_uids)),
                    len(first_cluster - set(mys_uids)))
    self.failUnless(misplaced <= 2)
def action_clusterize(self, action, data):
    """Run k-means over the site's documents and publish the clusters.

    For every cluster of UIDs returned by the clusterer, each UID is
    resolved via an unrestricted portal_catalog search and rendered as
    a (URL, title, description) tuple.  The grouped results are stored
    on ``self.clusters`` for the form template to display.
    """
    catalog = getToolByName(self.context, 'portal_catalog')
    grouping = KMeans().clusterize(
        data['no_clusters'], data['no_noun_ranks'],
        repeats=data['repeats'])
    grouped = []
    for members in grouping.values():
        entries = []
        for uid in members:
            # Unrestricted search so non-public items still resolve.
            brain = catalog.unrestrictedSearchResults(UID=uid)[0]
            entries.append((brain.getURL(), brain.Title, brain.Description))
        grouped.append(entries)
    self.clusters = grouped
def action_clusterize(self, action, data):
    """Cluster the indexed documents and keep catalog metadata per cluster.

    The clusterer groups document UIDs; each UID is looked up through an
    unrestricted catalog search and summarised as a (URL, title,
    description) tuple.  The resulting list of clusters is assigned to
    ``self.clusters`` for rendering.
    """
    catalog = getToolByName(self.context, 'portal_catalog')
    clusterer = KMeans()
    grouping = clusterer.clusterize(
        data['no_clusters'],
        data['no_noun_ranks'],
        repeats=data['repeats'])

    def describe(uid):
        # Unrestricted search so private content is still resolvable.
        brain = catalog.unrestrictedSearchResults(UID=uid)[0]
        return (brain.getURL(), brain.Title, brain.Description)

    self.clusters = [[describe(uid) for uid in members]
                     for members in grouping.values()]
def test_clusterer(self):
    """Train a tagger, fill the noun-phrase storage, then cluster.

    Ten 'government' and ten 'mystery' brown-corpus documents are fed
    into the noun-phrase storage after training the n-gram POS tagger
    on the same categories.  Because k-means depends on random initial
    centroids, clustering is repeated 50 times; the test passes when at
    most 1 of the 10 documents of one category lands in the wrong
    cluster.
    """
    training_sents = brown.tagged_sents(
        categories=['government', 'mystery'])
    tagger = getUtility(
        IPOSTagger,
        name="collective.classification.taggers.NgramTagger")
    tagger.train(training_sents)
    extractor = getUtility(ITermExtractor)
    extractor.setTagger(tagger)
    storage = getUtility(INounPhraseStorage)
    clusterer = KMeans()

    gov_ids = brown.fileids(categories='government')[:10]
    mys_ids = brown.fileids(categories='mystery')[:10]
    # Ingest government articles first, then mystery, matching the
    # order the ids were fetched in.
    for fileid in gov_ids + mys_ids:
        storage.addDocument(fileid, " ".join(brown.words(fileid)))

    result = clusterer.clusterize(2, 20, repeats=50)
    first_cluster = set(result[0])
    # Whichever category the first cluster represents, count how many
    # of its members do NOT belong to that category.
    misplaced = min(len(first_cluster - set(gov_ids)),
                    len(first_cluster - set(mys_ids)))
    self.failUnless(misplaced < 2)