# Compute SIFT descriptors for the already-detected keypoints of this image.
# desc is an (n_features, 128) array, or None when nothing could be described.
kp, desc = sift.compute(gray, kp)
if desc is not None:  # `desc != None` would do an elementwise NumPy comparison
    img_features = []
    for row in desc:
        i += 1  # running count of features across all images — TODO confirm intent
        vocab.append(row.tolist())          # global vocabulary of all descriptors
        img_features.append(row.tolist())   # descriptors belonging to this image
    raw_corpus.append(img_features)
    imp.append(i)

# Perform clustering with k clusters. This will probably need tuning.
cluster = KMeans(k, n_init=1)
cluster.fit(vocab)

# Build the clustered corpus: each entry is a string containing the cluster
# id of every SIFT feature of the corresponding image (bag-of-visual-words).
corpus = []
for entry in raw_corpus:
    corpus.append(' '.join(str(x) for x in cluster.predict(entry)))

# Turn the corpus into a feature table and normalize the values.
table = normalizefraction.normalize(setfeatures.setFeatures(corpus, k), k)

# Write the normalized table to the configured output CSV file.
with open(var.outputfile, var.option) as f:
    writer = csv.writer(f)
    writer.writerows(table)
# Detect keypoints and compute their SIFT descriptors in one pass.
# desc is an (n_features, 128) array, or None when no features were found.
kp, desc = sift.detectAndCompute(gray, None)
if desc is not None:  # `desc != None` would do an elementwise NumPy comparison
    img_features = []
    for row in desc:
        vocab.append(row.tolist())          # global vocabulary of all descriptors
        img_features.append(row.tolist())   # descriptors belonging to this image
    raw_corpus.append(img_features)

# Perform clustering with k clusters. This will probably need tuning.
cluster = KMeans(k, n_init=1)
cluster.fit(vocab)

# Build the clustered corpus: each entry is a string containing the cluster
# id of every SIFT feature of the corresponding image (bag-of-visual-words).
corpus = []
for entry in raw_corpus:
    corpus.append(' '.join(str(x) for x in cluster.predict(entry)))

# Turn the corpus into a feature table and normalize the values.
table = normalize.normalize(setfeatures.setFeatures(corpus, k), k)

# Write the normalized table to the configured output CSV file.
with open(var.outputfile, var.option) as f:
    writer = csv.writer(f)
    writer.writerows(table)