Example #1
    def run_cluster1(self):
        '''
        This method uses language models to cluster web-comics.
        First, it creates a model for each comic series.
        The models are created using NLTK. Then, it computes the probability
        of each document under each series' model. This gives us the
        relationship matrix between documents and series.
        Next, we run the HCC algorithm to cluster these comics.
        '''
        cluster = self.saver.load_it(CLUSTER_DS)
        if cluster is None:
            self.m_rship = self.saver.load_it(RSP_MATRIX)
            if self.m_rship is None:
                self.launch_corpus_reader(self.create_models, self.models)
                self.m_rship = {}
                for s in self.models:
                    self.m_rship[s] = {}
                self.launch_corpus_reader(self.create_rship_matrix,
                                          self.m_rship)
                self.saver.save_it(self.m_rship, RSP_MATRIX)

            #Generate v_series and v_docs vectors.
            v_series = list(self.m_rship.keys())
            v_docs = list(next(iter(self.m_rship.values())).keys())

            #Do word-document clustering. Use hcc class for that.
            c = hcc(self.ds_cell, v_series, v_docs)
            cluster = c.hcc_cluster()
            self.saver.save_it(cluster, CLUSTER_DS)
        #Create a hierarchical tree using ETE
        cluster.reverse()
        t = generate_tree()
        tree = t.build_tree(cluster, 0)
        tree.show()
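
For reference, here is a minimal sketch of the series-document clustering step described in the docstring above. It assumes the relationship matrix maps each series to per-document probabilities, and it uses SciPy's agglomerative linkage in place of the project's hcc class (whose implementation is not shown); the m_rship values below are purely illustrative.

# Minimal sketch: hierarchically cluster series from a series x document
# probability matrix. SciPy's linkage() stands in for the hcc class used
# above; the toy m_rship values are illustrative only.
import numpy as np
from scipy.cluster.hierarchy import linkage

# Toy relationship matrix: series -> {document: probability under that series' model}
m_rship = {
    'series_a': {'doc1': 0.40, 'doc2': 0.10, 'doc3': 0.05},
    'series_b': {'doc1': 0.35, 'doc2': 0.15, 'doc3': 0.10},
    'series_c': {'doc1': 0.02, 'doc2': 0.60, 'doc3': 0.55},
}

v_series = list(m_rship.keys())
v_docs = list(next(iter(m_rship.values())).keys())

# Each series becomes a feature vector of document probabilities.
matrix = np.array([[m_rship[s][d] for d in v_docs] for s in v_series])

# Agglomerative (bottom-up) clustering; average linkage is one common choice.
merges = linkage(matrix, method='average')
print(merges)  # each row: the two clusters merged and the distance between them
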
Example #2
def generate_forest(bootset, m):
    '''Build one tree per bootstrap sample with gt.generate_tree and return the forest as a list.'''
    forest = []

    for i in range(len(bootset)):
        print('Generating tree for bootstrap {}...'.format(i))
        tree = gt.generate_tree(bootset[i], m)
        forest.append(tree)
    print('Forest successfully generated.')
    return forest
Example #3
def main():
    vertices, edges = generate_tree(1000, 1, 0)
    #### Input parser ####
    # V,E = map(int,input().split())
    # for _ in range(V):
    #     n,c = map(str,input().split())
    #     vertices[n]= c
    # for _ in range(E):
    #     v1,v2,c = map(str,input().split())
    #     edges[(v1,v2)] = c
    algo(vertices, edges)
Example #4
File: main.py  Project: slahmar/ai-nmqs
# Salomé Lahmar 16201438

from generate_tree import generate_tree
from node import Node
from alpha_beta_variants import alpha_beta_negamax, alpha_beta_nmq

branching = 4
height = 8
inaccuracy = 4
spread = 4
infinity = 1000

# This will generate one tree and test both algorithms on this tree at all depths from 0 to height
valueString = input('Enter top node value: ')
value = int(valueString)
root = generate_tree(branching, height, value, inaccuracy, spread)
root.print_tree(height)

for depth in range(0, height+1):
	print("---------- Depth {} -----------".format(depth))
    (nega_value, nega_static_eval) = alpha_beta_negamax(root, depth)
    print("Simple alpha beta : value found = {} with {} static evaluations".format(nega_value, nega_static_eval))
    (nmq_value, nmq_static_eval) = alpha_beta_nmq(root, depth)
    print("NMQS : value found = {} with {} static evaluations".format(nmq_value, nmq_static_eval))
Example #5
def print_to_xml(name, tree):
    generate_tree.generate_tree(name, tree)
Example #6
from load_dataset import load_dataset
from generate_tree import generate_tree
from tree_view import generate_tree_visualization

dataset = load_dataset("benchmark")

# Sample all 4 attributes
tree = generate_tree(dataset, 4)

generate_tree_visualization(tree, "benchmark.png")