Example #1
    def close_answers(self):
        q_size = reduce(lambda c, user : c + len(user.questions),
            list(self.users.values()), 0)
        if(q_size > 0):
            self.collect_answers()
        else:
            questions = list(filter(lambda n : n.state == QuestionState.OPEN,
                [node for node in PostOrderIter(self.root)]))
            if len(questions) > 0:
                self.open_questions(questions)
            else:
                questions = list(filter(lambda n :
                    n.state == QuestionState.PROMPTED,
                    [node for node in PostOrderIter(self.root)]))
                self.hanging_prompts(questions)

            self.print_root()
            #self.print_users()

            p_size = reduce(lambda c, user : c + len(user.prompts),
                list(self.users.values()), 0)
            q_size = reduce(lambda c, user : c + len(user.questions),
                list(self.users.values()), 0)
            if(p_size > 0):
                self.collect_prompts()
            elif(q_size > 0):
                self.collect_answers()
            else:
                self.send_done()
                print("DONE!!!")
                self.root.state = QuestionState.CLOSED
                self.print_root()
                time.sleep(self.answer_time)
Example #2
    def add_voxelized_points_from_csv(self, voxels_file):
        """ add voxelized point data. Rather that entering points lists, this is for data where the number of points have already been counted for each voxel

        Arguments:
            voxels_file (str): csv file where first column is the voxel coordinate as '(x,y,z)' with header 'voxel'.
            each subsequent column contains point counts within that voxel with each column as a seperate data group.
        """
        timer = Timer()

        df = pd.read_csv(voxels_file, index_col='voxel')
        df = df.loc[(df != 0).any(axis=1)]  # ignore voxels with only 0 values

        total_vox = df.shape[0]
        first_group_index = len(self.tree_root.nPoints)
        s = slice(first_group_index, None)

        for region in PostOrderIter(self.tree_root):
            region.add_empty_voxel_groups(ngroups = df.shape[1], index = s)

        i = 0
        for idx,counts in df.iterrows():
            if i % 10000 == 0:
                log.verbose(f'adding voxel {i} of {total_vox}')
            idx = literal_eval(idx)
            counts = list(counts)
            self.get_region_by_voxel(idx).add_voxel_groups(idx, counts, index = s)
            i += 1

        if self.COLLAPSE:
            for region in PostOrderIter(self.tree_root):
                region.collapse_nPoints() #TODO: will not properly collapse because no index passed

        self.calc_point_densities()
        timer.log_elapsed(prefix='Added voxelized points')
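For reference, a minimal sketch of the CSV layout this method expects, using hypothetical file and group names:

# Hypothetical sketch of the expected CSV: a 'voxel' index column holding
# '(x,y,z)' coordinate strings, then one point-count column per data group.
import pandas as pd

df = pd.DataFrame({
    "voxel": ["(0,0,0)", "(0,0,1)", "(1,2,3)"],
    "group_a": [3, 0, 7],
    "group_b": [1, 4, 0],
}).set_index("voxel")
df.to_csv("voxels.csv")  # file name is illustrative only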
Example #3
    def __analyze(self):
        func = inspect.currentframe().f_back.f_code
        # Remove branches
        for node in PostOrderIter(self.__root):
            children = node.children
            if len(children) > 1:
                leafs = []
                for find_leaf in PostOrderIter(self.__root):
                    if find_leaf.is_leaf and find_leaf.hash in [
                            n.hash for n in node.descendants
                    ]:
                        leafs.append(find_leaf)

                self.__remove_branches(node, leafs)
        # Store Verified
        starting = 0
        end = None
        for node in LevelOrderIter(self.__root):
            if len(node.children) > 1:
                break
            end = node
            starting += 1
        if starting > 7:
            to_store = []
            hinder = 3
            current = end
            done = False
            save_for_root = None
            while not done:

                if current.parent.hash == self.__root.hash:
                    current.parent = None
                    done = True
                else:
                    temp = current
                    current = current.parent

                    if hinder > 0:
                        if hinder == 1:
                            save_for_root = current
                        hinder -= 1
                    else:
                        to_store.append(current)
                        temp.parent = None

            to_store.append(self.__root)
            self.__root = save_for_root
            for i in range(len(to_store) - 1, -1, -1):
                logging.debug("Adding to store")
                self.__stored.append(to_store[i])
Example #4
def createData(dataTime):
	for node in PostOrderIter(treeTopNode):
		if(node.is_leaf):
			# Gauge 100,200
			if(hasattr(node,'formulae')):
				#print node.formulae
				splits = node.formulae.split("#")
				function = splits[0]
				argSplits = [int(arg, 10) for arg in splits[1].split(",")]
				data = 0
				if(not hasattr(node,'data')):
					node.data = 0
				if(function == "Gauge"):
					data = getGaugeValue(node.data,argSplits)
					node.data = data
				if(function == 'Counter'):
					data = getCounterValue(node.data, argSplits)
					node.data = data
				if(function=='Random'):
					data=getRandomValue(argSplits)
					node.data=data
				print (node.path)
				print (node.data)
				pointName = ""
				for ancestor in node.path:
					pointName = pointName + "/" + ancestor.name
				print(pointName + " " + str(dataTime) + " "+ str(node.data))	
				outputfile.write(pointName + " " + str(dataTime) + " "+ str(node.data)+"\n")
Example #5
def classify_with_word_vectors(email, model, stopwords):
    words = get_abstract_words(email)
    uncommon_words = list(filter(lambda word: word not in stopwords, words))

    topic = None
    top_similarity = 0.0

    for node in PostOrderIter(tree.subjects):
        total_similarity = 0.0

        for word in uncommon_words:
            similarity = 0.0

            try:
                similarity = model.similarity(word, node.name)
            except KeyError:
                pass

            total_similarity += similarity

        similarity = total_similarity / len(words)

        if similarity > top_similarity:
            topic = node
            top_similarity = similarity

    return topic
Example #6
    def _populate_region_coordiantes(self, labels):
        """ adds coordinate info to regions under region.voxels. Will also generate self.volume.

        Arguments:
            labels (array or str): labeled image with values corresponding to region id
        """
        image = io.readData(labels, returnMemmap = False).astype(int) # to facilitate pooling and speed up
        log.verbose(f'calculating region info from {labels}')
        properties = region_props(image)

        # add voxels
        for i,prop in enumerate(properties):
            log.verbose(f'retrieving coordinates and densities for region {i}')
            region = self.get_region_by_id(prop.label)
            region.volume = int(prop.area())
            for c in prop.coords():
                c = tuple(c)
                region.add_voxel(c)
                self.regions_by_coord[c] = region

        # label value 0 is ignored by region_props
        self.get_region_by_id(0).add_volume(int(np.sum(image == 0)))

        if self.COLLAPSE:
            for region in PostOrderIter(self.tree_root):
                region.collapse_volume()
Example #7
 def closest_node_in_tree( self, read ):
     """ Finds the closest node in the tree for the given read.
     
     Node is the closest according to metrics that is induced with Hamming
     distance.
     This method consults informations about unknown reads (that are stored 
     in bitarry unknown_read) within read element. 
     """
     len_r = read.binary_read.length
     num_unc_r = read.unknown_read.count(True) 
     weight = float(len_r-num_unc_r)/float(len_r)
     closest = self
     closest_bit_array = self.binary_tag ^ read.binary_read 
     mask = read.unknown_read.copy()
     mask.invert()
     # print( "mask\t", mask )
     closest_bit_array = closest_bit_array & mask
     closest_distance = closest_bit_array.count(True)*weight
     for node in PostOrderIter(self):
         current_bit_array = node.binary_tag ^ read.binary_read
         mask = read.unknown_read.copy()
         mask.invert() 
         current_bit_array &= mask
         current_distance = current_bit_array.count(True)*weight
         if( current_distance < closest_distance):
             closest = node
             closest_bit_array = current_bit_array
             closest_distance = current_distance
     return (closest, closest_distance)
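Below is a small sketch of the masked, weighted Hamming distance computed above, with made-up read data; the bitarray package stands in for the bit-array type used by the original code.

# Unknown positions are masked out of the XOR, and the mismatch count is
# rescaled by the fraction of known positions.
from bitarray import bitarray

read = bitarray('10110')
tag = bitarray('11010')
unknown = bitarray('00001')   # last position of the read is unknown

mask = unknown.copy()
mask.invert()
diff = (tag ^ read) & mask
weight = (len(read) - unknown.count(True)) / len(read)
print(diff.count(True) * weight)   # 2 mismatching known bits * 4/5 = 1.6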
Example #8
def exp_comb_gen(single_exp_list,
                 complexity,
                 form='D',
                 allow_not=False,
                 shuffle=True):
    def _combine(acc, el):
        node = Node(OPS[2] if form == 'D' else OPS[1])
        acc.parent = node
        el.parent = node
        return node

    # transform in list of expression trees
    single_exp_trees = [list2exp_parse(exp_lst) for exp_lst in single_exp_list]

    # combine according to complexity
    # for terms in itertools.combinations(single_exp_trees, complexity):
    exp_ite = dbac_util.CycleIterator(single_exp_trees, shuffle=shuffle)
    while True:
        terms = [copy.deepcopy(next(exp_ite)) for _ in range(complexity)]
        exp = functools.reduce(_combine, terms)
        if allow_not:
            sampled_leaves = [
                node for node in PostOrderIter(exp) if node.is_leaf
                and node.name is not None and np.random.rand() > 0.5
            ]
            for node in sampled_leaves:
                not_op = Node(OPS[0], parent=node.parent)
                node.parent = not_op
                none_node = Node(None, parent=not_op)
        yield exp
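As a side note, here is a minimal sketch of how functools.reduce folds a list of subtrees into one combination tree, as _combine does above; "AND" is a placeholder for the OPS entries used in the original.

import functools
from anytree import Node, RenderTree

def _combine(acc, el):
    node = Node("AND")   # placeholder operator
    acc.parent = node
    el.parent = node
    return node

exp = functools.reduce(_combine, [Node("a"), Node("b"), Node("c")])
for pre, _, n in RenderTree(exp):
    print(pre + n.name)   # prints a left-leaning expression tree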
Example #9
def calc_tree(root):

    for node in PostOrderIter(root):
        config = node.config
        configs_child = [node_child.config for node_child in node.children]
        context = Context(config)
        context.pipeline(config, configs_child)
Example #10
    def _make_decision(self):

        for node in PostOrderIter(self.root):

            if node.parent is None:
                continue

            assignment = node.assignment

            node_result = 0

            # node is a leaf
            if len(node.children) == 0 and node.assignment.result != 0.0:
                if assignment.is_winning_move:
                    node_result = 1 if assignment.player_mark == COMPUTER_PLAYER_MARK else -1
            # node is not a leaf
            elif len(node.children) > 0:
                children_results = []
                # Helper.print_tree(self.root)
                for child in node.children:
                    children_results.append(child.assignment.result)

                node_result = reduce(lambda a, b: a + b,
                                     children_results) / len(node.children)

            if node_result != 0.0:
                assignment.result = node_result

        return self.print_children_results_and_get_max()
Example #11
    def getExprValue(self, exprNode, expectedType=None):
        operation = ""
        operators = ['+','-','*','%',"(",")"]
        for node in PostOrderIter(exprNode):
            #print(type(node.name) == list)
            if type(node.name) == list:                                
                if node.name[0] == "ID":
                    if self.LookupType(node.name[1]) != expectedType:
                        raise Exception("Invalid type found for", expectedType, "operation")
                    if node.parent.name == "method_call":
                        # Implement a method to retrieve the value of a method call
                        pass
                    else:
                        operation += str(self.Lookup(node.name[1])[1])
                    #operation += super(SemanticRules, self).Lookup(node.name[1])[1]
                else:
                    #print(node.name)
                    if expectedType == "int":
                        if node.name[1].isnumeric() or node.name[1] in operators:
                            operation += node.name[1]
                        else:
                            raise Exception("Invalid type found for <", expectedType, "> operation")
                    elif expectedType == "boolean":
                        if not node.name[1].isnumeric():
                            operation += node.name[1]
                        else:
                            raise Exception("Invalid type found for <", expectedType, "> operation")
        return operation
Example #12
def close_answers():
    q_size = reduce(lambda c, user: c + len(user.questions), users, 0)
    if (q_size > 0):
        collect_answers()
    else:
        questions = list(
            filter(lambda n: n.state == QuestionState.OPEN,
                   [node for node in PostOrderIter(root)]))
        for question in questions:
            if not question.check_consensus():
                print("no consensus %s" % question.name)
                question.state = QuestionState.PROMPTED
                for ans in question.answers:
                    create_prompt(question, ans)
            else:
                print("omg consensus %s" % question.name)
                question.state = QuestionState.CLOSED
                sibling_consensus(question)
        print_root()
        print_users()

        q_size = reduce(
            lambda c, user: c + len(user.prompts) + len(user.questions), users,
            0)
        if (q_size > 0):
            collect_prompts()
        else:
            print("DONE!!!")
            print_root()
            global test_input
            print(test_input)
Example #13
    def tree_remove_incorrect_minus_nodes(self):
        """ Function for removing incorect minus nodes within contained subtree.
        
        Returns:
            Number of removed nodes.
         
        Note
            Minus node is incorrect if there is no relevant plus nodes up to 
            the root.

        """
        number_of_removed = 0
        for node in PostOrderIter(self):
            if( node.node_label[-1] == '-'):
                node_OK = False
                anc_node = node.parent
                while anc_node is not None:
                    if( node.node_label[:-1] == anc_node.node_label[:-1] 
                    and anc_node.node_label[:-1] ):
                        node_OK = True
                        break
                    if( anc_node.parent is None):
                        break
                    anc_node = anc_node.parent
                if(not node_OK):
                    for child in node.children:
                        child.parent = node.parent
                    node.parent = None
                    number_of_removed += 1
        return number_of_removed
Example #14
    def build_linkage(self):
        # get a tuple of node at each level
        levels = []
        for group in LevelOrderGroupIter(self):
            levels.append(group)

        # just find how many nodes are leaves
        # this is necessary only because we need to add n to non-leaf clusters
        num_leaves = 0
        for node in PostOrderIter(self):
            if not node.children:
                num_leaves += 1

        link_count = 0
        node_index = 0
        linkages = []
        labels = []

        for g, group in enumerate(
                levels[::-1][:-1]):  # reversed and skip the last
            for i in range(len(group) // 2):
                # get partner nodes
                left_node = group[2 * i]
                right_node = group[2 * i + 1]
                # just double check that these are always partners
                assert leftsibling(right_node) == left_node

                # check if leaves, need to add some new fields to track for linkage
                if not left_node.children:
                    left_node._ind = node_index
                    left_node._n_clusters = 1
                    node_index += 1
                    labels.append(left_node.name)

                if not right_node.children:
                    right_node._ind = node_index
                    right_node._n_clusters = 1
                    node_index += 1
                    labels.append(right_node.name)

                # find the parent, count samples
                parent_node = left_node.parent
                n_clusters = left_node._n_clusters + right_node._n_clusters
                parent_node._n_clusters = n_clusters

                # assign an ind to this cluster for the dendrogram
                parent_node._ind = link_count + num_leaves
                link_count += 1

                distance = g + 1  # equal height for all links

                # add a row to the linkage matrix
                linkages.append(
                    [left_node._ind, right_node._ind, distance, n_clusters])

        labels = np.array(labels)
        linkages = np.array(linkages,
                            dtype=np.double)  # needs to be a double for scipy
        return (linkages, labels)
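A hedged usage note: the rows assembled above follow SciPy's linkage format (left id, right id, distance, observation count), so the result can be handed to scipy.cluster.hierarchy.dendrogram. The matrix below is a hand-made stand-in for such a return value.

import numpy as np
from scipy.cluster.hierarchy import dendrogram

# Hand-made linkage for four leaves (ids 0-3); merged clusters get ids 4, 5, 6.
linkages = np.array([[0., 1., 1., 2.],
                     [2., 3., 1., 2.],
                     [4., 5., 2., 4.]], dtype=np.double)
labels = np.array(["a", "b", "c", "d"])
dendrogram(linkages, labels=labels, no_plot=True)  # validates the format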
Example #15
def cal_bags(tree):
    for node in PostOrderIter(tree):
        if len(node.children) == 0:
            node.sum_weights = node.weight
        else:
            for i in range(len(node.children)):
                node.sum_weights += node.children[i].sum_weights
            node.sum_weights = node.sum_weights * node.weight + node.weight
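A tiny worked example, assuming each node carries weight and sum_weights attributes as above: leaves keep their own weight, and each parent sums its children before applying the multiplicative rule.

from anytree import Node

root = Node("root", weight=2, sum_weights=0)
a = Node("a", parent=root, weight=3, sum_weights=0)
b = Node("b", parent=root, weight=4, sum_weights=0)

cal_bags(root)
print(root.sum_weights)  # (3 + 4) * 2 + 2 = 16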
Example #16
def search_keywords(keyword):
    check = False
    for node in PostOrderIter(parent):
        if re.match(keyword, node.name, re.IGNORECASE):
            print("Yes, found keyword ====>> " + node.name)
            print(node.anchestors)
            check = True
    if not check and " " in keyword:
        list_of_words = get_keyword_combinations(keyword)
        for node in PostOrderIter(parent):
            for key in list_of_words:
                #print("key = " + key + "node = " + node.name)
                if re.match(key, node.name, re.IGNORECASE):
                    print("got it.......from else  === > " + node.name)
                    print(key + "=======>>")
                    print(node.anchestors)
                    check = True
Example #17
 def __init__(self, userId):
     self.userId = userId
     self.node = search.find(ROOT, lambda node: node.name == userId)
     self.group = [node.name
                   for node in PostOrderIter(self.node)]
     self.child = [node.name
                   for node in self.node.children]
     self.alliance = getAlliance(self.node.children)
     self.status = [2]
Example #18
 def tree_get_labels_contains(self):
     """ Obtain labels of all nodes that are within this tree.
     
     Returns:
         set: Set of labels of nodes within this tree.
     """
     ret = set()
     for node in PostOrderIter(self):
         ret.add(node.node_label)
     return ret
Example #19
    def del_attribute(self, attr):
        """ deletes an attribute from all regions

        Arguments:
            attr (str): attribute to delete
        """

        delattr(self.backround, attr)
        for i in PostOrderIter(self.tree_root):
            delattr(i, attr)
Example #20
def test_postorder():
    """PostOrderIter."""
    f = Node("f")
    b = Node("b", parent=f)
    a = Node("a", parent=b)
    d = Node("d", parent=b)
    c = Node("c", parent=d)
    e = Node("e", parent=d)
    g = Node("g", parent=f)
    i = Node("i", parent=g)
    h = Node("h", parent=i)

    eq_(list(PostOrderIter(f)), [a, c, e, d, b, h, i, g, f])
    eq_(list(PostOrderIter(f, maxlevel=0)), [])
    eq_(list(PostOrderIter(f, maxlevel=3)), [a, d, b, i, g, f])
    eq_(list(PostOrderIter(f, filter_=lambda n: n.name not in ('e', 'g'))),
        [a, c, d, b, h, i, f])
    eq_(list(PostOrderIter(f, stop=lambda n: n.name == 'd')),
        [a, b, h, i, g, f])
Example #21
    def add_attr_across_tree(self, attribute, value):
        """ adds an attribute to all Regions

        Arguments:
            attribute (str): name of attribute to add
            value: value to give to attribute
        """

        for region in PostOrderIter(self):
            setattr(region, attribute, value)
Example #22
 def drop_empty_groups(self, logger=None):
     """Drop groups without wells in descendants."""
     for node in PostOrderIter(self.root):
         if not node.is_group:
             continue
         if not node.children:
             self.drop(node.name)
             if logger is not None:
                 logger.info(f'Group {node.name} is empty and is removed.')
     return self
Example #23
    def filter_regions(self, filt):
        """ filters out regions based on their attributes

        Arguments:
            filt (function or lambda): function to apply to each node. Should return True to keep node
        """
        for region in PostOrderIter(self.tree_root):
            if filt(region) is not True:
                siblings = list(region.parent.children)
                siblings.pop(siblings.index(region))
                region.parent.children = siblings
Example #24
 def tree_initialize(self, labels, size):
     """ Function for initialization od the tree.
     
     Args:
         labels (list): Parameter `labels`represents the list of the labels
             that are given to nodes with sufix '+' or '-'.
         size (:int): Parameter `size` represents nuber of the nodes in the 
             tree.
     """
     current_tree_size = 1
     probability_of_node_creation = 0.9
     for i in range( 2 * size ):
            if( random.random() < probability_of_node_creation):
                # create new leaf node
                label_to_insert = random.choice( labels ) + '+'
                leaf_bit_array = BitArray()
                leaf = EaNode(label_to_insert, leaf_bit_array)
                # find the parent of the leaf node
                position = random.randint(0, current_tree_size)
                if( position == 0):
                    parent_of_leaf = self
                else:
                    j = 1
                    for node in PostOrderIter(self):
                        if( j== position):
                            parent_of_leaf = node
                            break
                        else:
                            j += 1    
                # attach leaf node
                parent_of_leaf.attach_child(leaf)
                # reverse node label, if necessary
                node = leaf.parent
                while( node.parent != None):
                    if( leaf.node_label == node.node_label):
                        leaf.flip_node_label()
                        break
                    if( leaf.node_label[:-1] == node.node_label[:-1]):
                        break
                    node = node.parent
                current_tree_size += 1 
                # delete leaf if its label is a duplicate within its siblings
                for node in leaf.parent.children:
                    if( node.node_label == leaf.node_label and node != leaf):
                        leaf.parent = None;
                        current_tree_size -= 1
                        break
            if( i > size ):
                 probability_of_node_creation *= 0.7
     self.tree_compact_vertical()
     self.tree_compact_horizontal()
     self.tree_rearange_by_label()
     self.tree_set_binary_tags(labels)
     return
Example #25
 def find_byid(self, id: int):
     """Returns a reference to node with specified id"""
     node_ref = [
         node
         for node in PostOrderIter(self.root, filter_=lambda n: n.id == id)
     ]
     if len(node_ref) > 0:
         return node_ref[0]
     else:
         raise Exception("Node with id " + str(id) +
                         " does not exist in the TTree")
Example #26
    def collapse(self, node_id) -> Error:
        current_node, error = self._node_by_id(node_id)

        if error:
            return error

        for node in PostOrderIter(current_node):
            if node is not current_node:
                node.parent = None

        return Error(None)
Example #27
def update_children(prnt, c_num):
    for c in prnt.children:
        if c.c_num != c_num:
            dist = min(sim_matrix[tval][prnt.c_num - 1][c.c_num - 1],
                       sim_matrix[tval][prnt.c_num - 1][c_num - 1])
            sim_matrix[tval][c.c_num - 1][c_num - 1] = dist
            sim_matrix[tval][c_num - 1][c.c_num - 1] = dist
            #update the value for all its children
            for node in PostOrderIter(c):
                sim_matrix[tval][c_num - 1][node.c_num - 1] = dist
                sim_matrix[tval][node.c_num - 1][c_num - 1] = dist
Example #28
 def calc_point_densities(self):
     """ calculate density of points by region and save them in Region.points_density"""
     log.verbose('calculating points density')
     groups = list(range(len(self.tree_root.nPoints)))
     for region in PostOrderIter(self.tree_root):
         region.points_density = []
         for gp in groups:
             if region.volume == 0:
                 region.points_density.append(0)
             else:
                 region.points_density.append(region.nPoints[gp] / region.volume)
Example #29
def get_failed_set(beam_hash, decoding_step, tree_obj, batch_size,
                   hash_gold_levelorder):
    failed_set = []
    failed_list = []
    node_list = []
    for b in range(batch_size):
        node_list.append(node_util.print_tree(tree_obj[b]))
        node_dict = {node.hash: node for node in PostOrderIter(tree_obj[b])}
        batch_set = (set(hash_gold_levelorder[b][decoding_step + 1].tolist()) -
                     set(beam_hash[b].tolist())) - {-1}
        failed_list.append([node_dict[set_el] for set_el in batch_set])
        failed_set.extend([node_dict[set_el] for set_el in batch_set])
    return failed_list, node_list, failed_set
Example #30
def recompute_wsim(source_tree,
                   target_tree,
                   sims,
                   w_struct=0.6,
                   th_accept=0.14):
    s_post_order = [node for node in PostOrderIter(source_tree)]
    t_post_order = [node for node in PostOrderIter(target_tree)]

    for s in s_post_order:
        s_name = s.name.long_name

        if type(s.name) is not SchemaElement:
            continue

        for t in t_post_order:
            t_name = t.name.long_name

            if type(t.name) is not SchemaElement:
                continue

            # if the nodes are on the same level and are not leaves
            if s.height == t.height and (s.height > 0 and t.height > 0):
                ssim = compute_ssim(s, t, sims, th_accept)

                if math.isnan(ssim):
                    continue

                if (s_name, t_name) not in sims:
                    lsim = compute_lsim(s.name, t.name)
                else:
                    lsim = sims[(s_name, t_name)]['lsim']

                wsim = compute_weighted_similarity(ssim, lsim, w_struct)
                sims[(s_name, t_name)] = {
                    'ssim': ssim,
                    'lsim': lsim,
                    'wsim': wsim
                }
    return sims
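For orientation, a hedged sketch of the weighted combination assumed above (the real compute_weighted_similarity may differ): a convex mix of structural and linguistic similarity controlled by w_struct.

def compute_weighted_similarity(ssim, lsim, w_struct=0.6):
    # Convex combination: w_struct weights structure, the rest weights labels.
    return w_struct * ssim + (1.0 - w_struct) * lsim

print(compute_weighted_similarity(0.5, 0.8))  # 0.6*0.5 + 0.4*0.8 = 0.62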