Example No. 1
    def get_elementary_cycles(self):
        """Returns all elementary cycles in a graph

        """
        tarjan = Tarjan(self.graph)
        for ssc in tarjan.ssc():
            for start_node in ssc:
                least_node = min(ssc)  # the smallest node serves as the cycle root (any total order works)
                self.find_cycles(least_node, least_node)
                # ssc changes at each iteration, since we remove the
                # least node to avoid unnecessary DFSs
                ssc = tarjan.remove_useless_edges(ssc, least_node)
        return self.cycles
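
The class that owns get_elementary_cycles (with its find_cycles helper and cycles attribute) is not shown. As a point of comparison, here is a minimal, self-contained sketch of the same idea, enumerating each elementary cycle exactly once, rooted at its smallest node, for a graph given as an adjacency dict like the ones in the tests below. It is a brute-force DFS variant rather than Johnson's blocking-set algorithm that remove_useless_edges hints at, and the function name elementary_cycles is hypothetical.

def elementary_cycles(graph):
    """Return every elementary cycle of a directed graph given as
    {node: [successors]}.  Each cycle is reported exactly once,
    starting (and ending) at its smallest node."""
    cycles = []

    def dfs(start, node, path, on_path):
        for succ in graph.get(node, []):
            if succ == start:
                cycles.append(path + [succ])  # closed a cycle back at the root
            elif succ > start and succ not in on_path:
                dfs(start, succ, path + [succ], on_path | {succ})

    # Only nodes >= start are ever visited, so a cycle is discovered
    # only from its minimal node and therefore never reported twice.
    for start in sorted(graph):
        dfs(start, start, [start], {start})
    return cycles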
Example No. 2
    def test_tarjan_example_2(self):
        # Graph from https://en.wikipedia.org/wiki/Tarjan%27s_strongly_connected_components_algorithm#/media/File:Tarjan%27s_Algorithm_Animation.gif
        example = {
            'A': ['E'],
            'B': ['A'],
            'C': ['B', 'D'],
            'D': ['C'],
            'E': ['B'],
            'F': ['B', 'E', 'G'],
            'G': ['F', 'C'],
            'H': ['G', 'H', 'D']
        }

        g = Tarjan(example)
        self.assertEqual(g.sccs,
                         [['B', 'E', 'A'], ['D', 'C'], ['G', 'F'], ['H']])
Example No. 3
    def test_tarjan_example_1(self):
        # Graph from https://en.wikipedia.org/wiki/File:Scc.png
        example = {
            'A': ['B'],
            'B': ['C', 'E', 'F'],
            'C': ['D', 'G'],
            'D': ['C', 'H'],
            'E': ['A', 'F'],
            'F': ['G'],
            'G': ['F'],
            'H': ['D', 'G']
        }

        g = Tarjan(example)
        self.assertEqual(g.sccs,
                         [['F', 'G'], ['H', 'D', 'C'], ['E', 'B', 'A']])
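
Both tests build Tarjan(example) from an adjacency dict and read g.sccs straight after construction, so the constructor is assumed to run the algorithm eagerly. A minimal recursive sketch with that interface (the project's actual class is not shown in this listing) could look like this:

class Tarjan:
    """Sketch: strongly connected components of a directed graph given
    as {node: [successors]}; the result is stored in self.sccs in the
    (reverse-topological) order Tarjan's algorithm emits them."""

    def __init__(self, graph):
        self.graph = graph
        self.sccs = []
        self._index = {}      # discovery index of each visited node
        self._lowlink = {}    # lowest index reachable from the node's subtree
        self._stack = []
        self._on_stack = set()
        self._counter = 0
        for node in graph:
            if node not in self._index:
                self._strongconnect(node)

    def _strongconnect(self, node):
        self._index[node] = self._lowlink[node] = self._counter
        self._counter += 1
        self._stack.append(node)
        self._on_stack.add(node)
        for succ in self.graph.get(node, []):
            if succ not in self._index:
                self._strongconnect(succ)
                self._lowlink[node] = min(self._lowlink[node], self._lowlink[succ])
            elif succ in self._on_stack:
                self._lowlink[node] = min(self._lowlink[node], self._index[succ])
        if self._lowlink[node] == self._index[node]:
            # node is the root of an SCC: pop the whole component off the stack
            scc = []
            while True:
                w = self._stack.pop()
                self._on_stack.discard(w)
                scc.append(w)
                if w == node:
                    break
            self.sccs.append(scc)

With insertion-ordered dicts this sketch happens to emit components in the order the assertions above expect, but both the component order and the node order inside each component are traversal-dependent, so the literal lists are specific to the implementation under test.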
Example No. 4
def arc_argmax(parse_probs, length, tokens_to_keep, ensure_tree = True):
	"""
	adapted from Timothy Dozat https://github.com/tdozat/Parser/blob/master/lib/models/nn.py
	"""
	if ensure_tree:
		I = np.eye(len(tokens_to_keep))
		# block loops and pad heads
		parse_probs = parse_probs * tokens_to_keep * (1-I)
		parse_preds = np.argmax(parse_probs, axis=1)
		tokens = np.arange(1, length)
		roots = np.where(parse_preds[tokens] == 0)[0]+1
		# ensure at least one root
		if len(roots) < 1:
			# The current root probabilities
			root_probs = parse_probs[tokens,0]
			# The current head probabilities
			old_head_probs = parse_probs[tokens, parse_preds[tokens]]
			# Get new potential root probabilities
			new_root_probs = root_probs / old_head_probs
			# Select the most probable root
			new_root = tokens[np.argmax(new_root_probs)]
			# Make the change
			parse_preds[new_root] = 0
		# ensure at most one root
		elif len(roots) > 1:
			# The probabilities of the current heads
			root_probs = parse_probs[roots,0]
			# Set the probability of depending on the root to zero
			parse_probs[roots,0] = 0
			# Get new potential heads and their probabilities
			new_heads = np.argmax(parse_probs[roots][:,tokens], axis=1)+1
			new_head_probs = parse_probs[roots, new_heads] / root_probs
			# Keep as root the token whose best alternative head is least probable
			new_root = roots[np.argmin(new_head_probs)]
			# Make the change
			parse_preds[roots] = new_heads
			parse_preds[new_root] = 0
		# remove cycles
		tarjan = Tarjan(parse_preds, tokens)
		cycles = tarjan.SCCs
		for SCC in tarjan.SCCs:
			if len(SCC) > 1:
				dependents = set()
				to_visit = set(SCC)
				while len(to_visit) > 0:
					node = to_visit.pop()
					if node not in dependents:
						dependents.add(node)
						to_visit.update(tarjan.edges[node])
				# The indices of the nodes that participate in the cycle
				cycle = np.array(list(SCC))
				# The probabilities of the current heads
				old_heads = parse_preds[cycle]
				old_head_probs = parse_probs[cycle, old_heads]
				# Set the probability of depending on a non-head to zero
				non_heads = np.array(list(dependents))
				parse_probs[np.repeat(cycle, len(non_heads)), np.repeat([non_heads], len(cycle), axis=0).flatten()] = 0
				# Get new potential heads and their probabilities
				new_heads = np.argmax(parse_probs[cycle][:,tokens], axis=1)+1
				new_head_probs = parse_probs[cycle, new_heads] / old_head_probs
				# Select the most probable change
				change = np.argmax(new_head_probs)
				changed_cycle = cycle[change]
				old_head = old_heads[change]
				new_head = new_heads[change]
				# Make the change
				parse_preds[changed_cycle] = new_head
				tarjan.edges[new_head].add(changed_cycle)
				tarjan.edges[old_head].remove(changed_cycle)
		return parse_preds
	else:
		# block and pad heads
		parse_probs = parse_probs * tokens_to_keep
		parse_preds = np.argmax(parse_probs, axis=1)
		return parse_preds
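
Note that the Tarjan used by arc_argmax has a different interface from the one in the SCC tests: it is built from the predicted head indices and the token positions, exposes SCCs, and its edges map (head position -> set of dependent positions) is mutated after each repair. The project's actual helper (from Dozat's repository) is not included in this listing; the sketch below is only an interface-compatible reconstruction inferred from that usage.

from collections import defaultdict


class Tarjan:
    """Sketch of the helper assumed by arc_argmax: build the
    head -> dependents graph from predicted heads and compute its
    strongly connected components (each SCC is a set of positions)."""

    def __init__(self, prediction, tokens):
        # prediction[i] is the predicted head of position i;
        # tokens are the non-pad positions 1..length-1.
        self.edges = defaultdict(set)
        self.vertices = {0}
        for dep, head in enumerate(prediction[tokens]):
            self.vertices.add(dep + 1)
            self.edges[head].add(dep + 1)
        self.SCCs = []
        self._index, self._lowlink = {}, {}
        self._stack, self._on_stack = [], set()
        self._counter = 0
        for v in list(self.vertices):
            if v not in self._index:
                self._strongconnect(v)

    def _strongconnect(self, v):
        self._index[v] = self._lowlink[v] = self._counter
        self._counter += 1
        self._stack.append(v)
        self._on_stack.add(v)
        for w in list(self.edges[v]):
            if w not in self._index:
                self._strongconnect(w)
                self._lowlink[v] = min(self._lowlink[v], self._lowlink[w])
            elif w in self._on_stack:
                self._lowlink[v] = min(self._lowlink[v], self._index[w])
        if self._lowlink[v] == self._index[v]:
            # v is the root of a component: pop it off the stack as a set
            scc = set()
            while True:
                w = self._stack.pop()
                self._on_stack.discard(w)
                scc.add(w)
                if w == v:
                    break
            self.SCCs.append(scc)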
Example No. 5
def arc_argmax(parse_probs, length, tokens_to_keep, ensure_tree=True):
    """
    adapted from Timothy Dozat https://github.com/tdozat/Parser/blob/master/lib/models/nn.py
    """
    if ensure_tree:
        I = np.eye(len(tokens_to_keep))
        # block loops and pad heads
        # arc_masks = left_arc_mask(length)
        # parse_probs = parse_probs * arc_masks
        # parse_probs = np.reshape((parse_probs), (len(tokens_to_keep), len(tokens_to_keep)))
        # tmp = parse_probs[-1:, :]
        # parse_probs = np.concatenate((parse_probs[:-1, -1:], parse_probs[:-1, :-1]), axis=1)
        # parse_probs = np.concatenate((tmp, parse_probs))

        parse_probs = parse_probs * tokens_to_keep * (1 - I)
        parse_preds = np.argmax(parse_probs, axis=1)
        tokens = np.arange(1, length)  #original
        # tokens = np.arange(length) #modified
        # root_idx = len(tokens_to_keep) - 1
        root_idx = 0

        roots = np.where(parse_preds[tokens] == root_idx)[0] + 1  #original
        # roots = np.where(parse_preds[tokens] == 0)[0] #modified
        # ensure at least one root
        if len(roots) < 1:
            # global root_0
            # root_0 += 1

            # The current root probabilities
            root_probs = parse_probs[tokens, root_idx]
            # The current head probabilities
            old_head_probs = parse_probs[tokens, parse_preds[tokens]]
            # Get new potential root probabilities
            new_root_probs = root_probs / old_head_probs
            # Select the most probable root
            new_root = tokens[np.argmax(new_root_probs)]
            # Make the change
            parse_preds[new_root] = root_idx
        # ensure at most one root
        elif len(roots) > 1:
            # global root_more_than_1
            # root_more_than_1 += 1

            # The probabilities of the current heads
            root_probs = parse_probs[roots, root_idx]
            # Set the probability of depending on the root to zero
            parse_probs[roots, root_idx] = 0
            # Get new potential heads and their probabilities
            new_heads = np.argmax(parse_probs[roots][:, tokens],
                                  axis=1) + 1  # original line
            # new_heads = np.argmax(parse_probs[roots][:, tokens], axis=1) # modified line
            new_head_probs = parse_probs[roots, new_heads] / root_probs
            # Keep as root the token whose best alternative head is least probable
            new_root = roots[np.argmin(new_head_probs)]
            # Make the change
            parse_preds[roots] = new_heads
            parse_preds[new_root] = root_idx
        # remove cycles
        tarjan = Tarjan(parse_preds, tokens)
        cycles = tarjan.SCCs
        for SCC in tarjan.SCCs:
            # global circle_count
            # circle_count += 1

            if len(SCC) > 1:
                dependents = set()
                to_visit = set(SCC)
                while len(to_visit) > 0:
                    node = to_visit.pop()
                    if node not in dependents:
                        dependents.add(node)
                        to_visit.update(tarjan.edges[node])
                # The indices of the nodes that participate in the cycle
                cycle = np.array(list(SCC))
                # The probabilities of the current heads
                old_heads = parse_preds[cycle]
                old_head_probs = parse_probs[cycle, old_heads]
                # Set the probability of depending on a non-head to zero
                non_heads = np.array(list(dependents))
                parse_probs[
                    np.repeat(cycle, len(non_heads)),
                    np.repeat([non_heads], len(cycle), axis=0).flatten()] = 0
                # Get new potential heads and their probabilities
                new_heads = np.argmax(parse_probs[cycle][:, tokens],
                                      axis=1) + 1  #original
                # new_heads = np.argmax(parse_probs[cycle][:, tokens], axis=1) #modified
                new_head_probs = parse_probs[cycle, new_heads] / old_head_probs
                # Select the most probable change
                change = np.argmax(new_head_probs)
                changed_cycle = cycle[change]
                old_head = old_heads[change]
                new_head = new_heads[change]
                # Make the change
                parse_preds[changed_cycle] = new_head
                tarjan.edges[new_head].add(changed_cycle)
                tarjan.edges[old_head].remove(changed_cycle)
        return parse_preds
    else:
        # block and pad heads
        parse_probs = parse_probs * tokens_to_keep
        parse_preds = np.argmax(parse_probs, axis=1)
        return parse_preds
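
For reference, a hypothetical call that makes the expected shapes concrete: parse_probs is an (n, n) matrix of head probabilities with position 0 acting as ROOT, and tokens_to_keep is a length-n mask over positions (all ones here, i.e. no padding; the exact shape convention in the original code base may differ). It assumes a Tarjan helper like the sketch above is available next to arc_argmax.

import numpy as np

n = 5                                   # ROOT + 4 real tokens
rng = np.random.default_rng(0)
parse_probs = rng.random((n, n))
parse_probs /= parse_probs.sum(axis=1, keepdims=True)   # row-normalise
tokens_to_keep = np.ones(n)             # no padding in this toy example

heads = arc_argmax(parse_probs, n, tokens_to_keep, ensure_tree=True)
# heads[i] is the chosen head of position i; exactly one real token is
# attached to ROOT (head index 0) and there are no cycles among the
# real tokens.
print(heads)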