Esempio n. 1
0
def encode_data(model, data_loader, log_step=10, logging=print, vocab=None, stage='dev'):
    """Encode all images and captions loadable by `data_loader`.

    Args:
        model: model exposing `val_start()`, `forward_emb()` and a `logger`
            attribute.
        data_loader: yields (images, captions, lengths, ids) batches.
        log_step: emit a timing line every `log_step` batches.
        logging: logging callable (default: print).
        vocab: vocabulary used to render sampled parse trees.
        stage: 'dev' logs a few sample trees once; 'test' logs the trees of
            every batch.

    Returns:
        (img_embs, cap_embs): numpy arrays covering the whole dataset,
        indexed by caption id.
    """
    batch_time = AverageMeter()
    val_logger = LogCollector()
    # switch to evaluate mode
    model.val_start()

    end = time.time()

    # numpy arrays to keep all the embeddings (allocated lazily on first batch)
    img_embs = None
    cap_embs = None
    logged = False
    for i, (images, captions, lengths, ids) in enumerate(data_loader):
        # make sure val logger is used
        model.logger = val_logger
        lengths = torch.Tensor(lengths).long()
        if torch.cuda.is_available():
            lengths = lengths.cuda()

        # compute the embeddings
        model_output = model.forward_emb(images, captions, lengths, volatile=True)
        img_emb, cap_span_features, left_span_features, right_span_features, word_embs, tree_indices, all_probs, \
        span_bounds = model_output[:8]

        # output sampled trees
        if (not logged) or (stage == 'test'):
            logged = True
            if stage == 'dev':
                sample_num = 5
            else:
                # BUGFIX: sample_num was left undefined for stage == 'test',
                # raising UnboundLocalError; log every tree in the batch.
                sample_num = len(ids)
            for j in range(sample_num):
                logging(generate_tree(captions, tree_indices, j, vocab))

        # the span covering a whole caption of length l sits at index l - 2
        # (renamed the inner index to `k` so it no longer shadows batch index i)
        cap_emb = torch.cat(
            [cap_span_features[l - 2][k].reshape(1, -1) for k, l in enumerate(lengths)],
            dim=0)

        # initialize the numpy arrays given the size of the embeddings
        if img_embs is None:
            img_embs = np.zeros((len(data_loader.dataset), img_emb.size(1)))
            cap_embs = np.zeros((len(data_loader.dataset), cap_emb.size(1)))

        ids = list(ids)
        img_embs[ids] = img_emb.data.cpu().numpy().copy()
        cap_embs[ids] = cap_emb.data.cpu().numpy().copy()

        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()

        if i % log_step == 0:
            logging('Test: [{0}/{1}]\t'
                    '{e_log}\t'
                    'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                    .format(
                        i, len(data_loader), batch_time=batch_time,
                        e_log=str(model.logger)))
        del images, captions

    return img_embs, cap_embs
Esempio n. 2
0
def run():
    """Run the configured benchmarks, optionally regenerating the test tree.

    Reads all settings from the module-level ``config`` dict and prints
    progress/results to stdout.
    """
    if config['generate_tree']:
        # drop existing database and recreate
        postgres_create_db(config['postgres_db'], DBNAME)

        connection = psycopg2.connect(config['benchmark_db'])
        try:
            tree = Tree(connection)
            with tree() as transaction:
                transaction.install()
                # create tree with test data
                generate_tree(transaction, config['levels'], config['per_level'])
        finally:
            # BUGFIX: this connection previously leaked.
            connection.close()

    connection = psycopg2.connect(config['benchmark_db'])
    try:
        tree = Tree(connection)
        with tree() as transaction:
            postgres_analyze_db(transaction.cursor)

            # build a list of benchmarks to run
            benchmarks = create_benchmarks(transaction, config)
            filter_benchmarks = config['filter_benchmarks']
            benchmarks_to_run = [
                b for b in benchmarks
                if not filter_benchmarks or filter_benchmarks in b.name
            ]

            print()

            if benchmarks_to_run:
                print("Running benchmarks..")
                for benchmark in benchmarks_to_run:
                    print(benchmark.name.ljust(30), end="")
                    sys.stdout.flush()
                    duration = benchmark.run(transaction)
                    print(format_duration(duration))
            else:
                print("No benchmarks to run")
    finally:
        # BUGFIX: close the benchmark connection as well.
        connection.close()
Esempio n. 3
0
def run():
    """Optionally rebuild the benchmark tree, then execute the benchmarks.

    All parameters come from the module-level ``config`` dict; results are
    printed to stdout.
    """
    if config['generate_tree']:
        # drop existing database and recreate
        postgres_create_db(config['postgres_db'], DBNAME)

        # BUGFIX: connections were previously never closed; use try/finally
        # so they are released even if tree generation fails.
        connection = psycopg2.connect(config['benchmark_db'])
        try:
            tree = Tree(connection)
            with tree() as transaction:
                transaction.install()
                # create tree with test data
                generate_tree(transaction, config['levels'], config['per_level'])
        finally:
            connection.close()

    connection = psycopg2.connect(config['benchmark_db'])
    try:
        tree = Tree(connection)
        with tree() as transaction:
            postgres_analyze_db(transaction.cursor)

            # build a list of benchmarks to run, applying the name filter
            benchmarks = create_benchmarks(transaction, config)
            filter_benchmarks = config['filter_benchmarks']
            benchmarks_to_run = [
                b for b in benchmarks
                if not filter_benchmarks or filter_benchmarks in b.name
            ]

            print()

            if benchmarks_to_run:
                print("Running benchmarks..")
                for benchmark in benchmarks_to_run:
                    print(benchmark.name.ljust(30), end="")
                    sys.stdout.flush()
                    duration = benchmark.run(transaction)
                    print(format_duration(duration))
            else:
                print("No benchmarks to run")
    finally:
        connection.close()
Esempio n. 4
0
def test_trees(model_path):
    """Use the trained model to generate parse trees for the test split.

    Args:
        model_path: path to a checkpoint file containing 'opt' and 'model'.

    Returns:
        (trees, ground_truth): predicted tree strings ordered by caption id,
        and the ground-truth tree strings read from disk.
    """
    # load model and options
    checkpoint = torch.load(model_path, map_location='cpu')
    opt = checkpoint['opt']

    # load vocabulary used by the model
    # BUGFIX: the file handle previously leaked; close it deterministically.
    with open(os.path.join(opt.data_path, 'vocab.pkl'), 'rb') as f:
        vocab = pickle.load(f)
    opt.vocab_size = len(vocab)

    # construct model
    model = VGNSL(opt)

    # load model state
    model.load_state_dict(checkpoint['model'])

    print('Loading dataset')
    data_loader = get_eval_loader(
        opt.data_path, 'test', vocab, opt.batch_size, opt.workers,
        load_img=False, img_dim=opt.img_dim
    )

    trees = list()
    for i, (images, captions, lengths, ids) in enumerate(data_loader):
        # make sure val logger is used
        model.logger = print
        lengths = torch.Tensor(lengths).long()
        if torch.cuda.is_available():
            lengths = lengths.cuda()

        # compute the embeddings
        model_output = model.forward_emb(images, captions, lengths, volatile=True)
        img_emb, cap_span_features, left_span_features, right_span_features, word_embs, tree_indices, all_probs, \
        span_bounds = model_output[:8]

        # generate one tree per caption, then re-order them by caption id so
        # `trees` lines up with the ground-truth file
        candidate_trees = [generate_tree(captions, tree_indices, j, vocab)
                           for j in range(len(ids))]
        appended_trees = ['' for _ in range(len(ids))]
        for j in range(len(ids)):
            appended_trees[ids[j] - min(ids)] = clean_tree(candidate_trees[j])
        trees.extend(appended_trees)
        # span covering the whole caption of length l is at index l - 2
        cap_emb = torch.cat([cap_span_features[l - 2][k].reshape(1, -1)
                             for k, l in enumerate(lengths)], dim=0)
        del images, captions, img_emb, cap_emb

    # BUGFIX: read ground truth via a context manager instead of leaking
    # the open file handle.
    with open(os.path.join(opt.data_path, 'test_ground-truth.txt')) as f:
        ground_truth = [line.strip() for line in f]
    return trees, ground_truth
def generate_patterns(number_of_vertices, number_of_edges, number_of_vertex_labels, number_of_edge_labels, number_of_patterns):
    """Generate random directed multigraph patterns.

    Each pattern is grown from a random spanning tree and densified with
    random extra edges until it has `number_of_edges` edges. Parallel edges
    are allowed but never duplicate an existing (endpoints, label) pair; the
    per-edge "key" attribute disambiguates parallel edges.

    Returns:
        list of igraph.Graph objects with "label" attributes on vertices and
        edges and a "key" attribute on edges.
    """
    patterns = []

    for p in range(number_of_patterns):
        # vertex labels
        vertex_labels = generate_labels(number_of_vertices, number_of_vertex_labels)
        # edge labels
        edge_labels = generate_labels(number_of_edges, number_of_edge_labels)

        # first, generate a spanning tree
        # (removed dead code: an `ig.Graph(directed=True)` that was
        # immediately overwritten, and an unused `start = time()` timer)
        pattern = generate_tree(number_of_vertices, directed=True)
        edge_label_mapping = defaultdict(set)  # (src, tgt) -> labels in use
        for e, edge in enumerate(pattern.es):
            edge_label_mapping[edge.tuple].add(edge_labels[e])
        # each tree edge is the first (key 0) edge between its endpoints
        edge_keys = [0] * (number_of_vertices - 1)

        # second, randomly add edges
        ecount = pattern.ecount()
        new_edges = list()
        while ecount < number_of_edges:
            u = np.random.randint(0, number_of_vertices)
            v = np.random.randint(0, number_of_vertices)
            src_tgt = (u, v)
            edge_label = edge_labels[ecount]
            # we do not generate edges between two same vertices with same labels
            if edge_label in edge_label_mapping[src_tgt]:
                continue
            new_edges.append(src_tgt)
            edge_keys.append(len(edge_label_mapping[src_tgt]))
            edge_label_mapping[src_tgt].add(edge_label)
            ecount += 1
        pattern.add_edges(new_edges)
        pattern.vs["label"] = vertex_labels
        pattern.es["label"] = edge_labels
        pattern.es["key"] = edge_keys

        patterns.append(pattern)
    return patterns
Esempio n. 6
0
#
# Note: a leaf is a node that has no children.

from utils import TreeNode, generate_tree, null


class Solution:
    def hasPathSum(self, root: TreeNode, target: int) -> bool:
        """Return True iff some root-to-leaf path sums exactly to `target`."""
        def walk(node, remaining):
            # Empty subtree: no path ends here.
            if not node:
                return False
            remaining -= node.val
            # At a leaf, the path is valid iff the target is fully consumed.
            if node.left is None and node.right is None:
                return remaining == 0
            return ((node.left is not None and walk(node.left, remaining))
                    or (node.right is not None and walk(node.right, remaining)))

        return walk(root, target)


if __name__ == "__main__":
    solver = Solution()
    sample = generate_tree([5, 4, 8, 11, null, 13, 4, 7, 2, null, 1])
    # Exercise both a populated tree and the empty-tree edge case.
    for tree, target in ((sample, 22), (None, 0)):
        print(solver.hasPathSum(tree, target))
Esempio n. 7
0
        depth, balance = self.is_balance(root)
        return balance

    def is_balance(self, node):
        """Return (depth, balanced) for the subtree rooted at `node`.

        `balanced` is True iff every subtree's child depths differ by at
        most 1; `depth` is the height of the subtree (0 for None).
        """
        if not node:
            return 0, True
        if not node.left and not node.right:
            return 1, True
        left_depth, left_ok = (self.is_balance(node.left)
                               if node.left else (0, True))
        right_depth, right_ok = (self.is_balance(node.right)
                                 if node.right else (0, True))
        balanced = (left_ok and right_ok
                    and abs(left_depth - right_depth) < 2)
        return 1 + max(left_depth, right_depth), balanced


if __name__ == "__main__":
    solver = Solution()
    # Balanced, unbalanced, and degenerate (right-skewed) trees.
    for values in ([3, 9, 20, null, null, 15, 7],
                   [1, 2, 2, 3, 3, null, null, 4, 4],
                   [1, null, 2, null, 3]):
        print(solver.isBalanced(generate_tree(values)))
Esempio n. 8
0
class Solution:
    def goodNodes(self, root: TreeNode) -> int:
        """Count nodes that are >= every node on their path from the root.

        Depth-first traversal passing the running path maximum downwards.
        """
        if not root:
            return 0
        # The root trivially satisfies root.val >= root.val, so delegating
        # with the root's own value as the running maximum counts it too.
        return self.goodNode(root, root.val)

    def goodNode(self, root, maxval):
        """Count good nodes in `root`'s subtree given the path maximum so far."""
        count = 1 if root.val >= maxval else 0
        best = max(maxval, root.val)
        if root.left:
            count += self.goodNode(root.left, best)
        if root.right:
            count += self.goodNode(root.right, best)
        return count


if __name__ == "__main__":
    solver = Solution()
    sample = generate_tree([3, 3, null, 4, 2])
    print(solver.goodNodes(sample))
Esempio n. 9
0
            return 0
        if not root.left and not root.right:
            return 1
        return max(self.depth(root.left), self.depth(root.right)) + 1

    def isBalanced1(self, root: TreeNode):
        """Return True iff the tree is height-balanced.

        Single DFS that reports a subtree's height, using -1 as a sentinel
        meaning "already unbalanced" so the answer propagates straight up.
        """
        def height(node):
            if node is None:
                return 0
            left = height(node.left)
            right = height(node.right)
            if left == -1 or right == -1 or abs(right - left) > 1:
                return -1
            return 1 + max(left, right)

        return height(root) >= 0


if __name__ == "__main__":
    solver = Solution()
    balanced_tree = generate_tree([3, 9, 20, null, null, 15, 7])
    print(solver.isBalanced(balanced_tree))
    skewed_tree = generate_tree([1, 2, 2, 3, 3, null, null, 4, 4])
    print(solver.isBalanced1(skewed_tree))
Esempio n. 10
0
from functools import lru_cache


class Solution:
    def maxSumBST(self, root: TreeNode) -> int:
        """Return the largest key sum of any subtree that is a valid BST."""
        self.res = 0

        def visit(node):
            # Returns (is_bst, subtree_max, subtree_min, subtree_sum).
            if not node:
                return True, 0, 0, 0
            l_ok, l_max, l_min, l_sum = visit(node.left)
            r_ok, r_max, r_min, r_sum = visit(node.right)
            is_bst = (l_ok and r_ok
                      and (not node.left or l_max < node.val)
                      and (not node.right or r_min > node.val))
            if not is_bst:
                return False, 0, 0, 0
            total = node.val + l_sum + r_sum
            self.res = max(self.res, total)
            # Extremes of this subtree fall back to node.val for missing sides.
            hi = r_max if node.right else node.val
            lo = l_min if node.left else node.val
            return True, hi, lo, total

        visit(root)
        return self.res


if __name__ == "__main__":
    solver = Solution()
    sample = generate_tree(
        [1, 4, 3, 2, 4, 2, 5, null, null, null, null, null, null, 4, 6])
    print(solver.maxSumBST(sample))
Esempio n. 11
0
            return None
        my_stack = [root]
        values = []
        while my_stack:
            node = my_stack.pop()
            if isinstance(node, TreeNode):
                if node.right:
                    my_stack.append(node.right)
                my_stack.append((node, node.val))
                if node.left:
                    my_stack.append(node.left)
            else:
                values.append(node)
        for i, node in enumerate(values[:-1]):
            if node[1] == p.val:
                return values[i + 1][0]

        return None


if __name__ == "__main__":
    solver = Solution()
    tree = generate_tree([5, 3, 6, 2, 4, None, None, 1])
    # Successor of the node with value 6 (the root's right child).
    successor = solver.inorderSuccessor(tree, tree.right)
    if successor:
        print(successor.val)
Esempio n. 12
0
# Created at: 2020-04-22

# Given a binary tree, imagine standing on its right side; return the node
# values visible from top to bottom.
from typing import *
from utils import TreeNode, generate_tree, null


class Solution:
    def rightSideView(self, root: TreeNode) -> List[int]:
        """Return the values visible when the tree is viewed from the right.

        Breadth-first traversal, keeping the last (rightmost) node of each
        level.
        """
        view = []
        level = [root] if root else []
        while level:
            view.append(level[-1].val)
            level = [child
                     for node in level
                     for child in (node.left, node.right)
                     if child]
        return view


if __name__ == "__main__":
    solver = Solution()
    sample = generate_tree([1, 2, 3, null, 5, null, 4])
    print(solver.rightSideView(sample))
Esempio n. 13
0
# Created by: Jiaming
# Created at: 2020-04-29

from typing import *
from utils import TreeNode, generate_tree, null


# Implement a function that checks whether a binary tree is a binary search tree.
class Solution:
    def isValidBST(self, root: TreeNode) -> bool:
        """Check whether `root` is a valid BST.

        Collect the inorder traversal and verify it is strictly increasing
        (sorted with no duplicates). O(n) time, O(n) space.
        """
        def inorder(node):
            # Renamed from the misleading `preorder`: this is an inorder walk.
            if not node:
                return []
            return inorder(node.left) + [node.val] + inorder(node.right)

        values = inorder(root)
        has_duplicates = len(values) != len(set(values))
        return not has_duplicates and sorted(values) == values


if __name__ == "__main__":
    solver = Solution()
    # NOTE(review): the first tree is built but its result is never checked —
    # the binding is immediately overwritten, matching the original script.
    T = generate_tree([5, 1, 4, null, null, 3, 6])
    T = generate_tree([1, 1])
    print(solver.isValidBST(T))
Esempio n. 14
0
    def isValidBST(self, root: TreeNode) -> bool:
        """Validate a BST by producing its inorder sequence iteratively.

        Each node is re-pushed as its raw value between its subtrees, so the
        plain values pop out in inorder; the tree is a BST iff that sequence
        is sorted and free of duplicates.
        """
        pending = [root]
        inorder = []
        while pending:
            item = pending.pop()
            if isinstance(item, TreeNode):
                if item.right:
                    pending.append(item.right)
                pending.append(item.val)
                if item.left:
                    pending.append(item.left)
            else:
                inorder.append(item)

        return sorted(inorder) == inorder and len(inorder) == len(set(inorder))



if __name__ == "__main__":
    solver = Solution()
    # Valid BST, duplicate values, and an invalid left-grandchild case.
    for values in ([2, 1, 3],
                   [1, 1],
                   [10, 5, 15, None, None, 6, 20]):
        print(solver.isValidBST(generate_tree(values)))

    def generate(self,
                 number_of_vertices,
                 number_of_edges,
                 number_of_vertex_labels,
                 number_of_edge_labels,
                 alpha,
                 max_pattern_counts=-1,
                 max_subgraph=512,
                 return_subisomorphisms=False):
        """Generate a random labeled multigraph containing the configured
        pattern a bounded number of times.

        Args:
            number_of_vertices: vertex count of the generated graph.
            number_of_edges: edge count; must be >= number_of_vertices - 1.
            number_of_vertex_labels: size of the vertex-label alphabet.
            number_of_edge_labels: size of the edge-label alphabet.
            alpha: probability of injecting the pattern when adding edges.
            max_pattern_counts: upper bound on pattern occurrences
                (-1 = unbounded); when exceeded, retry with a decayed alpha.
            max_subgraph: max vertices per subgraph partition (speeds up
                subisomorphism search).
            return_subisomorphisms: if True, metadata lists the concrete
                subisomorphism mappings; otherwise only their count.

        Returns:
            (graph, metadata) where metadata has keys "counts" and
            "subisomorphisms".
        """
        assert number_of_edges >= number_of_vertices - 1

        graph_pattern_valid = True
        if number_of_vertex_labels < self.number_of_pattern_vertex_labels:
            print(
                "WARNING: the number of graph vertex labels (%d) is less than the number of pattern vertex labels (%d)."
                % (number_of_vertex_labels,
                   self.number_of_pattern_vertex_labels))
            graph_pattern_valid = False
        if number_of_edge_labels < self.number_of_pattern_edge_labels:
            print(
                "WARNING: the number of graph edge labels (%d) is less than the number of pattern edge labels (%d)."
                % (number_of_edge_labels, self.number_of_pattern_edge_labels))
            graph_pattern_valid = False

        if not graph_pattern_valid:
            # no subisomorphism in this setting
            # we can generate the graph randomly
            vertex_labels = generate_labels(number_of_vertices,
                                            number_of_vertex_labels)
            edge_labels = generate_labels(number_of_edges,
                                          number_of_edge_labels)
            graph = generate_tree(number_of_vertices, directed=True)
            graph_edge_label_mapping = defaultdict(
                set)  # key: (0, v1, 0, v2), value: e_labels
            for e, edge in enumerate(graph.es):
                graph_edge_label_mapping[(0, edge.source, 0,
                                          edge.target)].add(edge_labels[e])
            ecount = graph.ecount()
            edge_keys = [0] * ecount

            # second, random add edges
            new_edges = list()
            while ecount < number_of_edges:
                u = np.random.randint(0, number_of_vertices)
                v = np.random.randint(0, number_of_vertices)
                edge_label = edge_labels[ecount]
                # we do not generate edges between two same vertices with same labels
                graph_edge_labels = graph_edge_label_mapping[(0, u, 0, v)]
                if edge_label in graph_edge_labels:
                    continue
                new_edges.append((u, v))
                edge_keys.append(len(graph_edge_labels))
                graph_edge_labels.add(edge_label)
                ecount += 1
            graph.add_edges(new_edges)
            graph.vs["label"] = vertex_labels
            graph.es["label"] = edge_labels
            graph.es["key"] = edge_keys

            metadata = {"counts": 0, "subisomorphisms": list()}
            return graph, metadata
        elif max_pattern_counts != -1 and number_of_edges * alpha > max_pattern_counts * self.number_of_pattern_edges:
            # alpha would inject too many patterns; decay it and retry
            alpha = max_pattern_counts * self.number_of_pattern_edges / number_of_edges * DECAY
            return self.generate(number_of_vertices,
                                 number_of_edges,
                                 number_of_vertex_labels,
                                 number_of_edge_labels,
                                 alpha=alpha,
                                 max_pattern_counts=max_pattern_counts,
                                 max_subgraph=max_subgraph,
                                 return_subisomorphisms=return_subisomorphisms)
        else:
            # split the graph into small subgraphs to speed the subisomorphism searching
            subgraphs = list()
            number_of_subgraphs = math.ceil(number_of_vertices / max_subgraph)
            # BUGFIX: `dtype=np.int` was removed in NumPy 1.24; the builtin
            # `int` is the exact drop-in alias.
            numbers_of_subgraph_vertices = np.array(np.random.dirichlet(
                [number_of_vertices / number_of_subgraphs] *
                number_of_subgraphs) * number_of_vertices,
                                                    dtype=int)
            diff = number_of_vertices - numbers_of_subgraph_vertices.sum()
            numbers_of_subgraph_vertices[-1] += diff

            ecount = 0
            graph_vertex_label_mapping_reversed = defaultdict(
                list)  # key: (sg, v_label), value: v_ids
            graph_edge_label_mapping = defaultdict(
                set)  # key: (sg1, v1, sg2, v2), value: e_labels

            for sg in range(number_of_subgraphs):
                # construct a directed tree
                number_of_subgraph_vertices = numbers_of_subgraph_vertices[sg]
                subgraph_vertex_labels = generate_labels(
                    number_of_subgraph_vertices, number_of_vertex_labels)
                subgraph_edge_labels = generate_labels(
                    number_of_subgraph_vertices - 1,
                    number_of_edge_labels)  # tree label
                subgraph = generate_tree(number_of_subgraph_vertices,
                                         directed=True)
                subgraph["sg"] = sg
                subgraph.vs["label"] = subgraph_vertex_labels

                ecount += (number_of_subgraph_vertices - 1)
                subgraphs.append(subgraph)
                for v_id, v_label in enumerate(subgraph_vertex_labels):
                    graph_vertex_label_mapping_reversed[(sg,
                                                         v_label)].append(v_id)
                for e, (v1, v2) in enumerate(subgraph.get_edgelist()):
                    graph_edge_label_mapping[(sg, v1, sg,
                                              v2)].add(subgraph_edge_labels[e])
                subgraph.delete_edges(None)

                # a subgraph can host the pattern only if it has at least as
                # many vertices of each label as the pattern requires
                subgraph_pattern_valid = True
                subgraph_vertex_label_counter = Counter(
                    subgraph_vertex_labels)  # key; label, value: count
                for vertex_label, cnt in self.pattern_vertex_label_counter.items(
                ):
                    if subgraph_vertex_label_counter[vertex_label] < cnt:
                        subgraph_pattern_valid = False
                        break
                subgraph["pattern_valid"] = subgraph_pattern_valid

            for (sg1, sg2) in generate_tree(number_of_subgraphs,
                                            directed=True).get_edgelist():
                # add an edge between two subgraphs
                self.add_edges(subgraphs[sg1], subgraphs[sg2],
                               graph_edge_label_mapping, number_of_edge_labels,
                               1)
                ecount += 1

            # keep adding edges (or whole pattern instances, with prob. alpha)
            # until the edge budget is met or we stall 10 times in a row
            invalid_cnt = 0
            while invalid_cnt < 10 and ecount < number_of_edges:
                sg1 = np.random.randint(0, number_of_subgraphs)
                sg2 = np.random.randint(0, number_of_subgraphs)
                diff = number_of_edges - ecount

                if diff >= self.number_of_pattern_edges:
                    if subgraphs[sg1]["pattern_valid"] and np.random.rand(
                    ) < alpha:
                        new_ecount = self.add_pattern(
                            subgraphs[sg1],
                            graph_vertex_label_mapping_reversed,
                            graph_edge_label_mapping)
                    else:
                        new_ecount = self.add_edges(
                            subgraphs[sg1], subgraphs[sg2],
                            graph_edge_label_mapping, number_of_edge_labels,
                            self.number_of_pattern_edges)
                else:
                    new_ecount = self.add_edges(subgraphs[sg1], subgraphs[sg2],
                                                graph_edge_label_mapping,
                                                number_of_edge_labels, diff)
                if new_ecount == 0:
                    invalid_cnt += 1
                else:
                    invalid_cnt = 0
                    ecount += new_ecount
            if ecount < number_of_edges:
                # could not reach the edge budget: decay alpha and retry
                alpha = alpha * ecount / number_of_edges * DECAY
                return self.generate(
                    number_of_vertices,
                    number_of_edges,
                    number_of_vertex_labels,
                    number_of_edge_labels,
                    alpha=alpha,
                    max_pattern_counts=max_pattern_counts,
                    max_subgraph=max_subgraph,
                    return_subisomorphisms=return_subisomorphisms)

            self.update_subgraphs(subgraphs, graph_edge_label_mapping)
            graph, graph_vertex_mapping, graph_vertex_mapping_reversed = self.merge_subgraphs(
                subgraphs, graph_edge_label_mapping)
            if return_subisomorphisms:
                subisomorphisms = list()
                for sg, subgraph in enumerate(subgraphs):
                    for subisomorphism in self.pattern_checker.get_subisomorphisms(
                            subgraph, self.pattern):
                        # map subgraph-local vertex ids back to global ids
                        subisomorphism = [
                            graph_vertex_mapping_reversed[(sg, v)]
                            for v in subisomorphism
                        ]
                        subisomorphisms.append(subisomorphism)
                metadata = {
                    "counts": len(subisomorphisms),
                    "subisomorphisms": subisomorphisms
                }
            else:
                counts = 0
                for subgraph in subgraphs:
                    counts += self.pattern_checker.count_subisomorphisms(
                        subgraph, self.pattern)
                metadata = {"counts": counts, "subisomorphisms": list()}
            if metadata["counts"] > max_pattern_counts:
                # overshot the pattern budget: decay alpha and retry
                alpha = alpha * max_pattern_counts / metadata["counts"] * DECAY
                return self.generate(
                    number_of_vertices,
                    number_of_edges,
                    number_of_vertex_labels,
                    number_of_edge_labels,
                    alpha=alpha,
                    max_subgraph=max_subgraph,
                    max_pattern_counts=max_pattern_counts,
                    return_subisomorphisms=return_subisomorphisms)
            # assert(metadata["counts"] == self.pattern_checker.count_subisomorphisms(graph, self.pattern))
            return graph, metadata
Esempio n. 16
0
from typing import List

from utils import TreeNode, null, generate_tree


class Solution:
    def levelOrder(self, root: TreeNode) -> List[List[int]]:
        """Return node values grouped by depth, from the root downwards."""
        levels = []
        current = [root] if root else []
        while current:
            levels.append([node.val for node in current])
            # Collect the next level's non-empty children, left to right.
            current = [child
                       for node in current
                       for child in (node.left, node.right)
                       if child]
        return levels


if __name__ == "__main__":
    solver = Solution()
    sample = generate_tree([3, 9, 20, null, null, 15, 7])
    print(solver.levelOrder(sample))
Esempio n. 17
0
        def dfs(val, node, direct=None):
            self.res = max(self.res, val)
            if not node:
                return
            if direct is None:
                if node.left:
                    dfs(0, node.left, 'left')
                if node.right:
                    dfs(0, node.right, 'right')
            elif direct == 'left':
                dfs(0, node.left, 'left')
                dfs(val + 1, node.right, 'right')
            else:
                dfs(val + 1, node.left, 'left')
                dfs(0, node.right, 'right')

        self.res = 0
        dfs(0, root)
        return self.res


if __name__ == "__main__":
    solver = Solution()
    for values in ([1, null, 1, 1, 1, null, null, 1, 1, null, 1, null, null,
                    null, 1, null, 1],
                   [1, 1, 1, null, 1, null, null, 1, 1, null, 1]):
        print(solver.longestZigZag(generate_tree(values)))
Esempio n. 18
0
                    predecessor.right = root
                    root = root.left
                else:
                    if pred and root.val < pred.val:
                        y = root
                        if x is None:
                            x = pred
                    pred = root
                    predecessor.right = None
                    root = root.right

            else:
                if pred and root.val < pred.val:
                    y = root
                    if x is None:
                        x = pred
                pred = root
                root = root.right
        x.val, y.val = y.val, x.val


if __name__ == "__main__":
    solver = Solution()
    # Recover each corrupted BST, then show its (now sorted) inorder walk.
    for values in ([1, 3, null, null, 2], [3, 1, 4, null, null, 2]):
        tree = generate_tree(values)
        print(solver.recoverTree(tree))
        print(morris_inorder(tree))
Esempio n. 19
0
def send_info():
    """Flask endpoint: return the course tree rooted at form field 'r' as JSON."""
    root = request.form['r']
    # BUGFIX: `print root` is a Python 2 print statement (SyntaxError on
    # Python 3); the call form works on both interpreters.
    print(root)
    course_info = utils.generate_tree(root=root)
    return json.dumps(course_info)