def test_add_multiple_nodes_left(self):
    new_tree = bst(20)
    smaller_node = bst(19)
    even_smaller_node = bst(18)
    new_tree.add(smaller_node)
    new_tree.add(even_smaller_node)
    self.assertTrue(new_tree.get_left_child().get_left_child().value == even_smaller_node.value)


def __call__(self, x, train):
    h = bst.bst(self.b0(self.conv0(x)))
    h = bst.bst(self.b1(self.conv1(h)))
    h = bst.bst(self.b2(self.conv2(h)))
    h = F.average_pooling_2d(h, 48)
    h = self.b3(self.fc0(h))
    return h


def test_add_multiple_nodes_right(self):
    new_tree = bst(20)
    larger_node = bst(21)
    even_larger_node = bst(22)
    new_tree.add(larger_node)
    new_tree.add(even_larger_node)
    self.assertTrue(new_tree.get_right_child().get_right_child().value == even_larger_node.value)


def huffman(freq_table):
    # should use min-heap for pool
    pool = [bst.bst((freq_table[key], key)) for key in freq_table.keys()]
    while len(pool) > 1:
        pool.sort(key=lambda x: -x.data[0])
        a, b = pool.pop(), pool.pop()
        weight = a.data[0] + b.data[0]
        c = bst.bst((weight, ''))
        c.left, c.right = a, b
        pool.append(c)
    return pool[0]
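# A minimal sketch of the min-heap variant that the comment in huffman() suggests.
# It assumes the same bst.bst node wrapper and freq_table mapping used above;
# heap entries carry a tie-breaking counter so the tree nodes themselves never
# have to be comparable.
import heapq
import itertools


def huffman_heap(freq_table):
    counter = itertools.count()
    heap = [(freq, next(counter), bst.bst((freq, key)))
            for key, freq in freq_table.items()]
    heapq.heapify(heap)
    while len(heap) > 1:
        wa, _, a = heapq.heappop(heap)  # two lightest subtrees
        wb, _, b = heapq.heappop(heap)
        c = bst.bst((wa + wb, ''))
        c.left, c.right = a, b
        heapq.heappush(heap, (wa + wb, next(counter), c))
    return heap[0][2]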
def __call__(self, x, train, batch_size):
    h = bst.bst(self.b_conv0(self.conv0(x)))
    h = self.block0(h, train)
    h = bst.bst(self.b_conv1(self.conv1(h)))
    h = self.block1(h, train)
    h = bst.bst(self.b_conv2(self.conv2(h)))
    h = self.block2(h, train)
    h = bst.bst(self.b_conv3(self.conv3(h)))
    h = self.block3(h, train)
    h = bst.bst(self.b_conv4(self.conv4(h)))
    h = F.max_pooling_2d(h, 2, 2)
    h = F.average_pooling_2d(h, 2, 2, 1)
    h = Variable(cupy.reshape(h.data, [batch_size, -1]), volatile=not train)
    h = self.b_dense0(self.fc0(h))
    return h
def setUp(self):
    self.tree = bst()
    self.tree.insert(9)
    self.tree.insert(6)
    self.tree.insert(12)
    self.tree.insert(3)
    self.tree.insert(8)
    self.tree.insert(10)
    self.tree.insert(15)
    self.tree.insert(7)
    self.tree.insert(18)
def _reconcile(self):
    if self.NEED_RECONCILE:
        self.suspend_watch()
        temp = get_file_list(self.DIR_PATH, self.SETTINGS.get_wallpaper_recursive())
        temp_tree = bst.bst(temp)
        for my_file in self.LOCAL_FILE_LIST:
            if temp_tree.extract(my_file) != my_file:
                self.list_remove(my_file)
        temp_list = temp_tree.as_list()
        if len(temp_list) > 0:
            self.LOCAL_FILE_LIST += temp_tree.as_list()
            self.NEED_SAVE = True
        self.instate_watch()
        self.NEED_RECONCILE = False
def main():
    bst_tree = bst()
    # vals = [round(random.random()*100) for i in xrange(15)]
    vals = [myString('(')]
    bst_tree.add_val(vals[0])
    for i in xrange(5):
        nvals = []
        while vals:
            v = vals.pop()
            nvals.append(myString(v + '('))
            bst_tree.add_val(nvals[-1])
            nvals.append(myString(v + ')'))
            bst_tree.add_val(nvals[-1])
        vals = nvals
    bst_tree.print_bfs()
    print "BALANCED LEAVES:", bst_tree.get_blncd_leaves()


def test_check_if_value_doesnt_exist(self):
    new_tree = bst(20)
    node1 = bst(10)
    node2 = bst(25)
    node3 = bst(30)
    node4 = bst(4)
    node5 = bst(89)
    new_tree.add(node1)
    new_tree.add(node2)
    new_tree.add(node3)
    new_tree.add(node4)
    new_tree.add(node5)
    self.assertFalse(new_tree.find(200))
def test():
    # 1
    print '#'
    print 'problem 1: please refer to the tree.pdf file to see the tree as a graph'
    print '#'
    # node list used to construct the binary search tree
    nd_list = [65, 28, 22, 46, 35, 32, 40, 48, 47, 55, 83, 78, 89, 85, 86, 91, 93]
    print 'tree node list'
    print nd_list
    bt = bst()
    for i in range(len(nd_list)):
        bt.bst_insert(bt.root, nd_list[i])
    print 'original tree, in order traversal'
    bt.bst_in_trav(bt.root)
    bt1 = copy.deepcopy(bt)
    bt2 = copy.deepcopy(bt)
    print
    print 'delete a leaf: (86), in order traversal'
    bt.delete_value(bt.root, 86)
    bt.bst_in_trav(bt.root)
    print
    print 'delete (91), its successor is (93), (93) is a leaf, in order traversal'
    bt1.delete_value(bt1.root, 91)
    bt1.bst_in_trav(bt1.root)
    print
    print 'delete (83), its successor is (85), (85) is not a leaf, in order traversal'
    bt2.delete_value(bt2.root, 83)
    bt2.bst_in_trav(bt2.root)
    print
    print '#'
    # 2
    # bubble sort
    print 'problem 2: bubble sort'
    print '#'
    print 'original list: '
    t_list = [32, 7, 45, 13, 64, 9, 3, 11, 9, 18, 69, 23, 33, 99, 76, 86]
    print t_list
    print 'bubble sort passes'
    bbsort(t_list)
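# A minimal sketch of the bbsort helper that test() calls but does not define here:
# plain bubble sort that prints the list after each pass and stops early once no
# swaps occur. The name and the per-pass printing are assumptions taken from the
# prompt strings in test(), not confirmed by the snippet itself.
def bbsort(t_list):
    n = len(t_list)
    for i in range(n - 1):
        swapped = False
        for j in range(n - 1 - i):
            if t_list[j] > t_list[j + 1]:
                t_list[j], t_list[j + 1] = t_list[j + 1], t_list[j]
                swapped = True
        print(t_list)  # show the list after each pass
        if not swapped:
            break
    return t_list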
def __call__(self, x):
    h1 = bst.bst(self.b1(self.c1(x), test=not self.train))
    h2 = bst.bst(self.b2(self.c2(h1), test=not self.train))
    h3 = bst.bst(self.b3(self.c3(h2), test=not self.train))
    h4 = bst.bst(self.b4(self.l1(h3), test=not self.train))
    return self.b5(self.l2(h4), test=not self.train)


def __call__(self, x):
    h1 = bst.bst(self.b1(self.l1(x), test=not self.train))
    h2 = bst.bst(self.b2(self.l2(h1), test=not self.train))
    return self.b3(self.l3(h2), test=not self.train)


def test_for_parent_when_empty(self):
    new_tree = bst()
    self.assertTrue(new_tree.parent == None)


def test_if_root(self):
    new_tree = bst()
    self.assertTrue(new_tree.is_root())


def test_parent_of_left(self):
    new_tree = bst(20)
    smaller_node = bst(19)
    new_tree.add(smaller_node)
    self.assertTrue(new_tree.get_left_child().get_parent().value == new_tree.value)


def __call__(self, x):
    x = x * 256
    h1 = bst(self.b1(self.l1(x), test=not self.train))
    h2 = bst(self.b2(self.l2(h1), test=not self.train))
    return self.b3(self.l3(h2), test=not self.train)


def test_for_left_when_empty(self):
    new_tree = bst()
    self.assertFalse(new_tree.has_left_child())


def test_for_both_when_empty(self):
    new_tree = bst()
    self.assertFalse(new_tree.has_both_children())


def test_new_bst_with_value(self):
    new_tree = bst(20)
    self.assertTrue(new_tree.value != None)


def test_add_node_when_larger(self):
    new_tree = bst(20)
    larger_node = bst(21)
    new_tree.add(larger_node)
    self.assertTrue(new_tree.get_right_child().value == larger_node.value)


def test_parent_of_right(self):
    new_tree = bst(20)
    larger_node = bst(21)
    new_tree.add(larger_node)
    self.assertTrue(new_tree.get_right_child().get_parent().value == new_tree.value)
prt = prtree()


def printree(node):
    dot = prt.dot(node)
    dot.view()
    prt.dots.append(dot)
    # print(dot.source)
    # dot.render(
    #     filename=None, directory=None, view=False, cleanup=False, format='png')


if __name__ == "__main__":
    # clear out any previously generated dot graphs
    prt.dots.clear()
    t_bst = bst()
    t_llrbt = llrbt()
    t_sc = scht()
    t_lp = lpht()
    # build the data set
    d = randoms.dict_int(n=100)
    keys = [key for key in d]
    minkey = min(keys)
    maxkey = max(keys)
    # insert
    for k, v in d.items():
        t_bst.insert(k, k)
        t_llrbt.insert(k, k)
        t_sc.insert(k, k)
    # test
f.close()

f = open('names_2.txt', 'r')
names_2 = f.read().split("\n")  # List containing 10000 names
f.close()

duplicates = []  # Return the list of duplicates in this data structure

# STRETCH STARTS
# set_1 = set(names_1)
# set_2 = set(names_2)
# duplicates = list(set_1.intersection(set_2))
# STRETCH ENDS

# create a new tree and set the root value
tree = bst(names_1[0])

# insert every name from names_1 into the tree
# so the bst contains method can be used on it
for name in names_1:
    tree.insert(name)

# pass each name in names_2 to the tree's contains method
# to find duplicates; append to the duplicates list if found
for name in names_2:
    if tree.contains(name):
        duplicates.append(name)

end_time = time.time()
print(f"{len(duplicates)} duplicates:\n\n{', '.join(duplicates)}\n\n")
print(f"runtime: {end_time - start_time} seconds")
def test_empty_instantiation(self):
    new_tree = bst()
    self.assertTrue(new_tree != None)
    # bn2_z = bn2(y.data[0,:], model.predictor.b1)
    # f.writelines([str(val)+"\n" for val in bn2_z.tolist()])
    return z


argvs = sys.argv
unit = 1000
model = L.Classifier(BinaryMLP(784, unit, 10))
if len(argvs) > 1:
    chainer.serializers.load_npz(argvs[1], model)

x = np.ones((1, 784), dtype=np.float32) * 128
y1 = forward_linear(model.predictor.l1, x, "tmp/output_y.txt")
z1 = forward_bn(model.predictor.b1, y1, "tmp/output_bn.txt")
h1 = bst(z1)
y2 = forward_linear(model.predictor.l2, h1, "tmp/output_y2.txt")
z2 = forward_bn(model.predictor.b2, y2, "tmp/output_bn2.txt")
h2 = bst(z2)
y3 = forward_linear(model.predictor.l3, h2, "tmp/output_y3.txt")
z3 = forward_bn(model.predictor.b3, y3, "tmp/output_bn3.txt")

#
# chainer.serializers.load_npz(argvs[1], model)
# train, test = chainer.datasets.get_mnist()
# row = train[0]
# data, teacher = row
# model.predictor(data.reshape(1, 784))
#
def test_add_node_when_smaller(self):
    new_tree = bst(20)
    smaller_node = bst(19)
    new_tree.add(smaller_node)
    self.assertTrue(new_tree.get_left_child().value == smaller_node.value)
def check_tree(tree, lower_bound=None, upper_bound=None):
    '''Return True if tree is a valid BST.'''
    # compare against None explicitly so a bound of 0 is still enforced
    if lower_bound is not None and tree.data <= lower_bound:
        return False
    if upper_bound is not None and tree.data >= upper_bound:
        return False
    new_upper = min(upper_bound, tree.data) if upper_bound is not None else tree.data
    if tree.left and not check_tree(tree.left, lower_bound, new_upper):
        return False
    new_lower = max(lower_bound, tree.data) if lower_bound is not None else tree.data
    if tree.right and not check_tree(tree.right, new_lower, upper_bound):
        return False
    return True


tree = bst.bst(19)
tree.insert_list([7, 43, 3, 111, 23, 47, 2, 5, 17, 37, 53, 13, 29, 41, 31])
print tree
print check_tree(tree)

tree.left.data = 20
print tree
print check_tree(tree)

tree.left.data = 7
tree.left.right.data = 20
print tree
print check_tree(tree)
start_time = time.time()

f = open('names_1.txt', 'r')
names_1 = f.read().split("\n")  # List containing 10000 names
f.close()

f = open('names_2.txt', 'r')
names_2 = f.read().split("\n")  # List containing 10000 names
f.close()

duplicates = []  # Return the list of duplicates in this data structure

# Replace the nested for loops below with your improvements

# iterate over list 1 and insert every name into a BST
new_bst = bst(names_1[0])
for el in names_1:
    new_bst.insert(el)

# see if any elements in names_2 are also in new_bst; if so, add them to the duplicates list
for el in names_2:
    if new_bst.contains(el):
        duplicates.append(el)

end_time = time.time()
print(f"{len(duplicates)} duplicates:\n\n{', '.join(duplicates)}\n\n")
print(f"runtime: {end_time - start_time} seconds")

# ---------- Stretch Goal -----------
# Python has built-in tools that allow for a very efficient approach to this problem
# What's the best time you can accomplish? There are no restrictions on techniques or data
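# A minimal sketch of the built-in approach the stretch goal hints at, assuming
# names_1 and names_2 are already loaded as above: a set intersection replaces
# the BST entirely and runs in roughly linear time.
duplicates_fast = sorted(set(names_1) & set(names_2))
print(f"{len(duplicates_fast)} duplicates found via set intersection")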