def test_minimal_graph(repo_dir, version_list, file_name=None): """ Smoke test minimal_graph(). """ ui_ = ui.ui() if file_name is None: graph, repo, cache = test_update_real(repo_dir, version_list, True) open('/tmp/latest_graph.txt', 'wb').write(graph_to_string(graph)) else: repo = hg.repository(ui_, repo_dir) cache = BundleCache(repo, ui_, CACHE_DIR) cache.remove_files() graph = parse_graph(open(file_name, 'rb').read()) print "--- from file: %s ---" % file_name print graph_to_string(graph) version_map = build_version_table(graph, repo) # Incomplete, but better than nothing. # Verify that the chk bounds are the same after shrinking. chk_bounds = {} initial_edges = graph.get_top_key_edges() for edge in initial_edges: chk_bounds[graph.get_chk(edge)] = ( get_rollup_bounds(graph, repo, edge[0] + 1, edge[1], version_map)) print "CHK BOUNDS:" for value in chk_bounds: print value print " ", chk_bounds[value] print sizes = (512, 1024, 2048, 4096, 16 * 1024) for max_size in sizes: try: print "MAX:", max(version_map.values()) small = minimal_graph(graph, repo, version_map, max_size) print "--- size == %i" % max_size print graph_to_string(small) small.rep_invariant(repo, True) # Full check chks = chk_bounds.keys() path = small.get_top_key_edges() print "TOP KEY EDGES:" print path for edge in path: # MUST rebuild the version map because the indices changed. new_map = build_version_table(small, repo) bounds = get_rollup_bounds(small, repo, edge[0] + 1, edge[1], new_map) print "CHK:", small.get_chk(edge) print "BOUNDS: ", bounds assert chk_bounds[small.get_chk(edge)] == bounds print "DELETING: ", edge, small.get_chk(edge) chks.remove(small.get_chk(edge)) assert len(chks) == 0 except UpdateGraphException, err: print "IGNORED: ", err
def test_minimal_graph(repo_dir, version_list, file_name=None):
    """ Smoke test minimal_graph().

    Builds an update graph (or loads one from file_name), records the
    rollup bounds for every top key edge, then shrinks the graph at
    several size limits and asserts that the CHK -> bounds mapping is
    preserved for every surviving top key edge.
    """
    ui_ = ui.ui()
    if file_name is None:
        # Build a fresh graph and dump it for later reuse.
        graph, repo, cache = test_update_real(repo_dir,
                                              version_list,
                                              True)
        open('/tmp/latest_graph.txt', 'wb').write(graph_to_string(graph))
    else:
        # Load a previously serialized graph against the repo.
        repo = hg.repository(ui_, repo_dir)
        cache = BundleCache(repo, ui_, CACHE_DIR)
        cache.remove_files()
        graph = parse_graph(open(file_name, 'rb').read())
        print "--- from file: %s ---" % file_name
        print graph_to_string(graph)

    version_map = build_version_table(graph, repo)

    # Incomplete, but better than nothing.
    # Verify that the chk bounds are the same after shrinking.
    chk_bounds = {}
    initial_edges = graph.get_top_key_edges()
    for edge in initial_edges:
        chk_bounds[graph.get_chk(edge)] = (get_rollup_bounds(
            graph, repo, edge[0] + 1, edge[1], version_map))

    print "CHK BOUNDS:"
    for value in chk_bounds:
        print value
        print "   ", chk_bounds[value]
    print

    sizes = (512, 1024, 2048, 4096, 16 * 1024)
    for max_size in sizes:
        try:
            print "MAX:", max(version_map.values())
            small = minimal_graph(graph, repo, version_map, max_size)
            print "--- size == %i" % max_size
            print graph_to_string(small)

            small.rep_invariant(repo, True) # Full check
            chks = chk_bounds.keys()
            path = small.get_top_key_edges()
            print "TOP KEY EDGES:"
            print path
            for edge in path:
                # MUST rebuild the version map because the indices changed.
                new_map = build_version_table(small, repo)
                bounds = get_rollup_bounds(small, repo,
                                           edge[0] + 1,
                                           edge[1],
                                           new_map)
                print "CHK:", small.get_chk(edge)
                print "BOUNDS: ", bounds
                # Shrinking must not change the bounds for a surviving CHK.
                assert chk_bounds[small.get_chk(edge)] == bounds
                print "DELETING: ", edge, small.get_chk(edge)
                chks.remove(small.get_chk(edge))

            # Every recorded CHK must show up in the shrunk graph's
            # top key edges.
            assert len(chks) == 0
        except UpdateGraphException, err:
            # Presumably some sizes are too small to hold the graph;
            # those failures are expected and skipped.
            print "IGNORED: ", err
def get_top_key_updates(graph, repo, version_table=None): """ Returns the update tuples needed to build the top key.""" graph.rep_invariant() edges = graph.get_top_key_edges() coalesced_edges = [] ordinals = {} for edge in edges: assert edge[2] >= 0 and edge[2] < 2 assert edge[2] == 0 or (edge[0], edge[1], 0) in edges ordinal = ordinals.get(edge[:2]) if ordinal is None: ordinal = 0 coalesced_edges.append(edge[:2]) ordinals[edge[:2]] = max(ordinal, edge[2]) if version_table is None: version_table = build_version_table(graph, repo) ret = [] for edge in coalesced_edges: parents, latest = get_rollup_bounds(graph, repo, edge[0] + 1, edge[1], version_table) length = graph.get_length(edge) assert len(graph.edge_table[edge][1:]) > 0 #(length, parent_rev, latest_rev, (CHK, ...)) update = (length, parents, latest, graph.edge_table[edge][1:], True, True) ret.append(update) # Stuff additional remote heads into first update. result = get_rollup_bounds(graph, repo, 0, graph.latest_index, version_table) for head in ret[0][2]: if not head in result[1]: print "Expected head not in all_heads!", head[:12] assert False #top_update = list(ret[0]) #top_update[2] = tuple(all_heads) #ret[0] = tuple(top_update) ret[0] = list(ret[0]) ret[0][2] = tuple(result[1]) ret[0] = tuple(ret[0]) return ret
def set_new_edges(self, graph): """ INTERNAL: Set the list of new edges to insert. """ # REDFLAG: Think this through. self.parent.ctx.version_table = build_version_table(graph, self.parent.ctx. repo) # Hmmmm level == 1 handled elsewhere... level = self.parent.ctx.get('REINSERT', 0) if level == 0: # Insert update, don't re-insert self.new_edges = graph.update(self.parent.ctx.repo, self.parent.ctx.ui_, self.parent.ctx['TARGET_VERSIONS'], self.parent.ctx.bundle_cache) elif level == 2 or level == 3: # Topkey(s), graphs(s), updates # Hmmmm... later support different values of REINSERT? self.new_edges = graph.get_top_key_edges() if level == 2: # 3 == All top key updates. # Only the latest update. self.new_edges = self.new_edges[:1] else: pass # Add alternate CHKs for the same bundle. self.new_edges += find_alternate_edges(graph, self.new_edges) if level == 3: # Add CHKs for other bundles to make sure that each # change occurs in at least two CHKS (i.e. edges) if # possible. other_edges, failed = find_redundant_edges(graph, self.new_edges, True) self.new_edges += other_edges for index in failed: self.parent.ctx.ui_.status("Non-redundant index: %i\n" % index) for edge in self.new_edges[:]: # Deep copy! if graph.insert_type(edge) == INSERT_HUGE: # User can do this with level == 5 self.parent.ctx.ui_.status("Skipping unsalted re-insert of " + "big edge: %s\n" % str(edge)) self.new_edges.remove(edge) elif level == 4: # Add redundancy for big updates. self.new_edges = get_huge_top_key_edges(graph, False) self._check_new_edges("There are no big edges to add.") for edge in self.new_edges: assert edge[2] == 1 # MUST add the edges to the graph since they are new. graph.add_edge(edge[:2], (graph.get_length(edge), PENDING_INSERT1)) elif level == 5: # Reinsert big updates. self.new_edges = get_huge_top_key_edges(graph, True) self._check_new_edges("There are no big edges to re-insert.")
def enter(self, dummy): """ Implementation of State virtual. This checks the graph against the local repository and adds edges required to update it to the TARGET_VERSION specified in the context object. Then it starts inserting CHKS for the new edges into Freenet, doing padding / metadata salting as required. """ #require_state(from_state, QUIESCENT) assert (self.parent.ctx.get('REINSERT', 0) > 0 or (not self.parent.ctx['INSERT_URI'] is None)) assert not self.parent.ctx.graph is None graph = self.parent.ctx.graph.clone() if self.parent.params.get('DUMP_GRAPH', False): self.parent.ctx.ui_.status("--- Initial Graph ---\n") self.parent.ctx.ui_.status(graph_to_string(graph) +'\n') latest_revs = get_heads(graph) self.parent.ctx.ui_.status("Latest heads(s) in Freenet: %s\n" % ' '.join([ver[:12] for ver in latest_revs])) if self.parent.ctx.get('REINSERT', 0) == 1: self.parent.ctx.ui_.status("No bundles to reinsert.\n") # REDFLAG: Think this through. Crappy code, but expedient. # Hmmmm.... need version table to build minimal graph self.parent.ctx.version_table = build_version_table(graph, self.parent.ctx. repo) self.parent.transition(INSERTING_GRAPH) return if not self.parent.ctx.has_versions(latest_revs): self.parent.ctx.ui_.warn("The local repository isn't up " + "to date.\n" + "Try doing an fn-pull.\n") self.parent.transition(FAILING) # Hmmm... hard coded state name return # Update graph. try: self.set_new_edges(graph) except UpToDate, err: # REDFLAG: Later, add FORCE_INSERT parameter? # REDFLAG: rework UpToDate exception to include versions, stuff # versions in ctx? self.parent.ctx['UP_TO_DATE'] = True self.parent.ctx.ui_.warn(str(err) + '\n') # Hmmm self.parent.transition(FAILING) # Hmmm... hard coded state name return
def test_rollup(): """ Smoke test get_rollup_bounds(). """ repo, ui_ = setup_rollup_test_repo(TST_REPO_DIR) dump_changesets(repo) cache = BundleCache(repo, ui_, CACHE_DIR) cache.remove_files() graph = UpdateGraph() chks = fake_chks() # 0 Single changeset edges = graph.update(repo, ui_, ['716c293192c7', ], cache) set_chks(graph, edges, chks) # 1 Multiple changesets edges = graph.update(repo, ui_, ['076aec9f34c9', ], cache) set_chks(graph, edges, chks) # 2 Multiple heads, single base edges = graph.update(repo, ui_, ['62a72a238ffc', '4409936ef21f'], cache) set_chks(graph, edges, chks) # 3 Multiple bases, single head edges = graph.update(repo, ui_, ['a2c749d99d54', ], cache) set_chks(graph, edges, chks) # 4 edges = graph.update(repo, ui_, ['f6248cd464e3', ], cache) set_chks(graph, edges, chks) # 5 edges = graph.update(repo, ui_, ['fd1e6832820b', ], cache) set_chks(graph, edges, chks) # 6 edges = graph.update(repo, ui_, ['7429bf7b11f5', ], cache) set_chks(graph, edges, chks) # 7 edges = graph.update(repo, ui_, ['fcc2e90dbf0d', ], cache) set_chks(graph, edges, chks) # 8 edges = graph.update(repo, ui_, ['03c047d036ca', ], cache) set_chks(graph, edges, chks) # 9 edges = graph.update(repo, ui_, ['2f6c65f64ce5', ], cache) set_chks(graph, edges, chks) print print graph_to_string(graph) version_map = build_version_table(graph, repo) dump_version_map(version_map) assert version_map == EXPECTED_VERSION_MAP graph.rep_invariant(repo, True) # Verify contiguousness. print "From earliest..." for index in range(0, graph.latest_index + 1): parents, heads = get_rollup_bounds(graph, repo, 0, index, version_map) print "(%i->%i): %s" % (0, index, versions_str(heads)) print " ", versions_str(parents) print "To latest..." 
for index in range(0, graph.latest_index + 1): parents, heads = get_rollup_bounds(graph, repo, index, graph.latest_index, version_map) print "(%i->%i): %s" % (index, graph.latest_index, versions_str(heads)) print " ", versions_str(parents) # Empty try: get_rollup_bounds(graph, repo, FIRST_INDEX, FIRST_INDEX, version_map) except AssertionError: # Asserted as expected for to_index == FIRST_INDEX print "Got expected assertion." # Rollup of one changeset index. result = get_rollup_bounds(graph, repo, 0, 0, version_map) check_result(result, (('000000000000', ), ('716c293192c7',))) # Rollup of multiple changeset index. result = get_rollup_bounds(graph, repo, 1, 1, version_map) check_result(result, (('716c293192c7', ), ('076aec9f34c9',))) # Rollup of with multiple heads. result = get_rollup_bounds(graph, repo, 1, 2, version_map) check_result(result, (('716c293192c7', ), ('4409936ef21f','62a72a238ffc'))) # Rollup of with multiple bases. result = get_rollup_bounds(graph, repo, 3, 4, version_map) check_result(result, (('4409936ef21f', '62a72a238ffc', ), ('f6248cd464e3',))) # Rollup with head pulled in from earlier base. result = get_rollup_bounds(graph, repo, 3, 8, version_map) print result check_result(result, (('4409936ef21f', '62a72a238ffc', ), ('03c047d036ca', '7429bf7b11f5'))) # Rollup after remerge to a single head. result = get_rollup_bounds(graph, repo, 0, 9, version_map) print result check_result(result, (('000000000000', ), ('2f6c65f64ce5', )))
def test_rollup():
    """ Smoke test get_rollup_bounds().

    Builds a ten-index graph from a fixed test repository, checks the
    version table, then verifies rollup bounds for contiguous ranges
    and for several specific index pairs.
    """
    repo, ui_ = setup_rollup_test_repo(TST_REPO_DIR)
    dump_changesets(repo)
    cache = BundleCache(repo, ui_, CACHE_DIR)
    cache.remove_files()
    graph = UpdateGraph()
    chks = fake_chks()

    # 0 Single changeset
    edges = graph.update(repo, ui_, [ '716c293192c7', ], cache)
    set_chks(graph, edges, chks)

    # 1 Multiple changesets
    edges = graph.update(repo, ui_, [ '076aec9f34c9', ], cache)
    set_chks(graph, edges, chks)

    # 2 Multiple heads, single base
    edges = graph.update(repo, ui_, ['62a72a238ffc', '4409936ef21f'],
                         cache)
    set_chks(graph, edges, chks)

    # 3 Multiple bases, single head
    edges = graph.update(repo, ui_, [ 'a2c749d99d54', ], cache)
    set_chks(graph, edges, chks)

    # 4
    edges = graph.update(repo, ui_, [ 'f6248cd464e3', ], cache)
    set_chks(graph, edges, chks)

    # 5
    edges = graph.update(repo, ui_, [ 'fd1e6832820b', ], cache)
    set_chks(graph, edges, chks)

    # 6
    edges = graph.update(repo, ui_, [ '7429bf7b11f5', ], cache)
    set_chks(graph, edges, chks)

    # 7
    edges = graph.update(repo, ui_, [ 'fcc2e90dbf0d', ], cache)
    set_chks(graph, edges, chks)

    # 8
    edges = graph.update(repo, ui_, [ '03c047d036ca', ], cache)
    set_chks(graph, edges, chks)

    # 9
    edges = graph.update(repo, ui_, [ '2f6c65f64ce5', ], cache)
    set_chks(graph, edges, chks)

    print
    print graph_to_string(graph)
    version_map = build_version_table(graph, repo)
    dump_version_map(version_map)
    # The fixed test repo must produce exactly this table.
    assert version_map == EXPECTED_VERSION_MAP

    graph.rep_invariant(repo, True)

    # Verify contiguousness.
    print "From earliest..."
    for index in range(0, graph.latest_index + 1):
        parents, heads = get_rollup_bounds(graph, repo, 0, index,
                                           version_map)
        print "(%i->%i): %s" % (0, index, versions_str(heads))
        print "   ", versions_str(parents)

    print "To latest..."
    for index in range(0, graph.latest_index + 1):
        parents, heads = get_rollup_bounds(graph, repo,
                                           index,
                                           graph.latest_index,
                                           version_map)
        print "(%i->%i): %s" % (index, graph.latest_index,
                                versions_str(heads))
        print "   ", versions_str(parents)

    # Empty
    try:
        get_rollup_bounds(graph, repo, FIRST_INDEX, FIRST_INDEX,
                          version_map)
    except AssertionError:
        # Asserted as expected for to_index == FIRST_INDEX
        print "Got expected assertion."

    # Rollup of one changeset index.
    result = get_rollup_bounds(graph, repo, 0, 0, version_map)
    check_result(result, (('000000000000', ), ('716c293192c7', )))

    # Rollup of multiple changeset index.
    result = get_rollup_bounds(graph, repo, 1, 1, version_map)
    check_result(result, (('716c293192c7', ), ('076aec9f34c9', )))

    # Rollup of with multiple heads.
    result = get_rollup_bounds(graph, repo, 1, 2, version_map)
    check_result(result, (('716c293192c7', ),
                          ('4409936ef21f', '62a72a238ffc')))

    # Rollup of with multiple bases.
    result = get_rollup_bounds(graph, repo, 3, 4, version_map)
    check_result(result, (( '4409936ef21f', '62a72a238ffc', ),
                          ('f6248cd464e3', )))

    # Rollup with head pulled in from earlier base.
    result = get_rollup_bounds(graph, repo, 3, 8, version_map)
    print result
    check_result(result, (( '4409936ef21f', '62a72a238ffc', ),
                          ('03c047d036ca', '7429bf7b11f5')))

    # Rollup after remerge to a single head.
    result = get_rollup_bounds(graph, repo, 0, 9, version_map)
    print result
    check_result(result, (('000000000000', ), ('2f6c65f64ce5', )))