def loadChangesetsForCommits(db, repository, commits, filtered_file_ids=None, load_chunks=True):
    """Load the existing 'direct' changeset for each commit in |commits|.

    Looks up rows in the 'changesets' table whose child is one of the given
    commits and whose type is 'direct', wraps each row in a diff.Changeset,
    and finishes loading them via loadChangesets().

    Parameters:
      db                 -- database connection (provides cursor()).
      repository         -- gitutils repository the commits belong to.
      commits            -- iterable of gitutils.Commit objects.
      filtered_file_ids  -- optional set of file ids to restrict loading to.
      load_chunks        -- whether to load per-file chunks as well.

    Returns whatever loadChangesets() returns for the collected changesets.
    """
    commit_ids = {commit.getId(db): commit for commit in commits}

    def getCommit(commit_id):
        # Reuse a commit object we were handed when possible; otherwise load
        # it from the database by id.
        return commit_ids.get(commit_id) or gitutils.Commit.fromId(
            db, repository, commit_id)

    cursor = db.cursor()
    # Materialize the keys with list(): a dict view (Python 3) is not
    # adaptable as an SQL array parameter, and an explicit list works on
    # Python 2 as well.
    cursor.execute(
        "SELECT id, parent, child FROM changesets WHERE child=ANY (%s) AND type='direct'",
        (list(commit_ids.keys()),))

    changesets = [
        diff.Changeset(changeset_id, getCommit(parent_id), getCommit(child_id),
                       "direct")
        for changeset_id, parent_id, child_id in cursor]

    return loadChangesets(db, repository, changesets,
                          filtered_file_ids=filtered_file_ids,
                          load_chunks=load_chunks)
def createChangeset(db, user, repository, commit=None, from_commit=None, to_commit=None, rescan=False, reanalyze=False, conflicts=False, filtered_file_ids=None, review=None, do_highlight=True, load_chunks=True):
    """Create (or load, if already recorded) changesets for a commit or a commit pair.

    Modes, selected by the arguments:
      conflicts=True        -- replay a merge commit and diff the replay against
                               the merge result ('conflicts' changeset).
      commit given          -- diff |commit| against each of its parents
                               ('direct' for one parent, 'merge' for several).
      from_commit/to_commit -- diff an arbitrary commit pair ('direct' if
                               from_commit is to_commit's sole parent,
                               otherwise 'custom').

    If matching rows already exist in the 'changesets' table they are loaded
    (optionally deleted and regenerated when rescan=True and the user has the
    "developer" role). Otherwise changeset generation is requested from the
    background service, except in the "thin diff" case where the differences
    are parsed in-process without being stored.

    Returns a list of changeset objects, or None when a merge replay could not
    be produced.  NOTE(review): |review| is accepted but not read anywhere in
    this function — presumably kept for interface compatibility with callers.
    """
    cursor = db.cursor()

    if conflicts:
        # Diff the merge commit against a "replay" of the merge: a commit with
        # the same parents but mechanically merged, so the diff shows only the
        # conflict resolutions.
        assert commit and len(commit.parents) > 1
        cursor.execute("SELECT replay FROM mergereplays WHERE original=%s",
                       (commit.getId(db),))
        row = cursor.fetchone()
        if row:
            # A replay was produced earlier; reuse it.
            replay = gitutils.Commit.fromId(db, repository, row[0])
        else:
            replay = repository.replaymerge(db, user, commit)
            if not replay:
                # Replaying the merge failed; nothing to diff against.
                return None
            # Cache the replay so subsequent calls can reuse it.
            cursor.execute(
                "INSERT INTO mergereplays (original, replay) VALUES (%s, %s)",
                (commit.getId(db), replay.getId(db)))
        from_commit = replay
        to_commit = commit
        parents = [replay]
        changeset_type = 'conflicts'
    elif commit:
        # Diff |commit| against each of its parents.  A root commit (no
        # parents) is represented by a single None parent.
        parents = [gitutils.Commit.fromSHA1(db, repository, sha1) for sha1 in commit.parents] or [None]
        changeset_type = 'merge' if len(parents) > 1 else 'direct'
    else:
        # Arbitrary commit pair.  If from_commit is to_commit's only parent
        # this is equivalent to a plain 'direct' changeset.
        parents = [from_commit]
        commit = to_commit
        changeset_type = 'direct' if len(to_commit.parents) == 1 and from_commit == to_commit.parents[0] else 'custom'

    changes = None
    changesets = []
    fileversions_values = []
    chunks_values = []
    thin_diff = False
    changeset_ids = []

    # Look for already-recorded changesets, one per parent.  Stop at the
    # first missing one: we either have all of them or regenerate all.
    for parent in parents:
        if parent:
            cursor.execute(
                "SELECT id FROM changesets WHERE parent=%s AND child=%s AND type=%s",
                (parent.getId(db), commit.getId(db), changeset_type))
        else:
            cursor.execute(
                "SELECT id FROM changesets WHERE parent IS NULL AND child=%s AND type=%s",
                (commit.getId(db), changeset_type))
        row = cursor.fetchone()
        if row:
            changeset_ids.append(row[0])
        else:
            break

    # All-or-nothing: partially recorded changesets are not expected.
    assert len(changeset_ids) in (0, len(parents))

    if changeset_ids:
        if rescan and user.hasRole(db, "developer"):
            # Developer-only: throw away the stored changesets so they are
            # regenerated from scratch below.
            cursor.executemany("DELETE FROM changesets WHERE id=%s",
                               [(changeset_id,) for changeset_id in changeset_ids])
            db.commit()
            changeset_ids = []
        else:
            for changeset_id in changeset_ids:
                if changeset_type == 'custom':
                    # Touch the timestamp used to expire unused custom
                    # changesets.
                    cursor.execute(
                        "UPDATE customchangesets SET time=NOW() WHERE changeset=%s",
                        (changeset_id,))
                changeset = load.loadChangeset(
                    db, repository, changeset_id,
                    filtered_file_ids=filtered_file_ids,
                    load_chunks=load_chunks)
                changeset.conflicts = conflicts
                if reanalyze and user.hasRole(db, "developer"):
                    # Developer-only: redo chunk analysis and collect the
                    # chunks whose analysis actually changed.
                    analysis_values = []
                    for file in changeset.files:
                        if not filtered_file_ids or file.id in filtered_file_ids:
                            for index, chunk in enumerate(file.chunks):
                                old_analysis = chunk.analysis
                                chunk.analyze(file, index == len(file.chunks) - 1, True)
                                if old_analysis != chunk.analysis:
                                    analysis_values.append((chunk.analysis, chunk.id))
                    # Only reanalyze == "commit" persists the new analysis.
                    if reanalyze == "commit" and analysis_values:
                        cursor.executemany(
                            "UPDATE chunks SET analysis=%s WHERE id=%s",
                            analysis_values)
                changesets.append(changeset)

    if not changesets:
        # Nothing stored (or it was just deleted).  Decide between asking the
        # background service to generate full changesets and producing a
        # "thin" in-process diff of just the filtered files.
        if len(parents) == 1 and from_commit and to_commit and filtered_file_ids:
            # A filtered custom diff across a merge commit cannot be served
            # from per-commit changesets; fall back to a thin diff.
            iter_commit = to_commit
            while iter_commit != from_commit:
                if len(iter_commit.parents) > 1:
                    thin_diff = True
                    break
                iter_commit = gitutils.Commit.fromSHA1(db, repository, iter_commit.parents[0])

        if not thin_diff:
            # Build the request understood by the changeset service.
            if changeset_type == "direct":
                request = {"changeset_type": "direct",
                           "child_sha1": commit.sha1}
            elif changeset_type == "custom":
                request = {"changeset_type": "custom",
                           "parent_sha1": from_commit.sha1,
                           "child_sha1": to_commit.sha1}
            elif changeset_type == "merge":
                request = {"changeset_type": "merge",
                           "child_sha1": commit.sha1}
            else:
                request = {"changeset_type": "conflicts",
                           "parent_sha1": replay.sha1,
                           "child_sha1": commit.sha1}
            request["repository_name"] = repository.name
            # Blocks until the service has recorded the changesets.
            client.requestChangesets([request])
            db.commit()
            # Re-run the earlier lookup; the rows must exist now.
            for parent in parents:
                if parent:
                    cursor.execute(
                        "SELECT id FROM changesets WHERE parent=%s AND child=%s AND type=%s",
                        (parent.getId(db), commit.getId(db), changeset_type))
                else:
                    cursor.execute(
                        "SELECT id FROM changesets WHERE parent IS NULL AND child=%s AND type=%s",
                        (commit.getId(db), changeset_type))
                changeset_id = cursor.fetchone()[0]
                changeset = load.loadChangeset(
                    db, repository, changeset_id,
                    filtered_file_ids=filtered_file_ids,
                    load_chunks=load_chunks)
                changeset.conflicts = conflicts
                changesets.append(changeset)
        else:
            # Thin diff: parse the differences for just the filtered paths,
            # analyze the chunks in-process, and build an unsaved (id=None)
            # changeset object.
            changes = diff.parse.parseDifferences(
                repository, from_commit=from_commit, to_commit=to_commit,
                filter_paths=[describe_file(db, file_id) for file_id in filtered_file_ids])[from_commit.sha1]
            dbutils.find_files(db, changes)
            for file in changes:
                for index, chunk in enumerate(file.chunks):
                    chunk.analyze(file, index == len(file.chunks) - 1)
            changeset = diff.Changeset(None, from_commit, to_commit, changeset_type)
            changeset.conflicts = conflicts
            changeset.files = diff.File.sorted(changes)
            changesets.append(changeset)

    if do_highlight:
        # Ask the highlight service to prepare syntax highlighting for every
        # file version involved, keyed by blob SHA-1.  The all-zeroes SHA-1
        # marks an added/removed side and has no blob to highlight.
        highlights = {}
        for changeset in changesets:
            for file in changeset.files:
                if file.canHighlight():
                    if file.old_sha1 and file.old_sha1 != '0' * 40:
                        highlights[file.old_sha1] = (file.path, file.getLanguage())
                    if file.new_sha1 and file.new_sha1 != '0' * 40:
                        highlights[file.new_sha1] = (file.path, file.getLanguage())
        syntaxhighlight.request.requestHighlights(repository, highlights)

    return changesets
def detectMoves(db, changeset, source_file_ids=None, target_file_ids=None):
    """Detect blocks of code that were moved between files in |changeset|.

    Scans each target file's chunks for insertions large enough to plausibly
    be moved code, and asks findSourceChunk() to locate a matching deletion
    elsewhere in the changeset.  findSourceChunk() may split a chunk and push
    leftovers onto the extra-chunks list, which is then scanned in a
    subsequent pass.

    Parameters:
      db              -- database connection, passed through to findSourceChunk().
      changeset       -- the changeset to scan.
      source_file_ids -- optional set of file ids to search for sources in.
      target_file_ids -- optional set of file ids to scan for targets.

    Returns a diff.Changeset of type 'moves' containing one synthesized
    diff.File per detected move, or None when no moves were found.
    """
    moves = []

    for target_file in changeset.files:
        if target_file_ids and target_file.id not in target_file_ids:
            continue

        current_chunks = target_file.chunks
        while current_chunks:
            extra_target_chunks = []
            for target_chunk in current_chunks:
                # White-space only changes; unlikely target of moved code.
                if target_chunk.is_whitespace:
                    continue
                # Too few inserted lines; couldn't possibly be an interesting
                # target of moved code.
                if target_chunk.insert_count < 5:
                    continue
                if target_chunk.analysis:
                    # If more than half the inserted lines are mapped against
                    # deleted lines, most likely edited rather than moved code.
                    if target_chunk.insert_count < len(target_chunk.analysis.split(";")) * 2:
                        continue
                target_file.loadNewLines()
                target_chunk.inserted_lines = target_file.getNewLines(target_chunk)
                source_file, chunk = findSourceChunk(
                    db, changeset, source_file_ids, target_file, target_chunk,
                    extra_target_chunks)
                if source_file and chunk:
                    moves.append((source_file, target_file, chunk))
                    continue
            # Chunks split off by findSourceChunk() get another pass.
            current_chunks = extra_target_chunks

    if not moves:
        return None

    # Order by target path, then by insertion offset.  A key-based stable
    # sort replaces the former cmp()-comparator (which only works on
    # Python 2) with identical ordering.
    moves.sort(key=lambda move: (move[1].path, move[2].insert_offset))

    move_changeset = diff.Changeset(
        None, changeset.parent, changeset.child, 'moves', [])
    for source_file, target_file, chunk in moves:
        move_changeset.files.append(
            diff.File(0, "", source_file.old_sha1, target_file.new_sha1,
                      source_file.repository, chunks=[chunk],
                      move_source_file=source_file,
                      move_target_file=target_file))
    return move_changeset