def greatest_lesser_change_for_branch(ctx, branch, change_num):
    """Find the change for the branch that is no higher than change_num.

    :param ctx: Git Fusion context
    :param branch: branch for which to find highest change
    :param change_num: the high water mark of changes to find
    :return: ObjectType or None
    """
    pattern = p4gf_const.P4GF_P4KEY_INDEX_OT.format(
        repo_name=ctx.config.repo_name,
        change_num='*',
        branch_id=branch.branch_id)
    LOG.debug("greatest_lesser_change_for_branch() pattern %s", pattern)
    index_keys = p4gf_p4key.get_all(ctx.p4gf, pattern)
    ceiling = int(change_num)
    best = None
    for key, sha1 in (index_keys or {}).items():
        match = p4gf_object_type_util.KEY_BRANCH_REGEX.search(key)
        if not match:
            LOG.debug("ignoring unexpected p4key: %s", key)
            continue
        found_branch = match.group('branch_id')
        found_change = int(match.group('change_num'))
        if found_change > ceiling:
            # Above the high water mark; not a candidate.
            continue
        if best and int(best.change_num) > found_change:
            # Already holding a higher qualifying change; keep it.
            continue
        LOG.debug("greatest_lesser_change_for_branch() candidate %s, %s, %s",
                  found_branch, found_change, sha1)
        best = ObjectType.create_commit(sha1=sha1,
                                        repo_name=ctx.config.repo_name,
                                        change_num=found_change,
                                        branch_id=found_branch)
    LOG.debug("greatest_lesser_change_for_branch() returning %s", best)
    return best
def last_change_num_for_branches(ctx, branch_ids, must_exist_local=False):
    """Return highest changelist number for all branches which exists in p4.

    Searches //P4GF_DEPOT/objects/... for commits and returns ObjectType for
    commit with highest change_num, or None if no matching commit.

    If must_exist_local is True, only commits which also exist in the repo
    are considered in the search.

    :param ctx: Git Fusion context
    :param branch_ids: list of branch ids to consider
    :param must_exist_local: when True, skip commits whose sha1 is absent
        from the local repo
    :return: ObjectType or None
    """
    # pylint: disable=too-many-branches
    # if only one branch_id given, don't fetch them all
    if len(branch_ids) == 1:
        branch_id = branch_ids[0]
        if branch_id not in ObjectType.last_commits_cache:
            # Using get_all() instead of get() to avoid annoying
            # default value "0" for unset keys.
            key = p4gf_const.P4GF_P4KEY_INDEX_LCN_ON_BRANCH\
                .format(repo_name=ctx.config.repo_name
                        , branch_id=branch_id)
            d = P4Key.get_all(ctx.p4gf, key)
            if d and d.get(key):
                ObjectType.last_commits_cache[branch_id] = d[key]
        if branch_id not in ObjectType.last_commits_cache:
            # Key unset in Perforce: no commits recorded for this branch.
            return None
        # Cache value format is "<change_num>,<sha1>".
        change_num, sha1 = ObjectType.last_commits_cache[branch_id].split(',')
        if must_exist_local and not p4gf_util.sha1_exists(sha1):
            return None
        return ObjectType.create_commit(sha1=sha1
                                        , repo_name=ctx.config.repo_name
                                        , change_num=int(change_num)
                                        , branch_id=branch_id)
    # if more than one branch, load up all branches into the cache
    ObjectType._load_last_commits_cache(ctx)
    # highest maps branch_id -> (change_num, sha1); k tracks the branch
    # holding the overall highest change seen so far.
    highest = {}
    k = None
    for branch_id, v in ObjectType.last_commits_cache.items():
        if branch_id not in branch_ids:
            continue
        change_num, sha1 = v.split(',')
        if branch_id in highest:
            # NOTE(review): the cache is a dict keyed by branch_id, so each
            # branch is visited once and this arm appears unreachable —
            # TODO confirm.
            if int(change_num) > highest[branch_id][0]:
                if must_exist_local and not p4gf_util.sha1_exists(sha1):
                    continue
                highest[branch_id] = (int(change_num), sha1)
        elif not branch_ids or branch_id in branch_ids:
            if must_exist_local and not p4gf_util.sha1_exists(sha1):
                continue
            highest[branch_id] = (int(change_num), sha1)
        else:
            # NOTE(review): unreachable — branch_id was already confirmed to
            # be in branch_ids by the guard at the top of the loop.
            continue
        if not k or int(change_num) > highest[k][0]:
            k = branch_id
    if not k:
        return None
    return ObjectType.create_commit(sha1=highest[k][1]
                                    , repo_name=ctx.config.repo_name
                                    , change_num=highest[k][0]
                                    , branch_id=k)
def _check_for_old_p4key(p4):
    """Raise an exception if 2013.1 upgrade incomplete.

    If a proper upgrade from 2012.2 to 2013.1+ is not done, an old p4key
    will be present. Raise an exception if it is.
    """
    legacy_pattern = p4gf_const.P4GF_P4KEY_OLD_UPDATE_AUTH_KEYS.format('*')
    leftovers = P4Key.get_all(p4, legacy_pattern)
    if leftovers:
        # Any matching key means the 2012.2 -> 2013.1+ upgrade never ran.
        raise RuntimeError(_('error: Git Fusion 2012.2 artifacts detected.'
                             ' Upgrade required for use with 2013.1+.'
                             ' Please contact your administrator.'))
    Verbosity.report(Verbosity.DEBUG, _('Old 2012.2 p4key not present.'))
def _last_copied_p4key(self):
    """Find the "last copied to this Git Fusion server" p4key for each
    Git Fusion server in the system.

    Return a list of p4keys whose value exceeds the cutoff
    (self.change_num).
    """
    key_wild = p4gf_const.P4GF_P4KEY_LAST_COPIED_CHANGE.format(
        repo_name=self.ctx.config.repo_name
        , server_id="*")
    max_change_num = int(self.change_num)
    # Fix: get_all() takes a P4 connection; every other call site passes
    # ctx.p4gf (or a bare p4), not the Git Fusion context itself.
    # NOTE(review): previously passed self.ctx — confirm get_all signature.
    key_val = p4gf_p4key.get_all(self.ctx.p4gf, key_wild).items()
    # Keep only the keys strictly above the cutoff change number.
    return [key for key, value in key_val
            if max_change_num < int(value)]
def change_num_to_commit(ctx, change_num, branch_id=None):
    """If a commit exists as specified, return an ObjectType for the commit, else None.

    If no branch_id specified, return first found matching commit.

    :param ctx: Git Fusion context
    :param change_num: changelist number to look up
    :param branch_id: optional branch to restrict the search to
    :return: ObjectType or None
    """
    if not change_num:
        return None
    # first, try cache
    from_cache = ObjectType.change_num_to_commit_cache.get(change_num, branch_id)
    if from_cache:
        return ObjectType.create_commit(sha1=from_cache[1]
                                        , repo_name=ctx.config.repo_name
                                        , change_num=change_num
                                        , branch_id=from_cache[0])
    # not in cache, use index to find commit(s)
    if not branch_id:
        branch_id = '*'
    key_pattern = p4gf_const.P4GF_P4KEY_INDEX_OT\
        .format(repo_name=ctx.config.repo_name
                , change_num=change_num
                , branch_id=branch_id)
    result_sha1 = None
    result_branch = None
    key_value = P4Key.get_all(ctx.p4gf, key_pattern)
    for key, value in key_value.items():
        m = util.KEY_BRANCH_REGEX.search(key)
        # Fix: previously an unexpected key name raised AttributeError on
        # m.group(); skip it like the other index scanners do.
        if not m:
            LOG.debug("ignoring unexpected p4key: {}".format(key))
            continue
        found_branch = m.group('branch_id')
        found_sha1 = value
        # Populate the cache with every hit, even non-matching branches.
        ObjectType.change_num_to_commit_cache.append(change_num, found_branch, found_sha1)
        if branch_id != '*':
            if found_branch != branch_id:
                continue
        result_sha1 = found_sha1
        result_branch = found_branch
    if not result_sha1:
        return None
    return ObjectType.create_commit(sha1=result_sha1
                                    , repo_name=ctx.config.repo_name
                                    , change_num=change_num
                                    , branch_id=result_branch)
def print_broken_p4key_lock(p4, pfunc=print):
    """Report on all of the repo locks that appear to be broken.

    :param p4: P4API to query Perforce.
    :param pfunc: either 'print' or some logger : 'LOG.debug'
    """
    # Snapshot every lock key, wait a moment, then re-read each one.
    lock_pattern = p4gf_const.P4GF_P4KEY_LOCK_VIEW.format(repo_name='*')
    snapshot = p4gf_p4key.get_all(p4, lock_pattern)
    time.sleep(1)
    name_regex = re.compile(r'git-fusion-view-(.*)-lock')
    for key_name, before in snapshot.items():
        after = p4gf_p4key.get(p4, key_name)
        # A healthy lock's count should be dropping; unchanged or
        # increasing values are a bad sign.
        if int(after) < int(before):
            continue
        repo = name_regex.match(key_name).group(1)
        pfunc("Possibly broken repo lock: {}".format(repo))
        pfunc("May need to delete key {} if repo is inaccessible".format(
            key_name))
def _load_last_commits_cache(ctx):
    """If this is the first time called, load the cache of last commits."""
    if ObjectType.last_commits_cache_complete:
        return
    key_value = P4Key.get_all(
        ctx.p4gf
        , p4gf_const.P4GF_P4KEY_INDEX_LCN_ON_BRANCH_REPO_ALL
        .format(repo_name=ctx.config.repo_name))
    for key, value in key_value.items():
        mk = util.KEY_LAST_REGEX.search(key)
        if not mk:
            LOG.debug("ignoring unexpected p4key: {}".format(key))
            continue
        mv = util.VALUE_LAST_REGEX.search(value)
        if not mv:
            LOG.debug("ignoring invalid p4key value: {}={}"
                      .format(key, value))
            # Fix: without this continue the invalid value was logged as
            # "ignoring" but cached anyway.
            continue
        ObjectType.last_commits_cache[mk.group('branch_id')] = value
        LOG.debug2('last change_num,commit for branch {} is {}'
                   .format(mk.group('branch_id'), value))
    ObjectType.last_commits_cache_complete = True
def show_all_server_ids():
    """List current Git Fusion server ids."""
    # NOTE: 'p4' is resolved from the enclosing module scope.
    server_ids = P4Key.get_all(p4, p4gf_const.P4GF_P4KEY_SERVER_ID + '*')
    this_server = p4gf_util.read_server_id_from_file()
    rows = []
    for key, value in server_ids.items():
        sid = key.replace(p4gf_const.P4GF_P4KEY_SERVER_ID, '')
        if this_server == sid:
            # Flag the id belonging to this instance.
            sid = sid + " *"
        rows.append((sid, value))
    if not rows:
        return
    Verbosity.report(
        Verbosity.INFO,
        _("Git Fusion server IDs: {server_id: <30} {hostname: <30}"
          " (* marks this instance)")
        .format(server_id="server-id", hostname="hostname"))
    for row in rows:
        Verbosity.report(
            Verbosity.INFO, _(" {server_id: <30} {hostname: <30}")
            .format(server_id=row[0], hostname=row[1]))
def print_p4key_lock_status(p4, server_id, pfunc=print):
    """Report on all of the repo locks and their status.

    :param p4: P4API to query Perforce.
    :param server_id: identifier for this Git Fusion instance.
    :param pfunc: either 'print' or some logger : 'LOG.debug'
    """
    # pylint: disable=too-many-branches, too-many-statements, maybe-no-member
    # Instance of 'bool' has no 'group' member
    pattern = p4gf_const.P4GF_P4KEY_LOCK_VIEW_OWNERS.format(repo_name='*')
    repo_locks = p4gf_p4key.get_all(p4, pattern)
    # NOTE(review): this flag accumulates across ALL locks; once any lock
    # has a dead owner, the remediation summary prints for later locks
    # too — TODO confirm intended.
    dead_processes_exist = False
    for name, raw_value in repo_locks.items():
        # Lock value is a JSON blob describing the owning processes.
        content = json.loads(raw_value)
        if "owners" not in content:
            pfunc(_("Malformed lock {lock_name}").format(lock_name=name))
        else:
            repo_name = LOCK_OWNERS_NAME_RE.match(name).group(1)
            pfunc(
                _("***************** {repo_name} Status *****************").
                format(repo_name=repo_name))
            # Is the plain view-lock key also set for this repo?
            lock_key_name = p4gf_const.P4GF_P4KEY_LOCK_VIEW.format(
                repo_name=repo_name)
            have_lock_key = p4gf_p4key.get(p4, lock_key_name) != '0'
            if have_lock_key:
                pfunc(
                    _("View lock {lock_key_name} is set").format(
                        lock_key_name=lock_key_name))
            # Fetch the most recent push status and push id for the repo.
            status_key_name = p4gf_p4key.calc_repo_status_p4key_name(
                repo_name, None)
            repo_status = p4gf_p4key.get(p4, status_key_name)
            pushid_key_name = p4gf_p4key.calc_repo_push_id_p4key_name(
                repo_name)
            repo_pushid = p4gf_p4key.get(p4, pushid_key_name)
            failed_push = False
            if repo_pushid != '0':
                pfunc(
                    _("Most recent push for repo '{repo_name}'... '{push_id}'"
                      ).format(repo_name=repo_name, push_id=repo_pushid))
            if repo_status != '0':
                pfunc(
                    _("Most recent push status for repo '{repo_name}'... '{status}'"
                      ).format(repo_name=repo_name, status=repo_status))
                # Prefer the push id embedded in the status message when
                # it matches the failed/successful patterns.
                failed_push = FAILED_PUSH_RE.match(repo_status)
                if failed_push:
                    repo_pushid = failed_push.group(1)
                    pfunc(FAILED_PUSH_MSG.format(push_id=repo_pushid))
                else:
                    successful_push = SUCCESSFUL_PUSH_RE.match(repo_status)
                    if successful_push:
                        repo_pushid = successful_push.group(1)
            pfunc(
                _("P4 key based locks for '{repo_name}'...").format(
                    repo_name=repo_name))
            lock_server_id = content["server_id"]
            pfunc(
                _(" Owning instance: {server_id}").format(
                    server_id=lock_server_id))
            pfunc(
                _(" Initial process: {pid}").format(pid=content["group_id"]))
            # Walk each owning process, checking liveness where possible.
            process_number = 0
            dead_process_count = 0
            for owner in content["owners"]:
                process_number += 1
                pid = owner["process_id"]
                start_time = owner["start_time"]
                # Liveness can only be probed for processes on THIS server.
                status = _pid_status(
                    pid) if lock_server_id == server_id else "UNKNOWN"
                pfunc(
                    _(" Owner #{process_number}: PID {pid}, started at "
                      "{start_time}, status {status}").format(
                          process_number=process_number,
                          pid=pid,
                          start_time=start_time,
                          status=status))
                if status == 'DEAD':
                    dead_processes_exist = True
                    dead_process_count += 1
                    pfunc(DEAD_PROC_MSG.format(pid=pid))
                    check_syslog_for_sigkill(pid, pfunc)
                else:
                    pfunc(ACTIVE_PROC_MSG.format(pid=pid))
            if dead_processes_exist:
                pfunc('\n')
                # Two keys to clear when the view lock is also set.
                numkeys = 2 if have_lock_key else 1
                if process_number == dead_process_count:
                    # Every owner is dead: the lock is certainly stale.
                    if failed_push:
                        pfunc(
                            ALL_DEAD_AND_FAILED_PROCESS.format(
                                repo_name=repo_name, push_id=repo_pushid))
                    else:
                        pfunc(
                            ALL_DEAD_WITH_LOCK.format(repo_name=repo_name,
                                                      push_id=repo_pushid))
                    pfunc(NEED_TO_RELEASE_MSG.format(numkeys=numkeys))
                    if have_lock_key:
                        pfunc(RELEASE_VIEW_LOCK_MSG.format(lock_key_name))
                    pfunc(RELEASE_OWNERS_LOCK_MSG.format(name))
                else:
                    # Some owners still alive: advise with more caution.
                    if failed_push:
                        pfunc(
                            SOME_DEAD_AND_FAILED_PUSH_MSG.format(
                                repo_name=repo_name, push_id=repo_pushid))
                    else:
                        pfunc(
                            SOME_DEAD_AND_SUCCESSFUL_MSG.format(
                                repo_name=repo_name, push_id=repo_pushid))
                    pfunc(NEED_TO_RELEASE_MSG.format(numkeys=numkeys))
                    if have_lock_key:
                        pfunc(RELEASE_VIEW_LOCK_MSG.format(lock_key_name))
                    pfunc(RELEASE_OWNERS_LOCK_MSG.format(name))
    if len(repo_locks):
        pfunc("")
def convert(args, p4):
    """Find all git-fusion-* clients and convert them; delete the object cache.

    Delete the entire object cache (//.git-fusion/objects/...).

    Keyword arguments:
    args -- parsed command line arguments
    p4 -- Git user's Perforce client
    """
    # pylint: disable=too-many-branches, too-many-statements
    print("Connected to {}".format(p4.port))
    p4.user = p4gf_const.P4GF_USER
    # Sanity check system
    p4keys = {}
    p4keys.update(P4Key.get_all(p4, 'git_fusion_auth_server_lock*'))
    p4keys.update(P4Key.get_all(p4, 'git_fusion_view_*_lock'))
    if p4keys:
        # Active lock keys mean some Git Fusion server is still running.
        print(
            "All Git Fusion servers connecting to this server must be disabled."
        )
        print("See release notes for instructions on how to proceed.")
        print("The following p4keys indicate on-going activity:")
        print(", ".join(sorted(p4keys.keys())))
        sys.exit(1)
    p4keys.update(P4Key.get_all(p4, 'p4gf_auth_keys_last_changenum-*'))
    if not p4keys:
        # This key only exists on a 2012.2 installation; nothing to convert.
        print("Does not look like a Git Fusion 2012.2 installation")
        print("Cannot find the p4key for p4gf_auth_keys_last_changenum")
        print("See release notes for instructions on how to proceed.")
        sys.exit(1)
    # Retrieve host-specific initialization p4keys.
    p4keys.update(P4Key.get_all(p4, 'git-fusion*-init-started'))
    p4keys.update(P4Key.get_all(p4, 'git-fusion*-init-complete'))
    # we require the server_id before we convert the clients
    localroot = get_p4gf_localroot(p4)
    server_id = create_server_id(localroot, args.id, p4)
    client_name = p4gf_util.get_12_2_object_client_name()
    convert_clients(args, p4, client_name)
    group_list = [p4gf_const.P4GF_GROUP_PULL, p4gf_const.P4GF_GROUP_PUSH]
    if not args.convert:
        # Dry run: print what WOULD be done, change nothing.
        if localroot:
            print("Removing client files for {}...".format(client_name))
            print(" p4 sync -fqk {}...#none".format(localroot))
            print("Deleting client {}...".format(client_name))
            print(" p4 client -f -d {}".format(client_name))
            print("Deleting client {}'s workspace...".format(client_name))
            print(" rm -rf {}".format(localroot))
        print("Obliterating object cache...")
        if not args.delete:
            print(" p4 obliterate -y //.git-fusion/objects/...")
        else:
            print(" Skipping obliterate")
        print("Removing initialization p4keys...")
        for p4key in sorted(p4keys.keys()):
            print(" p4 key -d {}".format(p4key))
        for group in group_list:
            print("Leaving existing group {}".format(group))
    else:
        # Real conversion: each action is echoed, logged, then executed.
        if localroot:
            print("Removing client files for {}...".format(client_name))
            print(" p4 sync -fqk {}...#none".format(localroot))
            LOG_FILE.write("p4 sync -fqk {}...#none\n".format(localroot))
            p4.run('sync', '-fqk', localroot + '...#none')
            print("Deleting client {}...".format(client_name))
            print(" p4 client -f -d {}".format(client_name))
            LOG_FILE.write("p4 client -f -d {}\n".format(client_name))
            p4.run('client', '-df', client_name)
            print("Deleting client {}'s workspace...".format(client_name))
            print(" rm -rf {}".format(localroot))
            LOG_FILE.write("rm -rf {}\n".format(localroot))
            shutil.rmtree(localroot)
            # after removing the GF localroot
            # recreate the GF localroot and re-write the server-id
            # the serverid - p4key has been already set above
            # in create_server_id
            ensure_dir(localroot)
            p4gf_util.write_server_id_to_file(server_id)
        if not args.delete:
            print("Obliterating object cache...")
            print(" p4 obliterate -y //.git-fusion/objects/...")
            LOG_FILE.write("p4 obliterate -y //.git-fusion/objects/...\n")
            p4.run('obliterate', '-y', '//.git-fusion/objects/...')
        else:
            # Leave the obliterate for the admin to run manually.
            print(" Run: p4 delete //.git-fusion/objects/...")
            print(" p4 submit")
            LOG_FILE.write(
                "Need to run: p4 delete //.git-fusion/objects/...\n")
            LOG_FILE.write(" p4 submit\n")
        print("Removing initialization p4keys...")
        for p4key in sorted(p4keys.keys()):
            print(" p4 key -d {}".format(p4key))
            LOG_FILE.write(" p4 key -d {}\n".format(p4key))
            _delete_p4key(p4, p4key)
        for group in group_list:
            print("Leaving existing group {}".format(group))
def delete_all(args, p4, metrics):
    """Remove all Git Fusion clients, as well as the object cache.

    Keyword arguments:
    args -- parsed command line arguments
    p4 -- Git user's Perforce client
    metrics -- deletion counters updated as artifacts are removed
               (e.g. metrics.clients)
    """
    # pylint:disable=too-many-branches
    p4.user = p4gf_const.P4GF_USER
    group_list = [p4gf_const.P4GF_GROUP_PULL, p4gf_const.P4GF_GROUP_PUSH]
    print(_('Connected to {P4PORT}').format(P4PORT=p4.port))
    print_verbose(args, _('Scanning for Git Fusion clients...'))
    client_name = p4gf_util.get_object_client_name()
    # Hold every repo lock for the duration; released in the epilogue.
    locks = _lock_all_repos(p4)
    if args.delete:
        # Remember the prior "prevent new sessions" value so it can be
        # restored (or removed) once deletion finishes.
        was_prevented = _prevent_access(p4)
    else:
        was_prevented = None
    delete_clients(args, p4, metrics)
    # Retrieve the names of the initialization/upgrade "lock" p4keys.
    p4keys = [
        p4gf_const.P4GF_P4KEY_ALL_PENDING_MB,
        p4gf_const.P4GF_P4KEY_ALL_REMAINING_MB
    ]
    # Key patterns NOT published in p4gf_const because they have trailing *
    # wildcards and it's not worth cluttering p4gf_const for this one use.
    p4key_patterns = [
        'git-fusion-init-started*', 'git-fusion-init-complete*',
        'git-fusion-upgrade-started*', 'git-fusion-upgrade-complete*',
        'git-fusion-index-*'
    ]
    for p4key_pattern in p4key_patterns:
        d = P4Key.get_all(p4, p4key_pattern)
        p4keys.extend(sorted(d.keys()))
    localroot = get_p4gf_localroot(p4)
    if not args.delete:
        # Dry run: print the commands that WOULD be run.
        if localroot:
            if args.no_obliterate:
                print(NTR('p4 sync -f #none'))
            else:
                print(NTR('p4 client -f -d {}').format(client_name))
                print(NTR('rm -rf {}').format(localroot))
        if not args.no_obliterate:
            print(
                NTR('p4 obliterate -hay //{}/objects/...').format(
                    p4gf_const.P4GF_DEPOT))
        for p4key in p4keys:
            print(NTR('p4 key -d {}').format(p4key))
        for group in group_list:
            print(NTR('p4 group -a -d {}').format(group))
    else:
        if localroot:
            if not args.no_obliterate:
                # Need this in order to use --gc later on
                # client should not exist; this is likely a NOOP
                p4gf_util.p4_client_df(p4, client_name)
                metrics.clients += 1
                print_verbose(
                    args,
                    _("Deleting client '{client_name}'s workspace...").format(
                        client_name=client_name))
                _remove_local_root(localroot)
        _delete_cache(args, p4, metrics)
        print_verbose(args, _('Removing initialization p4keys...'))
        for p4key in p4keys:
            delete_p4key(p4, p4key, metrics)
        for group in group_list:
            delete_group(args, p4, group, metrics)
    _release_locks(locks)
    if was_prevented is not None:
        if was_prevented != '0':
            # Restore the pre-existing "prevent new sessions" value.
            P4Key.set(p4, p4gf_const.P4GF_P4KEY_PREVENT_NEW_SESSIONS,
                      was_prevented)
        else:
            P4Key.delete(p4, p4gf_const.P4GF_P4KEY_PREVENT_NEW_SESSIONS)