def update_submodules(cls):
  """Does a git pull, updates the submodules to the latest version, and
  finally ensures the submodules are on master.

  @warning if you run this from a module that does an os.chdir, that
  os.chdir will NOT persist here.
  """
  if ExecUtils.RunCmd('git pull')[0]:
    raise Error(TermColor.ColorStr(
        'unable to git pull as part of submodule update', 'RED'))
  if ExecUtils.RunCmd('git submodule init && git submodule update')[0]:
    raise Error(TermColor.ColorStr(
        'git submodule update failed!', 'RED'))

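# Hedged usage sketch (not in the original module): per the @warning above,
# RunCmd('git pull') acts on this process's current directory, so the caller
# must chdir to the repo root itself. `GitUtils` is an assumed name for the
# class holding update_submodules.
import os

def update_repo_sketch(repo_root):
  os.chdir(repo_root)           # the chdir must happen in this process
  GitUtils.update_submodules()  # pulls, then syncs submodules
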
def commit_push(cls, files, msg):
  """Commits to the current branch AND pushes to remote.

  Args:
    files (list) - list of files to commit
    msg (string) - the commit message
  """
  ret = ExecUtils.RunCmd('git commit %s -m "%s"' % (' '.join(files), msg))[0]
  if ret != 0:
    raise Error(TermColor.ColorStr(
        'error committing these files: %s' % ' '.join(files), 'RED'))
  ret = ExecUtils.RunCmd('git pull && git push')[0]
  if ret != 0:
    raise Error(TermColor.ColorStr(
        'Please manually resolve any conflicts preventing git push of '
        'the commit to remote', 'RED'))

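# Hedged usage sketch (not in the original module): a typical commit-and-push
# call, assuming these methods live on a class here called GitUtils. The file
# path and message are illustrative.
def commit_notes_sketch():
  GitUtils.commit_push(['RELEASE_NOTES.md'], 'update release notes')
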
def get_current_branch(cls):
  """Returns the name of the current branch."""
  cmd = 'git rev-parse --abbrev-ref HEAD'
  r = ExecUtils.RunCmd(cmd)
  if r[0]:
    raise Error(TermColor.ColorStr('error executing cmd %s' % cmd, 'RED'))
  return r[1].strip()

def MakeRules(cls, rules, makefile):
  """Makes all the rules in the given list.

  Args:
    rules: list: List of rules by type_base to make.
    makefile: string: The *main* makefile name.

  Return:
    (list, list): Returns a tuple of lists in the form
        (successful_rules, failed_rules) specifying rules for which the make
        rules were successfully generated and for which it failed.
  """
  if not rules:
    TermColor.Warning('No rules to build.')
    return ([], rules)

  args = zip(itertools.repeat(cls), itertools.repeat('_WorkHorse'),
             rules, itertools.repeat(makefile))
  rule_res = ExecUtils.ExecuteParallel(args, Flags.ARGS.pool_size)
  successful_rules = []
  failed_rules = []
  for (res, rule) in rule_res:
    if res == 1:
      successful_rules += [rule]
    elif res == -1:
      failed_rules += [rule]

  return (successful_rules, failed_rules)

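# Hedged sketch (not from the original repo): the (cls, '_WorkHorse', ...)
# tuples above suggest ExecUtils.ExecuteParallel invokes getattr(obj, method)
# on the remaining tuple elements in a worker pool and returns the results.
# A minimal stand-in under that assumption; the real signature may differ.
from multiprocessing.pool import ThreadPool

def _dispatch_sketch(packed):
  """Unpacks (obj, method_name, *args) and invokes the named method."""
  obj, method, rest = packed[0], packed[1], packed[2:]
  return getattr(obj, method)(*rest)

def execute_parallel_sketch(args, pool_size):
  """Approximation of ExecUtils.ExecuteParallel under the assumption above."""
  with ThreadPool(pool_size or 1) as pool:
    return pool.map(_dispatch_sketch, list(args))
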
def WorkHorse(cls, rules):
  """Runs the workhorse for the command.

  Args:
    rules: list: List of rules to be handled.

  Return:
    (list, list): Returns a tuple of lists in the form
        (successful_rules, failed_rules) specifying rules that succeeded and
        ones that failed.
  """
  (successful_build, failed_build) = Builder.WorkHorse(rules)

  # All our binaries assume they will be run from the source root.
  os.chdir(FileUtils.GetSrcRoot())

  pipe_output = len(successful_build) > 1
  args = zip(itertools.repeat(cls), itertools.repeat('_RunSingeRule'),
             successful_build, itertools.repeat(pipe_output))
  rule_res = ExecUtils.ExecuteParallel(args, Flags.ARGS.pool_size)
  successful_run = []
  failed_run = []
  for (res, rule) in rule_res:
    if res == 1:
      successful_run += [rule]
    elif res == -1:
      failed_run += [rule]

  return (successful_run, failed_build + failed_run)

def WorkHorse(cls, rules):
  """Runs the workhorse for the command.

  Args:
    rules: list: List of rules to be handled.

  Return:
    (list, list): Returns a tuple of lists in the form
        (successful_rules, failed_rules) specifying rules that succeeded and
        ones that failed.
  """
  (successful_expand, failed_expand) = Rules.GetExpandedRules(
      rules, Flags.ARGS.allowed_rule_types)

  args = zip(itertools.repeat(cls), itertools.repeat('_RunSingeRule'),
             successful_expand)
  rule_res = ExecUtils.ExecuteParallel(args, Flags.ARGS.pool_size)
  successful_deps = []
  failed_deps = []
  for (res, rule) in rule_res:
    if res == 1:
      successful_deps += [rule]
    elif res == -1:
      failed_deps += [rule]

  return (successful_deps, failed_expand + failed_deps)

def WorkHorse(cls, tasks):
  """Runs the workhorse for the command.

  Args:
    tasks: OrderedDict {int, set(string)}: Dict from priority to set of tasks
        to execute at that priority. Note: the dict is ordered by priority.

  Return:
    (list, list): Returns a tuple of lists in the form
        (successful_tasks, failed_tasks) specifying tasks that succeeded and
        ones that failed.
  """
  all_tasks = []
  dirs_to_import = {}
  dir_to_task_map = {}
  for set_tasks in tasks.values():
    for task in set_tasks:
      all_tasks += [task]
      out_dir = PipelineUtils.GetOutDirForTask(task)
      publish_dir = PipelineUtils.GetPublishCurrentDirForTask(task)
      if not out_dir or not publish_dir:
        continue
      dirs_to_import[publish_dir] = out_dir
      # Map each publish dir back to the tasks that produced it.
      dir_to_task_map[publish_dir] = (dir_to_task_map.get(publish_dir, []) +
                                      [task])

  # Check if there are any directories to publish.
  if not dirs_to_import:
    TermColor.Error('Did not find any dirs to import. Do not forget to '
                    'specify the publish root using --publish_root')
    return ([], all_tasks)

  # Create all the target dirs to import to.
  for dir in dirs_to_import.values():
    FileUtils.MakeDirs(dir)

  # Run all the copy tasks.
  successful_dirs = []
  failed_dirs = []
  args = zip(itertools.repeat(cls), itertools.repeat('_RunSingeTask'),
             list(dirs_to_import), list(dirs_to_import.values()))
  dir_res = ExecUtils.ExecuteParallel(args, Flags.ARGS.pool_size)
  if not dir_res:
    TermColor.Error('Could not process: %s' % all_tasks)
    return ([], all_tasks)

  for (res, dir) in dir_res:
    if res == Importer.EXITCODE['SUCCESS']:
      successful_dirs += [dir]
    elif res == Importer.EXITCODE['FAILURE']:
      failed_dirs += [dir]
    else:
      TermColor.Fatal('Invalid return code %d for %s' % (res, dir))

  # Get the reverse mapping from dirs to tasks.
  successful_tasks = []
  failed_tasks = []
  for i in successful_dirs:
    successful_tasks += dir_to_task_map.get(i, [])
  for i in failed_dirs:
    failed_tasks += dir_to_task_map.get(i, [])

  return (successful_tasks, failed_tasks)

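# Hedged sketch (not from the repo): the publish-dir reverse mapping above in
# miniature. Two tasks publishing to the same dir accumulate under one key,
# so a single per-dir result fans back out to every contributing task.
def reverse_map_sketch():
  dir_to_task_map = {}
  for task, publish_dir in [('taskA', '/pub/x'), ('taskB', '/pub/x')]:
    dir_to_task_map[publish_dir] = dir_to_task_map.get(publish_dir, []) + [task]
  assert dir_to_task_map == {'/pub/x': ['taskA', 'taskB']}
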
def checkout_branch(cls, branch):
  """Checks out the specified branch with the latest code.

  Args:
    branch (string) - the branch name
  """
  # Fetch the latest code.
  ret = ExecUtils.RunCmd('git fetch origin')[0]
  if ret != 0:
    raise Error(TermColor.ColorStr('error during git fetch origin!', 'RED'))
  # -B resets the local branch to track origin even if it already exists.
  ret = ExecUtils.RunCmd('git checkout -B %s --track origin/%s' %
                         (branch, branch))[0]
  if ret != 0:
    raise Error(TermColor.ColorStr(
        'error checking out branch %s' % branch, 'RED'))

def apply_hotfix(cls, branch, commit_hash=""):
  """Applies a hotfix to a specific branch.

  Args:
    branch (string) - the branch to apply the hotfix to
    commit_hash (string) - the commit hash to cherry-pick

  Raises:
    EmptyHotfixError - raised when the hotfix is empty
    Error - a critical error, such as a conflict, stopped the hotfix from
        being applied
  """
  print("moving to branch %s" % TermColor.ColorStr(branch, 'GREEN'))
  # Get onto the appropriate branch.
  cls.checkout_branch(branch)
  # Try to cherry-pick.
  print(TermColor.ColorStr("Applying hotfix to branch: %s" % branch, 'GREEN'))
  ret = ExecUtils.RunCmd('git cherry-pick %s' % commit_hash)[0]
  if ret != 0:
    r = ExecUtils.RunCmd('git diff --name-only')
    if r[0]:
      raise Error(TermColor.ColorStr('error doing a git diff', 'RED'))
    files = r[1]
    if not files:
      # Not an error: an empty diff means the commit was likely already applied.
      raise EmptyHotfixError('hotfix is empty. likely already applied')
    raise Error(TermColor.ColorStr(
        ('Hotfix apply failed at step cherry-pick on branch %s.\n'
         'You NEED to fix this NOW! Go to %s and fix the issue! '
         'Impacted files: %s') % (
            cls.get_current_branch(), os.getcwd(), files), 'RED'))

  # Push the cherry-pick to the remote.
  ret = ExecUtils.RunCmd('git push origin %s' % branch)[0]
  if ret != 0:
    raise Error(TermColor.ColorStr(
        'Please manually resolve your merge conflicts, '
        'then commit, and finally run hotfix selecting the '
        'branches that have not yet received the commit', 'RED'))
  print(TermColor.ColorStr('Applied hotfix to %s' % branch, 'GREEN'))
  print(TermColor.ColorStr('On branch %s' % branch, 'GREEN'))

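# Hedged usage sketch (not in the original module): how a release script might
# drive apply_hotfix across several branches, treating an empty hotfix as a
# skip rather than a failure. `GitUtils` and the branch names are assumptions.
def push_hotfix_everywhere_sketch(commit_hash,
                                  branches=('release-a', 'release-b')):
  for branch in branches:
    try:
      GitUtils.apply_hotfix(branch, commit_hash)
    except EmptyHotfixError:
      print('hotfix already present on %s; skipping' % branch)
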
def Clean(cls):
  """Runs the cleaner.

  Return:
    int: Exit status. 0 means no error.
  """
  gen_makefile = GenMakefile(Flags.ARGS.debug)
  gen_makefile.GenMainMakeFile()

  clean = 'clean'
  if Flags.ARGS.obj:
    clean = 'cleano'
  elif Flags.ARGS.all:
    clean = 'cleanall'

  (status, out) = ExecUtils.RunCmd(
      'make -f %s %s' % (gen_makefile.GetMakeFileName(), clean))
  return status

def _MakeSingeRule(cls, rule, makefile, deps_file):
  """Builds a Single Rule.

  Args:
    rule: string: The rule to build.
    makefile: string: The *main* makefile name.
    deps_file: string: The dependency makefile for the rule.

  Return:
    (int): Returns the result status. The status is '1' for success, '0' for
        'ignore', '-1' for fail.
  """
  # Get the dependency list for the rule. Run this with the original main file.
  (status, out) = ExecUtils.RunCmd(
      'make -f %s %s' % (makefile, cls.GetDepsRuleName(rule)))
  if status:
    TermColor.Error('Could not make dependency for rule %s' %
                    Utils.RuleDisplayName(rule))
    return -1

  return super(CCRules, cls)._MakeSingeRule(rule, makefile, deps_file)

def _RunSingeTask(cls, task):
  """Runs a Single Task.

  Args:
    task: string: The task to run.

  Return:
    (EXITCODE, string): Returns a tuple of the result status and the task.
  """
  TermColor.Info('Executing %s' % PipelineUtils.TaskDisplayName(task))
  task_vars = cls.__GetEnvVarsForTask(task)
  TermColor.VInfo(4, 'VARS: \n%s' % task_vars)

  task_cmd = task
  pipe_output = True
  log_file = PipelineUtils.GetLogFileForTask(task)
  if log_file:
    # Redirect both stdout and stderr to the log file instead of piping.
    task_cmd += ' > ' + log_file + ' 2>&1'
    pipe_output = False

  timeout = cls.__GetTimeOutForTask(task)
  start = time.time()
  (status, out) = ExecUtils.RunCmd(task_cmd, timeout, pipe_output, task_vars)
  time_taken = time.time() - start
  TermColor.Info('Executed %s. Took %.2fs' %
                 (PipelineUtils.TaskDisplayName(task), time_taken))
  if status:
    TermColor.Failure('Failed Task: %s' % PipelineUtils.TaskDisplayName(task))
    if task_vars.get('PIPELINE_TASK_ABORT_FAIL', None):
      status_code = Runner.EXITCODE['ABORT_FAIL']
    elif task_vars.get('PIPELINE_TASK_ALLOW_FAIL', None):
      status_code = Runner.EXITCODE['ALLOW_FAIL']
    else:
      status_code = Runner.EXITCODE['FAILURE']
  else:
    status_code = Runner.EXITCODE['SUCCESS']

  cls._SendMailForTask(task, status_code, time_taken, log_file, out)

  # Everything done. Return the task with its status code.
  return (status_code, task)

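# Hedged sketch (not from the repo): the failure classification above, pulled
# out as a pure helper to make the precedence explicit:
# ABORT_FAIL > ALLOW_FAIL > plain FAILURE.
def classify_failure_sketch(task_vars, exitcodes):
  if task_vars.get('PIPELINE_TASK_ABORT_FAIL'):
    return exitcodes['ABORT_FAIL']
  if task_vars.get('PIPELINE_TASK_ALLOW_FAIL'):
    return exitcodes['ALLOW_FAIL']
  return exitcodes['FAILURE']
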
def _MakeSingeRule(cls, rule, makefile, deps_file):
  """Builds a Single Rule.

  Args:
    rule: string: The rule to build.
    makefile: string: The *main* makefile name.
    deps_file: string: The dependency makefile used to build the rule.

  Return:
    (int): Returns the result status. The status is '1' for success, '0' for
        'ignore', '-1' for fail.
  """
  # Build the rule.
  if Flags.ARGS.pool_size:
    parallel_processes = Flags.ARGS.pool_size
  else:
    parallel_processes = max(multiprocessing.cpu_count(), 1)

  (status, out) = ExecUtils.RunCmd(
      'make -r -j%d -f %s %s' % (parallel_processes, deps_file, rule))
  if status:
    TermColor.Failure('Failed Rule: %s' % Utils.RuleDisplayName(rule))
    return -1

  TermColor.VInfo(1, '%s Output: \n%s' % (Utils.RuleDisplayName(rule), out))
  return 1

def _RunSingeRule(cls, rule, pipe_output):
  """Runs a Single Rule.

  Args:
    rule: string: The rule to run.
    pipe_output: bool: Whether to pipe the output or dump it to STDOUT.

  Return:
    (int, string): Returns a tuple of the result status and the rule. The
        status is '1' for success, '0' for 'ignore', '-1' for fail.
  """
  TermColor.Info('Running %s' % Utils.RuleDisplayName(rule))
  start = time.time()
  bin_file = FileUtils.GetBinPathForFile(rule)
  (status, out) = ExecUtils.RunCmd('%s %s' % (bin_file, Flags.ARGS.args),
                                   Flags.ARGS.timeout, pipe_output)
  if status:
    TermColor.Failure('Failed Rule: %s' % Utils.RuleDisplayName(rule))
    return (-1, rule)

  TermColor.Info('Ran %s. Took %.2fs' %
                 (Utils.RuleDisplayName(rule), time.time() - start))
  # Everything done. Mark the rule as successful.
  return (1, rule)

def WorkHorse(cls, tasks):
  """Runs the workhorse for the command.

  Args:
    tasks: OrderedDict {int, set(string)}: Dict from priority to set of tasks
        to execute at that priority. Note: the dict is ordered by priority.

  Return:
    (list, list): Returns a tuple of lists in the form
        (successful_tasks, failed_tasks) specifying tasks that succeeded and
        ones that failed.
  """
  # All our binaries assume they will be run from the source root.
  start = time.time()
  os.chdir(FileUtils.GetSrcRoot())

  cls._CreateDirsForTasks(tasks)

  successful_run = []
  failed_run = []
  aborted_task = None

  # NOTE(stephen): Storing task dir status and task out dir status separately
  # since pipelines do not always have an out dir defined.
  dirs_status = {}
  out_dirs_status = {}
  for set_tasks in tasks.values():
    if aborted_task:
      failed_run += set_tasks
      continue

    tasks_to_run = []
    for task in set_tasks:
      task_options = cls.__GetTaskOptions(task)
      # Check if this task requires all previous tasks in the same directory
      # to be successful.
      if task_options[Runner.TASK_OPTIONS['REQUIRE_DIR_SUCCESS']]:
        task_dir = PipelineUtils.TaskDirName(task)
        cur_dir_status = dirs_status.get(task_dir)
        # If any previous tasks have been run in this directory, check to
        # ensure all of them were successful.
        if cur_dir_status and cur_dir_status != Runner.EXITCODE['SUCCESS']:
          failed_run += [task]
          task_display_name = PipelineUtils.TaskDisplayName(task)
          TermColor.Info('Skipped %s' % task_display_name)
          TermColor.Failure(
              'Skipped Task: %s due to earlier failures in task dir'
              % task_display_name)
          continue

      tasks_to_run.append(task)

    # It is possible for all steps at this priority level to be skipped due
    # to the task options selected.
    if set_tasks and not tasks_to_run:
      continue

    # Run all the tasks at the same priority in parallel.
    args = zip(itertools.repeat(cls), itertools.repeat('_RunSingeTask'),
               tasks_to_run)
    task_res = ExecUtils.ExecuteParallel(args, Flags.ARGS.pool_size)
    # Serial fallback, useful when debugging:
    # task_res = [cls._RunSingeTask(task) for task in tasks_to_run]
    if not task_res:
      TermColor.Error('Could not process: %s' % tasks_to_run)
      failed_run += tasks_to_run
      continue

    for (res, task) in task_res:
      if res == Runner.EXITCODE['SUCCESS']:
        successful_run += [task]
      elif res == Runner.EXITCODE['FAILURE']:
        failed_run += [task]
      elif res == Runner.EXITCODE['ALLOW_FAIL']:
        failed_run += [task]
      elif res == Runner.EXITCODE['ABORT_FAIL']:
        failed_run += [task]
        aborted_task = task
      else:
        TermColor.Fatal('Invalid return code %d for %s' % (res, task))

      # Update the current status of all tasks in the same directory.
      task_dir = PipelineUtils.TaskDirName(task)
      dirs_status[task_dir] = max(
          dirs_status.get(task_dir, Runner.EXITCODE['_LOWEST']), res)

      # Update the out dir status.
      out_dir = PipelineUtils.GetOutDirForTask(task)
      if out_dir:
        out_dirs_status[out_dir] = max(
            out_dirs_status.get(out_dir, Runner.EXITCODE['_LOWEST']), res)

  # Write the status files to the dirs.
  cls._WriteOutDirsStatus(out_dirs_status)

  # Send the final status mail.
  time_taken = time.time() - start
  cls._SendFinalStatusMail(successful_run, failed_run, aborted_task,
                           time_taken)

  if aborted_task:
    TermColor.Failure('Aborted by task: %s' % aborted_task)

  return (successful_run, failed_run)

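# Hedged sketch (not from the repo): the max() aggregation above implies
# Runner.EXITCODE values are ordered so that larger means "worse", with
# '_LOWEST' as the floor. The numeric values below are illustrative only.
EXITCODE_SKETCH = {'_LOWEST': 0, 'SUCCESS': 1, 'ALLOW_FAIL': 2,
                   'FAILURE': 3, 'ABORT_FAIL': 4}

def fold_dir_status_sketch(results, exitcodes=EXITCODE_SKETCH):
  """Folds per-task results into one per-directory status (worst wins)."""
  status = exitcodes['_LOWEST']
  for res in results:
    status = max(status, res)
  return status
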