def _fetch_counters(self, step_nums, skip_s3_wait=False):
    """Pull Hadoop counters for our steps out of the local job logs.

    :param step_nums: steps that belong to this job; counters from
                      other jobs run with the same timestamp are ignored.
    :param skip_s3_wait: accepted but not used by this implementation.
    """
    log_uris = self._ls_logs("job", step_nums)
    found = scan_for_counters_in_files(
        log_uris, self, self.get_hadoop_version())

    # Record one (possibly empty) counter dict per step of this job.
    for n in step_nums:
        self._counters.append(found.get(n, {}))
def _fetch_counters(self, step_nums, skip_s3_wait=False):
    """Read Hadoop counters out of the job history logs.

    :param step_nums: steps belonging to this job, so that errors from
                      other jobs run with the same timestamp are ignored.
    :param skip_s3_wait: accepted but not used by this implementation.
    """
    matching = self._enforce_path_regexp(
        self._ls_logs("history/"), HADOOP_JOB_LOG_URI_RE, step_nums)
    counters_found = scan_for_counters_in_files(
        list(matching), self, self.get_hadoop_version())

    # keep only the counters for steps that are part of this job
    for n in step_nums:
        self._counters.append(counters_found.get(n, {}))
def _fetch_counters(self, step_nums, skip_s3_wait=False):
    """Scan local logs for Hadoop counters belonging to our steps.

    :param step_nums: the steps that belong to us; lets us skip noise
                      from other jobs launched with the same timestamp.
    :param skip_s3_wait: accepted but not used by this implementation.
    """
    counter_uris = self._ls_logs('job', step_nums)
    scanned = scan_for_counters_in_files(
        counter_uris, self, self.get_hadoop_version())

    # append one (possibly empty) counter dict per step of this job
    for step in step_nums:
        self._counters.append(scanned.get(step, {}))
def _fetch_counters(self, step_nums, skip_s3_wait=False):
    """Read Hadoop counters from the job history logs.

    :param step_nums: steps belonging to this job, used to filter out
                      entries from other jobs run with the same
                      timestamp.
    :param skip_s3_wait: accepted but not used by this implementation.

    NOTE(review): unlike the sibling variants of this method, this one
    does not pass self.get_hadoop_version() to
    scan_for_counters_in_files() -- confirm against that helper's
    signature.
    """
    history = self._enforce_path_regexp(
        self._ls_logs('history/'), HADOOP_JOB_LOG_URI_RE, step_nums)
    per_step = scan_for_counters_in_files(list(history), self)

    # record a counter dict (possibly empty) for each of our steps
    for step in step_nums:
        self._counters.append(per_step.get(step, {}))
def _fetch_counters(self, step_num, skip_s3_wait=False):
    """Fetch Hadoop counters for a single step from the job history.

    :param step_num: index of the step to fetch counters for.
    :param skip_s3_wait: accepted but not used by this implementation.
    """
    step_id = self._step_ids[step_num]
    history = self._enforce_path_regexp(
        self._ls_logs('history/', step_num),
        HADOOP_JOB_LOG_URI_RE,
        [step_id])
    results = scan_for_counters_in_files(
        list(history), self, self.get_hadoop_version())

    # The step id's last underscore-separated field is Hadoop's own
    # step number; use it to pick our counters out of the scan results.
    hadoop_step_num = int(step_id.split("_")[-1])
    self._counters.append(results.get(hadoop_step_num, {}))