Example #1
    def test_get_arch_success(self, m_lock):
        m_lock.list_locks.return_value = [{"arch": "arch"}]
        result = util.get_arch('magna')
        m_lock.list_locks.assert_called_with(
            machine_type="magna",
            count=1
        )
        assert result == "arch"

    def test_get_arch_fail(self, m_lock):
        m_lock.list_locks.return_value = False
        util.get_arch('magna')
        m_lock.list_locks.assert_called_with(machine_type="magna", count=1)
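The `m_lock` argument above is injected by a `mock.patch` decorator that the snippet omits. A minimal sketch of the surrounding test class, assuming a patch target of `teuthology.suite.util.lock` (both the class name and the dotted path are placeholders; point the decorator at whatever module `util.get_arch` actually calls `list_locks` on in your teuthology version):

from unittest.mock import patch

from teuthology.suite import util


class TestGetArch(object):
    # assumed patch target; adjust to match your teuthology version
    @patch('teuthology.suite.util.lock')
    def test_get_arch_success(self, m_lock):
        m_lock.list_locks.return_value = [{"arch": "arch"}]
        assert util.get_arch('magna') == "arch"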
Example #3
    def test_get_arch_success(self, m_query):
        m_query.list_locks.return_value = [{"arch": "arch"}]
        result = util.get_arch('magna')
        m_query.list_locks.assert_called_with(machine_type="magna", count=1)
        assert result == "arch"
Example #4
    def test_get_arch_fail(self, m_query):
        m_query.list_locks.return_value = False
        util.get_arch('magna')
        m_query.list_locks.assert_called_with(machine_type="magna", count=1)
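Taken together, Examples #3 and #4 pin down the contract of `get_arch`: query the lock backend for a single machine of the requested type and return its `arch`. A behavioral sketch of that contract, with the backend passed in explicitly so the snippet stands alone (the real function reaches the backend through a module-level import, which is what the tests patch as `m_lock`/`m_query`; the `None` return on failure is an inference, since the failing test only asserts that `list_locks` was called):

def get_arch(machine_type, query):
    """Return the arch of one available machine of machine_type, or None."""
    # `query` stands in for the lock/query module the tests patch
    locks = query.list_locks(machine_type=machine_type, count=1)
    if not locks:
        # inferred behavior: nothing available, so no arch to report
        return None
    return locks[0]['arch']


class _FakeQuery(object):
    # stand-in backend so the sketch runs without teuthology installed
    @staticmethod
    def list_locks(machine_type, count):
        return [{"arch": "x86_64"}]


assert get_arch('magna', _FakeQuery) == "x86_64"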
Example #5
    def schedule_suite(self):
        """
        Schedule the suite-run. Returns the number of jobs scheduled.
        """
        name = self.name
        arch = util.get_arch(self.base_config.machine_type)
        suite_name = self.base_config.suite
        suite_path = os.path.normpath(os.path.join(
            self.suite_repo_path,
            self.args.suite_relpath,
            'suites',
            self.base_config.suite.replace(':', '/'),
        ))
        log.debug('Suite %s in %s' % (suite_name, suite_path))
        configs = [
            (combine_path(suite_name, item[0]), item[1]) for item in
            build_matrix(suite_path, subset=self.args.subset, seed=self.args.seed)
        ]
        log.info('Suite %s in %s generated %d jobs (not yet filtered)' % (
            suite_name, suite_path, len(configs)))

        if self.args.dry_run:
            log.debug("Base job config:\n%s" % self.base_config)

        # create, but do not write, the temp file here, so it can be
        # added to the args in collect_jobs, but not filled until
        # any backtracking is done
        base_yaml_path = NamedTemporaryFile(
            prefix='schedule_suite_', delete=False
        ).name
        self.base_yaml_paths.insert(0, base_yaml_path)

        # if newest, do this until there are no missing packages
        # if not, do it once
        backtrack = 0
        limit = self.args.newest
        while backtrack <= limit:
            jobs_missing_packages, jobs_to_schedule = \
                self.collect_jobs(arch, configs, self.args.newest)
            if jobs_missing_packages and self.args.newest:
                new_sha1 = \
                    util.find_git_parent('ceph', self.base_config.sha1)
                if new_sha1 is None:
                    util.schedule_fail('Backtrack for --newest failed', name)
                # rebuild the base config to resubstitute sha1
                self.config_input['ceph_hash'] = new_sha1
                self.base_config = self.build_base_config()
                backtrack += 1
                continue
            if backtrack:
                log.info("--newest supplied, backtracked %d commits to %s" %
                         (backtrack, self.base_config.sha1))
            break
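        # the 'else' below belongs to the while loop: it runs only if the loop
        # condition goes false without hitting 'break', i.e. when every allowed
        # backtrack still left packages missing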
        else:
            if self.args.newest:
                util.schedule_fail(
                    'Exceeded %d backtracks; raise --newest value' % limit,
                    name,
                )

        if self.args.dry_run:
            log.debug("Base job config:\n%s" % self.base_config)

        with open(base_yaml_path, 'w+b') as base_yaml:
            base_yaml.write(str(self.base_config).encode())

        if jobs_to_schedule:
            self.write_rerun_memo()

        self.schedule_jobs(jobs_missing_packages, jobs_to_schedule, name)

        os.remove(base_yaml_path)

        count = len(jobs_to_schedule)
        missing_count = len(jobs_missing_packages)
        log.info(
            'Suite %s in %s scheduled %d jobs.' %
            (suite_name, suite_path, count)
        )
        log.info('%d/%d jobs were filtered out.',
                 (len(configs) - count),
                 len(configs))
        if missing_count:
            log.warning('Scheduled %d/%d jobs that are missing packages!',
                        missing_count, count)
        return count
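Example #5 relies on a small tempfile idiom: `NamedTemporaryFile(delete=False)` reserves a filename up front so the path can be handed to `collect_jobs`, the contents are written only after any `--newest` backtracking has settled on a sha1, and the caller removes the file itself. A self-contained illustration of that lifecycle (not teuthology code):

import os
from tempfile import NamedTemporaryFile

# reserve a path now; delete=False keeps the file around after the handle
# is closed, so only the name needs to be passed along
path = NamedTemporaryFile(prefix='schedule_suite_', delete=False).name

# ... later, once the final contents are known ...
with open(path, 'w') as f:
    f.write('base job config goes here\n')

# with delete=False, cleanup is the caller's responsibility
os.remove(path)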
Example #6
    def schedule_suite(self):
        """
        Schedule the suite-run. Returns the number of jobs scheduled.
        """
        name = self.name
        if self.args.arch:
            arch = self.args.arch
            log.debug("Using '%s' as an arch" % arch)
        else:
            arch = util.get_arch(self.base_config.machine_type)
        suite_name = self.base_config.suite
        suite_path = os.path.normpath(
            os.path.join(
                self.suite_repo_path,
                self.args.suite_relpath,
                'suites',
                self.base_config.suite.replace(':', '/'),
            ))
        log.debug('Suite %s in %s' % (suite_name, suite_path))
        configs = build_matrix(suite_path,
                               subset=self.args.subset,
                               seed=self.args.seed)
        log.info('Suite %s in %s generated %d jobs (not yet filtered)' %
                 (suite_name, suite_path, len(configs)))

        if self.args.dry_run:
            log.debug("Base job config:\n%s" % self.base_config)

        # create, but do not write, the temp file here, so it can be
        # added to the args in collect_jobs, but not filled until
        # any backtracking is done
        base_yaml_path = NamedTemporaryFile(prefix='schedule_suite_',
                                            delete=False).name
        self.base_yaml_paths.insert(0, base_yaml_path)

        # compute the job limit with respect to --sleep-before-teardown
        job_limit = self.args.limit or 0
        sleep_before_teardown = int(self.args.sleep_before_teardown or 0)
        if sleep_before_teardown:
            if job_limit == 0:
                log.warning('The --sleep-before-teardown option was provided: '
                            'only 1 job will be scheduled. '
                            'Use --limit to run more jobs')
                # give user a moment to read this warning
                time.sleep(5)
                job_limit = 1
            elif self.args.non_interactive:
                log.warning('The --sleep-before-teardown option is active. '
                            'At most {} jobs will run, each sleeping for {} '
                            'before teardown.'.format(
                                job_limit,
                                format_timespan(sleep_before_teardown)))
            elif job_limit > 4:
                are_you_insane = (
                    'There are {total} configs and the job limit is {maximum}. '
                    'Do you really want to lock all machines needed for '
                    'this run for {that_long}? (y/N):'.format(
                        that_long=format_timespan(sleep_before_teardown),
                        total=len(configs),
                        maximum=job_limit))
                while True:
                    insane = (input(are_you_insane) or 'n').lower()
                    if insane == 'y':
                        break
                    elif insane == 'n':
                        exit(0)

        # if newest, do this until there are no missing packages
        # if not, do it once
        backtrack = 0
        limit = self.args.newest
        while backtrack <= limit:
            jobs_missing_packages, jobs_to_schedule = self.collect_jobs(
                arch,
                util.filter_configs(
                    configs,
                    filter_in=self.args.filter_in,
                    filter_out=self.args.filter_out,
                    filter_all=self.args.filter_all,
                    filter_fragments=self.args.filter_fragments,
                    suite_name=suite_name,
                ),
                self.args.newest,
                job_limit,
            )
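            # note: the filters above run on every pass of the backtrack loop,
            # so collect_jobs only ever sees configs that survived the
            # filter_in/filter_out/filter_all/filter_fragments arguments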
            if jobs_missing_packages and self.args.newest:
                new_sha1 = \
                    util.find_git_parent('ceph', self.base_config.sha1)
                if new_sha1 is None:
                    util.schedule_fail('Backtrack for --newest failed', name)
                # rebuild the base config to resubstitute sha1
                self.config_input['ceph_hash'] = new_sha1
                self.base_config = self.build_base_config()
                backtrack += 1
                continue
            if backtrack:
                log.info("--newest supplied, backtracked %d commits to %s" %
                         (backtrack, self.base_config.sha1))
            break
        else:
            if self.args.newest:
                util.schedule_fail(
                    'Exceeded %d backtracks; raise --newest value' % limit,
                    name,
                )

        if self.args.dry_run:
            log.debug("Base job config:\n%s" % self.base_config)

        with open(base_yaml_path, 'w+b') as base_yaml:
            base_yaml.write(str(self.base_config).encode())

        if jobs_to_schedule:
            self.write_rerun_memo()

        # Before scheduling jobs, check the priority
        if self.args.priority and jobs_to_schedule and not self.args.force_priority:
            self.check_priority(len(jobs_to_schedule))

        self.schedule_jobs(jobs_missing_packages, jobs_to_schedule, name)

        os.remove(base_yaml_path)

        count = len(jobs_to_schedule)
        missing_count = len(jobs_missing_packages)
        log.info('Suite %s in %s scheduled %d jobs.' %
                 (suite_name, suite_path, count))
        log.info('%d/%d jobs were filtered out.', (len(configs) - count),
                 len(configs))
        if missing_count:
            log.warning('Scheduled %d/%d jobs that are missing packages!',
                        missing_count, count)
        return count
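The `--sleep-before-teardown` guard in Example #6 blocks on a y/N prompt so a large run cannot quietly hold locked machines asleep for hours; `format_timespan` there typically comes from the `humanfriendly` package. The same prompt pattern, extracted into a standalone helper for illustration (the `confirm` helper is hypothetical, not a teuthology API):

def confirm(prompt, default=False):
    """Ask a y/N question until the user answers; empty input picks the default."""
    while True:
        answer = (input(prompt + ' (y/N): ') or ('y' if default else 'n')).lower()
        if answer == 'y':
            return True
        if answer == 'n':
            return False


if __name__ == '__main__':
    # mirrors the guard in schedule_suite: proceed only on an explicit 'y'
    if not confirm('Lock all machines needed for this run for 2 hours?'):
        raise SystemExit(0)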