Example #1
    def choose_ceph_hash(self):
        """
        Get the ceph hash: if --sha1/-S is supplied, use it if it is valid, and
        just keep the ceph_branch around.  Otherwise use the current git branch
        tip.
        """
        repo_name = self.ceph_repo_name

        if self.args.ceph_sha1:
            ceph_hash = self.args.ceph_sha1
            if self.args.validate_sha1:
                ceph_hash = util.git_validate_sha1(repo_name, ceph_hash)
            if not ceph_hash:
                exc = CommitNotFoundError(self.args.ceph_sha1,
                                          '%s.git' % repo_name)
                util.schedule_fail(message=str(exc), name=self.name)
            log.info("ceph sha1 explicitly supplied")

        elif self.args.ceph_branch:
            ceph_hash = util.git_ls_remote(repo_name, self.args.ceph_branch)
            if not ceph_hash:
                exc = BranchNotFoundError(self.args.ceph_branch,
                                          '%s.git' % repo_name)
                util.schedule_fail(message=str(exc), name=self.name)

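        # Note: ceph_hash is only assigned in the two branches above, so one
        # of --sha1 or --ceph-branch is expected to be set before this point.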
        log.info("ceph sha1: {hash}".format(hash=ceph_hash))
        return ceph_hash
Example #2
    def choose_suite_branch(self):
        suite_repo_name = self.suite_repo_name
        suite_repo_project_or_url = self.args.suite_repo or 'ceph-qa-suite'
        suite_branch = self.args.suite_branch
        ceph_branch = self.args.ceph_branch
        if suite_branch and suite_branch != 'master':
            if not util.git_branch_exists(
                suite_repo_project_or_url,
                suite_branch
            ):
                exc = BranchNotFoundError(suite_branch, suite_repo_name)
                util.schedule_fail(message=str(exc), name=self.name)
        elif not suite_branch:
            # Decide what branch of the suite repo to use
            if util.git_branch_exists(suite_repo_project_or_url, ceph_branch):
                suite_branch = ceph_branch
            else:
                log.info(
                    "branch {0} not in {1}; will use master for"
                    " ceph-qa-suite".format(
                        ceph_branch,
                        suite_repo_name
                    ))
                suite_branch = 'master'
        return suite_branch
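
The branch choice above reduces to a small fallback: an explicitly requested non-master branch must exist in the suite repo, no request falls back to the ceph branch when that branch exists, and otherwise master is used. A minimal standalone sketch of the same decision (branch_exists stands in for util.git_branch_exists, and raising replaces util.schedule_fail; neither name is teuthology API):

def pick_suite_branch(requested, ceph_branch, branch_exists):
    # Sketch only: an explicit non-master branch must exist.
    if requested and requested != 'master':
        if not branch_exists(requested):
            raise ValueError('suite branch %s not found' % requested)
        return requested
    if requested:  # an explicit 'master' request is used as-is
        return requested
    # No branch requested: prefer the ceph branch, then fall back to master.
    if branch_exists(ceph_branch):
        return ceph_branch
    return 'master'
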
Example #3
    def choose_kernel(self):
        # Put together a stanza specifying the kernel hash
        if self.args.kernel_branch == 'distro':
            kernel_hash = 'distro'
        # Skip the stanza if '-k none' is given
        elif self.args.kernel_branch is None or \
             self.args.kernel_branch.lower() == 'none':
            kernel_hash = None
        else:
            kernel_hash = util.get_gitbuilder_hash(
                'kernel',
                self.args.kernel_branch,
                'default',
                self.args.machine_type,
                self.args.distro,
                self.args.distro_version,
            )
            if not kernel_hash:
                util.schedule_fail("Kernel branch '{branch}' not found".format(
                    branch=self.args.kernel_branch))
        if kernel_hash:
            log.info("kernel sha1: {hash}".format(hash=kernel_hash))
            kernel_dict = dict(kernel=dict(kdb=True, sha1=kernel_hash))
            if kernel_hash != 'distro':
                kernel_dict['kernel']['flavor'] = 'default'
        else:
            kernel_dict = dict()
        return kernel_dict
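
Depending on the -k/--kernel value, the stanza above takes one of three shapes. A short illustration (the sha1 below is made up, purely for the example):

# '-k <branch>' that resolves to a build: full stanza including a flavor
resolved = dict(kernel=dict(kdb=True, sha1='0123abcd', flavor='default'))
# '-k distro': sha1 is the literal string 'distro' and no flavor is added
distro = dict(kernel=dict(kdb=True, sha1='distro'))
# '-k none' (or no kernel branch at all): the stanza is omitted entirely
skipped = dict()
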
Example #4
    def schedule_jobs(self, jobs_missing_packages, jobs_to_schedule, name):
        for job in jobs_to_schedule:
            log.info(
                'Scheduling %s', job['desc']
            )

            log_prefix = ''
            if job in jobs_missing_packages:
                log_prefix = "Missing Packages: "
                if (
                    not self.args.dry_run and
                    not config.suite_allow_missing_packages
                ):
                    util.schedule_fail(
                        "At least one job needs packages that don't exist for "
                        "hash {sha1}.".format(sha1=self.base_config.sha1),
                        name,
                    )
            util.teuthology_schedule(
                args=job['args'],
                dry_run=self.args.dry_run,
                verbose=self.args.verbose,
                log_prefix=log_prefix,
            )
            throttle = self.args.throttle
            if not self.args.dry_run and throttle:
                log.info("pause between jobs : --throttle " + str(throttle))
                time.sleep(int(throttle))
Example #5
    def check_num_jobs(self, jobs_to_schedule):
        """
        Fail schedule if number of jobs exceeds job threshold.
        """
        threshold = self.args.job_threshold
        msg = f'''Unable to schedule {jobs_to_schedule} jobs, too many jobs, when maximum {threshold} jobs allowed.

Note: If you still want to go ahead, use --job-threshold 0'''
        if threshold and jobs_to_schedule > threshold:
            util.schedule_fail(msg)
Example #6
    def choose_suite_hash(self, suite_branch):
        suite_repo_name = self.suite_repo_name
        suite_repo_project_or_url = self.args.suite_repo or 'ceph-qa-suite'
        suite_hash = util.git_ls_remote(suite_repo_project_or_url,
                                        suite_branch)
        if not suite_hash:
            exc = BranchNotFoundError(suite_branch, suite_repo_name)
            util.schedule_fail(message=str(exc), name=self.name)
        log.info("%s branch: %s %s", suite_repo_name, suite_branch, suite_hash)
        return suite_hash
Example #7
    def choose_teuthology_branch(self):
        """Select teuthology branch, check if it is present in repo and return
        tuple (branch, hash) where hash is commit sha1 corresponding
        to the HEAD of the branch.

        The branch name value is determined in the following order:

        Use ``--teuthology-branch`` argument value if supplied.

        Use ``TEUTH_BRANCH`` environment variable value if declared.

        If file ``qa/.teuthology_branch`` can be found in the suite repo
        supplied with ``--suite-repo`` or ``--suite-dir`` and contains
        non-empty string then use it as the branch name.

        Use ``teuthology_branch`` value if it is set in the one
        of the teuthology config files ``$HOME/teuthology.yaml``
        or ``/etc/teuthology.yaml`` correspondingly.

        Use ``master``.

        Generate exception if the branch is not present in the repo.

        """
        teuthology_branch = self.args.teuthology_branch
        if not teuthology_branch:
            teuthology_branch = os.environ.get('TEUTH_BRANCH', None)
        if not teuthology_branch:
            branch_file_path = self.suite_repo_path + '/qa/.teuthology_branch'
            log.debug('Check file %s exists', branch_file_path)
            if os.path.exists(branch_file_path):
                log.debug('Found teuthology branch config file %s',
                          branch_file_path)
                with open(branch_file_path) as f:
                    teuthology_branch = f.read().strip()
                    if teuthology_branch:
                        log.debug(
                            'The teuthology branch is overridden with %s',
                            teuthology_branch)
                    else:
                        log.warning(
                            'The teuthology branch config is empty, skipping')
        if not teuthology_branch:
            teuthology_branch = config.get('teuthology_branch', 'master')

        teuthology_sha1 = util.git_ls_remote(
            'teuthology',
            teuthology_branch
        )
        if not teuthology_sha1:
            exc = BranchNotFoundError(teuthology_branch, build_git_url('teuthology'))
            util.schedule_fail(message=str(exc), name=self.name)
        log.info("teuthology branch: %s %s", teuthology_branch, teuthology_sha1)
        return teuthology_branch, teuthology_sha1
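
The fallback order described in the docstring is effectively a first-non-empty chain. A compressed, self-contained sketch of that order (the function and parameter names here are illustrative, not part of the teuthology API):

import os

def resolve_teuthology_branch(arg_branch=None, branch_file_text=None,
                              config_branch=None):
    # Order: CLI argument, TEUTH_BRANCH environment variable, the
    # qa/.teuthology_branch file contents, the config file value, 'master'.
    return (
        arg_branch
        or os.environ.get('TEUTH_BRANCH')
        or (branch_file_text or '').strip()
        or config_branch
        or 'master'
    )
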
Example #8
    def choose_ceph_version(self, ceph_hash):
        if config.suite_verify_ceph_hash and not self.args.newest:
            # don't bother if newest; we'll search for an older one
            # Get the ceph package version
            try:
                ceph_version = util.package_version_for_hash(
                    ceph_hash, self.args.kernel_flavor, self.args.distro,
                    self.args.distro_version, self.args.machine_type,
                )
            except Exception as exc:
                util.schedule_fail(str(exc), self.name)
            log.info("ceph version: {ver}".format(ver=ceph_version))
            return ceph_version
        else:
            log.info('skipping ceph package verification')
Example #9
    def choose_teuthology_branch(self):
        teuthology_branch = self.args.teuthology_branch
        if teuthology_branch and teuthology_branch != 'master':
            if not util.git_branch_exists('teuthology', teuthology_branch):
                exc = BranchNotFoundError(teuthology_branch, 'teuthology.git')
                util.schedule_fail(message=str(exc), name=self.name)
        elif not teuthology_branch:
            # Decide what branch of teuthology to use
            if util.git_branch_exists('teuthology', self.args.ceph_branch):
                teuthology_branch = self.args.ceph_branch
            else:
                log.info(
                    "branch {0} not in teuthology.git; will use master for"
                    " teuthology".format(self.args.ceph_branch))
                teuthology_branch = 'master'
        log.info("teuthology branch: %s", teuthology_branch)
        return teuthology_branch
Example #10
def main(args):
    conf = process_args(args)
    if conf.verbose:
        teuthology.log.setLevel(logging.DEBUG)

    if not conf.machine_type or conf.machine_type == 'None':
        if not config.default_machine_type or config.default_machine_type == 'None':
            schedule_fail("Must specify a machine_type")
        else:
            conf.machine_type = config.default_machine_type
    elif 'multi' in conf.machine_type:
        schedule_fail("'multi' is not a valid machine_type. " +
                      "Maybe you want 'gibba,smithi,mira' or similar")

    if conf.email:
        config.results_email = conf.email
    if conf.archive_upload:
        config.archive_upload = conf.archive_upload
        log.info('Will upload archives to ' + conf.archive_upload)

    if conf.rerun:
        rerun_filters = get_rerun_filters(conf.rerun, conf.rerun_statuses)
        if len(rerun_filters['descriptions']) == 0:
            log.warning(
                "No jobs matched the status filters: %s",
                conf.rerun_statuses,
            )
            return
        conf.filter_in.extend(rerun_filters['descriptions'])
        conf.suite = normalize_suite_name(rerun_filters['suite'])
        conf.subset, conf.seed = get_rerun_conf(conf)
    if conf.seed < 0:
        conf.seed = random.randint(0, 9999)
        log.info('Using random seed=%s', conf.seed)

    run = Run(conf)
    name = run.name
    run.prepare_and_schedule()
    if not conf.dry_run and conf.wait:
        return wait(name, config.max_job_time,
                    conf.archive_upload_url)
Example #11
    def check_priority(self, jobs_to_schedule):
        priority = self.args.priority
        msg = '''Use the following testing priority
10 to 49: Tests which are urgent and blocking other important development.
50 to 74: Testing a particular feature/fix with less than 25 jobs and can also be used for urgent release testing.
75 to 99: Tech Leads usually schedule integration tests with this priority to verify pull requests against master.
100 to 149: QE validation of point releases.
150 to 199: Testing a particular feature/fix with less than 100 jobs and results will be available in a day or so.
200 to 1000: Large test runs that can be done over the course of a week.
Note: To force run, use --force-priority'''
        if priority < 50:
            util.schedule_fail(msg)
        elif priority < 75 and jobs_to_schedule > 25:
            util.schedule_fail(msg)
        elif priority < 150 and jobs_to_schedule > 100:
            util.schedule_fail(msg)
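
The same gating rules in isolation, which can be handy for sanity-checking a planned run before scheduling (a sketch; priority_allowed is not a teuthology function):

def priority_allowed(priority, jobs_to_schedule):
    # Low priorities are reserved for small, urgent runs; larger runs must
    # use numerically higher priorities, per the bands listed above.
    if priority < 50:
        return False
    if priority < 75 and jobs_to_schedule > 25:
        return False
    if priority < 150 and jobs_to_schedule > 100:
        return False
    return True

# e.g. priority_allowed(70, 30) -> False; priority_allowed(120, 30) -> True
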
Example #12
    def schedule_suite(self):
        """
        Schedule the suite-run. Returns the number of jobs scheduled.
        """
        name = self.name
        if self.args.arch:
            arch = self.args.arch
            log.debug("Using '%s' as an arch" % arch)
        else:
            arch = util.get_arch(self.base_config.machine_type)
        suite_name = self.base_config.suite
        suite_path = os.path.normpath(
            os.path.join(
                self.suite_repo_path,
                self.args.suite_relpath,
                'suites',
                self.base_config.suite.replace(':', '/'),
            ))
        log.debug('Suite %s in %s' % (suite_name, suite_path))
        configs = build_matrix(suite_path,
                               subset=self.args.subset,
                               seed=self.args.seed)
        log.info('Suite %s in %s generated %d jobs (not yet filtered)' %
                 (suite_name, suite_path, len(configs)))

        if self.args.dry_run:
            log.debug("Base job config:\n%s" % self.base_config)

        # create, but do not write, the temp file here, so it can be
        # added to the args in collect_jobs, but not filled until
        # any backtracking is done
        base_yaml_path = NamedTemporaryFile(prefix='schedule_suite_',
                                            delete=False).name
        self.base_yaml_paths.insert(0, base_yaml_path)

        # compute the job limit with respect to --sleep-before-teardown
        job_limit = self.args.limit or 0
        sleep_before_teardown = int(self.args.sleep_before_teardown or 0)
        if sleep_before_teardown:
            if job_limit == 0:
                log.warning('The --sleep-before-teardown option was provided: '
                            'only 1 job will be scheduled. '
                            'Use --limit to run more jobs')
                # give user a moment to read this warning
                time.sleep(5)
                job_limit = 1
            elif self.args.non_interactive:
                log.warning('The --sleep-before-teardown option is active. '
                            'There will be a maximum {} jobs running '
                            'which will fall asleep for {}'.format(
                                job_limit,
                                format_timespan(sleep_before_teardown)))
            elif job_limit > 4:
                are_you_insane = (
                    'There are {total} configs and {maximum} job limit is used. '
                    'Do you really want to lock all machines needed for '
                    'this run for {that_long}? (y/N):'.format(
                        that_long=format_timespan(sleep_before_teardown),
                        total=len(configs),
                        maximum=job_limit))
                while True:
                    insane = (input(are_you_insane) or 'n').lower()
                    if insane == 'y':
                        break
                    elif insane == 'n':
                        exit(0)

        # if newest, do this until there are no missing packages
        # if not, do it once
        backtrack = 0
        limit = self.args.newest
        while backtrack <= limit:
            jobs_missing_packages, jobs_to_schedule = \
                self.collect_jobs(arch,
                    util.filter_configs(configs,
                        filter_in=self.args.filter_in,
                        filter_out=self.args.filter_out,
                        filter_all=self.args.filter_all,
                        filter_fragments=self.args.filter_fragments,
                        suite_name=suite_name),
                    self.args.newest, job_limit)
            if jobs_missing_packages and self.args.newest:
                new_sha1 = \
                    util.find_git_parent('ceph', self.base_config.sha1)
                if new_sha1 is None:
                    util.schedule_fail('Backtrack for --newest failed', name)
                # rebuild the base config to resubstitute sha1
                self.config_input['ceph_hash'] = new_sha1
                self.base_config = self.build_base_config()
                backtrack += 1
                continue
            if backtrack:
                log.info("--newest supplied, backtracked %d commits to %s" %
                         (backtrack, self.base_config.sha1))
            break
        else:
            if self.args.newest:
                util.schedule_fail(
                    'Exceeded %d backtracks; raise --newest value' % limit,
                    name,
                )

        if self.args.dry_run:
            log.debug("Base job config:\n%s" % self.base_config)

        with open(base_yaml_path, 'w+b') as base_yaml:
            base_yaml.write(str(self.base_config).encode())

        if jobs_to_schedule:
            self.write_rerun_memo()

        # Before scheduling jobs, check the priority
        if self.args.priority and jobs_to_schedule and not self.args.force_priority:
            self.check_priority(len(jobs_to_schedule))

        self.check_num_jobs(len(jobs_to_schedule))

        self.schedule_jobs(jobs_missing_packages, jobs_to_schedule, name)

        os.remove(base_yaml_path)

        count = len(jobs_to_schedule)
        missing_count = len(jobs_missing_packages)
        log.info('Suite %s in %s scheduled %d jobs.' %
                 (suite_name, suite_path, count))
        log.info('%d/%d jobs were filtered out.', (len(configs) - count),
                 len(configs))
        if missing_count:
            log.warning('Scheduled %d/%d jobs that are missing packages!',
                        missing_count, count)
        return count
Example #13
    def schedule_suite(self):
        """
        Schedule the suite-run. Returns the number of jobs scheduled.
        """
        name = self.name
        arch = util.get_arch(self.base_config.machine_type)
        suite_name = self.base_config.suite
        suite_path = os.path.normpath(os.path.join(
            self.suite_repo_path,
            self.args.suite_relpath,
            'suites',
            self.base_config.suite.replace(':', '/'),
        ))
        log.debug('Suite %s in %s' % (suite_name, suite_path))
        configs = [
            (combine_path(suite_name, item[0]), item[1]) for item in
            build_matrix(suite_path, subset=self.args.subset, seed=self.args.seed)
        ]
        log.info('Suite %s in %s generated %d jobs (not yet filtered)' % (
            suite_name, suite_path, len(configs)))

        if self.args.dry_run:
            log.debug("Base job config:\n%s" % self.base_config)

        # create, but do not write, the temp file here, so it can be
        # added to the args in collect_jobs, but not filled until
        # any backtracking is done
        base_yaml_path = NamedTemporaryFile(
            prefix='schedule_suite_', delete=False
        ).name
        self.base_yaml_paths.insert(0, base_yaml_path)

        # if newest, do this until there are no missing packages
        # if not, do it once
        backtrack = 0
        limit = self.args.newest
        while backtrack <= limit:
            jobs_missing_packages, jobs_to_schedule = \
                self.collect_jobs(arch, configs, self.args.newest)
            if jobs_missing_packages and self.args.newest:
                new_sha1 = \
                    util.find_git_parent('ceph', self.base_config.sha1)
                if new_sha1 is None:
                    util.schedule_fail('Backtrack for --newest failed', name)
                # rebuild the base config to resubstitute sha1
                self.config_input['ceph_hash'] = new_sha1
                self.base_config = self.build_base_config()
                backtrack += 1
                continue
            if backtrack:
                log.info("--newest supplied, backtracked %d commits to %s" %
                         (backtrack, self.base_config.sha1))
            break
        else:
            if self.args.newest:
                util.schedule_fail(
                    'Exceeded %d backtracks; raise --newest value' % limit,
                    name,
                )

        if self.args.dry_run:
            log.debug("Base job config:\n%s" % self.base_config)

        with open(base_yaml_path, 'w+b') as base_yaml:
            base_yaml.write(str(self.base_config).encode())

        if jobs_to_schedule:
            self.write_rerun_memo()

        self.schedule_jobs(jobs_missing_packages, jobs_to_schedule, name)

        os.remove(base_yaml_path)

        count = len(jobs_to_schedule)
        missing_count = len(jobs_missing_packages)
        log.info(
            'Suite %s in %s scheduled %d jobs.' %
            (suite_name, suite_path, count)
        )
        log.info('%d/%d jobs were filtered out.',
                 (len(configs) - count),
                 len(configs))
        if missing_count:
            log.warning('Scheduled %d/%d jobs that are missing packages!',
                        missing_count, count)
        return count