Example #1
    def _run_service_command(
        self,
        command,
        expected_exit_code=None,
        unit_name=None,
        log_callback=None
    ):
        """Wrapper around running the command and raising exception on unexpected code

        Args:
            command: string command for systemd (stop, start, restart, etc)
            expected_exit_code: the exit code to expect, otherwise raise
            unit_name: optional unit name, defaults to self.unit_name attribute
            log_callback: optional logging callable; self.logger is used when omitted

        Raises:
            SystemdException: When expected_exit_code is not matched
        """
        unit = self.unit_name if unit_name is None else unit_name
        with self.appliance.ssh_client as ssh:
            cmd = 'systemctl {} {}'.format(quote(command), quote(unit))
            if log_callback:
                log_callback('Running {}'.format(cmd))
            else:
                self.logger.info('Running {}'.format(cmd))
            result = ssh.run_command(cmd)

        if expected_exit_code is not None and result.rc != expected_exit_code:
            # TODO: Bring back address
            msg = 'Failed to {} {}\nError: {}'.format(
                command, self.unit_name, result.output)
            if log_callback:
                log_callback(msg)
            else:
                self.logger.error(msg)
            raise SystemdException(msg)

        return result
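A self-contained stand-in for trying the same wrapper pattern locally, with subprocess in place of the appliance SSH client (the function and unit names here are illustrative, not part of the original):

import logging
import subprocess
from shlex import quote

logger = logging.getLogger(__name__)


class SystemdException(Exception):
    pass


def run_service_command(command, unit, expected_exit_code=None):
    # Same contract as the method above, but executed locally.
    cmd = 'systemctl {} {}'.format(quote(command), quote(unit))
    logger.info('Running %s', cmd)
    result = subprocess.run(cmd, shell=True, capture_output=True, text=True)
    if expected_exit_code is not None and result.returncode != expected_exit_code:
        msg = 'Failed to {} {}\nError: {}'.format(
            command, unit, result.stdout + result.stderr)
        logger.error(msg)
        raise SystemdException(msg)
    return result


# e.g. run_service_command('status', 'sshd.service', expected_exit_code=0)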
Example #2
    def _run_service_command(self,
                             command,
                             expected_exit_code=None,
                             unit_name=None):
        """Wrapper around running the command and raising exception on unexpected code

        Args:
            command: string command for systemd (stop, start, restart, etc)
            expected_exit_code: the exit code to expect, otherwise raise
            unit_name: optional unit name, defaults to self.unit_name attribute

        Raises:
            SystemdException: When expected_exit_code is not matched
        """
        unit = self.unit_name if unit_name is None else unit_name
        with self.appliance.ssh_client as ssh:
            result = ssh.run_command('systemctl {} {}'.format(
                quote(command), quote(unit)))

        if expected_exit_code is not None and result.rc != expected_exit_code:
            # TODO: Bring back address
            msg = 'Failed to {} {}\nError: {}'.format(command, self.unit_name,
                                                      result.output)
            self.logger.error(msg)
            raise SystemdException(msg)

        return result
Example #3
    def _run_service_command(self, command, expected_exit_code=None):
        with self.appliance.ssh_client as ssh:
            status, output = ssh.run_command('systemctl {} {}'.format(
                quote(command), quote(self.unit_name)))

        if expected_exit_code is not None and status != expected_exit_code:
            # TODO: Bring back address
            msg = 'Failed to {} {}\nError: {}'.format(command, self.unit_name,
                                                      output)
            self.logger.error(msg)
            raise SystemdException(msg)

        return status
Example #4
    def _run_service_command(self, command, expected_exit_code=None):
        with self.appliance.ssh_client as ssh:
            status, output = ssh.run_command('systemctl {} {}'.format(
                quote(command), quote(self.unit_name)))

        if expected_exit_code is not None and status != expected_exit_code:
            # TODO: Bring back address
            msg = 'Failed to {} {}\nError: {}'.format(
                command, self.unit_name, output)
            self.logger.error(msg)
            raise SystemdException(msg)

        return status
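All of these wrappers pass the command and unit name through quote() before interpolating them into the shell line; a quick illustration of what that buys (shlex.quote in Python 3, pipes.quote in the Python 2 code these examples come from):

from shlex import quote

unit = 'evmserverd; rm -rf /tmp/x'  # a hostile "unit name"
print('systemctl stop {}'.format(unit))
# systemctl stop evmserverd; rm -rf /tmp/x   <- the shell would run two commands
print('systemctl stop {}'.format(quote(unit)))
# systemctl stop 'evmserverd; rm -rf /tmp/x' <- one harmless argument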
Example #5
    def _collect_reports(self):
        # stop evm to end its processes and let the simplecov exit hook run
        self.ipapp.ssh_client.run_command('systemctl stop evmserverd')
        # collect back to the collection appliance if parallelized
        if store.current_appliance != self.collection_appliance:
            self.print_message('sending reports to {}'.format(self.collection_appliance.hostname))
            result = self.ipapp.ssh_client.run_command(
                'sshpass -p {passwd} '
                'scp -o StrictHostKeyChecking=no '
                '-r /var/www/miq/vmdb/coverage/* '
                '{addr}:/var/www/miq/vmdb/coverage/'.format(
                    addr=self.collection_appliance.hostname,
                    passwd=quote(self.ipapp.ssh_client._connect_kwargs['password'])),
                timeout=1800)
            if not result:
                self.print_message('There was an error sending reports: ' + str(result))
Example #7
def main(appliance, jenkins_url, jenkins_user, jenkins_token, job_name):
    if not jenkins_user or not jenkins_token:
        try:
            from cfme.utils import conf
            jenkins_user = conf.credentials.jenkins_app.user
            jenkins_token = conf.credentials.jenkins_app.token
        except (AttributeError, KeyError):
            raise ValueError(
                '--jenkins-user and --jenkins-token not provided and credentials yaml does not '
                'contain the jenkins_app entry with user and token')
    appliance_version = str(appliance.version).strip()
    print('Looking for appliance version {} in {}'.format(
        appliance_version, job_name))
    client = jenkins.Jenkins(jenkins_url,
                             username=jenkins_user,
                             password=jenkins_token)
    build_numbers = get_build_numbers(client, job_name)
    if not build_numbers:
        print('No builds for job {}'.format(job_name))
        return 1

    # Find the builds with appliance version
    eligible_build_numbers = set()
    for build_number in build_numbers:
        try:
            artifacts = client.get_build_info(job_name,
                                              build_number)['artifacts']
            if not artifacts:
                raise ValueError()
        except (KeyError, ValueError):
            print('No artifacts for {}/{}'.format(job_name, build_number))
            continue

        artifacts = group_list_dict_by(artifacts, 'fileName')
        if 'appliance_version' not in artifacts:
            print('appliance_version not in artifacts of {}/{}'.format(
                job_name, build_number))
            continue

        build_appliance_version = download_artifact(
            jenkins_user, jenkins_token, jenkins_url, job_name, build_number,
            artifacts['appliance_version']['relativePath']).strip()

        if not build_appliance_version:
            print('Appliance version unspecified for build {}'.format(
                build_number))
            continue

        if Version(build_appliance_version) < Version(appliance_version):
            print('Build {} already has lower version ({})'.format(
                build_number, build_appliance_version))
            print('Ending here')
            break

        if 'coverage-results.tgz' not in artifacts:
            print('coverage-results.tgz not in artifacts of {}/{}'.format(
                job_name, build_number))
            continue

        if not check_artifact(
                jenkins_user, jenkins_token, jenkins_url, job_name,
                build_number,
                artifacts['coverage-results.tgz']['relativePath']):
            print('Coverage archive could not be downloaded, skipping')
            continue

        if build_appliance_version == appliance_version:
            print('Build {} was found to contain what is needed'.format(
                build_number))
            eligible_build_numbers.add(build_number)
        else:
            print(
                'Skipping build {} because it does not have correct version ({})'
                .format(build_number, build_appliance_version))

    if not eligible_build_numbers:
        print('Could not find any coverage reports for {} in {}'.format(
            appliance_version, job_name))
        return 2

    # Stop the evm service, not needed at all
    print('Stopping evmserverd')
    appliance.evmserverd.stop()
    # Install the coverage tools on the appliance
    print('Installing simplecov')
    appliance.coverage._install_simplecov()
    # Upload the merger
    print('Installing coverage merger')
    appliance.coverage._upload_coverage_merger()
    eligible_build_numbers = sorted(eligible_build_numbers)
    with appliance.ssh_client as ssh:
        if not ssh.run_command('mkdir -p /var/www/miq/vmdb/coverage'):
            print(
                'Could not create /var/www/miq/vmdb/coverage on the appliance!'
            )
            return 3
        # Download all the coverage reports
        for build_number in eligible_build_numbers:
            print('Downloading the coverage report from build {}'.format(
                build_number))
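            # NOTE: `artifacts` still holds the mapping built for the last build
            # inspected in the search loop above; this works only if the coverage
            # archive's relativePath is identical across builds.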
            download_url = jenkins_artifact_url(
                jenkins_user, jenkins_token, jenkins_url, job_name,
                build_number,
                artifacts['coverage-results.tgz']['relativePath'])
            cmd = ssh.run_command(
                'curl -k -o /var/www/miq/vmdb/coverage/tmp.tgz {}'.format(
                    quote(download_url)))
            if not cmd:
                print('Could not download! - {}'.format(str(cmd)))
                return 4
            print('Extracting the coverage report from build {}'.format(
                build_number))
            extract_command = ' && '.join([
                'cd /var/www/miq/vmdb/coverage',
                'tar xf tmp.tgz --strip-components=1',
                'rm -f tmp.tgz',
            ])
            cmd = ssh.run_command(extract_command)
            if not cmd:
                print('Could not extract! - {}'.format(str(cmd)))
                return 5

        # Now run the merger
        print('Running the merger')
        cmd = ssh.run_command(
            'cd /var/www/miq/vmdb; time bin/rails runner coverage_merger.rb',
            timeout=60 * 60)
        if not cmd:
            print('Failure running the merger - {}'.format(str(cmd)))
            return 6
        else:
            print('Coverage report generation was successful')
            print(str(cmd))
            percentage = re.search(r'LOC\s+\((\d+\.\d+%)\)\s+covered\.',
                                   str(cmd))
            if percentage:
                print('COVERAGE={};'.format(percentage.groups()[0]))
            else:
                print('COVERAGE=unknown;')

        print('Packing the generated HTML')
        cmd = ssh.run_command(
            'cd /var/www/miq/vmdb/coverage; tar cfz /tmp/merged.tgz merged')
        if not cmd:
            print('Could not compress! - {}'.format(str(cmd)))
            return 7
        print('Grabbing the generated HTML')
        ssh.get_file('/tmp/merged.tgz', log_path.strpath)
        print('Decompressing the generated HTML')
        rc = subprocess.call([
            'tar', 'xf',
            log_path.join('merged.tgz').strpath, '-C', log_path.strpath
        ])
        if rc == 0:
            print('Done!')
        else:
            print('Failure to extract')
            return 8
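The COVERAGE regex above keys off simplecov's end-of-run summary line; a small self-contained check of the pattern (the sample line's shape is inferred from the regex and its numbers are invented):

import re

sample = '12183 / 15201 LOC (80.15%) covered.'
match = re.search(r'LOC\s+\((\d+\.\d+%)\)\s+covered\.', sample)
print('COVERAGE={};'.format(match.group(1) if match else 'unknown'))
# COVERAGE=80.15%;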
Example #8
    def _run_command(self, command, timeout=RUNCMD_TIMEOUT, reraise=False, ensure_host=False,
                     ensure_user=False, container=None):
        if isinstance(command, dict):
            command = VersionPicker(command).pick(self.vmdb_version)
        original_command = command
        uses_sudo = False
        logger.info("Running command %r", command)
        container = container or self._container
        if self.is_pod and not ensure_host:
            # This command will be executed in the context of the host provider
            command_to_run = '[[ -f /etc/default/evm ]] && source /etc/default/evm; ' + command
            oc_cmd = 'oc exec --namespace={proj} {pod} -- bash -c {cmd}'.format(
                proj=self._project, pod=container, cmd=quote(command_to_run))
            command = oc_cmd
            ensure_host = True
        elif self.is_container and not ensure_host:
            command = 'docker exec {} bash -c {}'.format(container, quote(
                'source /etc/default/evm; ' + command))

        if self.username != 'root' and not ensure_user:
            # We need sudo
            command = 'sudo -i bash -c {command}'.format(command=quote(command))
            uses_sudo = True

        if command != original_command:
            logger.info("> Actually running command %r", command)
        command += '\n'

        output = []
        try:
            session = self.get_transport().open_session()
            if uses_sudo:
                # We need a pseudo-tty for sudo
                session.get_pty()
            if timeout:
                session.settimeout(float(timeout))

            session.exec_command(command)
            stdout = session.makefile()
            stderr = session.makefile_stderr()

            def write_output(line, file):
                output.append(line)
                if self._streaming:
                    file.write(line)

            while True:
                if session.exit_status_ready():
                    break
                no_data = 0
                # While the program is running loop through collecting line by line so that we don't
                # fill the buffers up without a newline.
                # Also, note that for long running programs if we try to read output when there
                # is none (and in the case of stderr may never be any)
                # we risk blocking so long that the write buffer on the remote side will fill
                # and the remote program will block on a write.
                # The blocking on our side occurs in paramiko's buffered_pipe.py's read() call,
                # which will block if its internal buffer is empty.
                if session.recv_ready():
                    try:
                        line = next(stdout)
                        write_output(line, self.f_stdout)
                    except StopIteration:
                        pass
                else:
                    no_data += 1

                if session.recv_stderr_ready():
                    try:
                        line = next(stderr)
                        write_output(line, self.f_stderr)
                    except StopIteration:
                        pass
                else:
                    no_data += 1

                if no_data == 2:
                    gevent.sleep(0.01)

            # When the program finishes, we need to grab the rest of the output that is left.
            # Also, we don't have the issue of blocking reads because since the command is
            # finished, any pending reads of SSH encrypted data will finish shortly and put in
            # the buffer or for an empty file EOF will be reached as it will be closed.
            for line in stdout:
                write_output(line, self.f_stdout)
            for line in stderr:
                write_output(line, self.f_stderr)

            exit_status = session.recv_exit_status()
            if exit_status != 0:
                logger.warning('Exit code %d!', exit_status)
            return SSHResult(rc=exit_status, output=''.join(output), command=command)
        except paramiko.SSHException:
            if reraise:
                raise
            else:
                logger.exception('Exception happened during SSH call')
        except socket.timeout:
            logger.exception(
                "Command %r timed out. Output before it failed was:\n%r",
                command,
                ''.join(output))
            raise

        # Returning two things so tuple unpacking the return works even if the ssh client fails
        # Return whatever we have in the output
        return SSHResult(rc=1, output=''.join(output), command=command)
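The read loop above is careful to poll before reading; a stripped-down sketch of the same recv_ready()/recv_stderr_ready() pattern against a bare paramiko channel (host and credentials are placeholders):

import time

import paramiko

client = paramiko.SSHClient()
client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
client.connect('appliance.example.com', username='root', password='secret')  # placeholders

session = client.get_transport().open_session()
session.settimeout(60.0)
session.exec_command('for i in 1 2 3; do echo $i; sleep 1; done')

chunks = []
while not session.exit_status_ready():
    drained = False
    if session.recv_ready():
        chunks.append(session.recv(4096).decode())
        drained = True
    if session.recv_stderr_ready():
        session.recv_stderr(4096)  # drain stderr so the remote side never blocks on a write
        drained = True
    if not drained:
        time.sleep(0.01)  # nothing buffered; sleep instead of blocking in a read
while session.recv_ready():  # grab whatever is left after exit
    chunks.append(session.recv(4096).decode())
print(session.recv_exit_status(), ''.join(chunks))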
Example #9
    def run_command(
            self, command, timeout=RUNCMD_TIMEOUT, reraise=False, ensure_host=False,
            ensure_user=False, container=None):
        """Run a command over SSH.

        Args:
            command: The command. Supports taking dicts as version picking.
            timeout: Timeout after which the command execution fails.
            reraise: Does not muffle the paramiko exceptions in the log.
            ensure_host: Ensure that the command is run on the machine with the IP given, not any
                container or such that we might be using by default.
            ensure_user: Ensure that the command is run as the user we logged in, so in case we are
                not root, setting this to True will prevent from running sudo.
            container: allows temporarily overriding the default container
        Returns:
            A :py:class:`SSHResult` instance.
        """
        if isinstance(command, dict):
            command = version.pick(command, active_version=self.vmdb_version)
        original_command = command
        uses_sudo = False
        logger.info("Running command %r", command)
        container = container or self._container
        if self.is_pod and not ensure_host:
            # This command will be executed in the context of the host provider
            command_to_run = '[[ -f /etc/default/evm ]] && source /etc/default/evm; ' + command
            oc_cmd = 'oc exec --namespace={proj} {pod} -- bash -c {cmd}'.format(
                proj=self._project, pod=container, cmd=quote(command_to_run))
            command = oc_cmd
            ensure_host = True
        elif self.is_container and not ensure_host:
            command = 'docker exec {} bash -c {}'.format(container, quote(
                'source /etc/default/evm; ' + command))

        if self.username != 'root' and not ensure_user:
            # We need sudo
            command = 'sudo -i bash -c {command}'.format(command=quote(command))
            uses_sudo = True

        if command != original_command:
            logger.info("> Actually running command %r", command)
        command += '\n'

        output = []
        try:
            session = self.get_transport().open_session()
            if uses_sudo:
                # We need a pseudo-tty for sudo
                session.get_pty()
            if timeout:
                session.settimeout(float(timeout))

            session.exec_command(command)
            stdout = session.makefile()
            stderr = session.makefile_stderr()

            def write_output(line, file):
                output.append(line)
                if self._streaming:
                    file.write(line)

            while True:
                if session.exit_status_ready():
                    break

                # While the program is running loop through collecting line by line so that we don't
                # fill the buffers up without a newline.   Also, note that for long running programs if
                # we try to read output when there is none (and in the case of stderr may never be any)
                # we run the risk of blocking so long that the write buffer on the remote side will fill
                # and the remote program will block on a write.   The blocking on our side occurs in
                # paramiko's buffered_pipe.py's read() call, which will block if its
                # internal buffer is empty.
                if session.recv_ready():
                    try:
                        line = next(stdout)
                        write_output(line, self.f_stdout)
                    except StopIteration:
                        pass

                if session.recv_stderr_ready():
                    try:
                        line = next(stderr)
                        write_output(line, self.f_stderr)
                    except StopIteration:
                        pass

            # When the program finishes, we need to grab the rest of the output that is left.
            # Also, we don't have the issue of blocking reads because since the command is
            # finished, any pending reads of SSH encrypted data will finish shortly and put in
            # the buffer or for an empty file EOF will be reached as it will be closed.
            for line in stdout:
                write_output(line, self.f_stdout)
            for line in stderr:
                write_output(line, self.f_stderr)

            exit_status = session.recv_exit_status()
            if exit_status != 0:
                logger.warning('Exit code %d!', exit_status)
            return SSHResult(exit_status, ''.join(output))
        except paramiko.SSHException:
            if reraise:
                raise
            else:
                logger.exception('Exception happened during SSH call')
        except socket.timeout:
            logger.exception(
                "Command %r timed out. Output before it failed was:\n%r",
                command,
                ''.join(output))
            raise

        # Returning two things so tuple unpacking the return works even if the ssh client fails
        # Return whatever we have in the output
        return SSHResult(1, ''.join(output))
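The dict form of `command` accepted by both implementations maps appliance versions to alternative command strings; a hypothetical call (the version keys and commands are invented, and the exact resolution rules belong to VersionPicker / version.pick):

result = ssh_client.run_command({
    '5.9': 'systemctl status evmserverd',
    '5.10': 'systemctl status evmserverd --no-pager',
})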
Example #10
def download_and_merge_coverage_data(ssh, builds, jenkins_data, wave_size):
    """Download and merge coverage data in waves.

    Download the coverage tarballs from the specified builds in waves, merging
    the coverage data a few tarballs at a time.

    Args:
        ssh:  ssh object
        builds:  jenkins job builds from which to pull coverage data.
        jenkins_data:  Named tuple with these attributes:  url, user, token, client
        wave_size:  How many coverage tarballs to extract at a time when merging

    Returns:
        Nothing
    """
    # Note, this is totally based around the fact that coverage_merger.rb
    # doesn't care where a .resultset.json file (i.e. ruby code coverage
    # data file) comes from, such that we can reuse the merged data file
    # with successive waves of coverage data from different jenkins builds.
    #
    # What we will do for each wave is:
    #
    #   * extract some coverage data tarballs.
    #   * merge those.
    #   * cleanup the old data, and make the merged data look like just another
    #     result set.  On the last wave there is no cleanup.
    i = 0
    wave = 1
    while i < len(builds):
        logger.info('Processing wave #%s of coverage tarballs.', wave)
        build_wave = builds[i:i + wave_size]
        for build in build_wave:
            logger.info('Downloading the coverage data from build %s', build.number)
            download_url = jenkins_artifact_url(
                jenkins_data.user,
                jenkins_data.token,
                jenkins_data.url,
                build.job,
                build.number,
                build.coverage_archive)
            ssh_run_cmd(
                ssh=ssh,
                cmd='curl -k -o {} {}'.format(
                    py.path.local(COVERAGE_DIR).join('tmp.tgz'),
                    quote(download_url)),
                error_msg='Could not download coverage data from jenkins!')

            logger.info('Extracting the coverage data from build %s', build.number)
            extract_command = ' && '.join([
                'cd {}'.format(COVERAGE_DIR),
                'tar xf tmp.tgz --strip-components=1',
                'rm -f tmp.tgz'])
            ssh_run_cmd(
                ssh=ssh,
                cmd=extract_command,
                error_msg='Could not extract coverage data!')

        merge_coverage_data(
            ssh=ssh,
            coverage_dir=COVERAGE_DIR)

        # Increment index and wave count
        i += wave_size
        wave += 1

        # We have to cleanup the coverage data we just extracted, move
        # the merged .resultset.json file, and remove the merged data directory.
        # We move the .resultset.json files so that it will be seen as just another
        # result set to merge in the next wave, and the merged data directory is removed
        # so coverage_merger.rb won't get confused by results already being where it drops
        # its results.   However it is important that we don't do that
        # on the last wave, as we want the merge results to be available after the
        # last wave.
        #
        # XXX: Yes, this is a hack.  Not even one I am proud of.
        if i < len(builds):
            cleanup_coverage_data_wave(
                ssh=ssh,
                coverage_dir=COVERAGE_DIR,
            )
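ssh_run_cmd is not defined anywhere in this listing; judging from how it is called here (ssh, cmd, error_msg, plus pass-through kwargs) and from the SSHResult.failed attribute used in Example #14, a plausible reconstruction would be:

def ssh_run_cmd(ssh, cmd, error_msg, **kwargs):
    # Hypothetical helper: run the command and fail loudly on a non-zero rc.
    result = ssh.run_command(cmd, **kwargs)
    if result.failed:
        raise Exception('{}\n{}'.format(error_msg, str(result)))
    return result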
Example #11
def install_sonar_scanner(ssh, project_name, project_version, scanner_url, scanner_dir, server_url,
        sonar_creds):
    """ Install sonar-scanner application

    Pulls the sonar-scanner application to the appliance from scanner_url,
    installs it in scanner_dir, and configures it to send its scan data to
    server_url.  It also configures the project config for the scan, setting
    sonar.projectVersion to the appliance version, and setting sonar.sources
    to pick up both sets of sources.

    Args:
        ssh: ssh object (cfme.utils.ssh)
        project_name:  Name of the project to be scanned.
        project_version: Version of project to be scanned.
        scanner_url:  Where to get the scanner from.
        scanner_dir:  Where to install the scanner on the appliance.
        sonar_creds:  SonarCreds object.
        server_url:  Where to send scan data to (i.e. what sonarqube)

    Returns:
        Nothing
    """
    logger.info('Installing sonar scanner on appliance.')
    scanner_zip = '/root/scanner.zip'

    # Create install directory for sonar scanner:
    ssh_run_cmd(
        ssh=ssh,
        cmd='mkdir -p {}'.format(scanner_dir),
        error_msg='Could not create sonar scanner directory, {}, on appliance.'.format(
            scanner_dir))

    # Download the scanner
    ssh_run_cmd(
        ssh=ssh,
        cmd='wget -O {} {}'.format(scanner_zip, quote(scanner_url)),
        error_msg='Could not download scanner software, {}'.format(scanner_url))

    # Extract the scanner
    ssh_run_cmd(
        ssh=ssh,
        cmd='unzip -d {} {}'.format(scanner_dir, scanner_zip),
        error_msg='Could not extract scanner software, {}, to {}'.format(
            scanner_zip, scanner_dir))

    # Note, all the files are underneath one directory under our scanner_dir, but we don't
    # necessarily know the name of that directory.   Yes today, as I write this, the name
    # will be:
    #
    #   sonar-scanner-$version-linux
    #
    # but if they decide to change its name, any code that depended on that would break.   So
    # what we will do is go into the one directory that is now under our scanner_dir, and move all
    # those files up a directory (into our scanner_dir).   tar has the --strip-components
    # option that would have avoided this, however we are dealing with a zip file and unzip
    # has no similar option.
    ssh_run_cmd(
        ssh=ssh,
        cmd='cd {}; mv $(ls)/* .'.format(scanner_dir),
        error_msg='Could not move scanner files into scanner dir, {}'.format(scanner_dir))

    # Configure the scanner to point to the local sonarqube
    # WARNING:  This definitely makes the assumption the only thing we need in that config is
    #           the variable sonar.host.url set.  If that is ever wrong this will fail, perhaps
    #           mysteriously.  So the ease of this implementation is traded off against that
    #           possible future consequence.
    scanner_conf = '{}/conf/sonar-scanner.properties'.format(scanner_dir)
    ssh_run_cmd(
        ssh=ssh,
        cmd='echo "sonar.host.url={}" > {}'.format(server_url, scanner_conf),
        error_msg='Could not write scanner conf, {}'.format(scanner_conf))

    # Now configure the project
    #
    # We have sources in two directories:
    #
    #   - /opt/rh/cfme-gemset
    #   - /var/www/miq/vmdb
    #
    # It is very important that we set sonar.sources to a comma delimited
    # list of these directories but as relative paths, relative to /.   If
    # we configure them as absolute paths it will only see the files in /var/www/miq/vmdb.
    # Don't know why, it just is that way.
    #
    # Here is an example config:
    #
    #   sonar.login=bob
    #   sonar.password=buoyant
    #   sonar.projectKey=CFME5.9-11
    #   sonar.projectName=CFME-11
    #   sonar.projectVersion=5.9.0.17
    #   sonar.language=ruby
    #   sonar.sources=opt/rh/cfme-gemset,var/www/miq/vmdb
    project_conf = 'sonar-project.properties'
    local_conf = os.path.join(log_path.strpath, project_conf)
    remote_conf = '/{}'.format(project_conf)
    sonar_auth_snippet = ''
    if sonar_creds is not None:
        sonar_auth_snippet = '''
sonar.login={login}
sonar.password={password}
'''.format(
            login=sonar_creds.username,
            password=sonar_creds.password)
    config_data = '''
sonar.projectKey={project_key}
sonar.projectName={project_name}
sonar.projectVersion={version}
sonar.language=ruby
sonar.sources=opt/rh/cfme-gemset,var/www/miq/vmdb
{auth_snippet}
'''.format(
        auth_snippet=sonar_auth_snippet,
        project_name=project_name,
        project_key=gen_project_key(name=project_name, version=project_version),
        version=project_version)

    # Write the config file locally and then copy to remote.
    logger.info('Writing %s', local_conf)
    with open(local_conf, 'w') as f:
        f.write(config_data)
    logger.info('Copying %s to appliance as %s', local_conf, remote_conf)
    ssh.put_file(local_conf, remote_conf)
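gen_project_key is likewise not shown in this listing; from the sample config above (projectName=CFME-11, projectVersion=5.9.0.17, projectKey=CFME5.9-11) a plausible sketch is:

def gen_project_key(name, version):
    # Hypothetical reconstruction: splice the x.y series of the version into
    # the project name, e.g. ('CFME-11', '5.9.0.17') -> 'CFME5.9-11'.
    series = '.'.join(str(version).split('.')[:2])
    prefix, _, suffix = name.partition('-')
    return '{}{}-{}'.format(prefix, series, suffix)


print(gen_project_key('CFME-11', '5.9.0.17'))  # CFME5.9-11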
Example #14
def main(appliance, jenkins_url, jenkins_user, jenkins_token, job_name):
    if not jenkins_user or not jenkins_token:
        try:
            from cfme.utils import conf
            jenkins_user = conf.credentials.jenkins_app.user
            jenkins_token = conf.credentials.jenkins_app.token
        except (AttributeError, KeyError):
            raise ValueError(
                '--jenkins-user and --jenkins-token not provided and credentials yaml does not '
                'contain the jenkins_app entry with user and token')
    appliance_version = str(appliance.version).strip()
    logger.info('Looking for appliance version %s in %s', appliance_version, job_name)
    client = jenkins.Jenkins(jenkins_url, username=jenkins_user, password=jenkins_token)
    build_numbers = get_build_numbers(client, job_name)
    if not build_numbers:
        raise Exception('No builds for job {}'.format(job_name))

    # Find the builds with appliance version
    eligible_build_numbers = set()
    for build_number in build_numbers:
        try:
            artifacts = client.get_build_info(job_name, build_number)['artifacts']
            if not artifacts:
                raise ValueError()
        except (KeyError, ValueError):
            logger.info('No artifacts for %s/%s', job_name, build_number)
            continue

        artifacts = group_list_dict_by(artifacts, 'fileName')
        if 'appliance_version' not in artifacts:
            logger.info('appliance_version not in artifacts of %s/%s', job_name, build_number)
            continue

        build_appliance_version = download_artifact(
            jenkins_user, jenkins_token, jenkins_url, job_name, build_number,
            artifacts['appliance_version']['relativePath']).strip()

        if not build_appliance_version:
            logger.info('Appliance version unspecified for build %s', build_number)
            continue

        if Version(build_appliance_version) < Version(appliance_version):
            logger.info(
                'Build %s already has lower version (%s)', build_number, build_appliance_version)
            logger.info('Ending here')
            break

        if 'coverage-results.tgz' not in artifacts:
            logger.info('coverage-results.tgz not in artifacts of %s/%s', job_name, build_number)
            continue

        if not check_artifact(
                jenkins_user, jenkins_token, jenkins_url, job_name, build_number,
                artifacts['coverage-results.tgz']['relativePath']):
            logger.info('Coverage archive could not be downloaded, skipping')
            continue

        if build_appliance_version == appliance_version:
            logger.info('Build %s was found to contain what is needed', build_number)
            eligible_build_numbers.add(build_number)
        else:
            logger.info(
                'Skipping build %s because it does not have correct version (%s)',
                build_number,
                build_appliance_version)

    if not eligible_build_numbers:
        raise Exception(
            'Could not find any coverage reports for {} in {}'.format(appliance_version, job_name))

    # Stop the evm service, not needed at all
    logger.info('Stopping evmserverd')
    appliance.evmserverd.stop()
    # Install the coverage tools on the appliance
    logger.info('Installing simplecov')
    appliance.coverage._install_simplecov()
    # Upload the merger
    logger.info('Installing coverage merger')
    appliance.coverage._upload_coverage_merger()
    eligible_build_numbers = sorted(eligible_build_numbers)
    with appliance.ssh_client as ssh:
        if not ssh.run_command('mkdir -p {}'.format(coverage_dir)):
            raise Exception(
                'Could not create coverage directory on the appliance: {}'.format(coverage_dir))

        # Download and extract all the coverage data
        for build_number in eligible_build_numbers:
            logger.info('Downloading the coverage data from build %s', build_number)
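            # NOTE: as in Example #7, `artifacts` refers to the last build seen
            # in the search loop; the relativePath is assumed to be the same
            # for every eligible build.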
            download_url = jenkins_artifact_url(
                jenkins_user, jenkins_token, jenkins_url, job_name, build_number,
                artifacts['coverage-results.tgz']['relativePath'])
            cmd = ssh.run_command('curl -k -o {}/tmp.tgz {}'.format(
                coverage_dir,
                quote(download_url)))
            if cmd.failed:
                raise Exception('Could not download! - {}'.format(str(cmd)))

            # Extract coverage data
            logger.info('Extracting the coverage data from build %s', build_number)
            extract_command = ' && '.join([
                'cd {}'.format(coverage_dir),
                'tar xf tmp.tgz --strip-components=1',
                'rm -f tmp.tgz'])
            cmd = ssh.run_command(extract_command)
            if cmd.failed:
                raise Exception('Could not extract! - {}'.format(str(cmd)))

        merge_coverage_data(
            ssh=ssh,
            coverage_dir=coverage_dir)
        pull_merged_coverage_data(
            ssh=ssh,
            coverage_dir=coverage_dir)
        sonar_scan(
            ssh=ssh,
            project_version=appliance.version,
            scanner_url=sonar_scanner_url,
            scanner_dir=scanner_dir,
            server_url=sonar_server_url,
            timeout=scan_timeout)
Example #15
def main(appliance, jenkins_url, jenkins_user, jenkins_token, job_name):
    if not jenkins_user or not jenkins_token:
        try:
            from cfme.utils import conf
            jenkins_user = conf.credentials.jenkins_app.user
            jenkins_token = conf.credentials.jenkins_app.token
        except (AttributeError, KeyError):
            raise ValueError(
                '--jenkins-user and --jenkins-token not provided and credentials yaml does not '
                'contain the jenkins_app entry with user and token')
    appliance_version = str(appliance.version).strip()
    logger.info('Looking for appliance version %s in %s', appliance_version,
                job_name)
    client = jenkins.Jenkins(jenkins_url,
                             username=jenkins_user,
                             password=jenkins_token)
    build_numbers = get_build_numbers(client, job_name)
    if not build_numbers:
        raise Exception('No builds for job {}'.format(job_name))

    # Find the builds with appliance version
    eligible_build_numbers = set()
    for build_number in build_numbers:
        try:
            artifacts = client.get_build_info(job_name,
                                              build_number)['artifacts']
            if not artifacts:
                raise ValueError()
        except (KeyError, ValueError):
            logger.info('No artifacts for %s/%s', job_name, build_number)
            continue

        artifacts = group_list_dict_by(artifacts, 'fileName')
        if 'appliance_version' not in artifacts:
            logger.info('appliance_version not in artifacts of %s/%s',
                        job_name, build_number)
            continue

        build_appliance_version = download_artifact(
            jenkins_user, jenkins_token, jenkins_url, job_name, build_number,
            artifacts['appliance_version']['relativePath']).strip()

        if not build_appliance_version:
            logger.info('Appliance version unspecified for build %s',
                        build_number)
            continue

        if Version(build_appliance_version) < Version(appliance_version):
            logger.info('Build %s already has lower version (%s)',
                        build_number, build_appliance_version)
            logger.info('Ending here')
            break

        if 'coverage-results.tgz' not in artifacts:
            logger.info('coverage-results.tgz not in artifacts of %s/%s',
                        job_name, build_number)
            continue

        if not check_artifact(
                jenkins_user, jenkins_token, jenkins_url, job_name,
                build_number,
                artifacts['coverage-results.tgz']['relativePath']):
            logger.info(
                'Coverage archive could not be downloaded, skipping')
            continue

        if build_appliance_version == appliance_version:
            logger.info('Build %s was found to contain what is needed',
                        build_number)
            eligible_build_numbers.add(build_number)
        else:
            logger.info(
                'Skipping build %s because it does not have correct version (%s)',
                build_number, build_appliance_version)

    if not eligible_build_numbers:
        raise Exception(
            'Could not find any coverage reports for {} in {}'.format(
                appliance_version, job_name))

    eligible_build_numbers = sorted(eligible_build_numbers)

    # Stop the evm service, not needed at all
    logger.info('Stopping evmserverd')
    appliance.evmserverd.stop()
    # Install the coverage tools on the appliance
    logger.info('Installing simplecov')
    appliance.coverage._install_simplecov()
    # Upload the merger
    logger.info('Installing coverage merger')
    appliance.coverage._upload_coverage_merger()
    with appliance.ssh_client as ssh:
        if not ssh.run_command('mkdir -p {}'.format(coverage_dir)):
            raise Exception(
                'Could not create coverage directory on the appliance: {}'.
                format(coverage_dir))

        # Download and extract all the coverage data
        for build_number in eligible_build_numbers:
            logger.info('Downloading the coverage data from build %s',
                        build_number)
            download_url = jenkins_artifact_url(
                jenkins_user, jenkins_token, jenkins_url, job_name,
                build_number,
                artifacts['coverage-results.tgz']['relativePath'])
            cmd = ssh.run_command('curl -k -o {}/tmp.tgz {}'.format(
                coverage_dir, quote(download_url)))
            if cmd.failed:
                raise Exception('Could not download! - {}'.format(str(cmd)))

            # Extract coverage data
            logger.info('Extracting the coverage data from build %s',
                        build_number)
            extract_command = ' && '.join([
                'cd {}'.format(coverage_dir),
                'tar xf tmp.tgz --strip-components=1', 'rm -f tmp.tgz'
            ])
            cmd = ssh.run_command(extract_command)
            if cmd.failed:
                raise Exception('Could not extract! - {}'.format(str(cmd)))

        merge_coverage_data(ssh=ssh, coverage_dir=coverage_dir)
        pull_merged_coverage_data(ssh=ssh, coverage_dir=coverage_dir)
        sonar_scan(ssh=ssh,
                   project_version=appliance.version,
                   scanner_url=sonar_scanner_url,
                   scanner_dir=scanner_dir,
                   server_url=sonar_server_url,
                   timeout=scan_timeout)
Example #16
    def run_command(
            self, command, timeout=RUNCMD_TIMEOUT, reraise=False, ensure_host=False,
            ensure_user=False):
        """Run a command over SSH.

        Args:
            command: The command. Supports taking dicts as version picking.
            timeout: Timeout after which the command execution fails.
            reraise: Does not muffle the paramiko exceptions in the log.
            ensure_host: Ensure that the command is run on the machine with the IP given, not any
                container or such that we might be using by default.
            ensure_user: Ensure that the command is run as the user we logged in, so in case we are
                not root, setting this to True will prevent from running sudo.

        Returns:
            A :py:class:`SSHResult` instance.
        """
        if isinstance(command, dict):
            command = version.pick(command, active_version=self.vmdb_version)
        original_command = command
        uses_sudo = False
        logger.info("Running command %r", command)
        if self.is_pod and not ensure_host:
            # This command will be executed in the context of the host provider
            command_to_run = 'source /etc/default/evm; ' + command
            oc_cmd = 'oc exec --namespace={proj} {pod} -- bash -c {cmd}'.format(
                proj=self._project, pod=self._container, cmd=quote(command_to_run))
            command = oc_cmd
            ensure_host = True
        elif self.is_container and not ensure_host:
            command = 'docker exec {} bash -c {}'.format(self._container, quote(
                'source /etc/default/evm; ' + command))

        if self.username != 'root' and not ensure_user:
            # We need sudo
            command = 'sudo -i bash -c {command}'.format(command=quote(command))
            uses_sudo = True

        if command != original_command:
            logger.info("> Actually running command %r", command)
        command += '\n'

        output = []
        try:
            session = self.get_transport().open_session()
            if uses_sudo:
                # We need a pseudo-tty for sudo
                session.get_pty()
            if timeout:
                session.settimeout(float(timeout))

            session.exec_command(command)
            stdout = session.makefile()
            stderr = session.makefile_stderr()

            def write_output(line, file):
                output.append(line)
                if self._streaming:
                    file.write(line)

            while True:
                if session.exit_status_ready():
                    break

                # While the program is running loop through collecting line by line so that we don't
                # fill the buffers up without a newline.   Also, note that for long running programs if
                # we try to read output when there is none (and in the case of stderr may never be any)
                # we run the risk of blocking so long that the write buffer on the remote side will fill
                # and the remote program will block on a write.   The blocking on our side occurs in
                # paramiko's buffered_pipe.py's read() call, which will block if its
                # internal buffer is empty.
                if session.recv_ready():
                    try:
                        line = next(stdout)
                        write_output(line, self.f_stdout)
                    except StopIteration:
                        pass

                if session.recv_stderr_ready():
                    try:
                        line = next(stderr)
                        write_output(line, self.f_stderr)
                    except StopIteration:
                        pass

            # When the program finishes, we need to grab the rest of the output that is left.
            # Also, we don't have the issue of blocking reads because since the command is
            # finished, any pending reads of SSH encrypted data will finish shortly and put in
            # the buffer or for an empty file EOF will be reached as it will be closed.
            for line in stdout:
                write_output(line, self.f_stdout)
            for line in stderr:
                write_output(line, self.f_stderr)

            exit_status = session.recv_exit_status()
            if exit_status != 0:
                logger.warning('Exit code %d!', exit_status)
            return SSHResult(exit_status, ''.join(output))
        except paramiko.SSHException:
            if reraise:
                raise
            else:
                logger.exception('Exception happened during SSH call')
        except socket.timeout:
            logger.exception(
                "Command %r timed out. Output before it failed was:\n%r",
                command,
                ''.join(output))
            raise

        # Returning two things so tuple unpacking the return works even if the ssh client fails
        # Return whatever we have in the output
        return SSHResult(1, ''.join(output))
Example #17
    def run_command(
            self, command, timeout=RUNCMD_TIMEOUT, reraise=False, ensure_host=False,
            ensure_user=False):
        """Run a command over SSH.

        Args:
            command: The command. Supports taking dicts as version picking.
            timeout: Timeout after which the command execution fails.
            reraise: Does not muffle the paramiko exceptions in the log.
            ensure_host: Ensure that the command is run on the machine with the IP given, not any
                container or such that we might be using by default.
            ensure_user: Ensure that the command is run as the user we logged in, so in case we are
                not root, setting this to True will prevent from running sudo.

        Returns:
            A :py:class:`SSHResult` instance.
        """
        if isinstance(command, dict):
            command = version.pick(command, active_version=self.vmdb_version)
        original_command = command
        uses_sudo = False
        logger.info("Running command %r", command)
        if self.is_pod and not ensure_host:
            # This command will be executed in the context of the host provider
            command_to_run = 'source /etc/default/evm; ' + command
            oc_cmd = 'oc exec --namespace={proj} {pod} -- bash -c {cmd}'.format(
                proj=self._project, pod=self._container, cmd=quote(command_to_run))
            command = oc_cmd
            ensure_host = True
        elif self.is_container and not ensure_host:
            command = 'docker exec {} bash -c {}'.format(self._container, quote(
                'source /etc/default/evm; ' + command))

        if self.username != 'root' and not ensure_user:
            # We need sudo
            command = 'sudo -i bash -c {command}'.format(command=quote(command))
            uses_sudo = True

        if command != original_command:
            logger.info("> Actually running command %r", command)
        command += '\n'

        output = []
        try:
            session = self.get_transport().open_session()
            if uses_sudo:
                # We need a pseudo-tty for sudo
                session.get_pty()
            if timeout:
                session.settimeout(float(timeout))

            session.exec_command(command)
            stdout = session.makefile()
            stderr = session.makefile_stderr()
            while True:
                if session.recv_ready():
                    for line in stdout:
                        output.append(line)
                        if self._streaming:
                            self.f_stdout.write(line)

                if session.recv_stderr_ready():
                    for line in stderr:
                        output.append(line)
                        if self._streaming:
                            self.f_stderr.write(line)

                if session.exit_status_ready():
                    break
            exit_status = session.recv_exit_status()
            return SSHResult(exit_status, ''.join(output))
        except paramiko.SSHException:
            if reraise:
                raise
            else:
                logger.exception('Exception happened during SSH call')
        except socket.timeout:
            logger.exception(
                "Command %r timed out. Output before it failed was:\n%r",
                command,
                ''.join(output))
            raise

        # Return an SSHResult even when the SSH call failed, so tuple unpacking the
        # return value still works; return whatever we have in the output.
        return SSHResult(1, ''.join(output))
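
The pod/container/sudo branches above build the final command by layering shell quoting. A standalone sketch of that composition, assuming quote is shlex.quote (or an equivalent); the container name and inner command are placeholders:

# A sketch of the command wrapping above; container and inner command are placeholders.
from shlex import quote

inner = 'rake evm:status'
container = 'manageiq'

# Layer 1: run inside the container with the evm environment sourced.
command = 'docker exec {} bash -c {}'.format(
    container, quote('source /etc/default/evm; ' + inner))

# Layer 2: wrap the whole thing in sudo when not logged in as root.
command = 'sudo -i bash -c {}'.format(quote(command))

print(command)  # a fully shell-quoted sudo + docker exec invocation
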
Example #18
    def _run_command(self,
                     command,
                     timeout=RUNCMD_TIMEOUT,
                     ensure_host=False,
                     ensure_user=False,
                     container=None):
        if isinstance(command, dict):
            command = VersionPicker(command).pick(self.vmdb_version)
        original_command = command
        uses_sudo = False
        logger.info("Running command %r", command)
        container = container or self._container
        if self.is_pod and not ensure_host:
            # This command will be executed in the context of the host provider
            command_to_run = '[[ -f /etc/default/evm ]] && source /etc/default/evm; ' + command
            oc_cmd = 'oc exec --namespace={proj} {pod} -- bash -c {cmd}'.format(
                proj=self._project, pod=container, cmd=quote(command_to_run))
            command = oc_cmd
            ensure_host = True
        elif self.is_container and not ensure_host:
            command = 'docker exec {} bash -c {}'.format(
                container, quote('source /etc/default/evm; ' + command))

        if self.username != 'root' and not ensure_user:
            # We need sudo
            command = 'sudo -i bash -c {command}'.format(
                command=quote(command))
            uses_sudo = True

        if command != original_command:
            logger.info("> Actually running command %r", command)
        command += '\n'

        output = []
        try:
            session = self.get_transport().open_session()
            if uses_sudo:
                # We need a pseudo-tty for sudo
                session.get_pty()
            if timeout:
                session.settimeout(float(timeout))

            session.exec_command(command)
            stdout = session.makefile()
            stderr = session.makefile_stderr()

            def write_output(line, file):
                output.append(line)
                if self._streaming:
                    file.write(line)

            while True:
                if session.exit_status_ready():
                    break
                no_data = 0
                # While the program is running, collect output line by line so that we
                # don't fill the buffers up while waiting for a newline.
                # Also note that for long-running programs, if we try to read output when
                # there is none (and in the case of stderr there may never be any), we risk
                # blocking so long that the write buffer on the remote side fills up and
                # the remote program blocks on a write.
                # The blocking on our side occurs in paramiko's buffered_pipe.py read()
                # call, which blocks while its internal buffer is empty.
                if session.recv_ready():
                    try:
                        line = next(stdout)
                        write_output(line, self.f_stdout)
                    except StopIteration:
                        pass
                else:
                    no_data += 1

                if session.recv_stderr_ready():
                    try:
                        line = next(stderr)
                        write_output(line, self.f_stderr)
                    except StopIteration:
                        pass
                else:
                    no_data += 1

                if no_data == 2:
                    gevent.sleep(0.01)

            # When the program finishes, we need to grab the rest of the output that is left.
            # We no longer risk blocking reads here: since the command has finished, any
            # pending reads of SSH encrypted data will finish shortly and be put in the
            # buffer, or EOF will be reached once the (possibly empty) stream is closed.
            for line in stdout:
                write_output(line, self.f_stdout)
            for line in stderr:
                write_output(line, self.f_stderr)

            exit_status = session.recv_exit_status()
            if exit_status != 0:
                logger.warning('Exit code %d!', exit_status)
            return SSHResult(rc=exit_status,
                             output=''.join(output),
                             command=command)
        except socket.timeout:
            logger.exception(
                "Command %r timed out. Output before it failed was:\n%r",
                command, ''.join(output))
            raise

        # Return an SSHResult even when the SSH call failed, so tuple unpacking the
        # return value still works; return whatever we have in the output.
        return SSHResult(rc=1, output=''.join(output), command=command)
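
The read loop above polls recv_ready()/recv_stderr_ready() so that a read never blocks while the remote program is still producing output, and sleeps briefly when neither stream has data. The same technique against a plain paramiko channel, as a self-contained sketch (host, credentials, and command are placeholders):

# A self-contained sketch of the poll-and-sleep read loop; host/credentials are placeholders.
import time
import paramiko

client = paramiko.SSHClient()
client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
client.connect('appliance.example.com', username='root', password='secret')

session = client.get_transport().open_session()
session.settimeout(60.0)
session.exec_command('for i in 1 2 3; do echo line $i; sleep 1; done')

chunks = []
while not session.exit_status_ready():
    got_data = False
    if session.recv_ready():          # a read will not block now
        chunks.append(session.recv(4096).decode())
        got_data = True
    if session.recv_stderr_ready():
        chunks.append(session.recv_stderr(4096).decode())
        got_data = True
    if not got_data:
        time.sleep(0.01)              # back off instead of busy-spinning

# Drain whatever is left once the command has finished; reads cannot block anymore.
while session.recv_ready():
    chunks.append(session.recv(4096).decode())
print(session.recv_exit_status(), ''.join(chunks))
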
Example #19
def main(appliance, jenkins_url, jenkins_user, jenkins_token, job_name):
    if not jenkins_user or not jenkins_token:
        try:
            from cfme.utils import conf
            jenkins_user = conf.credentials.jenkins_app.user
            jenkins_token = conf.credentials.jenkins_app.token
        except (AttributeError, KeyError):
            raise ValueError(
                '--jenkins-user and --jenkins-token not provided and credentials yaml does not '
                'contain the jenkins_app entry with user and token')
    appliance_version = str(appliance.version).strip()
    print('Looking for appliance version {} in {}'.format(appliance_version, job_name))
    client = jenkins.Jenkins(jenkins_url, username=jenkins_user, password=jenkins_token)
    build_numbers = get_build_numbers(client, job_name)
    if not build_numbers:
        print('No builds for job {}'.format(job_name))
        return 1

    # Find the builds with appliance version
    eligible_build_numbers = set()
    for build_number in build_numbers:
        try:
            artifacts = client.get_build_info(job_name, build_number)['artifacts']
            if not artifacts:
                raise ValueError()
        except (KeyError, ValueError):
            print('No artifacts for {}/{}'.format(job_name, build_number))
            continue

        artifacts = group_list_dict_by(artifacts, 'fileName')
        if 'appliance_version' not in artifacts:
            print('appliance_version not in artifacts of {}/{}'.format(job_name, build_number))
            continue

        build_appliance_version = download_artifact(
            jenkins_user, jenkins_token, jenkins_url, job_name, build_number,
            artifacts['appliance_version']['relativePath']).strip()

        if not build_appliance_version:
            print('Appliance version unspecified for build {}'.format(build_number))
            continue

        if Version(build_appliance_version) < Version(appliance_version):
            print(
                'Build {} already has lower version ({})'.format(
                    build_number, build_appliance_version))
            print('Ending here')
            break

        if 'coverage-results.tgz' not in artifacts:
            print('coverage-results.tgz not in artifacts of {}/{}'.format(job_name, build_number))
            continue

        if build_appliance_version == appliance_version:
            print('Build {} was found to contain what is needed'.format(build_number))
            eligible_build_numbers.add(build_number)
        else:
            print(
                'Skipping build {} because it does not have correct version ({})'.format(
                    build_number, build_appliance_version))

    if not eligible_build_numbers:
        print(
            'Could not find any coverage reports for {} in {}'.format(
                appliance_version, job_name))
        return 2

    # Stop the evm service; it is not needed here at all
    print('Stopping evmserverd')
    appliance.evmserverd.stop()
    # Install the coverage tools on the appliance
    print('Installing simplecov')
    appliance.coverage._install_simplecov()
    # Upload the merger
    print('Installing coverage merger')
    appliance.coverage._upload_coverage_merger()
    eligible_build_numbers = sorted(eligible_build_numbers)
    with appliance.ssh_client as ssh:
        if not ssh.run_command('mkdir -p /var/www/miq/vmdb/coverage'):
            print('Could not create /var/www/miq/vmdb/coverage on the appliance!')
            return 3
        # Download all the coverage reports
        for build_number in eligible_build_numbers:
            print('Downloading the coverage report from build {}'.format(build_number))
            download_url = jenkins_artifact_url(
                jenkins_user, jenkins_token, jenkins_url, job_name, build_number,
                'log/coverage/coverage-results.tgz')
            cmd = ssh.run_command(
                'curl -k -o /var/www/miq/vmdb/coverage/tmp.tgz {}'.format(quote(download_url)))
            if not cmd:
                print('Could not download! - {}'.format(str(cmd)))
                return 4
            print('Extracting the coverage report from build {}'.format(build_number))
            extract_command = ' && '.join([
                'cd /var/www/miq/vmdb/coverage',
                'tar xf tmp.tgz --strip-components=1',
                'rm -f tmp.tgz',
            ])
            cmd = ssh.run_command(extract_command)
            if not cmd:
                print('Could not extract! - {}'.format(str(cmd)))
                return 5

        # Now run the merger
        print('Running the merger')
        cmd = ssh.run_command(
            'cd /var/www/miq/vmdb; time bin/rails runner coverage_merger.rb',
            timeout=60 * 60)
        if not cmd:
            print('Failure running the merger - {}'.format(str(cmd)))
            return 6
        else:
            print('Coverage report generation was successful')
            print(str(cmd))
            percentage = re.search(r'LOC\s+\((\d+\.\d+%)\)\s+covered\.', str(cmd))
            if percentage:
                print('COVERAGE={};'.format(percentage.group(1)))
            else:
                print('COVERAGE=unknown;')

        print('Packing the generated HTML')
        cmd = ssh.run_command('cd /var/www/miq/vmdb/coverage; tar cfz /tmp/merged.tgz merged')
        if not cmd:
            print('Could not compress! - {}'.format(str(cmd)))
            return 7
        print('Grabbing the generated HTML')
        ssh.get_file('/tmp/merged.tgz', log_path.strpath)
        print('Decompressing the generated HTML')
        rc = subprocess.call(
            ['tar', 'xf', log_path.join('merged.tgz').strpath, '-C', log_path.strpath])
        if rc == 0:
            print('Done!')
        else:
            print('Failure to extract')
            return 8
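
The COVERAGE extraction above keys off SimpleCov's summary line. A quick self-check of the regex against a sample line (the exact SimpleCov output format is an assumption):

# A self-contained check of the coverage regex used above; the sample line is illustrative.
import re

sample = '28393 / 130717 LOC (21.72%) covered.'
match = re.search(r'LOC\s+\((\d+\.\d+%)\)\s+covered\.', sample)
print('COVERAGE={};'.format(match.group(1) if match else 'unknown'))
# prints: COVERAGE=21.72%;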