def _run_service_command(self, command, expected_exit_code=None):
    with self.appliance.ssh_client as ssh:
        status, output = ssh.run_command('systemctl {} {}'.format(
            quote(command), quote(self.unit_name)))

    if expected_exit_code is not None and status != expected_exit_code:
        # TODO: Bring back address
        msg = 'Failed to {} {}\nError: {}'.format(command, self.unit_name, output)
        self.logger.error(msg)
        raise SystemdException(msg)

    return status
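
# A minimal usage sketch for `_run_service_command` above. The `restart`/`stop`
# wrappers are hypothetical members of the surrounding class, not part of the
# snippet itself; only the call signature and the exit-status semantics are
# taken from the code above.
def restart(self):
    # Fail loudly (SystemdException) unless systemctl exits 0.
    return self._run_service_command('restart', expected_exit_code=0)

def stop(self):
    # No expected exit code; the caller inspects the returned status itself.
    return self._run_service_command('stop')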
def run_command(self, command, timeout=RUNCMD_TIMEOUT, reraise=False, ensure_host=False):
    if isinstance(command, dict):
        command = version.pick(command)
    logger.info("Running command `%s`", command)
    if self.is_container and not ensure_host:
        command = 'docker exec {} bash -c {}'.format(self._container, quote(
            'source /etc/default/evm; ' + command))
        logger.info("Actually running command `%s`", command)
    template = '{}\n'
    command = template.format(command)

    output = ''
    try:
        session = self.get_transport().open_session()
        if timeout:
            session.settimeout(float(timeout))
        session.exec_command(command)
        stdout = session.makefile()
        stderr = session.makefile_stderr()
        while True:
            # recv_ready/recv_stderr_ready are methods and must be called;
            # the bare attribute is always truthy.
            if session.recv_ready():
                for line in stdout:
                    output += line
                    if self._streaming:
                        sys.stdout.write(line)
            if session.recv_stderr_ready():
                for line in stderr:
                    output += line
                    if self._streaming:
                        sys.stderr.write(line)
            if session.exit_status_ready():
                break
        exit_status = session.recv_exit_status()
        return SSHResult(exit_status, output)
    except paramiko.SSHException as exc:
        if reraise:
            raise
        else:
            logger.exception(exc)
    except socket.timeout as e:
        logger.error("Command `%s` timed out.", command)
        logger.exception(e)
        logger.error("Output of the command before it failed was:\n%s", output)
        raise
    # Returning two things so tuple unpacking the return works even if the ssh client fails
    return SSHResult(1, None)
def _collect_reports(self):
    # stop evm to end the processes and let the simplecov exit hook run
    self.ipapp.ssh_client.run_command('systemctl stop evmserverd')
    # collect back to the collection appliance if parallelized
    if store.current_appliance != self.collection_appliance:
        self.print_message('sending reports to {}'.format(self.collection_appliance.address))
        result = self.ipapp.ssh_client.run_command(
            'sshpass -p {passwd} '
            'scp -o StrictHostKeyChecking=no '
            '-r /var/www/miq/vmdb/coverage/* '
            '{addr}:/var/www/miq/vmdb/coverage/'.format(
                addr=self.collection_appliance.address,
                passwd=quote(self.ipapp.ssh_client._connect_kwargs['password'])),
            timeout=1800)
        if not result:
            self.print_message('There was an error sending reports: ' + str(result))
def run_command(
        self, command, timeout=RUNCMD_TIMEOUT, reraise=False, ensure_host=False,
        ensure_user=False):
    """Run a command over SSH.

    Args:
        command: The command. Supports dicts, which are resolved via version picking.
        timeout: Timeout after which the command execution fails.
        reraise: If True, paramiko exceptions are re-raised instead of only being logged.
        ensure_host: Ensure that the command is run on the machine with the given IP,
            not in any container we might be using by default.
        ensure_user: Ensure that the command is run as the user we logged in as; if we
            are not root, setting this to True prevents wrapping the command in sudo.

    Returns:
        A :py:class:`SSHResult` instance.
    """
    if isinstance(command, dict):
        command = version.pick(command)
    logger.info("Parsing command `{command}`".format(command=command))
    uses_sudo = False
    if self.is_container and not ensure_host:
        command = 'docker exec {} bash -c {}'.format(self._container, quote(
            'source /etc/default/evm; ' + command))
    if self.username != 'root' and not ensure_user:
        # We need sudo
        command = 'sudo -i bash -c {command}'.format(command=quote(command))
        uses_sudo = True
    logger.info("Running command `{command}`".format(command=command))
    command += '\n'

    output = []
    try:
        session = self.get_transport().open_session()
        if uses_sudo:
            # We need a pseudo-tty for sudo
            session.get_pty()
        if timeout:
            session.settimeout(float(timeout))
        session.exec_command(command)
        stdout = session.makefile()
        stderr = session.makefile_stderr()
        while True:
            # recv_ready/recv_stderr_ready must be called; the bare attribute is always truthy
            if session.recv_ready():
                for line in stdout:
                    output.append(line)
                    if self._streaming:
                        sys.stdout.write(line)
            if session.recv_stderr_ready():
                for line in stderr:
                    output.append(line)
                    if self._streaming:
                        sys.stderr.write(line)
            if session.exit_status_ready():
                break
        exit_status = session.recv_exit_status()
        return SSHResult(exit_status, ''.join(output))
    except paramiko.SSHException as exc:
        if reraise:
            raise
        else:
            logger.exception(exc)
    except socket.timeout as e:
        logger.error("Command `{command}` timed out.".format(command=command))
        logger.exception(e)
        logger.error("Output of the command before it failed was:\n{output}".format(
            output=''.join(output)))
        raise
    # Returning two things so tuple unpacking the return works even if the ssh client fails
    return SSHResult(1, None)
def run_command(
        self, command, timeout=RUNCMD_TIMEOUT, reraise=False, ensure_host=False,
        ensure_user=False):
    """Run a command over SSH.

    Args:
        command: The command. Supports dicts, which are resolved via version picking.
        timeout: Timeout after which the command execution fails.
        reraise: If True, paramiko exceptions are re-raised instead of only being logged.
        ensure_host: Ensure that the command is run on the machine with the given IP,
            not in any container we might be using by default.
        ensure_user: Ensure that the command is run as the user we logged in as; if we
            are not root, setting this to True prevents wrapping the command in sudo.

    Returns:
        A :py:class:`SSHResult` instance.
    """
    if isinstance(command, dict):
        command = version.pick(command)
    original_command = command
    uses_sudo = False
    logger.info("Running command %r", command)
    if self.is_pod and not ensure_host:
        # This command will be executed in the context of the host provider
        command = 'oc rsh {} bash -c {}'.format(self._container, quote(
            'source /etc/default/evm; ' + command))
        ensure_host = True
    elif self.is_container and not ensure_host:
        command = 'docker exec {} bash -c {}'.format(self._container, quote(
            'source /etc/default/evm; ' + command))
    if self.username != 'root' and not ensure_user:
        # We need sudo
        command = 'sudo -i bash -c {command}'.format(command=quote(command))
        uses_sudo = True
    if command != original_command:
        logger.info("> Actually running command %r", command)
    command += '\n'

    output = []
    try:
        session = self.get_transport().open_session()
        if uses_sudo:
            # We need a pseudo-tty for sudo
            session.get_pty()
        if timeout:
            session.settimeout(float(timeout))
        session.exec_command(command)
        stdout = session.makefile()
        stderr = session.makefile_stderr()
        while True:
            # recv_ready/recv_stderr_ready must be called; the bare attribute is always truthy
            if session.recv_ready():
                for line in stdout:
                    output.append(line)
                    if self._streaming:
                        self.f_stdout.write(line)
            if session.recv_stderr_ready():
                for line in stderr:
                    output.append(line)
                    if self._streaming:
                        self.f_stderr.write(line)
            if session.exit_status_ready():
                break
        exit_status = session.recv_exit_status()
        return SSHResult(exit_status, ''.join(output))
    except paramiko.SSHException:
        if reraise:
            raise
        else:
            logger.exception('Exception happened during SSH call')
    except socket.timeout:
        logger.exception(
            "Command %r timed out. Output before it failed was:\n%r",
            command, ''.join(output))
        raise
    # Returning two things so tuple unpacking the return works even if the ssh client fails.
    # Return whatever we have in the output.
    return SSHResult(1, ''.join(output))
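
# A minimal usage sketch for the final `run_command` variant above, assuming
# `client` is an instance of the class these methods belong to; the concrete
# command and timeout are illustrative. Dict commands are also accepted and
# resolved through `version.pick` before execution, per the docstring. Tuple
# unpacking of the returned SSHResult mirrors how `_run_service_command`
# consumes it earlier in this section.
status, out = client.run_command(
    'systemctl status evmserverd',
    timeout=60,
    ensure_host=True,  # run on the host itself, bypassing the docker/oc wrapping
)
if status != 0:
    print('command failed ({}):\n{}'.format(status, out))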
def main(appliance, jenkins_url, jenkins_user, jenkins_token, job_name):
    appliance_version = str(appliance.version).strip()
    print('Looking for appliance version {} in {}'.format(appliance_version, job_name))
    client = jenkins.Jenkins(jenkins_url, username=jenkins_user, password=jenkins_token)
    build_numbers = get_build_numbers(client, job_name)
    if not build_numbers:
        print('No builds for job {}'.format(job_name))
        return 1

    # Find the builds with the matching appliance version
    eligible_build_numbers = set()
    for build_number in build_numbers:
        try:
            artifacts = client.get_build_info(job_name, build_number)['artifacts']
            if not artifacts:
                raise ValueError()
        except (KeyError, ValueError):
            print('No artifacts for {}/{}'.format(job_name, build_number))
            continue

        artifacts = group_list_dict_by(artifacts, 'fileName')
        if 'appliance_version' not in artifacts:
            print('appliance_version not in artifacts of {}/{}'.format(
                job_name, build_number))
            continue

        build_appliance_version = download_artifact(
            jenkins_user, jenkins_token, jenkins_url, job_name, build_number,
            artifacts['appliance_version']['relativePath']).strip()

        if Version(build_appliance_version) < Version(appliance_version):
            print('Build {} already has lower version ({})'.format(
                build_number, build_appliance_version))
            print('Ending here')
            break

        if 'coverage-results.tgz' not in artifacts:
            print('coverage-results.tgz not in artifacts of {}/{}'.format(
                job_name, build_number))
            continue

        if build_appliance_version == appliance_version:
            print('Build {} was found to contain what is needed'.format(build_number))
            eligible_build_numbers.add(build_number)
        else:
            print(
                'Skipping build {} because it does not have the correct version ({})'.format(
                    build_number, build_appliance_version))

    if not eligible_build_numbers:
        print('Could not find coverage reports for {} in {}'.format(
            appliance_version, job_name))
        return 2

    # Stop the evm service, not needed at all
    print('Stopping evmserverd')
    appliance.evmserverd.stop()
    # Install the coverage tools on the appliance
    print('Installing simplecov')
    appliance.coverage._install_simplecov()
    # Upload the merger
    print('Installing coverage merger')
    appliance.coverage._upload_coverage_merger()

    with appliance.ssh_client as ssh:
        if not ssh.run_command('mkdir -p /var/www/miq/vmdb/coverage'):
            print('Could not create /var/www/miq/vmdb/coverage on the appliance!')
            return 3
        # Download all the coverage reports
        for build_number in eligible_build_numbers:
            print('Downloading the coverage report from build {}'.format(build_number))
            download_url = jenkins_artifact_url(
                jenkins_user, jenkins_token, jenkins_url, job_name, build_number,
                'log/coverage/coverage-results.tgz')
            cmd = ssh.run_command(
                'curl -k -o /var/www/miq/vmdb/coverage/tmp.tgz {}'.format(
                    quote(download_url)))
            if not cmd:
                print('Could not download! - {}'.format(str(cmd)))
                return 4
            print('Extracting the coverage report from build {}'.format(build_number))
            extract_command = ' && '.join([
                'cd /var/www/miq/vmdb/coverage',
                'tar xf tmp.tgz --strip-components=1',
                'rm -f tmp.tgz',
            ])
            cmd = ssh.run_command(extract_command)
            if not cmd:
                print('Could not extract! - {}'.format(str(cmd)))
                return 5

        # Now run the merger
        print('Running the merger')
        cmd = ssh.run_command(
            'cd /var/www/miq/vmdb; time bin/rails runner coverage_merger.rb')
        if not cmd:
            print('Failure running the merger - {}'.format(str(cmd)))
            return 6
        else:
            print('Coverage report generation was successful')
            print(str(cmd))

        print('Packing the generated HTML')
        cmd = ssh.run_command(
            'cd /var/www/miq/vmdb/coverage; tar cfz /tmp/merged.tgz merged')
        if not cmd:
            print('Could not compress! - {}'.format(str(cmd)))
            return 7

        print('Grabbing the generated HTML')
        ssh.get_file('/tmp/merged.tgz', log_path.strpath)

        print('Decompressing the generated HTML')
        rc = subprocess.call([
            'tar', 'xf', log_path.join('merged.tgz').strpath, '-C', log_path.strpath])
        if rc == 0:
            print('Done!')
        else:
            print('Failure to extract')
            return 8
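
# Exit-code summary for `main` above, taken directly from the returns in the
# function body; the wrapper below is a hypothetical sketch assuming the
# surrounding script already constructed `appliance` and the Jenkins arguments.
#   1 - no builds for the job          5 - extracting a report failed
#   2 - no matching coverage reports   6 - the merger run failed
#   3 - mkdir on the appliance failed  7 - compressing the merged HTML failed
#   4 - downloading a report failed    8 - local extraction failed
import sys
sys.exit(main(appliance, jenkins_url, jenkins_user, jenkins_token, job_name))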