def do_version(self, line):
    """ \rDisplay version information about modules """
    args = [self.config['ipvsadm'], '--version']
    ipvsadm = utils.check_output(args)

    header = ["", "Linux Virtual Server", "===================="]
    print '\n'.join(header)
    print ipvsadm
    print

    header = ["Director", "========"]
    print '\n'.join(header)
    if not self.config['director_bin']:
        director = 'director binary not defined. Unable to get version!'
    else:
        args = [self.config['director_bin'], '--version']
        director = utils.check_output(args)
    print director
    print

    args = [self.config['iptables'], '--version']
    iptables = utils.check_output(args)
    header = ["Packet Filtering", "================"]
    print '\n'.join(header)
    print iptables
    print

def filesync_nodes(self, op, filename):
    """Sync a file between nodes in the cluster.

    op has to be one of 'remove' or 'copy'.
    filename is the name of the file to be copied/removed.
    Returns True/False.
    """
    if self.nodes is not None:
        for node in self.nodes:
            if node != self.hostname:
                # Assumption is we only need to remotely remove a file
                # or copy a file to a remote location
                if op == 'remove':
                    args = ['ssh', node, 'rm', filename]
                elif op == 'copy':
                    remote = node + ":" + filename
                    args = ['scp', filename, remote]
                else:
                    logger.error('Unknown operation \'%s\' in filesync method!' % op)
                    return False

                logger.debug('Running command : %s' % (' '.join(args)))
                try:
                    utils.check_output(args)
                except OSError as e:
                    logger.error("Unable to sync state file to %s" % node)
                    logger.error(e)
                    return False
                except subprocess.CalledProcessError as e:
                    logger.error("Unable to sync state file to %s" % node)
                    logger.error(e)
                    return False
    return True

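# A minimal usage sketch of the method above, assuming `self.nodes` and
# `self.hostname` were already populated from configuration (the `director`
# instance name here is purely illustrative):
#
#   if director.filesync_nodes('copy', '/etc/lvsm/lvsm.conf'):
#       logger.info('Configuration synced to all nodes')
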
def _testArgsException(self, args):
    try:
        check_output(args)
        self.fail()
    except CalledProcessError as e:
        self.assertTrue('returned non-zero exit status' in str(e))
        self.assertTrue('Usage: ' in e.stderr)

def _runArgsTest(self, args):
    try:
        check_output(list_extend([SERVER_APP], args))
        self.fail()
    except CalledProcessError as e:
        self.assertTrue('returned non-zero exit status' in str(e))
        self.assertTrue('Usage: ' in e.stderr)

def test_bad(self):
    """Test wrong arguments to gather"""
    for args in ([], ['x'] * 4):
        out = utils.check_output(['cryptosite', 'gather'] + args,
                                 stderr=subprocess.STDOUT, retcode=2)
        out = utils.check_output(['python', '-m', 'cryptosite.gather'] + args,
                                 stderr=subprocess.STDOUT, retcode=2)

def test_bad(self):
    """Test wrong arguments to predict"""
    for args in (['x', 'y'], []):
        out = utils.check_output(['cryptosite', 'predict'] + args,
                                 stderr=subprocess.STDOUT, retcode=2)
        out = utils.check_output(['python', '-m', 'cryptosite.predict'] + args,
                                 stderr=subprocess.STDOUT, retcode=2)

def test_bad(self):
    """Test wrong arguments to am_bmi"""
    for args in (['x'],):
        out = utils.check_output(['cryptosite', 'am_bmi'] + args,
                                 stderr=subprocess.STDOUT, retcode=2)
        out = utils.check_output(['python', '-m', 'cryptosite.am_bmi'] + args,
                                 stderr=subprocess.STDOUT, retcode=2)

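# The cryptosite tests above assume a project-local `utils.check_output`
# wrapper that asserts on the exit code instead of raising when it is
# non-zero. A minimal sketch under that assumption (the real helper may
# differ):
import subprocess

def check_output(cmd, stderr=None, retcode=0):
    """Run cmd, assert it exits with `retcode`, and return its stdout."""
    p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=stderr,
                         universal_newlines=True)
    out, _ = p.communicate()
    assert p.returncode == retcode, \
        "%s exited with %d, expected %d" % (cmd, p.returncode, retcode)
    return out
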
def _rewrite_local_git_urls(buildconfig):
    '''Prefix all git repository urls in buildconfig that start with a slash
    (they reference local files) with a prefix of the local machine/user.
    '''
    hostname = utils.check_output(['hostname', '-f']).strip()
    user = utils.check_output(['whoami']).strip()
    for name, config in buildconfig.items():
        if not isinstance(config, dict):
            continue
        url = config.get('url')
        if url is not None and url.startswith('/'):
            config['url'] = '%s@%s:%s' % (user, hostname, url)

def commit(self, filename):
    try:
        cmd = ['dirname', filename]
        wd = utils.check_output(cmd, silent=True).rstrip('\n')

        cmd = ['basename', filename]
        name = utils.check_output(cmd, silent=True).rstrip('\n')

        args = ['git', 'commit', name]
        logger.info("Running command: %s" % " ".join(args))
        subprocess.call(args, cwd=wd)
    except (OSError, subprocess.CalledProcessError) as e:
        logger.error(e)

def stop(config):
    """Stop a running Jaguar service on the local host.

    Examples:

        $ jaguar service stop
    """
    statusCmd = 'jps -l | grep {0}'.format(JAGUAR_CLASSNAME)
    command = ['bash', '-c', statusCmd]
    debug("Executing: {0}".format(command))
    try:
        output = check_output(command, stderr=subprocess.STDOUT)
        pid = int(output.split()[0])
        r = 0
        while pid_exists(pid) and r < 10:
            kill(pid, signal.SIGKILL)
            r += 1
            time.sleep(1)
        if r == 10:
            click.echo("Failed to stop Jaguar service.")
        else:
            click.echo("Jaguar service has been stopped successfully.")
    except subprocess.CalledProcessError:
        click.echo('No Java process {0} is found.'.format(JAGUAR_CLASSNAME),
                   err=True)
    except ValueError:
        # `pid` is never bound when int() fails, so report the raw token
        click.echo('Failed to convert {0} into an integer.'.format(
            output.split()[0]), err=True)

def get_real(self, protocol):
    """Return a list of all real servers.

    Used for autocomplete mode in the shell.
    """
    args = [self.ipvsadm, '-L']
    result = list()
    prot = ''
    try:
        output = utils.check_output(args, silent=True)
    except OSError as e:
        logger.error(" %s" % e.strerror)
        return result

    lines = output.splitlines()
    for line in lines[3:]:
        if line[0:3] in ['TCP', 'UDP', 'FWM']:
            prot = line[0:3]
        elif (line.startswith(" ->") and
              (not protocol or protocol.upper() == prot)):
            r, sep, temp = line.partition(':')
            real = r[5:]
            if real not in result:
                result.append(real)
    return result

def salt_api_user(request, env):
    user = env['CLIENT_USER']
    password_salt = '00'
    password = crypt.crypt(env['CLIENT_PASSWORD'], password_salt)
    cmd = "useradd {0} -p '{1}'".format(user, password)
    output = check_output(shlex.split(cmd))
    request.addfinalizer(partial(delete_salt_api_user, env['CLIENT_USER']))
    return output

def check_dependency(binary, expected_version):
    # Since flow v0.18.1, `--version` was deprecated in favor of a `version` command
    cmd = [binary, '--version'] if binary != 'flow' else [binary, 'version']
    actual_version = utils.check_output(cmd).rstrip()
    if actual_version != expected_version:
        raise Exception(('Incorrect %s version. Found %s, expected %s. ' +
                         'Use the --no-version option to ignore this test.') %
                        (binary, actual_version, expected_version))

def build_ipvs(self):
    """Build a model of the running ipvsadm table internally"""
    args = [self.ipvsadm, '-L', '-n']
    try:
        output = utils.check_output(args)
    except OSError as e:
        logger.error("Problem with ipvsadm - %s" % e.strerror)
        return False
    except subprocess.CalledProcessError as e:
        logger.error("Problem with ipvsadm - %s" % e.output)
        return False

    # Clear out the old virtual table
    self.virtuals = list()

    # Break up the output and generate VIP and RIPs from it.
    # Assumption is that the first 3 lines of the ipvsadm output
    # are just informational, so we skip them.
    for line in output.split('\n')[3:]:
        if (line.startswith('TCP') or
                line.startswith('UDP') or
                line.startswith('FWM')):
            # Break the virtual line into tokens. There should only be 3.
            tokens = line.split()
            # First one is the protocol
            proto = tokens[0]
            if line.startswith('FWM'):
                # There's no port number in fwm mode
                ip = tokens[1]
                port = ''
            else:
                # Second token will be ip:port
                ip, sep, port = tokens[1].rpartition(':')
            # 3rd is the scheduler
            sched = tokens[2]
            # [optional] 5th is the persistence timeout
            if len(tokens) == 5:
                persistence = tokens[4]
            else:
                persistence = None

            v = Virtual(proto, ip, port, sched, persistence)
            self.virtuals.append(v)
        # If the line doesn't begin with the above values, it is a realserver
        else:
            # The reals are always added to the last vip
            if len(self.virtuals) > 0:
                tokens = line.split()
                if len(tokens) == 6:
                    ip, sep, port = tokens[1].rpartition(':')
                    method = tokens[2]
                    weight = tokens[3]
                    active = tokens[4]
                    inactive = tokens[5]

                    v = self.virtuals[-1]
                    r = Real(ip, port, weight, method, active, inactive)
                    v.realServers.append(r)

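# For reference, the parser above assumes `ipvsadm -L -n` output of roughly
# this shape: three informational header lines, then one line per virtual
# server followed by its real servers. An illustrative sample (not captured
# from a live system):
#
#   IP Virtual Server version 1.2.1 (size=4096)
#   Prot LocalAddress:Port Scheduler Flags
#     -> RemoteAddress:Port           Forward Weight ActiveConn InActConn
#   TCP  192.0.2.10:80 rr persistent 300
#     -> 192.0.2.101:80               Masq    1      0          0
#     -> 192.0.2.102:80               Masq    1      0          0
#
# The TCP line has five tokens, so tokens[4] ('300') becomes the persistence
# timeout; each real-server line has six tokens, matching the len(tokens) == 6
# branch.
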
def _generate_iptables_rules(self):
    rules = []

    try:
        routes_output = utils.check_output(['route', '-n'],
                                           stderr=subprocess.PIPE)
    except subprocess.CalledProcessError:
        logger.exception('Failed to get IP routes. %r' % {
            'server_id': self.id,
        })
        raise

    routes = {}
    for line in routes_output.splitlines():
        line_split = line.split()
        if len(line_split) < 8 or not re.match(IP_REGEX, line_split[0]):
            continue
        routes[line_split[0]] = line_split[7]

    if '0.0.0.0' not in routes:
        raise IptablesError('Failed to find default network interface', {
            'server_id': self.id,
        })
    default_interface = routes['0.0.0.0']

    rules.append(['INPUT', '-i', self.interface, '-j', 'ACCEPT'])
    rules.append(['FORWARD', '-i', self.interface, '-j', 'ACCEPT'])

    interfaces = set()
    for network_address in self.local_networks or ['0.0.0.0/0']:
        args = ['POSTROUTING', '-t', 'nat']
        network = self._parse_network(network_address)[0]

        if network not in routes:
            logger.debug('Failed to find interface for local network ' +
                         'route, using default route. %r' % {
                             'server_id': self.id,
                         })
            interface = default_interface
        else:
            interface = routes[network]
        interfaces.add(interface)

        if network != '0.0.0.0':
            args += ['-d', network_address]

        args += ['-s', self.network, '-o', interface, '-j', 'MASQUERADE']
        rules.append(args)

    for interface in interfaces:
        rules.append(['FORWARD', '-i', interface, '-o', self.interface,
                      '-m', 'state', '--state', 'ESTABLISHED,RELATED',
                      '-j', 'ACCEPT'])
        rules.append(['FORWARD', '-i', self.interface, '-o', interface,
                      '-m', 'state', '--state', 'ESTABLISHED,RELATED',
                      '-j', 'ACCEPT'])

    return rules

def check_dependency(cmd, expected_output):
    try:
        actual_output = utils.check_output(cmd).rstrip()
    except OSError as e:
        raise Exception('Error while running %s: %s' % (' '.join(cmd), str(e)))
    if actual_output != expected_output:
        raise Exception(('Ran "%s" and found %s but expected %s. ' +
                         'Use the --no-version option to ignore this test.') %
                        (' '.join(cmd), actual_output, expected_output))

def process_data_url(url):
    downloaded_file = url.split('/')[-1]
    unzipped_file = os.path.splitext(downloaded_file)[0]
    ascii_file = '%s.ascii' % unzipped_file

    # Download file
    wget_cmd = '%s %s' % (WGET, url)
    logging.getLogger(__name__).debug(wget_cmd)
    try:
        utils.check_output(wget_cmd)
    except Exception as e:
        logging.getLogger(__name__).warn(e)
        return (url, False)

    # Unzip file
    unzip_cmd = '%s %s' % (BUNZIP, downloaded_file)
    logging.getLogger(__name__).debug(unzip_cmd)
    try:
        utils.check_output(unzip_cmd)
    except Exception as e:
        logging.getLogger(__name__).error(e)
        return (url, False)

    # Convert to ascii
    if args.ascii and os.path.isfile(BGPDUMP):
        ascii_cmd = '%s %s > %s' % (BGPDUMP, unzipped_file, ascii_file)
        logging.getLogger(__name__).debug(ascii_cmd)
        try:
            utils.check_output(ascii_cmd)
            os.remove(unzipped_file)
        except Exception as e:
            logging.getLogger(__name__).error(e)
            return (url, False)

    return (url, True)

def modified(self, filename): """Verifies that a file was modified. Returns True if it was""" try: cmd = ['dirname', filename] wd = utils.check_output(cmd, silent=True).rstrip('\n') cmd = ['basename', filename] name = utils.check_output(cmd, silent=True).rstrip('\n') args = ['git', 'status', '-s', name] stdout = utils.check_output(args, cwd=wd) except (OSError, subprocess.CalledProcessError) as e: logger.error(e) return False output = stdout.strip(' \n') if output and output.startswith('M'): logger.debug('%s was modified' % filename) return True else: return False
def test_get_eslintable_files(self):
    actual = PackageManager().get_eslintable_files()
    actual.sort()
    expected = utils.check_output([
        'node', '-p',
        '''
        require("eslint/lib/util/glob-util")
          .listFilesToProcess(["**/*.js"])
          .map(f => f.filename)
          .join("\\n")
        ''',
    ], cwd=NUCLIDE_PATH).strip().split('\n')
    expected.sort()
    self.assertEqual(actual, expected)

def modified(self, filename): """Check the status of the file and if modified return True""" # prepare the svn command cmd = ['svn', 'status', filename] # call the svn command try: logger.info("Running the command: %s" % " ".join(cmd)) ret = utils.check_output(cmd) if ret and ret.startswith('M'): return True except IOError as e: logger.error(e) except subprocess.CalledProcessError as e: logger.error(e.output) return False
def get_virtual(self, protocol):
    """Return a list of the virtual servers by protocol.

    Used for autocomplete mode in the shell.
    """
    args = [self.ipvsadm, '-L']
    result = list()
    try:
        output = utils.check_output(args, silent=True)
    except OSError as e:
        logger.error(" %s" % e.strerror)
        return result

    lines = output.splitlines()
    for line in lines:
        if line.startswith(protocol.upper()):
            r, sep, temp = line.partition(':')
            result.append(r[5:])
    return result

def svn_sync(self, filename, username, password):
    """Commit changed configs to svn and do update on remote node."""
    # Commit config locally
    svn_cmd = ('svn commit --username ' + username +
               ' --password ' + password + ' ' + filename)
    logger.info('Running command : %s' % svn_cmd)
    try:
        result = subprocess.call(svn_cmd, shell=True)
    except OSError as e:
        logger.error("Problem with configuration sync - %s" % e.strerror)

    # Update config on all nodes
    n = self.config['nodes']
    if n != '':
        nodes = n.replace(' ', '').split(',')
    else:
        nodes = None

    try:
        hostname = utils.check_output(['hostname', '-s'])
    except (OSError, subprocess.CalledProcessError):
        hostname = ''

    if nodes is not None:
        svn_cmd = ('svn update --username ' + username +
                   ' --password ' + password + ' ' + filename)
        for node in nodes:
            if node != hostname:
                args = 'ssh ' + node + ' ' + svn_cmd
                logger.info('Running command : %s' % args)
                try:
                    subprocess.call(args, shell=True)
                except OSError as e:
                    logger.error("Problem with configuration sync - %s"
                                 % e.strerror)

def update(self, filename, node):
    """Run 'git pull' on a remote node to sync it with the local repo.

    Assumption is that a 'remote' named 'lvsm' is created and points
    to the matching git repo on each opposite node.
    ex. remote.lvsm.url=user@node1:/etc/lvsm/
    """
    try:
        cmd = ['dirname', filename]
        logger.debug('Updating %s' % filename)
        wd = utils.check_output(cmd, silent=True).rstrip('\n')
        logger.debug('working directory used by git: %s' % wd)

        # remote = 'lvsm'
        args = ['ssh', node, 'cd', wd, ';',
                'git', 'pull', self.remote, self.branch]
        logger.info('Running command: %s' % " ".join(args))
        subprocess.call(args)
    except (OSError, subprocess.CalledProcessError) as e:
        logger.error(e)
        return False
    return True

def _check_rev_list(revision):
    """Checks whether revision is reachable from HEAD of git project."""
    logging.info('Checking if revision {rev} exists in {proj}'.format(
        rev=revision, proj=git_project_path))
    try:
        cmd = [
            'git', '-C', path, 'rev-list', 'HEAD..{}'.format(revision)
        ]
        output = utils.check_output(cmd).strip()
    except subprocess.CalledProcessError as error:
        logging.error('Error: {}'.format(error))
        return False
    else:
        if output:
            logging.debug(
                '{proj} does not have the following revisions: {rev}'.format(
                    proj=git_project_path, rev=output))
            return False
        else:
            logging.info(
                'Found revision {rev} in project {proj}'.format(
                    rev=revision, proj=git_project_path))
            return True

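# The check above relies on `git rev-list HEAD..<rev>` printing nothing when
# <rev> is an ancestor of HEAD, and printing the missing commits otherwise.
# An illustrative shell session:
#
#   $ git rev-list HEAD..HEAD~1        # ancestor: no output, revision found
#   $ git rev-list HEAD..origin/other  # prints commits HEAD does not have
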
def _make_buildscript(hook, buildconfig, keep_buildconfig=False):
    if not keep_buildconfig:
        buildconfig_file = tempfile.NamedTemporaryFile(prefix='buildconfig_',
                                                       delete=False)
        buildconfig_filename = buildconfig_file.name
    else:
        buildconfig_filename = 'buildconfig.json'
        buildconfig_file = open(buildconfig_filename, 'w')

    with buildconfig_file:
        buildconfig_file.write(json.dumps(buildconfig.config, indent=2))
        buildconfig_file.write('\n')
        buildconfig_file.close()

    build = utils.check_output([hook, _userdir, buildconfig.taskfilename,
                                buildconfig_filename, buildconfig.taskname],
                               cwd=_hooksdir)
    if not keep_buildconfig:
        os.unlink(buildconfig_filename)

    # Extract buildid and create buildname
    buildid = _extract_buildid(build)
    buildname = buildconfig.taskname + "_" + buildid
    return build, buildname

def delete_salt_api_user(username):
    cmd = "userdel {0}".format(username)
    check_output(shlex.split(cmd))

def _run(self, args=None):
    if args is None:
        args = []
    return check_output(list_extend([CLIENT_APP], args), env=self.env)

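# `list_extend` above (and in _runArgsTest earlier) is assumed to be a small
# helper that returns a new extended list, since list.extend() itself returns
# None and could not be passed to check_output inline. A plausible sketch:
def list_extend(base, extra):
    """Return a new list consisting of `base` followed by `extra`."""
    result = list(base)
    result.extend(extra)
    return result
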
def main(): """Main program loop.""" global output_dir global log_level parser = argparse.ArgumentParser(description="DocWriter Usage information") parser.add_argument( "files", nargs="+", help="list of source files to parse, wildcards are allowed", ) parser.add_argument( "-t", "--title", metavar="T", help="set project title, as in '-t \"My Project\"'", ) parser.add_argument( "-o", "--output", metavar="DIR", required=True, help="set output directory, as in '-o mydir'", ) parser.add_argument( "-p", "--prefix", metavar="PRE", help="set documentation prefix, as in '-p ft2'", ) group = parser.add_mutually_exclusive_group() group.add_argument( "-q", "--quiet", help="run quietly, show only errors", action="store_true", ) group.add_argument("-v", "--verbose", help="increase output verbosity", action="store_true") args = parser.parse_args() # process options project_title = "Project" project_prefix = None output_dir = None if args.title: project_title = args.title if args.output: utils.output_dir = args.output if args.prefix: project_prefix = args.prefix if args.quiet: log_level = logging.ERROR if args.verbose: log_level = logging.DEBUG # set up the logger setup_logger(level=log_level) log = logging.getLogger("docwriter") # check all packages status = check.check() if status != 0: sys.exit(3) utils.check_output() # create context and processor source_processor = sources.SourceProcessor() content_processor = content.ContentProcessor() # retrieve the list of files to process file_list = utils.make_file_list(args.files) for filename in file_list: source_processor.parse_file(filename) content_processor.parse_sources(source_processor) # process sections content_processor.finish() # clean up directory log.info("Cleaning output directory") utils.clean_markdown_dir() formatter = tomarkdown.MdFormatter(content_processor, project_title, project_prefix) # build the docs utils.build_message() formatter.toc_dump() formatter.index_dump() formatter.section_dump_all()
def test_get_osarch(env, caller_client):
    expected = check_output(['rpm', '--eval', '%{_host_cpu}']).strip()
    assert caller_client.cmd('grains.get', 'osarch') == expected

def testHelp(self):
    out, err = check_output([SERVER_APP, '--help'])
    self.assertTrue('Usage: ' in out)

def test_unknown_command(self):
    """Check 'cryptosite' with an unknown command"""
    for args in (['bad-command'], ['help', 'bad-command']):
        out = utils.check_output(['cryptosite'] + args, retcode=1)

def deploy_oauth_reqs():
    '''OAuth integration in OCP'''
    # Token generation for session_secret
    session_secret = secrets.token_hex(43)
    secret_name = 'grafana-proxy'
    if not utils.check_if_exists(k8s_object='secret',
                                 k8s_object_name=secret_name,
                                 target=deploy_options.target,
                                 namespace=deploy_options.namespace,
                                 profile=deploy_options.profile):
        cmd = "{} -n {} create secret generic {} --from-literal=session_secret={}"\
            .format(CMD_BIN, deploy_options.namespace, secret_name, session_secret)
        utils.check_output(cmd)

    ## Create and Annotate Serviceaccount
    sa_name = 'grafana'
    if not utils.check_if_exists(k8s_object='sa',
                                 k8s_object_name=sa_name,
                                 target=deploy_options.target,
                                 namespace=deploy_options.namespace,
                                 profile=deploy_options.profile):
        cmd = "{} -n {} create serviceaccount {}".format(
            CMD_BIN, deploy_options.namespace, sa_name)
        utils.check_output(cmd)

    json_manifest = ('{"kind":"OAuthRedirectReference","apiVersion":"v1",'
                     '"reference":{"kind":"Route","name":"grafana"}}')
    annotation_name = 'serviceaccounts.openshift.io/oauth-redirectreference.grafana'
    cmd = "{} -n {} annotate serviceaccount {} --overwrite {}='{}'".format(
        CMD_BIN, deploy_options.namespace, sa_name, annotation_name,
        json_manifest)
    utils.check_output(cmd)

    # Get OCP certificate
    if not utils.check_if_exists(k8s_object='secret',
                                 k8s_object_name='openshift-custom-ca',
                                 target=deploy_options.target,
                                 namespace=deploy_options.namespace,
                                 profile=deploy_options.profile):
        secret_name = 'router-certs-default'
        namespace = 'openshift-ingress'
        template = '{{index .data "tls.crt"}}'
        cmd = "{} get secret {} --namespace={} --template '{}'".format(
            CMD_BIN, secret_name, namespace, template)
        ca_cert = utils.check_output(cmd)

        # Render secret with the CA certificate of the OCP cluster
        src_file = os.path.join(
            os.getcwd(),
            'deploy/monitoring/prometheus/assisted-installer-ocp-prometheus-custom-ca.yaml')
        dst_file = os.path.join(
            os.getcwd(), 'build', deploy_options.namespace,
            'assisted-installer-ocp-prometheus-custom-ca.yaml')
        topic = 'OCP Custom CA'
        with open(src_file, "r") as src:
            with open(dst_file, "w+") as dst:
                data = src.read()
                data = data.replace("BASE64_CERT", ca_cert)
                data = data.replace('REPLACE_NAMESPACE', deploy_options.namespace)
                print("Deploying {}: {}".format(topic, dst_file))
                dst.write(data)
        utils.apply(target=deploy_options.target,
                    namespace=deploy_options.namespace,
                    profile=deploy_options.profile,
                    file=dst_file)

def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("--subsystem-test",
                        help="deploy in subsystem mode",
                        action="store_true")
    deploy_options = deployment_options.load_deployment_options(parser)

    utils.verify_build_directory(deploy_options.namespace)

    dst_file = os.path.join(os.getcwd(), 'build', deploy_options.namespace,
                            'deploy_ui.yaml')
    image_fqdn = deployment_options.get_image_override(deploy_options,
                                                       "ocp-metal-ui",
                                                       "UI_IMAGE")
    tag = deployment_options.get_tag(image_fqdn)
    clone_directory = os.path.join(os.getcwd(), "build/assisted-installer-ui")

    if not os.path.exists(clone_directory):
        utils.check_output(
            f"git clone --branch master {UI_REPOSITORY} {clone_directory}")

    cmd = f"cd {clone_directory} && git pull"
    if tag == "latest":
        log.warning(
            "No hash specified. Will run the deployment generation script "
            "from the top of master branch")
    else:
        cmd += f" && git reset --hard {tag}"

    cmd += f" && deploy/deploy_config.sh -t {clone_directory}/deploy/ocp-metal-ui-template.yaml " \
           f"-i {image_fqdn} -n {deploy_options.namespace} > {dst_file}"
    utils.check_output(cmd)

    log.info("Deploying %s", dst_file)
    utils.apply(target=deploy_options.target,
                namespace=deploy_options.namespace,
                profile=deploy_options.profile,
                file=dst_file)

    # In case of openshift, deploy ingress as well
    if deploy_options.target == "oc-ingress":
        src_file = os.path.join(os.getcwd(), 'deploy/ui/ui_ingress.yaml')
        dst_file = os.path.join(os.getcwd(), 'build', deploy_options.namespace,
                                'ui_ingress.yaml')
        with open(src_file, "r") as src:
            with open(dst_file, "w+") as dst:
                data = src.read()
                data = data.replace('REPLACE_NAMESPACE', deploy_options.namespace)
                data = data.replace(
                    'REPLACE_HOSTNAME',
                    utils.get_service_host('assisted-installer-ui',
                                           deploy_options.target,
                                           deploy_options.domain,
                                           deploy_options.namespace,
                                           deploy_options.profile))
                log.info("Deploying ingress from %s", dst_file)
                dst.write(data)
        utils.apply(target=deploy_options.target,
                    namespace=deploy_options.namespace,
                    profile=deploy_options.profile,
                    file=dst_file)

def testHelp(self):
    out, err = check_output([APP, "--help"])
    self.assertTrue("Usage: " in out)

def post(self):
    data = Cmd.parser.parse_args()
    cmd = data['cmd']
    std_path = data['std_path']
    output_path = data['output_path']
    module = data['module']
    nolog = data['nolog']

    activity = {
        'cmd': cmd,
        'std_path': std_path,
        'output_path': output_path,
        'status': 'Running',
    }

    if nolog == 'False':
        activities = utils.reading_json(current_path + '/storages/activities.json')
        if activities.get(module):
            activities[module].append(activity)
        else:
            activities[module] = [activity]

        utils.just_write(current_path + '/storages/activities.json',
                         activities, is_json=True)
        slack.slack_noti('log', self.options, mess={
            'title': "{0} | {1} | Execute".format(self.options['TARGET'], module),
            'content': '```{0}```'.format(cmd),
        })

    utils.print_info("Execute: {0} ".format(cmd))
    stdout = execute.run(cmd)
    # just ignore for testing purpose
    # stdout = "<< stdoutput >> << {0} >>".format(cmd)
    utils.check_output(output_path)

    if nolog == 'False':
        # Change status of log
        activities = utils.reading_json(current_path + '/storages/activities.json')
        for item in activities[module]:
            if item['cmd'] == cmd:
                if stdout is None:
                    item['status'] = 'Error'
                else:
                    item['status'] = 'Done'

        try:
            if std_path != '':
                utils.just_write(std_path, stdout)
                slack.slack_file('std', self.options, mess={
                    'title': "{0} | {1} | std".format(
                        self.options['TARGET'], module),
                    'filename': '{0}'.format(std_path),
                })
            if output_path != '':
                slack.slack_file('verbose-report', self.options, mess={
                    'channel': self.options['VERBOSE_REPORT_CHANNEL'],
                    'filename': output_path,
                })
        except Exception:
            pass

        utils.just_write(current_path + '/storages/activities.json',
                         activities, is_json=True)

    return jsonify(status="200", output_path=output_path)

def tess_pages(tiffs: str, file_format: str, output) -> str:
    fname_file = f"{tiffs[:-6]}.txt"
    make_tess_list(tiffs, fname_file)
    check_output(tesseract(fname_file, output, format=file_format))
    return f"{tiffs[:-10]}*"

def burst_pdf(pdf: str) -> str:
    # Create a unique signature to ensure no name collisions
    signature = int(random.random() * 100000000)
    check_output(pdftk.burst(pdf, f"tmp{signature}_%04d.pdf"))
    return f"tmp{signature}*.pdf"

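# `tesseract(...)` and `pdftk.burst(...)` above are assumed to be small
# command builders returning argv lists for check_output. A hypothetical
# sketch of the pdftk helper, mirroring `pdftk <pdf> burst output <pattern>`
# (the real helper may differ):
def burst(pdf, output_pattern):
    """Build the argv list for bursting a PDF into single-page files."""
    return ["pdftk", pdf, "burst", "output", output_pattern]
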
def user():
    return check_output(['whoami']).strip()

def get_full_sha(upstream_dir: Path, short_sha: str) -> str:
    return check_output(['git', 'rev-parse', short_sha],
                        cwd=upstream_dir).strip()

def test_task(self):
    check_output("""Lorem Ipsum is simply dummy text of the printing and
typesetting industry. Lorem Ipsum has been the industry's standard dummy
text ever since the 1500s, when an unknown printer took a galley of type
and scrambled it to make a type specimen book. It has survived not only
five centuries, but also the leap into electronic typesetting, remaining
essentially unchanged. It was popularised in the 1960s with the release
of Letraset sheets containing Lorem Ipsum passages, and more recently
with desktop publishing software like Aldus PageMaker including versions
of Lorem Ipsum
--------------------------------
Lorem Ipsum simply dummy text printing and typesetting industry. Lorem
Ipsum has been industry's standard dummy text ever since 1500s, when an
unknown printer took a galley type and scrambled it to make a type
specimen book. It has survived not only five centuries, but also leap
into electronic typesetting, remaining essentially unchanged. It
popularised in 1960s with release Letraset sheets containing Lorem Ipsum
passages, and more recently with desktop publishing
""")

        focused_window = get_focused_window()
        print_con(c)
        # print_con(c.nodes[0])
        print(
            f"Resolving redundancy on '{c.workspace().name}' "
            f"({format_con(c.nodes[0])} inside {format_con(c)}) ...")
        c.nodes[0].command('focus; move {}'.format(
            'up' if c.orientation == 'vertical' else 'left'))
        # This shouldn't be necessary ...
        if c.parent.type == 'workspace':
            c.nodes[0].command('move left; move up')
        focused_window.command('focus')
        count += 1
        workspaces.append(c.workspace().name)

if count >= limit:
    print("Maximum corrections reached; run script again to resolve more.")
print(f"{count} redundanc{'y has' if count == 1 else 'ies have'} been resolved.")
print(f"Returning focus to {format_con(original_focus)} ...")
original_focus.command('focus')

if 'notify' in argv[1:]:
    from utils import check_output
    message = (f"{count or 'No'} container "
               f"redundanc{'y' if count == 1 else 'ies'} "
               f"{'resolved' if count else 'exist'}")
    sub_text = '\n'.join(workspaces)
    if count >= limit:
        sub_text += "\n\nRun the script again to resolve more"
    check_output(['notify-send', '-t', '2000', message, sub_text])

def collect_job_metrics(gpu_infos, all_conns, type1_zombies, type2_zombies):
    stats_obj = docker_stats.stats()
    if stats_obj is None:
        logger.warning("docker stats returns None")
        return None

    result = []
    for container_id, stats in stats_obj.items():
        pai_service_name = None

        # TODO speed this up, since this is O(n^2)
        for service_name in pai_services:
            if stats["name"].startswith(service_name):
                pai_service_name = service_name[4:]  # remove "k8s_" prefix
                break

        inspect_info = docker_inspect.inspect(container_id)
        pid = inspect_info["pid"] if inspect_info is not None else None
        inspect_labels = utils.walk_json_field_safe(inspect_info, "labels")

        if not inspect_labels and pai_service_name is None:
            continue  # other container, maybe kubelet or api-server

        # Get network consumption. Since all our services/jobs run in the host
        # network, the network statistics from docker are not specific to one
        # container, so we have to collect them ourselves.
        lsof_result = network.lsof(pid)
        net_in, net_out = network.get_container_network_metrics(
            all_conns, lsof_result)
        if logger.isEnabledFor(logging.DEBUG):
            debug_info = utils.check_output(
                "ps -o cmd fp {0} | tail -n 1".format(pid), shell=True)
            logger.debug(
                "pid %s with cmd `%s` has lsof result %s, in %d, out %d",
                pid, debug_info, lsof_result, net_in, net_out)

        if pai_service_name is None:
            gpuIds, otherLabels = parse_from_labels(inspect_info["labels"])
            otherLabels.update(inspect_info["env"])

            for id in gpuIds:
                if gpu_infos:
                    labels = copy.deepcopy(otherLabels)
                    labels["minor_number"] = id

                    result.append(Metric("container_GPUPerc", labels,
                                         gpu_infos[id]["gpuUtil"]))
                    result.append(Metric("container_GPUMemPerc", labels,
                                         gpu_infos[id]["gpuMemUtil"]))

            result.append(Metric("container_CPUPerc", otherLabels,
                                 stats["CPUPerc"]))
            result.append(Metric("container_MemUsage", otherLabels,
                                 stats["MemUsage_Limit"]["usage"]))
            result.append(Metric("container_MemLimit", otherLabels,
                                 stats["MemUsage_Limit"]["limit"]))
            result.append(Metric("container_NetIn", otherLabels, net_in))
            result.append(Metric("container_NetOut", otherLabels, net_out))
            result.append(Metric("container_BlockIn", otherLabels,
                                 stats["BlockIO"]["in"]))
            result.append(Metric("container_BlockOut", otherLabels,
                                 stats["BlockIO"]["out"]))
            result.append(Metric("container_MemPerc", otherLabels,
                                 stats["MemPerc"]))
        else:
            labels = {"name": pai_service_name}
            result.append(Metric("service_cpu_percent", labels,
                                 stats["CPUPerc"]))
            result.append(Metric("service_mem_usage_byte", labels,
                                 stats["MemUsage_Limit"]["usage"]))
            result.append(Metric("service_mem_limit_byte", labels,
                                 stats["MemUsage_Limit"]["limit"]))
            result.append(Metric("service_mem_usage_percent", labels,
                                 stats["MemPerc"]))
            result.append(Metric("service_net_in_byte", labels, net_in))
            result.append(Metric("service_net_out_byte", labels, net_out))
            result.append(Metric("service_block_in_byte", labels,
                                 stats["BlockIO"]["in"]))
            result.append(Metric("service_block_out_byte", labels,
                                 stats["BlockIO"]["out"]))

    result.extend(generate_zombie_count(stats_obj, type1_zombies,
                                        type2_zombies))
    return result

def main():
    '''Deploy Prometheus operator and instance'''
    if deploy_options.target != "oc-ingress":
        # Deploy Operator Group
        deployer(
            'deploy/monitoring/prometheus/assisted-installer-operator-group.yaml',
            'OperatorGroup')
        # Deploy Subscription
        deploy_prometheus_sub(OLM_NS, CAT_SRC)
        # Deploy Prom svc
        deployer(
            'deploy/monitoring/prometheus/assisted-installer-k8s-prometheus-svc.yaml',
            'Prometheus Service')
        # Deploy Prometheus Instance
        deployer(
            'deploy/monitoring/prometheus/assisted-installer-k8s-prometheus-subscription-instance.yaml',
            'Prometheus Instance on K8s')
        sleep(10)
        utils.check_k8s_rollout('statefulset',
                                'prometheus-assisted-installer-prometheus',
                                deploy_options.namespace)
        # Deploy Prom svc Monitor
        deployer(
            'deploy/monitoring/prometheus/assisted-installer-prometheus-svc-monitor.yaml',
            'Prometheus Service Monitor')
    else:
        # Deploy Operator Group
        try:
            deployer(
                'deploy/monitoring/prometheus/assisted-installer-operator-group.yaml',
                'OperatorGroup')
        except Exception:
            cmd = "{} -n {} get OperatorGroup --no-headers".format(
                CMD_BIN, deploy_options.namespace)
            if not utils.check_output(cmd):
                print("The creation of an OperatorGroup is forbidden for your "
                      "user. Please request the creation of one before "
                      "executing this again, exiting...")
                sys.exit(1)
            else:
                print("Another OperatorGroup exists, continuing")
        # Deploy Subscription
        deploy_prometheus_sub(OLM_NS, CAT_SRC)
        # Deploy OAuth pre-reqs for OCP integration
        deploy_oauth_reqs()
        # Deploy Prom svc; we create the service first in order to
        # self-generate the secret prometheus-k8s-tls
        deployer(
            'deploy/monitoring/prometheus/assisted-installer-ocp-prometheus-svc.yaml',
            'Prometheus Service on OCP')
        # Deploy Prometheus Instance
        deployer(
            'deploy/monitoring/prometheus/assisted-installer-ocp-prometheus-subscription-instance.yaml',
            'Prometheus Instance on OCP')
        sleep(10)
        utils.check_k8s_rollout('statefulset',
                                'prometheus-assisted-installer-prometheus',
                                deploy_options.namespace)
        # Deploy Prom svc Monitor
        deployer(
            'deploy/monitoring/prometheus/assisted-installer-prometheus-svc-monitor.yaml',
            'Prometheus Service Monitor')
        # Deploy Prometheus Route
        deploy_prometheus_route()