def list_boards(self): """Return a list with all the supported boards""" # Print table click.echo('\nSupported boards:\n') BOARDLIST_TPL = ('{board:25} {fpga:20} {type:<5} {size:<5} {pack:<10}') terminal_width, _ = click.get_terminal_size() click.echo('-' * terminal_width) click.echo(BOARDLIST_TPL.format( board=click.style('Board', fg='cyan'), fpga='FPGA', type='Type', size='Size', pack='Pack')) click.echo('-' * terminal_width) for board in self.boards: fpga = self.boards.get(board).get('fpga') click.echo(BOARDLIST_TPL.format( board=click.style(board, fg='cyan'), fpga=fpga, type=self.fpgas.get(fpga).get('type'), size=self.fpgas.get(fpga).get('size'), pack=self.fpgas.get(fpga).get('pack'))) click.secho(BOARDS_MSG, fg='green')
def users():
    """List all of the team's users and their status"""
    try:
        response = slack_client.users.list(presence=True)
    except Exception as e:
        # Report the API failure and bail out -- previously execution fell
        # through and hit a NameError on the unbound `response`.
        click.echo(str(e))
        return
    if response.successful:
        users = response.body['members']
        # Collect array of arrays that contain user data in column order
        table_data = []
        for user in users:
            if not user['deleted']:
                user_data = [user['name'],
                             user.get('real_name', None),
                             user.get('presence', 'bot'),
                             user['profile'].get('email', None)]
                table_data.append(user_data)
        # Sort by presence so active users group together
        table_data.sort(key=operator.itemgetter(2))
        table_headers = [click.style('User', fg='yellow'),
                         click.style('Name', fg='yellow'),
                         click.style('Presence', fg='yellow'),
                         click.style('Email', fg='yellow')]
        click.secho(tabulate.tabulate(table_data, table_headers,
                                      tablefmt="fancy_grid"))
    else:
        # Slack answered but flagged the request as unsuccessful
        click.secho('Slack API request was unsuccessful')
def status(db, full, days, passphrase):
    """Decrypt all stored credentials and report repeated / recently
    modified passwords in a table.

    NOTE(review): `full` is not referenced in this body -- confirm it is
    consumed by a decorator or intentionally unused.
    """
    ensure_passphrase(passphrase, db.config)
    credentials = db.credentials()
    # Decrypt every password in place so the checkers can compare them
    for cred in credentials:
        decrypted = decrypt(cred['password'],
                            recipient=db.config['recipient'],
                            passphrase=passphrase,
                            homedir=db.config['homedir'])
        cred["password"] = decrypted
    if credentials:
        limit = db.config['status_repeated_passwords_limit']
        # Annotate each credential with 'repeated' and 'modified' findings
        credentials = checkers.repeated(credentials, limit)
        credentials = checkers.modified(credentials, days)
        # Highlight problems in red; missing values render as green 'OK'
        for c in credentials:
            if c['repeated']:
                c['repeated'] = click.style(str(c['repeated']), 'red')
            if c['modified']:
                c['modified'] = click.style(str(c['modified']), 'red')
        table = Table(['fullname', 'repeated', 'modified'],
                      table_format=db.config['table_format'],
                      missing=click.style('OK', 'green'))
        click.echo(table.render(credentials))
def process(self):
    """Run the wrapped command (self._run), echo a banner before and a
    framed SUCCESS/ERROR summary after, and return True on success."""
    terminal_width, _ = click.get_terminal_size()
    start_time = time()
    click.echo(
        "[%s] Processing %s (%s)"
        % (
            datetime.now().strftime("%c"),
            click.style(self.name, fg="cyan", bold=True),
            # .items() -- iteritems() exists only on Python 2 dicts
            ", ".join(["%s: %s" % (k, v) for k, v in self.options.items()]),
        )
    )
    click.secho("-" * terminal_width, bold=True)
    result = self._run()
    is_error = result["returncode"] != 0
    summary_text = " Took %.2f seconds " % (time() - start_time)
    # Floor division keeps the repeat count an int; true division would
    # crash on `str * float` under Python 3.
    half_line = "=" * ((terminal_width - len(summary_text) - 10) // 2)
    click.echo(
        "%s [%s]%s%s"
        % (
            half_line,
            (
                click.style(" ERROR ", fg="red", bold=True)
                if is_error
                else click.style("SUCCESS", fg="green", bold=True)
            ),
            summary_text,
            half_line,
        ),
        err=is_error,
    )
    return not is_error
def cli(force):
    """ Update AerisCloud """
    if not force and config.get('github', 'enabled', default=False) == 'true':
        client = Github().gh
        repo = client.repository('aeriscloud', 'aeriscloud')
        # next() builtin instead of Python 2-only .next()
        latest_release = next(repo.iter_releases())
        latest_version = latest_release.tag_name[1:]
        if semver.compare(version, latest_version) != -1:
            click.secho('AerisCloud is already up to date!', fg='green')
            sys.exit(0)
        click.echo('A new version of AerisCloud is available: %s (%s)' % (
            click.style(latest_version, fg='green', bold=True),
            click.style(latest_release.name, bold=True)
        ))
    # retrieve install script in a tmpfile
    tmp = tempfile.NamedTemporaryFile()
    try:
        r = requests.get('https://raw.githubusercontent.com/' +
                         'AerisCloud/AerisCloud/develop/scripts/install.sh')
        if r.status_code != 200:
            fatal('error: update server returned %d (%s)' % (
                r.status_code, r.reason))
        tmp.write(r.content)
        tmp.flush()
        os.environ['INSTALL_DIR'] = aeriscloud_path
        call(['bash', tmp.name])
    finally:
        # Previously the tempfile leaked when fatal() aborted mid-way
        tmp.close()
def save_photo_caption(logger, filename, caption):
    """Write *caption* into the photo's embedded metadata unless the file
    already carries a caption; log the outcome either way."""
    # Nothing to do for missing files or empty captions
    if not os.path.exists(filename) or not caption:
        return
    basename = os.path.basename(filename)
    basename_fmt = click.style(basename, bold=True)
    meta = Metadata(filename)
    existing_caption = meta.get_caption()
    if caption == existing_caption:
        # Same caption already present -- just report it
        logger.log('{}: {} (already there)'.format(
            basename_fmt,
            existing_caption,
        ))
    elif existing_caption:
        # A different caption exists: keep the original, report the clash.
        # NOTE(review): the '?' separator below may be a mangled '≠' from a
        # bad encoding round-trip -- confirm against the project history.
        logger.log('{}: {} ? {} (keeping original)'.format(
            basename_fmt,
            click.style(existing_caption, fg='blue'),
            click.style(caption, fg='blue'),
        ))
    else:
        try:
            meta.set_caption(caption)
            logger.log('{}: - → {}'.format(
                basename_fmt,
                click.style(caption, fg='green'),
            ))
        except FileFormatError:
            # Metadata cannot be embedded in this file type
            logger.log('{}: - (not an image file)'.format(basename_fmt))
def cli(jobset):
    """ Given a Hydra project, inspect latest evaluation and print a summary of failed builds """
    url = "http://hydra.nixos.org/jobset/{}".format(jobset)
    # get the last evaluation
    click.echo(click.style(
        'Getting latest evaluation for {}'.format(url), fg='green'))
    d = get_response_text(url)
    evaluations = d('#tabs-evaluations').find('a[class="row-link"]')
    # First row link is the most recent evaluation
    latest_eval_url = evaluations[0].get('href')
    # parse last evaluation page
    click.echo(click.style(
        'Parsing evaluation {}'.format(latest_eval_url), fg='green'))
    d = get_response_text(latest_eval_url + '?full=1')
    # TODO: aborted evaluations
    # TODO: dependency failed without propagated builds
    # Each "Failed" icon sits in a table row whose second link is the build
    for tr in d('img[alt="Failed"]').parents('tr'):
        a = pq(tr)('a')[1]
        # Emit a Markdown task-list entry per failed build
        print("- [ ] [{}]({})".format(a.text, a.get('href')))
        sys.stdout.flush()
        maintainers = get_maintainers(a.text)
        if maintainers:
            print(" - maintainers: {}".format(
                ", ".join(map(lambda u: '@' + u, maintainers))))
        # TODO: print last three persons that touched this file
        # TODO: pinpoint the diff that broke this build, or maybe it's transient or maybe it never worked?
        sys.stdout.flush()
def scan_list():
    """Show recently launched scans"""
    key = get_api_key()

    # Get the list
    api = shodan.Shodan(key)
    try:
        scans = api.scans()
    except shodan.APIError as e:
        raise click.ClickException(e.value)

    # `scans` is a dict (with 'total' and 'matches' keys), so the old
    # `len(scans) > 0` check was always true; test the reported total.
    if scans['total'] > 0:
        click.echo(u'# {} Scans Total - Showing 10 most recent scans:'.format(scans['total']))
        click.echo(u'# {:20} {:<15} {:<10} {:<15s}'.format('Scan ID', 'Status', 'Size', 'Timestamp'))
        for scan in scans['matches'][:10]:
            click.echo(
                u'{:31} {:<24} {:<10} {:<15s}'.format(
                    click.style(scan['id'], fg='yellow'),
                    click.style(scan['status'], fg='cyan'),
                    scan['size'],
                    scan['created']
                )
            )
    else:
        click.echo("You haven't yet launched any scans.")
def status(ctx, job_name):
    """Print per-task CI status and artifact-upload progress for a job."""
    queue = ctx.obj['queue']
    queue.fetch()
    # Shared column template for header and task rows
    tpl = '[{:>7}] {:<49} {:>20}'
    header = tpl.format('status', 'branch', 'artifacts')
    click.echo(header)
    click.echo('-' * len(header))
    job = queue.get(job_name)
    statuses = queue.github_statuses(job)
    for task_name, task in sorted(job.tasks.items()):
        status = statuses[task_name]
        assets = queue.github_assets(task)
        # Count how many expected artifacts already have uploaded assets
        uploaded = 'uploaded {} / {}'.format(
            sum(a in assets for a in task.artifacts),
            len(task.artifacts)
        )
        leadline = tpl.format(status.state.upper(), task.branch, uploaded)
        click.echo(click.style(leadline, fg=COLORS[status.state]))
        for artifact in task.artifacts:
            try:
                asset = assets[artifact]
            except KeyError:
                # Not uploaded yet: still pending while CI runs, missing otherwise
                state = 'pending' if status.state == 'pending' else 'missing'
                filename = '{:>70} '.format(artifact)
            else:
                state = 'ok'
                filename = '{:>70} '.format(asset.name)
            statemsg = '[{:>7}]'.format(state.upper())
            click.echo(filename + click.style(statemsg, fg=COLORS[state]))
def migrate_description(obj, verbose, html_log):
    """Convert obj.description from HTML to text via html2text, asking the
    user interactively when HTML tags survive the conversion.

    NOTE(review): uses the `unicode` builtin, so this is Python 2 only.
    """
    h = HTML2Text()
    h.unicode_snob = True
    # NOTE(review): without re.MULTILINE, ^...$ only matches when the whole
    # description is a single blank line -- confirm this is intended.
    input_html = re.sub(r'^\r?\n$', '<br>', unicode(obj.description))
    result = h.handle(input_html)
    if verbose:
        # Show a banner plus a colored before/after diff
        click.echo(click.style('\n' + ' ' * 80, bg='cyan', fg='black'))
        click.echo(click.style(repr(obj), fg='cyan'))
        click.echo(_deleted(highlight(unicode(obj.description), HtmlLexer(),
                                      Terminal256Formatter())))
        click.echo(_added(result))
    if re.search(r'</\w+>', result):
        # Closing tags survived the conversion -- ask the user what to do
        click.echo(click.style('[FAIL] ', fg='yellow', bold=True) +
                   click.style(repr(obj), fg='cyan'))
        click.echo(click.style(obj.description, fg='yellow', dim=True))
        choice = click.prompt("What do you want to do? [s = skip / c = change anyway / q = quit]")
        if choice == 's':
            return
        elif choice == 'q':
            sys.exit(1)
        else:
            _new_row(html_log, obj, result)
    else:
        _new_row(html_log, obj, result)
    obj.description = result
def lib_update(ctx, libid):
    """Check installed libraries against the registry and reinstall any
    out-of-date ones (optionally restricted to the given ids)."""
    lm = LibraryManager()
    for id_, latest_version in (lm.get_latest_versions() or {}).items():
        # Skip libraries not explicitly requested
        if libid and int(id_) not in libid:
            continue
        info = lm.get_info(int(id_))
        click.echo("Updating [ %s ] %s library:" % (
            click.style(id_, fg="yellow"),
            click.style(info['name'], fg="cyan")))
        current_version = info['version']
        if latest_version is None:
            click.secho("Unknown library", fg="red")
            continue
        click.echo("Versions: Current=%s, Latest=%s \t " % (
            current_version, latest_version), nl=False)
        if current_version == latest_version:
            click.echo("[%s]" % (click.style("Up-to-date", fg="green")))
            continue
        else:
            click.echo("[%s]" % (click.style("Out-of-date", fg="red")))
        # Update is implemented as uninstall followed by install
        ctx.invoke(lib_uninstall, libid=[int(id_)])
        ctx.invoke(lib_install, libid=[int(id_)])
def get_user_credentials(two1_dir="~/.two1/two1.json"):
    """ Collect user credentials at CLI.

    Reads the username from the 21 config file, opens the local wallet,
    then prompts for the password until the REST login succeeds.
    Returns (username, password).
    """
    with open(os.path.expanduser(two1_dir), "r") as f:
        username = json.load(f)["username"]
    try:
        w = wallet.Wallet()
    except Exception:
        # A bare `except:` would also swallow SystemExit/KeyboardInterrupt
        logger.info(click.style("A technical error occured. Please try the previous command again.", fg="magenta"))
        sys.exit()
    machine_auth = machine_auth_wallet.MachineAuthWallet(w)
    rest_client = _rest_client.TwentyOneRestClient(TWO1_HOST, machine_auth, username)
    address = w.current_address
    correct_password = False
    pw = click.prompt(click.style("Please enter your 21 password", fg=PROMPT_COLOR), hide_input=True)
    # Re-prompt until the server accepts the password
    while not correct_password:
        try:
            rest_client.login(payout_address=address, password=pw)
            correct_password = True
        except Exception:
            pw = click.prompt(click.style("Incorrect 21 password. Please try again", fg="magenta"), hide_input=True)
    return username, pw
def err(self, message):
    """Emit *message* to stderr as a highlighted error line, prefixed with
    this object's name rendered dim."""
    badge = click.style(' {} '.format(message), bg='red', fg='white', bold=True)
    prefix = click.style(self.name + ' ', dim=True)
    click.echo(prefix + badge, err=True)
def showModel(ctx, targetRegion, targetEnv, targetRole, targetService, targetPolicy):
    """Walk the role model and print matching role/policy definitions.

    Each target* argument narrows the walk; None means "match anything".
    """
    ctxRoles = ctx.model['roles']
    offset = 0
    width = 120
    for region in ctxRoles:
        # `is not None` instead of `!= None` (PEP 8 identity test)
        if targetRegion is not None and region != targetRegion:
            continue
        for env in ctxRoles[region]:
            if targetEnv is not None and env != targetEnv:
                continue
            for roleName in ctxRoles[region][env]:
                # Only print role headers (and indent policies) when nothing
                # more specific was requested
                if (targetPolicy is None and targetService is None) and targetRole is None:
                    ctx.log(click.style('Role: %s: ' % (roleName), fg='cyan'))
                    offset = 10
                else:
                    offset = 0
                for policyName in ctxRoles[region][env][roleName]:
                    if targetPolicy is not None and policyName != targetPolicy:
                        continue
                    modelPolicy = ctx.dumps(ctx.modelPolicies[policyName])
                    if modelPolicy is not None:
                        if targetPolicy is None:
                            # Don't display the policy name if only 1 policy is
                            # being shown.
                            ctx.log(click.style('%*sPolicy: %s: ' % (offset, '', policyName), fg='cyan'))
                        utils.showPolicyJson(ctx, modelPolicy, offset, width)
def _echo_request_line(self, flow):
    """Echo a one-line request summary: sticky-cookie marker, client
    address (or [replay]), colored method, URL and HTTP version."""
    if flow.request.stickycookie:
        stickycookie = click.style("[stickycookie] ", fg="yellow", bold=True)
    else:
        stickycookie = ""
    if flow.client_conn:
        client = click.style(strutils.bytes_to_escaped_str(flow.client_conn.address.host), bold=True)
    else:
        # No client connection means this flow is a replay
        client = click.style("[replay]", fg="yellow", bold=True)
    method = flow.request.method
    # Color-code the HTTP method; anything not listed renders magenta
    method_color = dict(
        GET="green",
        DELETE="red"
    ).get(method.upper(), "magenta")
    method = click.style(strutils.bytes_to_escaped_str(method), fg=method_color, bold=True)
    if self.showhost:
        url = flow.request.pretty_url
    else:
        url = flow.request.url
    url = click.style(strutils.bytes_to_escaped_str(url), bold=True)
    httpversion = ""
    if flow.request.http_version not in ("HTTP/1.1", "HTTP/1.0"):
        # We hide "normal" HTTP 1.
        httpversion = " " + flow.request.http_version
    line = "{stickycookie}{client} {method} {url}{httpversion}".format(
        stickycookie=stickycookie,
        client=client,
        method=method,
        url=url,
        httpversion=httpversion
    )
    self.echo(line)
def update_upstream(output_file, content, env_var):
    """ Updates upstream yaml file. """
    # Locate the scripts checkout from the environment
    scripts_path = os.getenv(env_var)
    if not scripts_path:
        raise ValueError('$' + env_var + ' is not set properly.')
    try:
        os.chdir(scripts_path)
    except OSError as err:
        click.echo(click.style(str(err), fg=COLORS['error']), err=True)
        click.echo(click.style('Make sure your env is set properly.',
                               fg=COLORS['debug']), err=True)
        sys.exit(1)
    # Dump the new content, then commit and push it upstream
    with open(output_file, 'w') as file_handler:
        file_handler.write(yaml.dump(content))
    git_commands = ["git add -u",
                    "git commit -m 'Update repos.yml'",
                    "git push origin master"
                    ]
    for cmd in git_commands:
        # Echo each command before running it, shell-trace style
        click.echo(click.style('+ ' + cmd, fg=COLORS['debug']))
        try:
            check_call(cmd, shell=True)
        except subprocess.CalledProcessError as err:
            click.echo(click.style(str(err), fg=COLORS['error']), err=True)
            sys.exit(1)
def _format_commit_comment_event(self, event):
    """Format commit comment and commit hash.

    :type event: :class:`github3` Event.
    :param event: An instance of `github3` Event.
    """
    item = click.style(self.event_type_mapping[event.type] + ' ',
                       fg=self.config.clr_secondary)
    item += click.style(
        self._format_sha(event.payload['comment'].commit_id),
        fg=self.config.clr_tertiary)
    item += click.style(' at ', fg=self.config.clr_secondary)
    item += click.style(self.format_user_repo(event.repo),
                        fg=self.config.clr_tertiary)
    # A pull_request payload key is only present for PR review comments
    try:
        item += click.style(
            '#' + str(event.payload['pull_request'].number) + ' ',
            fg=self.config.clr_tertiary)
    except KeyError:
        pass
    item += self._format_time(event)
    # With a PR: show its title plus the further-indented comment body;
    # otherwise show just the comment body
    try:
        item += self._format_indented_message(
            event.payload['pull_request'].title)
        item += self._format_indented_message(
            event.payload['comment'].body, indent=' ')
    except KeyError:
        item += self._format_indented_message(
            event.payload['comment'].body)
    return item
def create_admin(username, firstname, lastname, email, password):
    """ Creates an admin user """
    # Human-readable labels for the configured auth backend
    auth_type = {
        AUTH_DB: "Database Authentications",
        AUTH_OID: "OpenID Authentication",
        AUTH_LDAP: "LDAP Authentication",
        # NOTE(review): placeholder label -- confirm the intended text
        AUTH_REMOTE_USER: "******",
        AUTH_OAUTH: "OAuth Authentication",
    }
    click.echo(
        click.style(
            "Recognized {0}.".format(
                auth_type.get(current_app.appbuilder.sm.auth_type, "No Auth method")
            ),
            fg="green",
        )
    )
    role_admin = current_app.appbuilder.sm.find_role(
        current_app.appbuilder.sm.auth_role_admin
    )
    user = current_app.appbuilder.sm.add_user(
        username, firstname, lastname, email, role_admin, password
    )
    if user:
        click.echo(click.style("Admin User {0} created.".format(username), fg="green"))
    else:
        # Fixed typo and grammar in the user-facing failure message
        click.echo(click.style("No user created, an error occurred", fg="red"))
def update(root, repo_name, repos_file, env):
    """
    Update dependencies [used by CI]
    """
    repos_file = os.path.abspath(repos_file)
    # Load the repo -> packages mapping
    try:
        with open(repos_file, 'r') as file_handler:
            repos = yaml.safe_load(file_handler)
    except IOError as err:
        click.echo(click.style(str(err), fg=COLORS['error']))
        sys.exit(1)
    # Discover catkin packages under the workspace root
    catkin_output = catkin_pkg.packages.find_packages(root)
    local_pkgs = [pkg.name for pkg in catkin_output.values()]
    try:
        repo_dependencies = set(repos[repo_name])
    except KeyError as err:
        # Unknown repo: show the error and the known repo names for debugging
        click.echo(click.style(str(err) + ' not found in ' + repos_file,
                               fg=COLORS['error']))
        click.echo(click.style(str(repos.keys()), fg=COLORS['debug']))
        sys.exit(1)
    # Only push upstream when the package set actually changed
    if repo_dependencies == set(local_pkgs):
        click.echo(click.style('Nothing changed', fg=COLORS['success']))
    else:
        click.echo(click.style('Updating packages...', fg=COLORS['info']))
        repos[repo_name] = local_pkgs
        utils.update_upstream(repos_file, repos, env)
def _format_indented_message(self, message, newline=True, indent=' ', sha=''):
    """Format an indented message.

    :type message: str
    :param message: The commit comment.

    :type newline: bool
    :param newline: Determines whether to prepend a newline.

    :type indent: str
    :param indent: The indent, consisting of blank chars.
        TODO: Consider passing an int denoting # blank chars, or try to
        calculate the indent dynamically.

    :type sha: str
    :param sha: The commit hash.

    :rtype: str
    :return: The formattted commit comment.
    """
    subsequent_indent = indent
    if sha != '':
        # Widen the wrap indent so continuation lines clear the sha column
        subsequent_indent += ' '
    message = self.strip_line_breaks(message)
    formatted_message = click.wrap_text(
        text=click.style(sha, fg=self.config.clr_tertiary)+message,
        initial_indent=indent,
        subsequent_indent=subsequent_indent)
    if newline:
        # click.style with no style kwargs returns the text unchanged;
        # this just prepends the newline
        formatted_message = click.style('\n' + formatted_message)
    return formatted_message
def main(config, interval, verbosity, max_tests, gen_config):
    """
    AMQPeek - Simple, flexible RMQ monitor
    """
    configure_logging(verbosity)
    # --gen_config: write a starter config file and exit immediately
    if gen_config:
        try:
            gen_config_file()
        except ConfigExistsError:
            click.echo(
                click.style(
                    "An AMQPeek config already exists in the current directory",
                    fg="red",
                )
            )
        else:
            click.echo(click.style("AMQPeek config created", fg="green"))
            click.echo(
                click.style(
                    "Edit the file with your details and settings "
                    "before running AMQPeek",
                    fg="green",
                )
            )
        sys.exit(0)
    # Load the configuration; exit with a hint when none is found
    try:
        app_config = read_config(config)
    except IOError:
        click.echo(
            click.style(
                "No configuration file found. "
                "Specify a configuration file with --config. "
                "To generate a base config file use --gen_config.",
                fg="red",
            )
        )
        sys.exit(0)
    # Wire up the connection, queue checks, monitor and notifiers, then run
    connector = Connector(**app_config["rabbit_connection"])
    queue_config = build_queue_data(app_config)
    monitor = Monitor(
        connector=connector,
        queue_details=queue_config,
        interval=interval,
        max_connections=max_tests,
    )
    notifiers = create_notifiers(app_config["notifiers"])
    for notifier in notifiers:
        monitor.add_notifier(notifier)
    monitor.run()
def cli(assets_file):
    """Read lightning strikes from stdin and print an alert for each asset
    whose quadkey area is struck (one alert per asset, first strike only)."""
    with open(assets_file) as f:
        assets_data = json.load(f)
    assets_dict = {}
    # save the data as a dict format to improve execution efficiency
    count_aline = 0
    for asset in assets_data:
        count_aline += 1
        try:
            if asset["quadKey"] not in assets_dict:
                assets_dict[asset["quadKey"]] = [(asset["assetOwner"], asset["assetName"])]
            else:
                assets_dict[asset["quadKey"]].append((asset["assetOwner"], asset["assetName"]))
        except Exception as e:
            # Malformed asset record: report it (with its input position) and continue
            click.echo(click.style("Invalid asset input #%s: " % count_aline + str(asset), fg='red'))
    """Since quadkey might not be accurate enough there might be more than one asset owners sharing a same quadkey """
    count_lline = 0
    for line in sys.stdin:
        lightning_data = json.loads(line)
        count_lline += 1
        try:
            # exclude flashType 9 ('heartbeat' records)
            if lightning_data["flashType"] == 9:
                continue
            latitude = lightning_data['latitude']
            longitude = lightning_data['longitude']
            # convert the strike position to a level-12 quadkey
            qk = quadkey.from_geo((latitude, longitude), 12)
            if qk.key in assets_dict:
                asset_tuples = assets_dict[qk.key]
                for asset in asset_tuples:
                    click.echo('lightning alert for %s : %s' % (asset[0], asset[1]))
                # remove the key to prevent alerting several times
                del assets_dict[qk.key]
        except Exception as e:
            # Malformed strike record: report it and continue reading stdin
            click.echo(click.style("Invalid strike input #%s: " % count_lline + str(line), fg='red'))
def print_conclusion(flow, success, start_time):
    """Print the flow report followed by a final SUCCESS/FAILED row that
    carries the elapsed wall-clock time."""
    elapsed = fmt_time(time.time() - start_time)
    if success:
        marker, label = V, click.style('SUCCESS', bg='green', fg='black')
    else:
        marker, label = X, click.style('FAILED', bg='red')
    summary_row = (marker, label, elapsed, '')
    print_rows(chain(report(flow), [summary_row]))
def create_app(name, engine):
    """
    Create a Skeleton application (needs internet connection to github)
    """
    try:
        if engine.lower() == "sqlalchemy":
            url = urlopen(SQLA_REPO_URL)
            dirname = "Flask-AppBuilder-Skeleton-master"
        elif engine.lower() == "mongoengine":
            url = urlopen(MONGOENGIE_REPO_URL)
            dirname = "Flask-AppBuilder-Skeleton-me-master"
        else:
            # Previously an unknown engine fell through and raised a
            # confusing NameError on `url`; report it explicitly instead.
            click.echo(click.style("Unknown engine: {0}".format(engine), fg="red"))
            return False
        # Renamed local (was `zipfile`, shadowing the stdlib module name)
        skeleton_zip = ZipFile(BytesIO(url.read()))
        skeleton_zip.extractall()
        os.rename(dirname, name)
        click.echo(click.style("Downloaded the skeleton app, good coding!", fg="green"))
        return True
    except Exception as e:
        click.echo(click.style("Something went wrong {0}".format(e), fg="red"))
        if engine.lower() == "sqlalchemy":
            click.echo(
                click.style(
                    "Try downloading from {0}".format(SQLA_REPO_URL), fg="green"
                )
            )
        elif engine.lower() == "mongoengine":
            click.echo(
                click.style(
                    "Try downloading from {0}".format(MONGOENGIE_REPO_URL), fg="green"
                )
            )
        return False
def format_thread(self, view_entry):
    """Format a thread.

    :type view_entry: :class:`github3` Thread
    :param view_entry: An instance of `github3` Thread.

    :rtype: str
    :return: The formatted thread.
    """
    thread = view_entry.item
    # Title line with the index, then the thread URL in parentheses
    item = self.format_index_title(view_entry.index,
                                   thread.subject['title'])
    item += click.style('(' + view_entry.url + ')',
                        fg=self.config.clr_view_link)
    item += '\n'
    # Second line: seen flag, subject type and last-updated timestamp
    item += click.style((' ' + 'Seen: ' +
                         str(not thread.unread).ljust(7) + ' '),
                        fg=self.config.clr_secondary)
    item += click.style(('Type: ' +
                         str(thread.subject['type']).ljust(12) + ' '),
                        fg=self.config.clr_tertiary)
    item += click.style(('Updated: ' +
                         str(self.pretty_dt(thread.updated_at)) + ' '),
                        fg=self.config.clr_time)
    return item
def collect_static(static_folder):
    """
    Copies flask-appbuilder static files to your projects static folder
    """
    # Source: the appbuilder/static directory shipped with this package
    appbuilder_static_path = os.path.join(
        os.path.dirname(os.path.abspath(__file__)), "static/appbuilder"
    )
    # Destination: the project's static folder (relative to the CWD)
    app_static_path = os.path.join(os.getcwd(), static_folder)
    if not os.path.isdir(app_static_path):
        click.echo(
            click.style(
                "Static folder does not exist creating: %s" % app_static_path,
                fg="green"
            )
        )
        os.makedirs(app_static_path)
    try:
        shutil.copytree(
            appbuilder_static_path, os.path.join(app_static_path, "appbuilder")
        )
    except Exception:
        # copytree fails when the destination already exists (among other
        # reasons); treated here as "already collected"
        click.echo(
            click.style(
                "Appbuilder static folder already exists on your project",
                fg="red"
            )
        )
def format_trending_entry(self, view_entry):
    """Formats a trending repo entry.

    :type view_entry: dict
    :param view_entry: The URITemplates feed.

    :rtype: str
    :return: The formatted trending entry.
    """
    trending_entry = view_entry.item
    # The entry title is "<repo> <rest>"; keep only the repo part
    item_parts = trending_entry.title.split(' ')
    title = item_parts[0]
    item = self.format_index_title(view_entry.index, title)
    # First summary line is the description, last line is the language
    summary_parts = trending_entry.summary.split('\n')
    summary = summary_parts[0] if len(summary_parts) > 1 else ''
    summary = self.strip_line_breaks(summary)
    language = summary_parts[-1]
    if language == '()':
        language = '(Unknown)'
    # Strip the surrounding parentheses from the language label
    language = re.sub(r'(\()', r'', language)
    language = re.sub(r'(\))', r'', language)
    if summary:
        item += '\n'
        summary = click.wrap_text(
            text=summary,
            initial_indent=' ',
            subsequent_indent=' ')
        # fg passed positionally (click.style's second parameter)
        item += click.style(summary, self.config.clr_message)
    item += '\n'
    item += click.style(' ' + language,
                        fg=self.config.clr_message)
    return item
def format_item(self, item, index):
    """Format an item.

    :type item: :class:`haxor.Item`
    :param item: An instance of `haxor.Item`.

    :type index: int
    :param index: The index for the given item, used with the
        hn view [index] commend.

    :rtype: str
    :return: The formatted item.
    """
    formatted_item = self.format_index_title(index, item.title)
    if item.url is not None:
        netloc = urlparse(item.url).netloc
        # Strip only a leading "www." -- the old pattern 'www.' was
        # unanchored and its '.' matched any character, so it could
        # mangle hosts like "media.wwwx.example".
        netloc = re.sub(r'^www\.', '', netloc)
        formatted_item += click.style('(' + netloc + ')',
                                      fg=self.config.clr_view_link)
    formatted_item += '\n '
    formatted_item += click.style(str(item.score) + ' points ',
                                  fg=self.config.clr_num_points)
    formatted_item += click.style('by ' + item.by + ' ',
                                  fg=self.config.clr_user)
    submission_time = str(pretty_date_time(item.submission_time))
    formatted_item += click.style(submission_time + ' ',
                                  fg=self.config.clr_time)
    # descendants may be None/0 for items with no comment count
    num_comments = str(item.descendants) if item.descendants else '0'
    formatted_item += click.style('| ' + num_comments + ' comments',
                                  fg=self.config.clr_num_comments)
    return formatted_item
def get_test_times(num_tests):
    """Fetch recent Sauce Labs jobs and collect durations of passed tests.

    Returns a dict mapping lowercased test name to a list of
    [creation_time, duration] pairs.
    """
    from sauceutils import SauceTools
    # SECURITY(review): credentials are hard-coded; move them into
    # configuration or environment variables.
    sauce = SauceTools("https://saucelabs.com", "polarqa",
                       "d609b648-22e3-44bb-a38e-c28931df837d")
    jobs = []
    last_time = int(time.time())
    test_times = defaultdict(list)
    # Jobs come in pages of 500; float division so a partial page still
    # counts (xrange / integer division were Python 2 only).
    bar_length = int(math.ceil(num_tests / 500.0))
    with click.progressbar(range(bar_length),
                           label="Downloading statistics from Sauce Labs",
                           fill_char=click.style('+', fg='green', bold=True),
                           empty_char=click.style('-', fg='red'),
                           width=40) as bar:
        for i in bar:
            # Page backwards in time from the oldest job seen so far
            jobs += sauce.get_jobs(num_jobs=500, full=True, end_date=last_time)
            last_time = int(jobs[-1]['start_time'])
    # Only add tests that have passed
    for job in jobs:
        if job['passed'] and job['end_time']:
            test_times[job['name'].lower()].append(
                [float(job['creation_time']),
                 float(job['end_time']) - float(job['start_time'])])
    click.secho("Sorted through statistics", fg='white')
    return test_times
def _echo_response_line(self, flow):
    """Echo a one-line response summary: replay marker, "<<" arrows,
    colored status code, reason phrase and content size."""
    if flow.response.is_replay:
        replay = click.style("[replay] ", fg="yellow", bold=True)
    else:
        replay = ""
    code = flow.response.status_code
    # Color by status class: 2xx green, 3xx magenta, 4xx/5xx red
    code_color = None
    if 200 <= code < 300:
        code_color = "green"
    elif 300 <= code < 400:
        code_color = "magenta"
    elif 400 <= code < 600:
        code_color = "red"
    # 418 "I'm a teapot" gets a blinking easter egg
    code = click.style(str(code), fg=code_color, bold=True, blink=(code == 418))
    reason = click.style(strutils.bytes_to_escaped_str(flow.response.reason), fg=code_color, bold=True)
    if flow.response.content is None:
        size = "(content missing)"
    else:
        size = human.pretty_size(len(flow.response.content))
    size = click.style(size, bold=True)
    arrows = click.style("<<", bold=True)
    line = "{replay} {arrows} {code} {reason} {size}".format(
        replay=replay,
        arrows=arrows,
        code=code,
        reason=reason,
        size=size
    )
    self.echo(line)
def read_nitrate_case(testcase, makefile_data=None):
    """ Read old metadata from nitrate test case """
    data = {'tag': []}
    echo("test case found '{0}'.".format(testcase.identifier))
    # Test identifier
    data['extra-nitrate'] = testcase.identifier
    # Beaker task name (taken from summary)
    if testcase.summary:
        data['extra-summary'] = testcase.summary
        echo(style('extra-summary: ', fg='green') + data['extra-summary'])
    # Contact
    if testcase.tester:
        # Full 'Name Surname <email>' form
        if testcase.tester.name is not None:
            data['contact'] = '{} <{}>'.format(
                testcase.tester.name, testcase.tester.email)
        else:
            try:
                # Use contact from Makefile if it's there and email matches
                if re.search(testcase.tester.email, makefile_data['contact']):
                    data['contact'] = makefile_data['contact']
                else:
                    raise KeyError
            except (KeyError, TypeError):
                # Otherwise use just the email address
                data['contact'] = testcase.tester.email
        echo(style('contact: ', fg='green') + data['contact'])
    # Environment
    if testcase.arguments:
        data['environment'] = tmt.utils.shell_to_dict(testcase.arguments)
        # Drop the key entirely when nothing could be parsed
        if not data['environment']:
            data.pop('environment')
        else:
            echo(style('environment:', fg='green'))
            echo(pprint.pformat(data['environment']))
    # Possible multihost tag (detected in Makefile)
    if makefile_data:
        data['tag'].extend(makefile_data.get('tag', []))
    # Tags
    if testcase.tags:
        tags = []
        for tag in testcase.tags:
            if tag.name == 'fmf-export':
                continue
            tags.append(tag.name)
            # Add the tier attribute, if there are multiple TierX tags,
            # pick the one with the lowest index.
            tier_match = re.match(r'^Tier ?(?P<num>\d+)$', tag.name, re.I)
            if tier_match:
                num = tier_match.group('num')
                if 'tier' in data:
                    log.warning('Multiple Tier tags found, using the one '
                                'with a lower index')
                    if int(num) < int(data['tier']):
                        data['tier'] = num
                else:
                    data['tier'] = num
        # Include possible multihost tag (avoid duplicates)
        data['tag'] = sorted(set(tags + data['tag']))
        echo(style('tag: ', fg='green') + str(data['tag']))
    # Tier
    try:
        echo(style('tier: ', fg='green') + data['tier'])
    except KeyError:
        pass
    # Component
    data['component'] = [comp.name for comp in testcase.components]
    echo(style('component: ', fg='green') + ' '.join(data['component']))
    # Status
    data['enabled'] = testcase.status.name == "CONFIRMED"
    echo(style('enabled: ', fg='green') + str(data['enabled']))
    # Set manual attribute to manual tests only
    if not testcase.automated:
        data['manual'] = True
    # Relevancy
    field = tmt.utils.StructuredField(testcase.notes)
    try:
        relevancy = field.get('relevancy')
        if relevancy:
            # Convert the legacy relevancy expression into adjust rules
            data['adjust'] = relevancy_to_adjust(relevancy)
            echo(style('adjust:', fg='green'))
            echo(tmt.utils.dict_to_yaml(data['adjust']).strip())
    except tmt.utils.StructuredFieldError:
        pass
    # Extend bugs detected from Makefile with those linked in Nitrate
    try:
        data['link'] = makefile_data['link'].copy()
    except (KeyError, TypeError):
        pass
    for bug in testcase.bugs:
        add_bug(bug.bug, data)
    # Header and footer from notes (do not import the warning back)
    data['description'] = re.sub(
        tmt.export.WARNING, '', field.header() + field.footer())
    # Extras: [pepa] and [hardware]
    try:
        extra_pepa = field.get('pepa')
        if extra_pepa:
            data['extra-pepa'] = extra_pepa
            echo(style('extra-pepa:', fg='green'))
            echo(data['extra-pepa'].rstrip('\n'))
    except tmt.utils.StructuredFieldError:
        pass
    try:
        extra_hardware = field.get('hardware')
        if extra_hardware:
            data['extra-hardware'] = extra_hardware
            echo(style('extra-hardware:', fg='green'))
            echo(data['extra-hardware'].rstrip('\n'))
    except tmt.utils.StructuredFieldError:
        pass
    return data
def read(path, makefile, nitrate, purpose, disabled, types):
    """
    Read old metadata from various sources

    Returns tuple (common_data, individual_data) where 'common_data'
    are metadata which belong to main.fmf and 'individual_data' contains
    data for individual testcases (if multiple nitrate testcases found).

    :param path: directory containing the test sources
        (Makefile, PURPOSE, testinfo.desc, ...)
    :param makefile: when true, extract metadata from the 'Makefile'
    :param nitrate: when true, also read test cases from Nitrate
    :param purpose: when true, use the 'PURPOSE' file as the description
    :param disabled: include disabled Nitrate test cases as well
    :param types: list of Makefile 'Type' values to convert into tags
        ('all' converts every type found)
    :raises ConvertError: when a required file cannot be read or parsed
    """
    data = dict()
    echo("Checking the '{0}' directory.".format(path))

    # Make sure there is a metadata tree initialized
    try:
        tree = fmf.Tree(path)
    except fmf.utils.RootError:
        raise ConvertError("Initialize metadata tree using 'tmt init'.")

    # Makefile (extract summary, test, duration and requires)
    if makefile:
        echo(style('Makefile ', fg='blue'), nl=False)
        makefile_path = os.path.join(path, 'Makefile')
        try:
            with open(makefile_path, encoding='utf-8') as makefile_file:
                # NOTE: the 'makefile' parameter (a flag) is reused from
                # here on to hold the Makefile's text content
                makefile = makefile_file.read()
        except IOError:
            raise ConvertError("Unable to open '{0}'.".format(makefile_path))
        echo("found in '{0}'.".format(makefile_path))

        # If testinfo.desc exists read it to preserve content and remove it
        # (it is restored or cleaned up again at the end of this branch)
        testinfo_path = os.path.join(path, 'testinfo.desc')
        if os.path.isfile(testinfo_path):
            try:
                with open(testinfo_path, encoding='utf-8') as testinfo:
                    old_testinfo = testinfo.read()
                os.remove(testinfo_path)
            except IOError:
                raise ConvertError(
                    "Unable to open '{0}'.".format(testinfo_path))
        else:
            old_testinfo = None

        # Make Makefile 'makeable' without extra dependecies
        # (replace targets, make include optional and remove rhts-lint)
        makefile = makefile.replace('$(METADATA)', 'testinfo.desc')
        makefile = re.sub(
            r'^include /usr/share/rhts/lib/rhts-make.include',
            '-include /usr/share/rhts/lib/rhts-make.include',
            makefile, flags=re.MULTILINE)
        makefile = re.sub('.*rhts-lint.*', '', makefile)

        # Create testinfo.desc file with resolved variables by feeding the
        # patched Makefile to 'make' on stdin ("-f -")
        try:
            process = subprocess.run(
                ["make", "testinfo.desc", "-C", path, "-f", "-"],
                input=makefile, check=True, encoding='utf-8',
                stdout=subprocess.DEVNULL)
        except FileNotFoundError:
            # 'make' binary itself is missing
            raise ConvertError(
                "Install tmt-test-convert to convert metadata from Makefile.")
        except subprocess.CalledProcessError:
            raise ConvertError(
                "Failed to convert metadata using 'make testinfo.desc'.")

        # Read testinfo.desc
        try:
            with open(testinfo_path, encoding='utf-8') as testinfo_file:
                testinfo = testinfo_file.read()
        except IOError:
            raise ConvertError("Unable to open '{0}'.".format(testinfo_path))

        # Beaker task name (mandatory — conversion fails without it)
        try:
            beaker_task = re.search(r'Name:\s*(.*)\n', testinfo).group(1)
            echo(style('task: ', fg='green') + beaker_task)
            data['extra-task'] = beaker_task
            data['extra-summary'] = beaker_task
        except AttributeError:
            raise ConvertError("Unable to parse 'Name' from testinfo.desc.")

        # Summary (optional)
        try:
            data['summary'] = re.search(
                r'^Description:\s*(.*)\n', testinfo, re.M).group(1)
            echo(style('summary: ', fg='green') + data['summary'])
        except AttributeError:
            pass

        # Test script: the first command of the 'run' target (mandatory)
        try:
            data['test'] = re.search(
                r'^run:.*\n\t(.*)$', makefile, re.M).group(1)
            echo(style('test: ', fg='green') + data['test'])
        except AttributeError:
            raise ConvertError("Makefile is missing the 'run' target.")

        # Detect framework: a beakerlib mention in the test script
        # selects 'beakerlib', otherwise plain 'shell'
        try:
            test_path = os.path.join(path, data["test"])
            with open(test_path, encoding="utf-8") as test_file:
                if re.search("beakerlib", test_file.read()):
                    data["framework"] = "beakerlib"
                else:
                    data["framework"] = "shell"
            echo(style("framework: ", fg="green") + data["framework"])
        except IOError:
            raise ConvertError("Unable to open '{0}'.".format(test_path))

        # Contact (optional)
        try:
            data['contact'] = re.search(
                r'^Owner:\s*(.*)', testinfo, re.M).group(1)
            echo(style('contact: ', fg='green') + data['contact'])
        except AttributeError:
            pass

        # Component (optional, space-separated list)
        try:
            data['component'] = re.search(
                r'^RunFor:\s*(.*)', testinfo, re.M).group(1).split()
            echo(style('component: ', fg='green') +
                 ' '.join(data['component']))
        except AttributeError:
            pass

        # Duration (optional)
        try:
            data['duration'] = re.search(
                r'^TestTime:\s*(.*)', testinfo, re.M).group(1)
            echo(style('duration: ', fg='green') + data['duration'])
        except AttributeError:
            pass

        # Environment: each 'Environment:' line holds one KEY=VALUE pair
        variables = re.findall(r'^Environment:\s*(.*)', testinfo, re.M)
        if variables:
            data['environment'] = {}
            for variable in variables:
                key, value = variable.split('=', maxsplit=1)
                data['environment'][key] = value
            echo(style('environment:', fg='green'))
            echo(pprint.pformat(data['environment']))

        # RhtsRequires (optional) goes to require
        requires = re.findall(r'^RhtsRequires:\s*(.*)', testinfo, re.M)
        if requires:
            data['require'] = [
                require for line in requires for require in line.split()]
            echo(style('require: ', fg='green') + ' '.join(data['require']))

        # Requires (optional) goes to recommend
        recommends = re.findall(r'^Requires:\s*(.*)', testinfo, re.M)
        if recommends:
            data['recommend'] = [
                recommend for line in recommends
                for recommend in line.split()]
            echo(
                style('recommend: ', fg='green') +
                ' '.join(data['recommend']))

        # Convert Type into tags (filtered by the requested 'types')
        try:
            makefile_type = re.search(
                r'^Type:\s*(.*)', testinfo, re.M).group(1)
            if 'all' in [type_.lower() for type_ in types]:
                tags = makefile_type.split()
            else:
                tags = [type_ for type_ in types
                        if type_.lower() in makefile_type.lower().split()]
            if tags:
                echo(style("tag: ", fg="green") + " ".join(tags))
                data["tag"] = tags
        except AttributeError:
            pass

        # Add relevant bugs to the 'link' attribute
        for bug_line in re.findall(r'^Bug:\s*([0-9\s]+)', testinfo, re.M):
            for bug in re.findall(r'(\d+)', bug_line):
                add_bug(bug, data)

        # Warn if makefile has extra lines in run and build targets
        def target_content(target):
            """ Extract lines from the target content """
            regexp = rf"^{target}:.*\n((?:\t[^\n]*\n?)*)"
            target = re.search(regexp, makefile, re.M).group(1)
            return [line.strip('\t') for line in target.splitlines()]

        run_target_list = target_content("run")
        # The converted test script itself is expected; anything else
        # in the 'run' target deserves a warning
        run_target_list.remove(data["test"])
        if run_target_list:
            echo(style(
                f"warn: Extra lines detected in the 'run' target:",
                fg="yellow"))
            for line in run_target_list:
                echo(f" {line}")
        build_target_list = target_content("build")
        if len(build_target_list) > 1:
            echo(style(
                f"warn: Multiple lines detected in the 'build' target:",
                fg="yellow"))
            for line in build_target_list:
                echo(f" {line}")

        # Restore the original testinfo.desc content (if existed)
        if old_testinfo:
            try:
                with open(testinfo_path, 'w', encoding='utf-8') as testinfo:
                    testinfo.write(old_testinfo)
            except IOError:
                raise ConvertError(
                    "Unable to write '{0}'.".format(testinfo_path))
        # Remove created testinfo.desc otherwise
        else:
            os.remove(testinfo_path)

    # Purpose (extract everything after the header as a description)
    if purpose:
        echo(style('Purpose ', fg='blue'), nl=False)
        purpose_path = os.path.join(path, 'PURPOSE')
        try:
            with open(purpose_path, encoding='utf-8') as purpose:
                content = purpose.read()
            echo("found in '{0}'.".format(purpose_path))
            # Strip the standard PURPOSE header lines
            for header in ['PURPOSE', 'Description', 'Author']:
                content = re.sub('^{0}.*\n'.format(header), '', content)
            data['description'] = content.lstrip('\n')
            echo(style('description:', fg='green'))
            echo(data['description'].rstrip('\n'))
        except IOError:
            echo("not found.")

    # Nitrate (extract contact, environment and relevancy)
    # NOTE(review): 'beaker_task' is only assigned in the Makefile branch
    # above — calling with nitrate=True and makefile=False would raise
    # NameError here; presumably callers always pass both. TODO confirm.
    if nitrate:
        common_data, individual_data = read_nitrate(
            beaker_task, data, disabled)
    else:
        common_data = data
        individual_data = []

    # Remove keys which are inherited from parent
    parent_path = os.path.dirname(path.rstrip('/'))
    parent_name = '/' + os.path.relpath(parent_path, tree.root)
    parent = tree.find(parent_name)
    if parent:
        for test in [common_data] + individual_data:
            for key in list(test):
                if parent.get(key) == test[key]:
                    test.pop(key)

    log.debug('Common metadata:\n' + pprint.pformat(common_data))
    log.debug('Individual metadata:\n' + pprint.pformat(individual_data))
    return common_data, individual_data
def print_changes(scheduler_handle, print_fn=print, preview=False):
    """Pretty-print the scheduler's pending change set.

    With an empty change set only a summary line is printed; otherwise
    each add / update / remove entry is listed with a colored marker.
    When *preview* is true the wording announces planned (not yet
    applied) changes. Output goes through *print_fn*.
    """
    changeset = scheduler_handle.get_change_set()

    # Empty change set: report the unchanged count and bail out early.
    if not changeset:
        total = len(scheduler_handle.all_schedule_defs())
        if preview:
            print_fn(
                click.style('No planned changes to schedules.', fg='magenta',
                            bold=True))
            print_fn('{num} schedules will remain unchanged'.format(num=total))
        else:
            print_fn(
                click.style('No changes to schedules.', fg='magenta',
                            bold=True))
            print_fn('{num} schedules unchanged'.format(num=total))
        return

    header = 'Planned Changes:' if preview else 'Changes:'
    print_fn(click.style(header, fg='magenta', bold=True))

    for change_type, schedule_name, changes in changeset:
        if change_type == "add":
            print_fn(click.style(' + %s (add)' % schedule_name, fg='green'))
        if change_type == "change":
            print_fn(
                click.style(' ~ %s (update)' % schedule_name, fg='yellow'))
            for change_name, diff in changes:
                label = click.style('\t %s: ' % change_name, fg='yellow')
                # A two-element diff is an (old, new) pair; anything else
                # is printed as a single new value.
                if len(diff) == 2:
                    old, new = diff
                    print_fn(
                        label
                        + click.style(old, fg='red')
                        + " => "
                        + click.style(new, fg='green'))
                else:
                    print_fn(label + click.style(diff, fg='green'))
        if change_type == "remove":
            print_fn(click.style(' - %s (delete)' % schedule_name, fg='red'))
def waiting(msg):
    """Show *msg* as a yellow 'waiting' status line on stdout."""
    click.secho(Term.WAITING_PREFIX + msg, fg='yellow')
def read_nitrate(beaker_task, common_data, disabled):
    """ Read old metadata from nitrate test cases

    Searches Nitrate for test cases matching *beaker_task*, converts
    each one, factors the attributes shared by all of them into
    *common_data* (destined for main.fmf) and returns the tuple
    (common_data, individual_data). The individual list is empty when
    zero or one test case is found.

    :param beaker_task: Beaker task name used for the Nitrate search
    :param common_data: metadata already gathered (mutated in place)
    :param disabled: when true, include DISABLED test cases as well
    :raises ConvertError: when nitrate is unavailable, the search fails
        or the manual-test markdown file cannot be removed
    """
    # Need to import nitrate only when really needed. Otherwise we get
    # traceback when nitrate is not installed or config file not available.
    try:
        import gssapi
        import nitrate
    except ImportError:
        raise ConvertError('Install tmt-test-convert to import metadata.')

    # Check test case
    echo(style('Nitrate ', fg='blue'), nl=False)
    if beaker_task is None:
        raise ConvertError('No test name detected for nitrate search')

    # Find all testcases
    try:
        if disabled:
            testcases = list(nitrate.TestCase.search(script=beaker_task))
        # Find testcases that do not have 'DISABLED' status
        # (case_status ids 1, 2, 4 — presumably PROPOSED/CONFIRMED/NEED
        # UPDATE; verify against the Nitrate instance)
        else:
            testcases = list(nitrate.TestCase.search(
                script=beaker_task, case_status__in=[1, 2, 4]))
    except (nitrate.NitrateError, gssapi.raw.misc.GSSError) as error:
        raise ConvertError(error)
    if not testcases:
        echo("No {0}testcase found for '{1}'.".format(
            '' if disabled else 'non-disabled ', beaker_task))
        return common_data, []
    elif len(testcases) > 1:
        echo("Multiple test cases found for '{0}'.".format(beaker_task))

    # Process individual test cases
    individual_data = list()
    md_content = dict()
    for testcase in testcases:
        # Testcase data must be fetched due to
        # https://github.com/psss/python-nitrate/issues/24
        testcase._fetch()
        data = read_nitrate_case(testcase, common_data)
        individual_data.append(data)
        # Check testcase for manual data
        # (the last testcase with any manual content wins)
        md_content_tmp = read_manual_data(testcase)
        if any(md_content_tmp.values()):
            md_content = md_content_tmp

    # Write md file if there is something to write
    # or try to remove if there isn't.
    md_path = os.getcwd() + '/test.md'
    if md_content:
        write_markdown(md_path, md_content)
    else:
        try:
            os.remove(md_path)
            echo(style(f"Test case file '{md_path}' "
                       "successfully removed.", fg='magenta'))
        except FileNotFoundError:
            # Nothing to clean up
            pass
        except IOError:
            raise ConvertError(
                "Unable to remove '{0}'.".format(md_path))

    # Merge environment from Makefile and Nitrate
    # (Nitrate values take precedence over the Makefile ones)
    if 'environment' in common_data:
        for case in individual_data:
            if 'environment' in case:
                case_environment = case['environment']
                case['environment'] = common_data['environment'].copy()
                case['environment'].update(case_environment)

    # Merge description from PURPOSE with header/footer from Nitrate notes
    for testcase in individual_data:
        if 'description' in common_data:
            testcase['description'] = common_data['description'] + \
                testcase['description']
    if 'description' in common_data:
        common_data.pop('description')

    # Find common data from individual test cases: start with a copy of
    # the first case, drop any key whose value differs in a later case,
    # and count key occurrences so keys missing from some case are
    # dropped as well.
    common_candidates = dict()
    histogram = dict()
    for testcase in individual_data:
        if individual_data.index(testcase) == 0:
            common_candidates = copy.copy(testcase)
            for key in testcase:
                histogram[key] = 1
        else:
            for key, value in testcase.items():
                if key in common_candidates:
                    if value != common_candidates[key]:
                        common_candidates.pop(key)
                if key in histogram:
                    histogram[key] += 1
    for key in histogram:
        if key in common_candidates and histogram[key] < len(individual_data):
            common_candidates.pop(key)

    # Add common data to main.fmf
    for key, value in common_candidates.items():
        common_data[key] = value

    # If there is only single testcase found there is no need to continue
    if len(individual_data) <= 1:
        return common_data, []

    # Remove common data from individual fmfs
    for common_key in common_candidates:
        for testcase in individual_data:
            if common_key in testcase:
                testcase.pop(common_key)

    return common_data, individual_data
def info(msg):
    """Show *msg* as a blue informational line on stdout."""
    click.secho(Term.INFO_PREFIX + msg, fg='blue')
def warning(msg):
    """Show *msg* as a yellow warning line on stdout."""
    click.secho(Term.WARNING_PREFIX + msg, fg='yellow')
def fatal(msg):
    """Show *msg* as a red fatal line on stderr, then exit with status 1."""
    click.secho(Term.FATAL_PREFIX + msg, fg='red', err=True)
    sys.exit(1)
def success(msg):
    """Show *msg* as a green success line on stdout."""
    click.secho(Term.SUCCESS_PREFIX + msg, fg='green')
def upload(context, family_id, force_restart):
    """Upload results from analyses.

    Click command callback: validates the requested family's latest
    analysis, wires up all service APIs into ``context.obj``, and —
    unless a subcommand was invoked — runs the full upload pipeline
    (coverage, validate, genotypes, observations, scout) and stamps the
    analysis as uploaded.

    :param context: click context; ``context.obj`` holds configuration
        and receives the constructed API clients
    :param family_id: family to upload; may be falsy when a subcommand
        provides its own target
    :param force_restart: allow restarting an upload that has already
        been started
    """
    click.echo(click.style("----------------- UPLOAD ----------------------"))

    context.obj["status"] = Store(context.obj["database"])

    # Up-front validation of the requested family and its latest analysis
    if family_id:
        family_obj = context.obj["status"].family(family_id)
        if not family_obj:
            message = f"family not found: {family_id}"
            click.echo(click.style(message, fg="red"))
            context.abort()
        if not family_obj.analyses:
            message = f"no analysis exists for family: {family_id}"
            click.echo(click.style(message, fg="red"))
            context.abort()
        # analyses[0] is treated as the latest analysis —
        # presumably the list is ordered newest-first; TODO confirm
        analysis_obj = family_obj.analyses[0]
        if analysis_obj.uploaded_at is not None:
            message = f"analysis already uploaded: {analysis_obj.uploaded_at.date()}"
            click.echo(click.style(message, fg="red"))
            context.abort()
        if not force_restart and analysis_obj.upload_started_at is not None:
            # An upload stuck for more than a day is considered failed
            if dt.datetime.now(
            ) - analysis_obj.upload_started_at > dt.timedelta(hours=24):
                raise AnalysisUploadError(
                    f"The upload started at {analysis_obj.upload_started_at} "
                    f"something went wrong, restart it with the --restart flag"
                )
            message = f"analysis upload already started: {analysis_obj.upload_started_at.date()}"
            click.echo(click.style(message, fg="yellow"))
            return

    # Construct all service API clients for this command (and any
    # subcommand) to use via context.obj
    context.obj["housekeeper_api"] = hk.HousekeeperAPI(context.obj)
    context.obj["madeline_api"] = madeline.api.MadelineAPI(context.obj)
    context.obj["genotype_api"] = gt.GenotypeAPI(context.obj)
    context.obj["lims_api"] = lims.LimsAPI(context.obj)
    context.obj["tb_api"] = tb.TrailblazerAPI(context.obj)
    context.obj["chanjo_api"] = coverage_app.ChanjoAPI(context.obj)
    context.obj["deliver_api"] = DeliverAPI(
        context.obj,
        hk_api=context.obj["housekeeper_api"],
        lims_api=context.obj["lims_api"],
        case_tags=CASE_TAGS,
        sample_tags=SAMPLE_TAGS,
    )
    context.obj["scout_api"] = scoutapi.ScoutAPI(context.obj)
    context.obj["analysis_api"] = AnalysisAPI(
        context.obj,
        hk_api=context.obj["housekeeper_api"],
        scout_api=context.obj["scout_api"],
        tb_api=context.obj["tb_api"],
        lims_api=context.obj["lims_api"],
        deliver_api=context.obj["deliver_api"],
    )
    context.obj["report_api"] = ReportAPI(
        store=context.obj["status"],
        lims_api=context.obj["lims_api"],
        chanjo_api=context.obj["chanjo_api"],
        analysis_api=context.obj["analysis_api"],
        scout_api=context.obj["scout_api"],
    )
    context.obj["scout_upload_api"] = UploadScoutAPI(
        hk_api=context.obj["housekeeper_api"],
        scout_api=context.obj["scout_api"],
        madeline_api=context.obj["madeline_api"],
        analysis_api=context.obj["analysis_api"],
        lims_api=context.obj["lims_api"],
    )

    # A subcommand takes over from here; the APIs above are its setup
    if context.invoked_subcommand is not None:
        return

    # Running the full pipeline requires an explicit family
    if not family_id:
        _suggest_cases_to_upload(context)
        context.abort()

    family_obj = context.obj["status"].family(family_id)
    analysis_obj = family_obj.analyses[0]
    if analysis_obj.uploaded_at is not None:
        message = f"analysis already uploaded: {analysis_obj.uploaded_at.date()}"
        click.echo(click.style(message, fg="yellow"))
    else:
        # Mark the upload as started before invoking the steps so a
        # concurrent run is rejected by the check above
        analysis_obj.upload_started_at = dt.datetime.now()
        context.obj["status"].commit()
        context.invoke(coverage, re_upload=True, family_id=family_id)
        context.invoke(validate, family_id=family_id)
        context.invoke(genotypes, re_upload=False, family_id=family_id)
        context.invoke(observations, case_id=family_id)
        context.invoke(scout, case_id=family_id)
        analysis_obj.uploaded_at = dt.datetime.now()
        context.obj["status"].commit()
        click.echo(click.style(f"{family_id}: analysis uploaded!", fg="green"))
def error(msg):
    """Show *msg* as a red error line on stdout."""
    click.secho(Term.ERROR_PREFIX + msg, fg='red')