def make_nginx_conf(bench_path, yes=False):
    from bench import env
    from bench.config.common_site_config import get_config

    template = env.get_template('nginx.conf')
    bench_path = os.path.abspath(bench_path)
    sites_path = os.path.join(bench_path, "sites")

    config = get_config(bench_path)
    sites = prepare_sites(config, bench_path)

    nginx_conf = template.render(**{
        "sites_path": sites_path,
        "http_timeout": config.get("http_timeout"),
        "sites": sites,
        "webserver_port": config.get('webserver_port'),
        "socketio_port": config.get('socketio_port'),
        "bench_name": get_bench_name(bench_path),
        "error_pages": get_error_pages(),
        # for nginx map variable
        "random_string": "".join(random.choice(string.ascii_lowercase) for i in range(7))
    })

    conf_path = os.path.join(bench_path, "config", "nginx.conf")
    if not yes and os.path.exists(conf_path):
        click.confirm('nginx.conf already exists and this will overwrite it. Do you want to continue?',
                      abort=True)

    with open(conf_path, "w") as f:
        f.write(nginx_conf)
def create_config_file(filename):
    """
    Create main configuration file if it doesn't exist.
    """
    import textwrap
    from six.moves.urllib import parse

    if not os.path.exists(filename):
        old_default_config_file = os.path.join(os.path.dirname(filename), '.tksrc')
        if os.path.exists(old_default_config_file):
            upgrade = click.confirm(
                "\n".join(textwrap.wrap(
                    "It looks like you recently updated Taxi. Some "
                    "configuration changes are required. You can either let "
                    "me upgrade your configuration file or do it "
                    "manually.")) +
                "\n\nProceed with automatic configuration file upgrade?",
                default=True
            )

            if upgrade:
                settings = Settings(old_default_config_file)
                settings.convert_to_4()
                with open(filename, 'w') as config_file:
                    settings.config.write(config_file)
                os.remove(old_default_config_file)
                return
            else:
                print("Ok then.")
                sys.exit(0)

        response = click.confirm(
            "The configuration file %s does not exist yet.\nDo you want to"
            " create it now?" % filename, default=True
        )
        if response:
            config = resource_string('taxi', 'etc/taxirc.sample').decode('utf-8')
            available_backends = backends_registry._entry_points.keys()
            context = {}
            context['backend'] = click.prompt(
                "Enter the backend you want to use (choices are %s)" %
                ', '.join(available_backends),
                type=click.Choice(available_backends)
            )
            context['username'] = click.prompt("Enter your username")
            context['password'] = parse.quote(
                click.prompt("Enter your password", hide_input=True),
                safe=''
            )
            context['hostname'] = click.prompt(
                "Enter the hostname of the backend (eg. "
                "timesheets.example.com)",
                type=Hostname()
            )
            templated_config = config.format(**context)
            with open(filename, 'w') as f:
                f.write(templated_config)
        else:
            print("Ok then.")
            sys.exit(1)
def search(substring, include_deleted, include_pending, include_external, include_system, **criteria):
    """Searches users matching some criteria"""
    assert set(criteria.viewkeys()) == {'first_name', 'last_name', 'email', 'affiliation'}
    criteria = {k: v for k, v in criteria.viewitems() if v is not None}
    res = search_users(exact=(not substring), include_deleted=include_deleted,
                       include_pending=include_pending, external=include_external,
                       allow_system_user=include_system, **criteria)
    if not res:
        print(cformat('%{yellow}No results found'))
        return
    elif len(res) > 100:
        click.confirm('{} results found. Show them anyway?'.format(len(res)), abort=True)

    users = sorted((u for u in res if isinstance(u, User)),
                   key=lambda x: (x.first_name.lower(), x.last_name.lower(), x.email))
    externals = sorted((ii for ii in res if isinstance(ii, IdentityInfo)),
                       key=lambda x: (_safe_lower(x.data.get('first_name')),
                                      _safe_lower(x.data.get('last_name')),
                                      _safe_lower(x.data['email'])))

    if users:
        table_data = [['ID', 'First Name', 'Last Name', 'Email', 'Affiliation']]
        for user in users:
            table_data.append([unicode(user.id), user.first_name, user.last_name,
                               user.email, user.affiliation])
        table = AsciiTable(table_data, cformat('%{white!}Users%{reset}'))
        table.justify_columns[0] = 'right'
        print(table.table)

    if externals:
        if users:
            print()
        table_data = [['First Name', 'Last Name', 'Email', 'Affiliation', 'Source', 'Identifier']]
        for ii in externals:
            data = ii.data
            table_data.append([data.get('first_name', ''), data.get('last_name', ''),
                               data['email'], data.get('affiliation', '-'),
                               ii.provider.name, ii.identifier])
        table = AsciiTable(table_data, cformat('%{white!}Externals%{reset}'))
        print(table.table)
def patch_version(self, version):
    """Patch package version in `__init__.py`.

    This will also set the `meta.version` to the new version.

    :param version: The new version.
    :type version: str
    """
    if not Version.check(version):
        click.secho('Invalid version value `{:s}`.'.format(version), fg='red')
        raise click.Abort()

    assert version not in self.get_git_tags()

    message = 'Bumping {:s} -- {:s} to {:s}, are you sure?'.format(
        self.meta.title, self.meta.version, version)
    click.confirm(message, abort=True, default=True)

    contents = self.read_from_package('__init__.py')
    contents = re.sub(
        r'__version__ = ([\'"]){:s}\1'.format(re.escape(self.meta.version)),
        r'__version__ = \g<1>{:s}\g<1>'.format(version),
        contents)

    with open(self.get_filename_package('__init__.py'), 'w') as stream:
        stream.write(contents)

    self.meta.version = version
def wizard(ctx, version, username, password, repository, projects):
    """
    Interactive Release Wizard
    """
    for project in projects:
        if subprocess.check_output(["git", "status", "--porcelain"], cwd=join(root_dir, project)):
            raise RuntimeError("%s repository is not clean." % project)

    # Build test release
    ctx.invoke(sdist, projects=projects)
    ctx.invoke(test, projects=projects)
    click.confirm("Please test the release now. Is it ok?", abort=True)

    # bump version, update docs and contributors
    ctx.invoke(set_version, version=version, projects=projects)
    ctx.invoke(docs)
    ctx.invoke(contributors)

    # version bump commit + tag
    ctx.invoke(git, args=["commit", "-a", "-m", "bump version"], projects=projects)
    ctx.invoke(git, args=["tag", "v" + version], projects=projects)
    ctx.invoke(git, args=["push"], projects=projects)
    ctx.invoke(git, args=["push", "--tags"], projects=projects)

    # Re-invoke sdist with bumped version
    ctx.invoke(sdist, projects=projects)
    click.confirm("All good, can upload to PyPI?", abort=True)
    ctx.invoke(upload_release, username=username, password=password, repository=repository)
    click.echo("All done!")
def workflow(backup_context, dryrun):
    """Runs all commands for a typical backup workflow."""
    backup_context.invoke(mkdir)
    backup_context.invoke(difflocal)
    click.confirm(
        "About to copy files to workdir - do you want to continue?",
        abort=True
    )
    backup_context.forward(cp)

    if dryrun:
        print("Skipping s3 preview step since dryrun mode is on...")
        # NOTE: This is skipped when in a dryrun, since no files were moved on
        # disk in earlier steps, so we can't look at the disk or DB to display
        # what we would actually be doing here...
        # IDEA: What if copying or deleting files triggered an update to
        # the DB, so that we could print what the current state would be?
        # And the code paths that actually do modify files could either
        # query the DB to figure out what to do, or a single function could
        # "sync" the disk with the desired layout expressed in the DB state...
    else:
        click.confirm("About to upload files to s3 - do you want to continue?", abort=True)
        # Reinitialize DB with updated paths, since we may have just moved
        # files into the workdir
        backup_context.obj.init_db()
        backup_context.forward(upload)

    click.echo(
        "All done - to delete your files from Dropbox, run the rm-dropbox-files command."
    )
def setup_letsencrypt(site, custom_domain, bench_path):
    site_path = os.path.join(bench_path, "sites", site, "site_config.json")
    if not os.path.exists(os.path.dirname(site_path)):
        print("No site named " + site)
        return

    if custom_domain:
        domains = get_domains(site, bench_path)
        for d in domains:
            if isinstance(d, dict) and d['domain'] == custom_domain:
                print("SSL for Domain {0} already exists".format(custom_domain))
                return

        if custom_domain not in domains:
            print("No custom domain named {0} set for site".format(custom_domain))
            return

    click.confirm('Running this will stop the nginx service temporarily causing your sites to go offline\n'
                  'Do you want to continue?',
                  abort=True)

    if not get_config(bench_path).get("dns_multitenant"):
        print("You cannot setup SSL without DNS Multitenancy")
        return

    create_config(site, custom_domain)
    run_certbot_and_setup_ssl(site, custom_domain, bench_path)
    setup_crontab()
def make_and_push_tag(version):
    """Tag the current commit and push that tag to origin"""
    click.confirm("Push tag '%s' to origin?" % version, default=True, abort=True)
    run(['git', 'tag', "-s", "v%s" % version], check=True)
    run(['git', 'push', '--tags', 'origin'], check=True)
def main(earliest_night, latest_night, data_dir, jar, xml, db, out, queue, walltime, engine,
         num_runs, vmem, log_level, port, source, conditions, max_delta_t, local, password):

    level = logging.INFO
    if log_level == 'DEBUG':
        level = logging.DEBUG
    elif log_level == 'WARN':
        level = logging.WARN
    elif log_level == 'INFO':
        level = logging.INFO

    logging.captureWarnings(True)
    logging.basicConfig(format=('%(asctime)s - %(name)s - %(levelname)s - ' + '%(message)s'),
                        level=level)

    jarpath = os.path.abspath(jar)
    xmlpath = os.path.abspath(xml)
    outpath = os.path.abspath(out)
    erna.ensure_output(out)
    db_path = os.path.abspath(db)
    output_directory = os.path.dirname(outpath)
    # create dir if it doesn't exist
    os.makedirs(output_directory, exist_ok=True)
    logger.info("Writing output data to {}".format(out))

    factdb = sqlalchemy.create_engine("mysql+pymysql://factread:{}@129.194.168.95/factdata".format(password))
    data_conditions = dcc.conditions[conditions]
    df_runs = erna.load(earliest_night, latest_night, data_dir, source_name=source,
                        timedelta_in_minutes=max_delta_t, factdb=factdb,
                        data_conditions=data_conditions)

    logger.info("Would process {} jobs with {} runs per job".format(len(df_runs) // num_runs, num_runs))
    click.confirm('Do you want to continue processing and start jobs?', abort=True)

    job_list = make_jobs(jarpath, xmlpath, db_path, output_directory, df_runs, engine,
                         queue, vmem, num_runs, walltime)
    job_outputs = gridmap.process_jobs(job_list, max_processes=len(job_list), local=local)
    erna.collect_output(job_outputs, out, df_runs)
def collect_hosts():
    """
    Collect host information from user. This will later be filled in using
    ansible.

    Returns: a list of host information collected from the user
    """
    click.clear()
    click.echo('***Host Configuration***')
    message = """
The OpenShift Master serves the API and web console. It also coordinates the
jobs that have to run across the environment. It can even run the datastore.
For wizard based installations the database will be embedded. It's possible
to change this later using etcd from Red Hat Enterprise Linux 7.

Any Masters configured as part of this installation process will also be
configured as Nodes. This is so that the Master will be able to proxy to
Pods from the API. By default this Node will be unschedulable but this can
be changed after installation with 'oadm manage-node'.

The OpenShift Node provides the runtime environments for containers. It will
host the required services to be managed by the Master.

http://docs.openshift.com/enterprise/latest/architecture/infrastructure_components/kubernetes_infrastructure.html#master
http://docs.openshift.com/enterprise/3.0/architecture/infrastructure_components/kubernetes_infrastructure.html#node
"""
    click.echo(message)

    hosts = []
    more_hosts = True
    ip_regex = re.compile(r'^\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}$')

    while more_hosts:
        host_props = {}
        hostname_or_ip = click.prompt('Enter hostname or IP address:',
                                      default='',
                                      value_proc=validate_prompt_hostname)

        if ip_regex.match(hostname_or_ip):
            host_props['ip'] = hostname_or_ip
        else:
            host_props['hostname'] = hostname_or_ip

        host_props['master'] = click.confirm('Will this host be an OpenShift Master?')
        host_props['node'] = True

        rpm_or_container = click.prompt('Will this host be RPM or Container based (rpm/container)?',
                                        type=click.Choice(['rpm', 'container']),
                                        default='rpm')
        if rpm_or_container == 'container':
            host_props['containerized'] = True
        else:
            host_props['containerized'] = False

        host = Host(**host_props)
        hosts.append(host)
        more_hosts = click.confirm('Do you want to add additional hosts?')

    return hosts
def edit_history(version):
    """Interactively edit HISTORY.rst"""
    click.echo("Edit HISTORY.rst to add changelog and release date for %s" % version)
    edit('HISTORY.rst')
    click.confirm("Is HISTORY.rst up to date?", default=True, abort=True)
def destroy(cli_context, cluster_name, assume_yes, ec2_region, ec2_vpc_id):
    """
    Destroy a cluster.
    """
    provider = cli_context.obj['provider']

    option_requires(
        option='--provider',
        conditional_value='ec2',
        requires_all=['--ec2-region'],
        scope=locals())

    if provider == 'ec2':
        cluster = ec2.get_cluster(
            cluster_name=cluster_name,
            region=ec2_region,
            vpc_id=ec2_vpc_id)
    else:
        raise UnsupportedProviderError(provider)

    if not assume_yes:
        cluster.print()
        click.confirm(
            text="Are you sure you want to destroy this cluster?",
            abort=True)

    logger.info("Destroying {c}...".format(c=cluster.name))
    cluster.destroy()
def stop(cli_context, cluster_name, ec2_region, ec2_vpc_id, assume_yes):
    """
    Stop an existing, running cluster.
    """
    provider = cli_context.obj['provider']

    option_requires(
        option='--provider',
        conditional_value='ec2',
        requires_all=['--ec2-region'],
        scope=locals())

    if provider == 'ec2':
        cluster = ec2.get_cluster(
            cluster_name=cluster_name,
            region=ec2_region,
            vpc_id=ec2_vpc_id)
    else:
        raise UnsupportedProviderError(provider)

    cluster.stop_check()

    if not assume_yes:
        cluster.print()
        click.confirm(
            text="Are you sure you want to stop this cluster?",
            abort=True)

    logger.info("Stopping {c}...".format(c=cluster_name))
    cluster.stop()
    logger.info("{c} is now stopped.".format(c=cluster_name))
def handle_merge_error(e, source, target):
    print('-' * 80)
    print('Error when merging {source} into {target}'.format(source=source, target=target))
    print(e)
    print('You can open a new terminal, try to manually resolve the conflict/error and continue')
    print('-' * 80)
    click.confirm('Have you manually resolved the error?', abort=True)
def attach(gandi, disk, vm, position, read_only, background, force):
    """ Attach disk to vm.

    disk can be a disk name, or ID
    vm can be a vm name, or ID
    """
    if not force:
        proceed = click.confirm("Are you sure you want to attach disk '%s'"
                                " to vm '%s'?" % (disk, vm))
        if not proceed:
            return

    disk_info = gandi.disk.info(disk)
    attached = disk_info.get('vms_id', False)
    if attached and not force:
        gandi.echo('This disk is still attached')
        proceed = click.confirm('Are you sure you want to detach %s?' % disk)
        if not proceed:
            return

    result = gandi.disk.attach(disk, vm, background, position, read_only)
    if background and result:
        gandi.pretty_echo(result)

    return result
def ssh(monitor, private_ip, keys_dir, ssh_user, tmux_all, no_pem):
    """
    SSH into ec2 servers.
    """
    instances = monitor.instances
    if instances:
        if tmux_all:
            click.confirm(
                'Do you want to ssh to {} instances'.format(len(instances)),
                abort=True)
            cmds = [
                ' '.join(_get_ssh_cmd(instance, keys_dir, ssh_user, private_ip, no_pem))
                for instance in instances]
            uid = str(uuid.uuid4())[:6]
            session_name = '{}-{}'.format(NAME, uid)
            subprocess.call(
                ['tmux', 'new-session', '-s', session_name, '-d', cmds[0]])
            for cmd in cmds[1:]:
                subprocess.call(
                    ['tmux', 'split-window', '-t', session_name, cmd])
            subprocess.call(
                ['tmux', 'select-layout', '-t', session_name, 'even-vertical'])
            subprocess.call(['tmux', 'attach', '-t', session_name])
        else:
            echo_instances(instances, True)
            index = click.prompt(
                'Please select an instance from the list',
                type=click.IntRange(0, len(instances)),
                default=0)
            instance = instances[index]
            cmd = _get_ssh_cmd(instance, keys_dir, ssh_user, private_ip, no_pem)
            subprocess.call(cmd)
    else:
        click.echo('No instances available!')
def reset_secret_key(force: bool):
    if not force:
        click.echo('Resetting the secret key will cause all user sessions to be reset.')
        click.confirm('Do you want to continue?', abort=True)
    create_secret_key()
    click.echo('Secret key reset.')
def add_to_spotify(db, spotify, album, original_artist, original_album):
    album = spotify.album(album["uri"])
    tracks = album["tracks"]
    track_ids = [t["uri"] for t in tracks["items"]]
    while tracks["next"]:
        tracks = spotify.next(tracks)
        track_ids.extend(t["uri"] for t in tracks["items"])

    click.echo("Adding {0} tracks to Spotify...".format(len(track_ids)))
    for chunk in chunked(track_ids, 50):
        response = spotify.current_user_saved_tracks_add(chunk)
        if response is not None:
            click.secho("F**k, something broke:")
            pprint(response)
            click.confirm("Continue?", abort=True)
            return

    cursor = db.cursor()
    cursor.execute(
        """UPDATE collection SET complete = 1
           WHERE artist = ? AND album = ?""",
        [original_artist, original_album],
    )
    db.commit()
    click.secho("Done ", fg="green", nl=False)
    time.sleep(0.25)
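# The snippet above relies on a `chunked` helper to batch the saved-tracks
# calls 50 IDs at a time. A minimal stand-in, assuming the project does not
# already pull in something like more_itertools.chunked:
def chunked(seq, size):
    """Yield successive `size`-sized slices of `seq`."""
    for i in range(0, len(seq), size):
        yield seq[i:i + size]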
def enumerate(ctx, config, **kwargs):
    """
    Query all properties of a given device
    """
    devices = config.devices.find_or_create(kwargs)

    ##
    # @todo Make concurrency configurable
    queue = WorkQueue(concurrency=5)

    if not devices and not config.quiet:
        click.echo('No devices were matched')
        ctx.exit(1)

    if len(devices) > 1:
        message = "Parameters specified match %i devices. Are you sure?" % len(devices)
        if not config.assumeyes:
            click.confirm(message, default=False, abort=True)

    ##
    # Devices tend to be listed sequentially in config. Shuffle them in an
    # attempt to distribute the load across potential controllers.
    # @todo Perhaps this should be configurable?
    shuffle(devices)

    for device in devices:
        queue.append(device.enumerate)
    queue.execute()

    click.echo(config.formatter.devices(devices, summary=False))
def request_access_interactive(region, odd_host):
    region = click.prompt('AWS region', default=region)
    odd_host = click.prompt('Odd SSH bastion hostname', default=odd_host)

    all_instances = piu.utils.list_running_instances(region, [])
    stack_instances = [instance for instance in all_instances
                       if instance.name and instance.stack_name and instance.stack_version]

    instance_count = len(stack_instances)
    if instance_count == 0:
        raise click.ClickException('No running instances were found.')

    stack_instances.sort(key=operator.attrgetter('stack_name', 'stack_version'))

    print()
    table_entries = [dict(index=idx, **instance._asdict())
                     for idx, instance in enumerate(stack_instances, start=1)]
    print_table(
        'index name stack_name stack_version private_ip instance_id'.split(),
        table_entries)
    print()

    if instance_count > 1:
        allowed_choices = ["{}".format(n) for n in range(1, instance_count + 1)]
        instance_index = int(click.prompt('Choose an instance (1-{})'.format(instance_count),
                                          type=click.Choice(allowed_choices))) - 1
    else:
        click.confirm('Connect to {}?'.format(stack_instances[0].name), default=True, abort=True)
        instance_index = 0

    host = stack_instances[instance_index].private_ip
    reason = click.prompt('Reason', default='Troubleshooting')

    return (host, odd_host, reason)
def unignore(alias, chapters):
    """Unignore chapters for a series.

    Enter one or more chapters after the alias to mark them as new. Enter the
    chapter identifiers as they are listed when using the chapters command. To
    unignore all of the chapters for a particular series, use the word "all"
    in place of the chapters.
    """
    s = db.Series.alias_lookup(alias)
    query = db.session.query(db.Chapter).filter(db.Chapter.series == s,
                                                db.Chapter.downloaded == -1)
    if len(chapters) == 1 and chapters[0].lower() == 'all':
        click.echo('Unignoring {} chapters for {}'.format(len(s.chapters), s.name))
        click.confirm('Do you want to continue', prompt_suffix='? ', abort=True)
    else:
        query = query.filter(db.Chapter.chapter.in_(chapters))

    chapters = [x.to_object() for x in query.all()]
    for chapter in chapters:
        chapter.mark_new()

    if len(chapters) == 1:
        output.chapter('Unignored chapter {} for {}'.format(
            chapters[0].chapter, s.name
        ))
    else:
        output.series('Unignored {} chapters for {}'.format(
            len(chapters), s.name
        ))
def lesson(ctx, lesson_id):
    """ Run tests to check given LESSON_ID. """
    lessons = ctx.obj['lessons']
    lesson = lessons[lesson_id]
    if lesson['status'] == 'complete':
        click.confirm(
            "This lesson was already completed. Do you want to re-run?",
            abort=True)

    click.secho("Running tests for lesson {0} {1}...".format(
        lesson_id, lesson['title']), fg='blue')
    result = run_lesson(lesson['test_file'])

    if result:
        lessons[lesson_id]['status'] = 'complete'
        click.secho("Good job!", fg='green')
    else:
        lessons[lesson_id]['status'] = 'in-progress'
        click.secho("\nTest: tutorial/{0}".format(lesson.get('test_file')), fg='blue')
        click.secho("Hint: {0}".format(lesson.get('hint')), fg='blue')
        click.secho("URL: {0}".format(lesson.get('url', 'n/a')), fg='blue')

    save_lesson_statuses(ctx.obj['status_filename'], lessons)
    if not result:
        ctx.exit(2)
def inject():
    """Starts the injection of base dotfiles.

    Injection is used to describe linking and copying."""
    mappings = dotfiles.get_all_mappings()
    status_mappings = dotfiles.generate_injection_statuses(mappings)

    injection_statuses_not_ok = [
        ms for ms in status_mappings
        if not mapping.okay_status(ms.injection_status)
    ]
    if injection_statuses_not_ok:
        raise click.ClickException("\n" + "\n".join(
            "{}: {}".format(status.name, ", ".join(str(m) for m in mappings))
            for status, mappings in dotfiles.status_mappings(
                injection_statuses_not_ok).items()))

    injections_to_perform = [
        ms for ms in status_mappings
        if ms.injection_status is mapping.InjectionStatus.CanInject
    ]
    if not injections_to_perform:
        click.secho("No actions to perform.", fg='green')
        raise click.Abort()

    click.confirm(
        "Confirm to inject the following "
        "mappings:\n {}\n".format(
            "\n ".join(str(m.mapping) for m in injections_to_perform)),
        abort=True)

    injection_method = mapping.injection_method_picker(constants.DEFAULT_METHOD)
    injection_method([ms.mapping for ms in injections_to_perform])
def purge(force, queue):
    from sentry.celery import app

    for q in app.conf.CELERY_QUEUES:
        if q.name == queue:
            queue = q
            break
    else:
        raise click.ClickException('unknown queue: %r' % queue)

    from sentry.monitoring.queues import get_backend_for_celery
    try:
        backend = get_backend_for_celery(app)
    except KeyError as e:
        raise click.ClickException('unknown broker type: %r' % e.message)

    size = backend.get_size(queue)
    if size == 0:
        click.echo('Queue is empty, nothing to purge', err=True)
        return

    if not force:
        click.confirm(
            'Are you sure you want to purge %d messages from the queue %r?' % (size, queue.name),
            abort=True)

    click.echo('Poof, %d messages deleted' % backend.purge_queue(queue), err=True)
def remove(source):
    if source not in injections:
        raise click.ClickException('Injection `{}` does not exist'.format(source))

    with dotfiles.acquire_mapping_json(writeable=True) as mapping_json:
        mappings = mapping_json['mappings']
        for i, obj in enumerate(mappings):
            if obj.get('source') == source:
                location = i
                break
        else:
            raise click.ClickException(
                'Injection `{}` does not exist within json.'.format(source))

        raw_mapping = mappings[location]
        mp = mapping.Mapping(
            raw_mapping.get('source'), raw_mapping.get('destination'))
        if mp.injection_status is mapping.InjectionStatus.AlreadyInjected:
            click.confirm(
                "Mapping `{}` is already injected. Do you want to eject?".format(source),
                abort=True)
            click.secho("Unlinking `{}` ... ".format(mp.destination), nl=False)
            os.unlink(mp.user_destination)
            click.secho("√", fg='green')

        click.secho('Removing injection `{}` ... '.format(source), nl=False)
        del mappings[location]
        click.secho('√', fg='green')
def delete(jobs_names, base_dir, confirm):
    """
    Delete jobs on the Jenkins server.
    """
    jenkins_url = conf.get(base_dir, ['server', 'location'])

    if confirm and jobs_names:
        question = click.style(click.wrap_text(
            'Are you sure you want to delete the following jobs on the '
            'Jenkins server?'
        ), fg='red', bold=True)
        jobs_list = '\n'.join(' %s' % n for n in jobs_names)
        click.confirm('%s\n\n%s\n\n' % (question, jobs_list), abort=True)

    exit_code = 0
    for name in jobs_names:
        try:
            jenkins_url = jenkins_api.handle_auth(
                base_dir,
                jenkins_api.delete_job,
                jenkins_url,
                name
            )
        except requests.HTTPError as exc:
            if exc.response.status_code == 404:
                click.secho('%s was not found' % name, fg='red')
                exit_code = 1

    sys.exit(exit_code)
def quickstart():
    """Quickstart wizard for setting up twtxt."""
    width = click.get_terminal_size()[0]
    width = width if width <= 79 else 79

    click.secho("twtxt - quickstart", fg="cyan")
    click.secho("==================", fg="cyan")
    click.echo()

    help_text = "This wizard will generate a basic configuration file for twtxt with all mandatory options set. " \
                "Have a look at the README.rst to get information about the other available options and their meaning."
    click.echo(textwrap.fill(help_text, width))
    click.echo()

    nick = click.prompt("➤ Please enter your desired nick", default=os.environ.get("USER", ""))
    twtfile = click.prompt("➤ Please enter the desired location for your twtxt file",
                           "~/twtxt.txt", type=click.Path())
    disclose_identity = click.confirm("➤ Do you want to disclose your identity? Your nick and URL will be shared",
                                      default=False)
    click.echo()

    add_news = click.confirm("➤ Do you want to follow the twtxt news feed?", default=True)

    conf = Config.create_config(nick, twtfile, disclose_identity, add_news)
    open(os.path.expanduser(twtfile), "a").close()

    click.echo()
    click.echo("✓ Created config file at '{0}'.".format(click.format_filename(conf.config_file)))
def solve(ctx, lesson_id, yes, view_only):
    """ View / copy LESSON_ID into place. """
    lesson = ctx.obj['lessons'][lesson_id]
    click.echo("Solution for lesson {0} {1}:\n".format(lesson_id, lesson['title']))

    source_file = os.path.join('solutions/', lesson['test_file'])
    dest_file = 'click_tutorial/cli.py'

    click.secho(79 * '-', fg='blue')
    with open(source_file) as solution_file:
        click.secho(solution_file.read(), fg='green')
    click.secho(79 * '-', fg='blue')

    if view_only:
        return

    if not yes:
        click.confirm("NOTE: If you proceed you will lose any changes you have made to {0}.\n"
                      "Overwrite {0} with solution?".format(dest_file), abort=True)
    try:
        click.echo("copy: {0} -> {1}".format(source_file, dest_file))
        shutil.copyfile(source_file, dest_file)
    except IOError as e:
        click.secho(str(e), fg='red')
        ctx.exit(1)

    click.echo("You may now view the solution file at click_tutorial/cli.py\n"
               "or run the tests for the lesson with:\n\n"
               "tutorial lesson {0}".format(lesson_id))
def entry(title, episode, audio):
    """
    Creating file-relative-to-cwd.md
    Copying audio to file-relative-to-cwd.mp3
    """
    e = entry_factory(title, episode, audio)
    post_text = CONTENT / e.markdown
    post_audio = CONTENT / 'episodes' / e.audio

    if (not post_text.exists() or
            click.confirm('{} already exists. Override?'.format(post_text), abort=True)):
        click.echo('Writing entry: {}'.format(post_text))
        e.save(post_text)
        click.echo('Done')

    if audio:
        if (not post_audio.exists() or
                click.confirm('{} already exists. Override?'.format(post_audio), abort=True)):
            Path(audio).copy(post_audio)
            click.echo('Done')
        else:
            click.echo('Doing nothing. Entry already exists: {}'.format(post_audio))
    else:
        click.echo('No audio defined.')
def profile_keygen(size):
    userdata = get_userdata()
    if 'rsa' in userdata:
        click.confirm('This profile already has an RSA key pair. Do you want to replace it?',
                      abort=True)
        click.confirm(
            'Are you sure? This may make any existing password databases inaccessible and cannot be undone!',
            abort=True
        )

    password = click.prompt('Password (optional)', hide_input=True, default='')
    if password:
        confirm_password = click.prompt('Again', hide_input=True)
        if password != confirm_password:
            raise click.UsageError("Passwords don't match!")

    click.echo('Generating key pair. This may take a while...')
    key = RSA.generate(size)
    public_key = key.publickey()
    userdata['rsa'] = {
        'key': b64encode(key.exportKey('PEM', passphrase=password or None)).decode(),
        'public_key': b64encode(public_key.exportKey('PEM')).decode()
    }
    write_userdata(userdata)
    click.echo('Key pair created. Your public key is:\n\n{0}'.format(
        public_key.exportKey('OpenSSH').decode()))
def merge(watson, frames_with_conflict, force):
    """
    Perform a merge of the existing frames with a conflicting frames file.

    When storing the frames on a file hosting service, there is the
    possibility that the frame file goes out-of-sync due to one or
    more of the connected clients going offline. This can cause the
    frames to diverge.

    If the `--force` option is specified, the merge operation
    will automatically be performed.

    The only argument is a path to the conflicting `frames` file.

    Merge will output statistics about the merge operation.

    Example:

    \b
    $ watson merge frames-with-conflicts
    120 frames will be left unchanged
    12  frames will be merged
    3   frame conflicts need to be resolved

    To perform a merge operation, the user will be prompted to
    select the frame they would like to keep.

    Example:

    \b
    $ watson merge frames-with-conflicts --force
    120 frames will be left unchanged
    12  frames will be merged
    3   frame conflicts need to be resolved
    Will resolve conflicts:
    frame 8804872:
    < {
    <     "project": "tailordev",
    <     "start": "2015-07-28 09:33:33",
    <     "stop": "2015-07-28 10:39:36",
    <     "tags": [
    <         "intern",
    <         "daily-meeting"
    <     ]
    < }
    ---
    > {
    >     "project": "tailordev",
    >     "start": "2015-07-28 09:33:33",
    >     "stop": "**2015-07-28 11:39:36**",
    >     "tags": [
    >         "intern",
    >         "daily-meeting"
    >     ]
    > }
    Select the frame you want to keep: left or right? (L/r)
    """
    original_frames = watson.frames
    conflicting, merging = watson.merge_report(frames_with_conflict)

    # find the length of the largest returned list, then get the number of
    # digits of this length
    dig = len(str(max(len(original_frames), len(merging), len(conflicting))))

    click.echo("{:<{width}} frames will be left unchanged".format(
        len(original_frames) - len(conflicting), width=dig))
    click.echo("{:<{width}} frames will be merged".format(len(merging), width=dig))
    click.echo("{:<{width}} frames will need to be resolved".format(len(conflicting), width=dig))

    # No frames to resolve or merge.
    if not conflicting and not merging:
        return

    # Confirm user would like to merge
    if not force and not click.confirm("Do you want to continue?"):
        return

    if conflicting:
        click.echo("Will resolve conflicts:")

    date_format = 'YYYY-MM-DD HH:mm:ss'

    for conflict_frame in conflicting:
        original_frame = original_frames[conflict_frame.id]

        # Print original frame
        original_frame_data = {
            'project': original_frame.project,
            'start': original_frame.start.format(date_format),
            'stop': original_frame.stop.format(date_format),
            'tags': original_frame.tags
        }
        click.echo("frame {}:".format(style('short_id', original_frame.id)))
        click.echo(u"{}".format('\n'.join('<' + line for line in json.dumps(
            original_frame_data, indent=4, ensure_ascii=False).splitlines())))
        click.echo("---")

        # make a copy of the namedtuple
        conflict_frame_copy = conflict_frame._replace()

        # highlight conflicts
        if conflict_frame.project != original_frame.project:
            project = '**' + str(conflict_frame.project) + '**'
            conflict_frame_copy = conflict_frame_copy._replace(project=project)

        if conflict_frame.start != original_frame.start:
            start = '**' + str(conflict_frame.start.format(date_format)) + '**'
            conflict_frame_copy = conflict_frame_copy._replace(start=start)

        if conflict_frame.stop != original_frame.stop:
            stop = '**' + str(conflict_frame.stop.format(date_format)) + '**'
            conflict_frame_copy = conflict_frame_copy._replace(stop=stop)

        for idx, tag in enumerate(conflict_frame.tags):
            if tag not in original_frame.tags:
                conflict_frame_copy.tags[idx] = '**' + str(tag) + '**'

        # Print conflicting frame
        conflict_frame_data = {
            'project': conflict_frame_copy.project,
            'start': conflict_frame_copy.start.format(date_format),
            'stop': conflict_frame_copy.stop.format(date_format),
            'tags': conflict_frame_copy.tags
        }
        click.echo("{}".format('\n'.join('>' + line for line in json.dumps(
            conflict_frame_data, indent=4, ensure_ascii=False).splitlines())))

        resp = click.prompt(
            "Select the frame you want to keep: left or right? (L/r)",
            value_proc=options(['L', 'r']))

        if resp == 'r':
            # replace original frame with conflicting frame
            original_frames[conflict_frame.id] = conflict_frame

    # merge in any non-conflicting frames
    for frame in merging:
        start, stop, project, id, tags, updated_at = frame.dump()
        original_frames.add(project, start, stop, tags=tags, id=id, updated_at=updated_at)

    watson.frames = original_frames
    watson.frames.changed = True
    watson.save()
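# `options` above builds a value_proc for click.prompt. A plausible minimal
# version (an assumption, not necessarily Watson's exact implementation):
# normalize the answer to one of the allowed choices, defaulting to the first,
# and raise UsageError -- which makes click.prompt report the error and ask
# again -- for anything else.
def options(opt_list):
    def value_proc(user_input):
        if not user_input:
            return opt_list[0]
        for opt in opt_list:
            if user_input.lower() == opt.lower():
                return opt
        raise click.UsageError('Response should be one of: {}'.format(', '.join(opt_list)))
    return value_proc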
def _get_config_from_prompts(should_prompt_for_example: bool = True) -> Dict:
    """Ask user to provide necessary inputs.

    Args:
        should_prompt_for_example: Whether to include a prompt for example.

    Returns:
        Resulting config dictionary.
    """

    # set output directory to the current directory
    output_dir = os.path.abspath(os.path.curdir)

    # get project name
    project_name_prompt = _get_prompt_text(
        "Project Name:",
        "Please enter a human readable name for your new project.",
        "Spaces and punctuation are allowed.",
        start="",
    )

    project_name = _get_user_input(project_name_prompt, default="New Kedro Project")

    normalized_project_name = re.sub(r"[^\w-]+", "-", project_name).lower().strip("-")

    # get repo name
    repo_name_prompt = _get_prompt_text(
        "Repository Name:",
        "Please enter a directory name for your new project repository.",
        "Alphanumeric characters, hyphens and underscores are allowed.",
        "Lowercase is recommended.",
    )

    repo_name = _get_user_input(
        repo_name_prompt,
        default=normalized_project_name,
        check_input=_assert_repo_name_ok,
    )

    # get python package_name
    default_pkg_name = normalized_project_name.replace("-", "_")
    pkg_name_prompt = _get_prompt_text(
        "Python Package Name:",
        "Please enter a valid Python package name for your project package.",
        "Alphanumeric characters and underscores are allowed.",
        "Lowercase is recommended. Package name must start with a letter "
        "or underscore.",
    )

    python_package = _get_user_input(
        pkg_name_prompt, default=default_pkg_name, check_input=_assert_pkg_name_ok
    )

    # option for whether iris example code is included in the project
    if should_prompt_for_example:
        code_example_prompt = _get_prompt_text(
            "Generate Example Pipeline:",
            "Do you want to generate an example pipeline in your project?",
            "Good for first-time users. (default=N)",
        )
        include_example = click.confirm(code_example_prompt, default=False)
    else:
        include_example = False

    return {
        "output_dir": output_dir,
        "project_name": project_name,
        "repo_name": repo_name,
        "python_package": python_package,
        "include_example": include_example,
    }
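# `_get_user_input` above validates input via `check_input` callbacks. A
# minimal sketch of what `_assert_pkg_name_ok` could check (an assumption --
# Kedro's real validator may differ), using a generic click error for
# illustration:
import re

import click


def _assert_pkg_name_ok(pkg_name: str) -> None:
    """Reject names that are not valid Python package identifiers."""
    if not re.match(r"^[a-zA-Z_][a-zA-Z0-9_]*$", pkg_name):
        raise click.ClickException(
            "`{}` is not a valid Python package name: it must start with a "
            "letter or underscore and contain only letters, digits and "
            "underscores.".format(pkg_name)
        )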
def _evaluate_command(self, text):
    """Used to run a command entered by the user during CLI operation
    (Puts the E in REPL)

    returns (results, MetaQuery)
    """
    logger = self.logger
    logger.debug('sql: %r', text)

    all_success = True
    meta_changed = False  # CREATE, ALTER, DROP, etc
    mutated = False  # INSERT, DELETE, etc
    db_changed = False
    path_changed = False
    output = []
    total = 0

    # Run the query.
    start = time()
    on_error_resume = self.on_error == 'RESUME'
    res = self.pgexecute.run(text, self.pgspecial, exception_formatter, on_error_resume)

    for title, cur, headers, status, sql, success in res:
        logger.debug("headers: %r", headers)
        logger.debug("rows: %r", cur)
        logger.debug("status: %r", status)
        threshold = self.row_limit
        if self._should_show_limit_prompt(status, cur):
            click.secho('The result set has more than %s rows.' % threshold, fg='red')
            if not click.confirm('Do you want to continue?'):
                click.secho("Aborted!", err=True, fg='red')
                break

        if self.pgspecial.auto_expand:
            max_width = self.cli.output.get_size().columns
        else:
            max_width = None

        formatted = format_output(
            title, cur, headers, status, self.table_format, self.null_string,
            self.pgspecial.expanded_output, max_width)

        output.extend(formatted)
        total = time() - start

        # Keep track of whether any of the queries are mutating or changing
        # the database
        if success:
            mutated = mutated or is_mutating(status)
            db_changed = db_changed or has_change_db_cmd(sql)
            meta_changed = meta_changed or has_meta_cmd(sql)
            path_changed = path_changed or has_change_path_cmd(sql)
        else:
            all_success = False

    meta_query = MetaQuery(text, all_success, total, meta_changed, db_changed,
                           path_changed, mutated)

    return output, meta_query
def edit_db(search_str):
    try:
        import readline
    except ImportError:
        pass

    sql = 'SELECT * FROM shows WHERE name like :search'
    conn = sqlite3.connect(Config.db_file)
    conn.row_factory = tvu.dict_factory
    curs = conn.cursor()
    values = {'search': '%{}%'.format(search_str)}
    results = curs.execute(sql, values)

    data = []
    for i in results:
        data.append(i)

    if len(data) == 0:
        sys.exit('"%s" not found' % search_str)
    elif len(data) > 1:
        click.echo('Multiple shows found, type a number to edit.')
        click.echo('Type "<ctrl> c" to cancel.')
        click.echo()
        for index, show in enumerate(data):
            click.echo(' %s. %s' % (index + 1, show['name']))
        click.echo()
        choice = click.prompt('Choose number', default=1,
                              type=click.IntRange(min=1, max=len(data)))
        idchoice = choice - 1
        if idchoice not in range(len(data)):
            sys.exit('Invalid choice: %s' % choice)
        row = data[idchoice]
    else:
        row = data[0]

    editcolor = 'green' if Config.is_win else 31
    dirty = False
    is_error = False

    click.echo()
    click.echo(tvu.format_paragraphs('''
        While editing a field, hit <enter> in an empty field to leave it
        unchanged and skip to the next one. Type "<ctrl> c" to cancel all
        edits. The current value is shown in ()'s beside the field name.'''))
    click.echo()

    title = '%s' % row['name']
    click.echo(tvu.style(title, bold=True))
    click.echo()

    try:
        msg = tvu.style('Search engine name (%s): ', fg=editcolor)
        new_search_engine_name = input(msg % (row['search_engine_name']))
        if not new_search_engine_name:
            new_search_engine_name = row['search_engine_name']
        else:
            dirty = True

        msg = tvu.style('Current season (%s): ', fg=editcolor)
        new_season = input(msg % (row['season']))
        if not new_season:
            new_season = str(row['season'])
        else:
            dirty = True

        msg = tvu.style('Last episode (%s): ', fg=editcolor)
        new_episode = input(msg % (row['episode']))
        if not new_episode:
            new_episode = str(row['episode'])
        else:
            dirty = True

        msg = tvu.style('Status (%s): ', fg=editcolor)
        new_status = input(msg % (row['status']))
        if not new_status:
            new_status = row['status']
        else:
            dirty = True
    except KeyboardInterrupt:
        click.echo('\nDatabase edit canceled.')
        sys.exit(0)

    if dirty is False:
        click.echo('No changes made.')
        sys.exit(0)

    if not new_season.isdigit():
        click.echo('Error: Season must be a number')
        is_error = True
    if not new_episode.isdigit():
        click.echo('Error: Episode must be a number')
        is_error = True
    if new_status not in ['active', 'inactive']:
        click.echo('Error: Status must be either "active" or "inactive"')
        is_error = True
    if is_error:
        sys.exit(1)

    click.echo()
    if not click.confirm('Are these changes correct? (you can always change it back)',
                         default=True):
        click.echo('Edits cancelled.')
        sys.exit()

    sql = '''UPDATE shows SET season=:season,
                              episode=:episode,
                              status=:status,
                              search_engine_name=:search_engine_name
             WHERE thetvdb_series_id=:tvdb_id'''
    row_values = {
        'season': new_season,
        'episode': new_episode,
        'status': new_status,
        'search_engine_name': new_search_engine_name,
        'tvdb_id': row['thetvdb_series_id']
    }
    curs.execute(sql, row_values)
    conn.commit()
    conn.close()
def cli(suffix, target_person_size, crop=513, test_crop=513,  # pylint: disable=too-many-locals, too-many-arguments
        only_missing=False, noswap=False, core_joints=False,
        human_annotations=False, up3d_fp=UP3D_FP):
    """Create segmentation datasets from select SMPL fits."""
    np.random.seed(1)
    with_rlswap = not noswap
    if human_annotations:
        assert core_joints
    if test_crop < target_person_size or crop < target_person_size:
        LOGGER.critical("Too small crop size!")
        raise Exception("Too small crop size!")

    landmark_mapping = landmark_mesh_91
    if core_joints:
        LOGGER.info("Using the core joints.")
        # Order is important here! This way, we maintain LSP compatibility.
        landmark_mapping = OrderedDict([
            ('neck', landmark_mapping['neck']),
            ('head_top', landmark_mapping['head_top']),
        ])
    n_landmarks = len(landmark_mapping) + 12

    LOGGER.info("Creating pose dataset with %d landmarks with target "
                "person size %f and suffix `%s`.",
                n_landmarks, target_person_size, suffix)
    assert ' ' not in suffix
    dset_fromroot = path.join(str(n_landmarks), str(target_person_size), suffix)
    dset_fp = path.join(DSET_ROOT_FP, dset_fromroot)
    if path.exists(dset_fp):
        if not click.confirm("Dataset folder exists: `%s`! Continue?" % (dset_fp)):
            return
    else:
        os.makedirs(dset_fp)

    LOGGER.info("Creating list files...")
    list_fp = path.join(path.dirname(__file__), '..', 'training', 'list')
    if not path.exists(list_fp):
        os.makedirs(list_fp)
    train_list_f = open(
        path.join(list_fp, 'train_%d_%s_%s.txt' % (n_landmarks, target_person_size, suffix)), 'w')
    val_list_f = open(
        path.join(list_fp, 'val_%d_%s_%s.txt' % (n_landmarks, target_person_size, suffix)), 'w')
    train_val_list_f = open(
        path.join(list_fp, 'trainval_%d_%s_%s.txt' % (n_landmarks, target_person_size, suffix)), 'w')
    test_list_f = open(
        path.join(list_fp, 'test_%d_%s_%s.txt' % (n_landmarks, target_person_size, suffix)), 'w')
    scale_f = open(
        path.join(list_fp, 'scale_%d_%s_%s.txt' % (n_landmarks, target_person_size, suffix)), 'w')

    with open(path.join(up3d_fp, 'train.txt'), 'r') as f:
        train_spec = [line.strip() for line in f.readlines()]
    with open(path.join(up3d_fp, 'val.txt'), 'r') as f:
        val_spec = [line.strip() for line in f.readlines()]
    with open(path.join(up3d_fp, 'test.txt'), 'r') as f:
        test_spec = [line.strip() for line in f.readlines()]

    LOGGER.info("Processing...")
    list_ids = np.zeros((4,), dtype='int')
    add_dataset(
        dset_fp, dset_fromroot, list_ids, up3d_fp,
        train_list_f, val_list_f, train_val_list_f, test_list_f, scale_f,
        train_spec, val_spec, test_spec,
        target_person_size, landmark_mapping, crop, test_crop, 0,
        only_missing=only_missing, with_rlswap=with_rlswap,
        human_annotations=human_annotations)

    train_list_f.close()
    val_list_f.close()
    train_val_list_f.close()
    test_list_f.close()
    scale_f.close()
    LOGGER.info("Done.")
def push(username, yes, **kwargs):
    """Push builds out to the repositories."""
    resume = kwargs.pop('resume')
    resume_all = False

    initialize_db(config)
    db_factory = transactional_session_maker()
    composes = []
    with db_factory() as session:
        if not resume and session.query(Compose).count():
            if yes:
                click.echo('Existing composes detected: {}. Resuming all.'.format(
                    ', '.join([str(c) for c in session.query(Compose).all()])))
            else:
                click.confirm(
                    'Existing composes detected: {}. Do you wish to resume them all?'.format(
                        ', '.join([str(c) for c in session.query(Compose).all()])),
                    abort=True)
            resume = True
            resume_all = True

        # If we're resuming a push
        if resume:
            for compose in session.query(Compose).all():
                if len(compose.updates) == 0:
                    # Compose objects can end up with 0 updates in them if the composer ejects all
                    # the updates in a compose for some reason. Composes with no updates cannot be
                    # serialized because their content_type property uses the content_type of the
                    # first update in the Compose. Additionally, it doesn't really make sense to go
                    # forward with running an empty Compose. It makes the most sense to delete them.
                    click.echo("{} has no updates. It is being removed.".format(compose))
                    session.delete(compose)
                    continue

                if not resume_all:
                    if yes:
                        click.echo('Resuming {}.'.format(compose))
                    elif not click.confirm('Resume {}?'.format(compose)):
                        continue

                # Reset the Compose's state and error message.
                compose.state = ComposeState.requested
                compose.error_message = ''

                composes.append(compose)
        else:
            updates = []
            # Accept both comma and space separated request list
            requests = kwargs['request'].replace(',', ' ').split(' ')
            requests = [UpdateRequest.from_string(val) for val in requests]

            query = session.query(Update).filter(Update.request.in_(requests))

            if kwargs.get('builds'):
                query = query.join(Update.builds)
                query = query.filter(
                    or_(*[Build.nvr == build for build in kwargs['builds'].split(',')]))

            query = _filter_releases(session, query, kwargs.get('releases'))

            for update in query.all():
                # Skip unsigned updates (this checks that all builds in the update are signed)
                update_sig_status(update)
                if not update.signed:
                    click.echo(
                        f'Warning: {update.get_title()} has unsigned builds and has been skipped')
                    continue

                updates.append(update)

            composes = Compose.from_updates(updates)
            for c in composes:
                session.add(c)

            # We need to flush so the database knows about the new Compose objects, so the
            # Compose.updates relationship will work properly. This is due to us overriding the
            # primaryjoin on the relationship between Composes and Updates.
            session.flush()

            # Now we need to refresh the composes so their updates property will not be empty.
            for compose in composes:
                session.refresh(compose)

        # Now we need to sort the composes so their security property can be used to prioritize
        # security updates. The security property relies on the updates property being
        # non-empty, so this must happen after the refresh above.
        composes = sorted(composes)

        for compose in composes:
            click.echo('\n\n===== {} =====\n'.format(compose))
            for update in compose.updates:
                click.echo(update.get_title())

        if composes:
            if yes:
                click.echo('\n\nPushing {:d} updates.'.format(
                    sum([len(c.updates) for c in composes])))
            else:
                click.confirm('\n\nPush these {:d} updates?'.format(
                    sum([len(c.updates) for c in composes])), abort=True)
            click.echo('\nLocking updates...')
        else:
            click.echo('\nThere are no updates to push.')

        composes = [c.__json__(composer=True) for c in composes]

    if composes:
        click.echo('\nSending composer.start message')
        bodhi.server.notifications.publish(
            topic='composer.start',
            msg=dict(
                api_version=2,
                composes=composes,
                resume=resume,
                agent=username,
            ),
            force=True,
        )
def confirm(msg: str, yes: bool) -> Optional[bool]:
    return None if yes else click.confirm(msg, abort=True)
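# A usage sketch for the wrapper above (hypothetical command, not from the
# source): a --yes flag skips the prompt entirely, while an interactive "n"
# still aborts via the click.Abort raised by click.confirm(abort=True).
import click


@click.command()
@click.option('--yes', is_flag=True, help='Skip confirmation prompts.')
def drop_cache(yes: bool) -> None:
    confirm('Drop the cache?', yes)  # no-op when --yes was passed
    click.echo('Cache dropped.')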
def create_job(name, operation, filters, configfile, sweepfile):
    """create job

    Create a job object in backend that will trigger an operation on
    datasets filtered by the filters.

    Args:
        name (str): name of new job
        operation (str): name of operation to run on datasets
        filters (str): string filters for dataset names to run job on
        configfile (str): json configuration for the job
        sweepfile (str): sweep json to launch a suite of jobs
    """
    from cli.datasets import filter_datasets
    from cli.jobs import create_new_job

    datasets = []
    for dfilter in filters:
        try:
            with Loader(f"Filtering datasets by '{dfilter}'..."):
                filtered_datasets = filter_datasets(dfilter)
            filtered_datasets_names = [*filtered_datasets.keys()]
            click.echo(f"Filtered datasets by filter '{dfilter}':\n{filtered_datasets_names}")
            datasets.append(filtered_datasets.values())
        except requests.exceptions.HTTPError as e:
            click.secho(f"Failed to filter datasets {e}", fg="red", err=True)

    job_configs = []
    if configfile:
        config = read_json(configfile)
        job_configs.append(config)
        click.echo(f"Parsed config file {configfile} : {config}")
    elif sweepfile:
        sweep_config = read_json(sweepfile)
        try:
            configs = resolve_sweep(sweep_config)
        except Exception as e:
            click.secho(f"Failed to resolve sweep file {sweepfile} {e}", fg="yellow", err=True)
            return
        job_configs.extend(configs)
        click.echo(f"Parsed sweep file {sweepfile} : {sweep_config}")
    else:
        job_configs.append(dict())

    click.confirm(f"Launch {len(job_configs)} jobs?", abort=True)
    for i, config in enumerate(job_configs):
        job_name = name if i == 0 else f"{name} {i}"
        try:
            create_new_job(job_name, operation, config, datasets)
            click.secho(f"Created {operation} job '{job_name}' with config {config}", fg="green")
        except requests.exceptions.HTTPError as e:
            click.secho(f"Failed to create job: {e}", fg="red", err=True)

    click.echo(f"Finished creating {len(job_configs)} jobs with name '{name}'")
def set_up_environment(base_dir, experiment, train_number, predict_number, graph_number,
                       db_host, mount_dirs, singularity, queue, clean_up):
    input_params = locals()
    train_files = {}

    graph_setup_dir = os.path.join(
        os.path.join(base_dir, experiment),
        "03_graph/setup_t{}_p{}_g{}".format(train_number, predict_number, graph_number))
    predict_setup_dir = os.path.join(
        os.path.join(base_dir, experiment),
        "02_predict/setup_t{}_p{}".format(train_number, predict_number))
    train_setup_dir = os.path.join(
        os.path.join(base_dir, experiment),
        "01_train/train_{}".format(train_number))

    if clean_up:
        if __name__ == "__main__":
            if click.confirm(
                    'Are you sure you want to remove {} and all its contents?'.format(
                        graph_setup_dir),
                    default=False):
                rmtree(graph_setup_dir)
            else:
                print("Abort clean up")

    if not os.path.exists(predict_setup_dir):
        raise ValueError("No prediction at {}".format(predict_setup_dir))

    if not os.path.exists(graph_setup_dir):
        os.makedirs(graph_setup_dir)
    else:
        if __name__ == "__main__":
            if click.confirm(
                    'Graph setup {} exists already, overwrite?'.format(graph_setup_dir),
                    default=False):
                rmtree(graph_setup_dir)
                os.makedirs(graph_setup_dir)
            else:
                print("Abort.")
                return
        else:
            raise ValueError(
                "Graph setup exists already, choose different graph number or clean up.")

    copyfile(os.path.join(predict_setup_dir, "predict_config.ini"),
             os.path.join(graph_setup_dir, "predict_config.ini"))
    copyfile(os.path.join(predict_setup_dir, "data_config.ini"),
             os.path.join(graph_setup_dir, "data_config.ini"))
    copyfile("./graph/graph.py", os.path.join(graph_setup_dir, "graph.py"))

    worker_config = create_worker_config(mount_dirs, singularity, queue)
    graph_config = create_graph_config(graph_number)

    with open(os.path.join(graph_setup_dir, "worker_config.ini"), "w+") as f:
        worker_config.write(f)
    with open(os.path.join(graph_setup_dir, "graph_config.ini"), "w+") as f:
        graph_config.write(f)
def delete(
    urn: str,
    force: bool,
    soft: bool,
    env: str,
    platform: str,
    entity_type: str,
    query: str,
    registry_id: str,
    dry_run: bool,
    include_removed: bool,
) -> None:
    """Delete metadata from datahub using a single urn or a combination of filters"""

    cli_utils.test_connectivity_complain_exit("delete")
    # one of urn / platform / env / query must be provided
    if not urn and not platform and not env and not query and not registry_id:
        raise click.UsageError(
            "You must provide either an urn or a platform or an env or a query for me to delete anything"
        )

    # default query is set to "*" if not provided
    query = "*" if query is None else query

    if not force and not soft and not dry_run:
        click.confirm(
            "This will permanently delete data from DataHub. Do you want to continue?",
            abort=True,
        )

    if urn:
        # Single urn based delete
        session, host = cli_utils.get_session_and_host()
        entity_type = guess_entity_type(urn=urn)
        logger.info(f"DataHub configured with {host}")

        deletion_result: DeletionResult = delete_one_urn_cmd(
            urn,
            soft=soft,
            dry_run=dry_run,
            entity_type=entity_type,
            cached_session_host=(session, host),
        )

        if not dry_run:
            if deletion_result.num_records == 0:
                click.echo(f"Nothing deleted for {urn}")
            else:
                click.echo(
                    f"Successfully deleted {urn}. {deletion_result.num_records} rows deleted"
                )
    elif registry_id:
        # Registry-id based delete
        if soft and not dry_run:
            raise click.UsageError(
                "Soft-deleting with a registry-id is not yet supported. Try --dry-run to see what you will be deleting, before issuing a hard-delete using the --hard flag"
            )
        deletion_result = delete_for_registry(
            registry_id=registry_id, soft=soft, dry_run=dry_run
        )
    else:
        # log warn include_removed + hard is the only way to work
        if include_removed and soft:
            logger.warning(
                "A filtered delete including soft deleted entities is redundant, because it is a soft delete by default. Please use --include-removed in conjunction with --hard"
            )
        # Filter based delete
        deletion_result = delete_with_filters(
            env=env,
            platform=platform,
            dry_run=dry_run,
            soft=soft,
            entity_type=entity_type,
            search_query=query,
            force=force,
            include_removed=include_removed,
        )

    if not dry_run:
        message = "soft delete" if soft else "hard delete"
        click.echo(
            f"Took {(deletion_result.end_time_millis-deletion_result.start_time_millis)/1000.0} seconds to {message}"
            f" {deletion_result.num_records} rows for {deletion_result.num_entities} entities"
        )
    else:
        click.echo(
            f"{deletion_result.num_entities} entities with "
            f"{deletion_result.num_records if deletion_result.num_records != UNKNOWN_NUM_RECORDS else 'unknown'} rows will be affected. "
            f"Took {(deletion_result.end_time_millis-deletion_result.start_time_millis)/1000.0} seconds to evaluate."
        )
    if deletion_result.sample_records:
        click.echo(
            tabulate(deletion_result.sample_records, RUN_TABLE_COLUMNS, tablefmt="grid"))
import os
import bz2
import io
import zipfile

import click
import requests

dirname, _ = os.path.split(os.path.abspath(__file__))

if click.confirm("Setup ROUGE?", default=True):
    print(
        f'Please run the following command and add it to your startup script: \n export ROUGE_HOME={os.path.join(dirname, "summ_eval/ROUGE-1.5.5/")}'
    )

if click.confirm("Setup SuPERT?", default=True):
    print(
        f'Please run the following command and add it to your startup script: \n export PYTHONPATH=$PYTHONPATH:{os.path.join(dirname, "summ_eval/")}'
    )

if click.confirm("Download METEOR jar?", default=True):
    if not os.path.exists(os.path.join(dirname, "summ_eval/meteor-1.5.jar")):
        url = 'https://github.com/Maluuba/nlg-eval/blob/master/nlgeval/pycocoevalcap/meteor/meteor-1.5.jar?raw=true'
        r = requests.get(url)
        with open(os.path.join(dirname, "summ_eval/meteor-1.5.jar"), "wb") as outputf:
            outputf.write(r.content)
    else:
        print("METEOR jar already downloaded!")

if click.confirm("Download embeddings for S3 and ROUGE-WE metrics?", default=True):
    if not os.path.exists(os.path.join(dirname, "summ_eval/embeddings")):
def transfer(api):
    '''Massive datasets or reuses transfer'''
    header(transfer.__doc__)

    me = api.get('me')
    is_admin = 'admin' in me['roles']

    # Prompt user for object type
    type_choice = prompt_choices('Transfer types', *TYPE_CHOICES)

    # Prompt user for source
    source_choices = ADMIN_SOURCE_CHOICES if is_admin else SOURCE_CHOICES
    source_choice = prompt_choices('Transfer from ?', *source_choices)

    if source_choice == MY_ORGS:
        org_choices = enumerate((o['name'] for o in me['organizations']), 1)
        org_choice = prompt_choices('Your organizations', *org_choices)
        org_index = int(org_choice) - 1
        source = me['organizations'][org_index]
    elif source_choice == ANY_USER:
        source = suggest.users(api)
    elif source_choice == ANY_ORG:
        source = suggest.organizations(api)

    # Prompt user for target
    target_choice = prompt_choices('Target types', *TARGET_CHOICES)

    if target_choice == AN_USER:
        target = suggest.users(api)
    elif target_choice == AN_ORG:
        target = suggest.organizations(api)

    # Prompt user for message
    message = click.prompt('Please enter the transfer reason')

    # Fetch items
    qs = {'owner': source['id']} if source_choice in (MINE, ANY_USER) else {'organization': source['id']}
    qs['page_size'] = '1000'  # Try to fetch once
    endpoint = 'datasets/' if type_choice == IS_DATASET else 'reuses/'
    items = api.get(endpoint, fields='data{id,slug,title,owner,organization},total', **qs)

    # Display a summary and ask for confirmation
    if source_choice == MINE:
        source_label = 'your user'
    elif source_choice in (MY_ORGS, ANY_ORG):
        source_label = '"{name}" organization'.format(**source)
    else:
        source_label = '"{first_name} {last_name}" user'.format(**source)
    if target_choice == AN_ORG:
        target_label = '"{name}" organization'.format(**target)
    else:
        target_label = '"{first_name} {last_name}" user'.format(**target)

    label_arrow('Summary', dedent('''
        Will transfer all {type} ({total}) from {source} to {target}.
        The transfer reason is: {message}''').format(
        type=white('datasets' if type_choice == IS_DATASET else 'reuses'),
        source=white(source_label),
        target=white(target_label),
        message=message,
        total=white(items['total']),
    ))
    click.confirm('Are you sure ?', abort=True)

    # Perform
    item_type = 'Dataset' if type_choice == IS_DATASET else 'Reuse'
    for item in items['data']:
        log.info('Transferring %s(%s)', item_type, item['id'])
        request_payload = {
            'comment': message,
            'recipient': {
                'class': 'Organization' if target_choice == AN_ORG else 'User',
                'id': target['id'],
            },
            'subject': {
                'class': item_type,
                'id': item['id'],
            }
        }
        request_response = api.post('transfer/', request_payload)
        accept_payload = {
            'response': 'accept',
            'comment': 'Automatically accepted by udata-cli',
        }
        transfer_url = 'transfer/{id}/'.format(**request_response)
        accept_response = api.post(transfer_url, accept_payload)
        msg = ('{subject[class]}({subject[id]}) '
               'transferred to '
               '{recipient[class]}({recipient[id]})').format(**accept_response)
        log.info(msg)

    success('Transferred {0} item(s)'.format(items['total']))
def login(client_id='', client_secret=''):
    """Authorize spotify-cli to access the Spotify API."""
    # verify both creds are provided
    if client_id or client_secret:
        if client_id and client_secret:
            click.echo(
                'Authenticating with provided Client ID and secret.\n'
                'Please ensure that the URL below is listed as a valid '
                'redirect URI in your Spotify application:\n\n{}\n'.format(REDIRECT_URI))
        else:
            click.echo('Please provide both the Client ID and secret.', err=True)
            return

    config = Spotify.get_config()
    if config.get('client_id') and not client_id:
        reuse_creds = click.confirm(
            'You used a custom Client ID and secret to authenticate last time. '
            'Use these again?\n'
            '(Type "n" to revert to the default ID and secret)',
            default=True,
        )
        if not reuse_creds:
            client_id = ''
            client_secret = ''
            click.echo('Removing custom client ID and secret.\n')
        else:
            client_id = config.get('client_id')
            client_secret = config.get('client_secret')

    Spotify.update_config({
        'client_id': client_id,
        'client_secret': client_secret,
    })

    # select scopes
    import webbrowser
    from PyInquirer import prompt

    enabled_scopes = Spotify.get_config().get('auth_scopes', [])
    choices = []
    for scope in AUTH_SCOPES_MAPPING:
        if scope['value'] == 'default':
            continue
        choices.append({
            'name': scope['name'],
            'checked': scope['name'] in enabled_scopes,
        })

    click.echo('By default, spotify-cli will enable reading & '
               'modifying the playback state.\n')
    choice = prompt([{
        'type': 'checkbox',
        'name': 'scopes',
        'message': ('Please select which additional features '
                    'you want to authorize.'),
        'choices': choices,
    }])
    if not choice:
        return

    # confirm
    additional_scopes = choice.get('scopes', [])
    click.echo(
        '\n{} features selected. This will overwrite your existing credentials.'
        .format(len(additional_scopes)))
    click.confirm('Proceed with these settings?', default=True, abort=True)

    # handle auth and save credentials
    url = build_auth_url(additional_scopes, client_id)
    webbrowser.open(url)
    click.echo('\nGo to the following link in your browser:\n\n\t{}\n'.format(url))
    auth_code = input('Enter verification code: ')
    click.echo('\nObtaining access token...')
    Spotify.refresh(auth_code)
    Spotify.update_config({'auth_scopes': additional_scopes})
    click.echo('Credentials saved to {}'.format(CREDS_PATH))
    return
def sync(
    to_install: Iterable[InstallRequirement],
    to_uninstall: Iterable[InstallRequirement],
    dry_run: bool = False,
    install_flags: Optional[List[str]] = None,
    ask: bool = False,
    python_executable: Optional[str] = None,
) -> int:
    """
    Install and uninstall the given sets of modules.
    """
    exit_code = 0

    python_executable = python_executable or sys.executable

    if not to_uninstall and not to_install:
        log.info("Everything up-to-date", err=False)
        return exit_code

    pip_flags = []
    if log.verbosity < 0:
        pip_flags += ["-q"]

    if ask:
        dry_run = True

    if dry_run:
        if to_uninstall:
            click.echo("Would uninstall:")
            for pkg in sorted(to_uninstall):
                click.echo(f"  {pkg}")

        if to_install:
            click.echo("Would install:")
            for ireq in sorted(to_install, key=key_from_ireq):
                click.echo(f"  {format_requirement(ireq)}")

        exit_code = 1

        if ask and click.confirm("Would you like to proceed with these changes?"):
            dry_run = False
            exit_code = 0

    if not dry_run:
        if to_uninstall:
            run(  # nosec
                [
                    python_executable,
                    "-m",
                    "pip",
                    "uninstall",
                    "-y",
                    *pip_flags,
                    *sorted(to_uninstall),
                ],
                check=True,
            )

        if to_install:
            if install_flags is None:
                install_flags = []
            # prepare requirement lines
            req_lines = []
            for ireq in sorted(to_install, key=key_from_ireq):
                ireq_hashes = get_hashes_from_ireq(ireq)
                req_lines.append(format_requirement(ireq, hashes=ireq_hashes))

            # save requirement lines to a temporary file
            tmp_req_file = tempfile.NamedTemporaryFile(mode="wt", delete=False)
            tmp_req_file.write("\n".join(req_lines))
            tmp_req_file.close()

            try:
                run(  # nosec
                    [
                        python_executable,
                        "-m",
                        "pip",
                        "install",
                        "-r",
                        tmp_req_file.name,
                        *pip_flags,
                        *install_flags,
                    ],
                    check=True,
                )
            finally:
                os.unlink(tmp_req_file.name)

    return exit_code
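# Hedged usage sketch for sync() above. With ask=True the function forces an
# initial dry run and only applies changes after click.confirm(); exit code 1
# means "changes pending but not applied". The import path assumes pip-tools'
# module layout (this function lives in piptools/sync.py upstream).
import sys
from piptools.sync import sync  # assumption: pip-tools' public module layout

# With empty requirement sets this just reports "Everything up-to-date"
# and returns 0; real callers pass resolved InstallRequirement objects.
exit_code = sync(
    to_install=[],
    to_uninstall=[],
    ask=True,
    python_executable=sys.executable,
)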
def update_transfer_appliance(ctx, from_json, force, wait_for_state, max_wait_seconds, wait_interval_seconds, id, transfer_appliance_label, lifecycle_state, customer_shipping_address, if_match): if isinstance(id, six.string_types) and len(id.strip()) == 0: raise click.UsageError( 'Parameter --id cannot be whitespace or empty string') if isinstance(transfer_appliance_label, six.string_types) and len( transfer_appliance_label.strip()) == 0: raise click.UsageError( 'Parameter --transfer-appliance-label cannot be whitespace or empty string' ) if not force: if customer_shipping_address: if not click.confirm( "WARNING: Updates to customer-shipping-address will replace any existing values. Are you sure you want to continue?" ): ctx.abort() kwargs = {} if if_match is not None: kwargs['if_match'] = if_match _details = {} if lifecycle_state is not None: _details['lifecycleState'] = lifecycle_state if customer_shipping_address is not None: _details['customerShippingAddress'] = cli_util.parse_json_parameter( "customer_shipping_address", customer_shipping_address) client = cli_util.build_client('dts', 'transfer_appliance', ctx) result = client.update_transfer_appliance( id=id, transfer_appliance_label=transfer_appliance_label, update_transfer_appliance_details=_details, **kwargs) if wait_for_state: if hasattr(client, 'get_transfer_appliance') and callable( getattr(client, 'get_transfer_appliance')): try: wait_period_kwargs = {} if max_wait_seconds is not None: wait_period_kwargs['max_wait_seconds'] = max_wait_seconds if wait_interval_seconds is not None: wait_period_kwargs[ 'max_interval_seconds'] = wait_interval_seconds click.echo( 'Action completed. Waiting until the resource has entered state: {}' .format(wait_for_state), file=sys.stderr) result = oci.wait_until( client, client.get_transfer_appliance(result.data.id), 'lifecycle_state', wait_for_state, **wait_period_kwargs) except oci.exceptions.MaximumWaitTimeExceeded as e: # If we fail, we should show an error, but we should still provide the information to the customer click.echo( 'Failed to wait until the resource entered the specified state. Outputting last known resource state', file=sys.stderr) cli_util.render_response(result, ctx) sys.exit(2) except Exception: click.echo( 'Encountered error while waiting for resource to enter the specified state. Outputting last known resource state', file=sys.stderr) cli_util.render_response(result, ctx) raise else: click.echo( 'Unable to wait for the resource to enter the specified state', file=sys.stderr) cli_util.render_response(result, ctx)
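# Minimal sketch of the oci.wait_until() pattern used in the command above,
# assuming the OCI Python SDK; the helper name and target state are
# hypothetical, and client construction is left to the caller.
import oci

def wait_for_appliance_state(client, appliance_id, target_state,
                             max_wait_seconds=None):
    response = client.get_transfer_appliance(appliance_id)
    try:
        return oci.wait_until(
            client, response, 'lifecycle_state', target_state,
            **({'max_wait_seconds': max_wait_seconds} if max_wait_seconds else {}),
        )
    except oci.exceptions.MaximumWaitTimeExceeded:
        # Mirror the CLI above: surface the last known state instead of
        # raising, so callers can still inspect the resource.
        return response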
async def cmd_deploy( repo: str, ref: str, environment: str, task: str, transient: bool, production: bool, description: str, force: bool, check_constraints: bool, exclude_check_run_names: Sequence[str], exclude_check_run_conclusions: Sequence[str], interactive: bool, ): tags = ", ".join(get_commit_tags(ref)) if tags: tags = f" ({tags})" print_info(f"{repo}@{ref}{tags} will be deployed to {environment}") print(f" transient {bool_to_str(transient)}") print(f" production {bool_to_str(production)}") print(f" force {bool_to_str(force)}") print(f" description {description}") async with GitHub(repo_path=repo) as gh: recent_deployment = await gh.get_recent_deployment(environment) recent_deployment_ref = recent_deployment.ref if recent_deployment else None git_log = get_git_log(recent_deployment_ref, ref) if recent_deployment_ref else None print() if git_log is not None: print("\n".join(git_log)) elif recent_deployment_ref == ref: print_info("This commit is currently deployed") else: print_info( "First Deployment to this environment, not showing the commit list" ) if interactive: print() if not click.confirm("Start deployment?"): return print_info("Creating deployment") async with GitHub(repo_path=repo) as gh: deployment_id = await deploy( gh=gh, ref=ref, environment=environment, task=task, transient=transient, production=production, description=description, check_constraints=check_constraints, exclude_check_run_names=exclude_check_run_names, exclude_check_run_conclusions=exclude_check_run_conclusions, force=force, ) print(f"::set-output name=deployment_id::{deployment_id}") print_success(f"Deployment {deployment_id} created")
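# Hedged sketch of driving the coroutine above from synchronous code; the
# signature matches cmd_deploy() as defined, but every argument value here is
# a hypothetical example.
import asyncio

asyncio.run(cmd_deploy(
    repo="octocat/hello-world",
    ref="main",
    environment="staging",
    task="deploy",
    transient=False,
    production=False,
    description="example deploy",
    force=False,
    check_constraints=True,
    exclude_check_run_names=(),
    exclude_check_run_conclusions=(),
    interactive=True,  # prompts "Start deployment?" before creating anything
))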
def add_env_to_apps_and_version(apps_and_version, environment, apps, config_data): if apps in config_data[environment]: apps_and_version[apps][environment] = config_data[environment][apps] if __name__ == '__main__': ap = argparse.ArgumentParser() ap.add_argument("-g", "--group-var-folder", dest='group_var_folder', required=True, help="Group Var folder") ap.add_argument("-f", "--group-var-file-prefix", dest='group_var_file_prefix', required=False, help="Group Var env file prefix", default='tag_env_') ap.add_argument("-s", "--serverless-var-file-prefix", dest='serverless_var_file_prefix', required=False, help="Serverless Var env file prefix", default='') ap.add_argument("--force", dest='force', help="Do not ask confirmations", action='store_true') ap.add_argument("-e", "--exported-file-path", dest='exported_file_path', required=False, help="Path where to export the html file", default='./compiled-versions.html', ) args = ap.parse_args() if os.path.isdir(args.exported_file_path): raise Exception('exported-file-path cannot be a folder') if os.path.exists(args.exported_file_path) and not args.force: if not click.confirm("File already exists, ok to overwrite?"): click.echo("Exiting") exit(0) create_version_html(args.group_var_folder, args.group_var_file_prefix, args.serverless_var_file_prefix, args.exported_file_path) click.echo("All done")
def upgrade(ctx): """ Upgrade from free tier to standard tier. """ if click.confirm('Are you sure you want to upgrade to the standard tier?'): gigalixir_user.upgrade(ctx.obj['host'])
def create_task(instance, user, password, assigned_to, assignment_group, short_description, description, comments, story, state, type_, priority, planned_hours, blocked_reason, quantity, noprompt, verbose):
    """Creates one or more tasks for the supplied story"""
    if verbose >= 2:
        root_logger.setLevel(logging.DEBUG)
    elif verbose == 1:
        root_logger.setLevel(logging.INFO)
    else:
        root_logger.setLevel(logging.WARNING)

    if not instance or not user or not password:
        click.secho(
            'error: snow instance, user or password not set via cli or in config file',
            fg='red', err=True)
        click.secho('default config file: ' + click.format_filename(config_file),
                    fg='red', err=True)
        sys.exit(1)

    logging.info('instance: %s', instance)
    logging.info('user: %s', user)
    logging.info('password: %s', '[REDACTED]')

    # change DRAFT state back to '-6', because asking a user to enter a negative number is ugly
    if state == 0:
        state = -6

    # if supplied a blocked_reason, set blocked to True, else False
    blocked = bool(blocked_reason)

    snow_client = pysnow.Client(instance=instance, user=user, password=password)
    logging.debug('snow_client: %s', snow_client)

    # check the story supplied exists, save sys_id, user and group for later
    story_fields = ['sys_id', 'assigned_to', 'assignment_group']
    logging.debug('story_fields: %s', story_fields)
    story_query = {'number': story}
    logging.debug('story_query: %s', story_query)
    story_resource = snow_client.resource(api_path='/table/rm_story')
    logging.debug('story_resource: %s', story_resource)
    story_response = story_resource.get(
        fields=story_fields,
        query=story_query,
        display_value=False,
        exclude_reference_link=True,
    )
    logging.debug('story_response: %s', story_response)
    try:
        story_response_one = story_response.one()
    except Exception:
        click.secho('error: story ' + story + ', stopping', fg='red', err=True)
        raise
    logging.debug('story_response_one: %s', story_response_one)
    story_sys_id = story_response_one['sys_id']
    logging.info('story: %s', story)
    logging.info('story_sys_id: %s', story_sys_id)

    # find owner if none supplied
    if not assigned_to:
        assigned_to = story_response_one['assigned_to']
    logging.info('assigned_to: %s', assigned_to)

    # find group if none supplied
    if not assignment_group:
        assignment_group = story_response_one['assignment_group']
    logging.info('assignment_group: %s', assignment_group)

    logging.info('short_description: %s', short_description)
    logging.info('description: %s', description)
    logging.info('comments: %s', comments)
    logging.info('state: %s', state)
    logging.info('type_: %s', type_)
    logging.info('priority: %s', priority)
    logging.info('planned_hours: %s', planned_hours)
    logging.info('blocked: %s', blocked)
    logging.info('blocked_reason: %s', blocked_reason)
    logging.info('quantity: %s', quantity)
    logging.info('noprompt: %s', noprompt)

    # make task plural if more than one
    ess = 's' if quantity > 1 else ''
    click.secho("Creating {} task{} for story {}".format(quantity, ess, story),
                fg='blue', err=False)
    if not noprompt:
        click.confirm("Do you want to continue?", abort=True)

    task_resource = snow_client.resource(api_path='/table/rm_scrum_task')
    logging.debug('task_resource: %s', task_resource)
    for _ in range(quantity):
        new_task_payload = {
            "assigned_to": assigned_to,
            "assignment_group": assignment_group,
            "short_description": short_description,
            "description": description,
            "comments": comments,
            "state": state,
            "type": type_,
            "priority": priority,
            "planned_hours": planned_hours,
            "blocked": blocked,
            "blocked_reason": blocked_reason,
            "parent": story_sys_id,
        }
        logging.debug('new_task_payload: %s', new_task_payload)
        try:
            createtask_response = task_resource.create(
                payload=new_task_payload,
            )
            logging.debug('createtask_response: %s', createtask_response)
        except Exception:
            click.secho('error: create task failed for story ' + story + ', stopping',
                        fg='red', err=True)
            raise
        try:
            createtask_response_one = createtask_response.one()
            logging.debug('createtask_response_one: %s', createtask_response_one)
        except Exception:
            click.secho('error: create task failed for story ' + story + ', stopping',
                        fg='red', err=True)
            raise
        task = createtask_response_one['number']
        click.secho("success: for story {}; created task {}".format(story, task),
                    fg='green', err=False)
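# Self-contained sketch of the pysnow calls create_task() relies on, assuming
# pysnow's documented Client/resource API; the instance name, credentials and
# record number below are hypothetical placeholders.
import pysnow

client = pysnow.Client(instance='dev00000', user='admin', password='secret')
story = client.resource(api_path='/table/rm_story')

# .get() returns a response object; .one() raises if there is not exactly
# one matching record, which is why create_task() wraps it in try/except.
record = story.get(query={'number': 'STRY0000001'}).one()

tasks = client.resource(api_path='/table/rm_scrum_task')
created = tasks.create(payload={'short_description': 'demo',
                                'parent': record['sys_id']})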
def execute_backfill_command(cli_args, print_fn, instance=None): instance = instance or DagsterInstance.get() external_pipeline = get_external_pipeline_from_kwargs(cli_args) external_repository = get_external_repository_from_kwargs(cli_args) # We should move this to use external repository # https://github.com/dagster-io/dagster/issues/2556 recon_repo = recon_repo_from_external_repo(external_repository) repo_def = recon_repo.get_definition() noprompt = cli_args.get('noprompt') pipeline_def = repo_def.get_pipeline(external_pipeline.name) # Resolve partition set all_partition_sets = repo_def.partition_set_defs + [ schedule_def.get_partition_set() for schedule_def in repo_def.schedule_defs if isinstance(schedule_def, PartitionScheduleDefinition) ] pipeline_partition_sets = [ x for x in all_partition_sets if x.pipeline_name == pipeline_def.name ] if not pipeline_partition_sets: raise click.UsageError( 'No partition sets found for pipeline `{}`'.format(pipeline_def.name) ) partition_set_name = cli_args.get('partition_set') if not partition_set_name: if len(pipeline_partition_sets) == 1: partition_set_name = pipeline_partition_sets[0].name elif noprompt: raise click.UsageError('No partition set specified (see option `--partition-set`)') else: partition_set_name = click.prompt( 'Select a partition set to use for backfill: {}'.format( ', '.join(x.name for x in pipeline_partition_sets) ) ) partition_set = next((x for x in pipeline_partition_sets if x.name == partition_set_name), None) if not partition_set: raise click.UsageError('No partition set found named `{}`'.format(partition_set_name)) # Resolve partitions to backfill partitions = gen_partitions_from_args(partition_set, cli_args) # Print backfill info print_fn('\n Pipeline: {}'.format(pipeline_def.name)) print_fn('Partition set: {}'.format(partition_set.name)) print_fn(' Partitions: {}\n'.format(print_partition_format(partitions, indent_level=15))) # Confirm and launch if noprompt or click.confirm( 'Do you want to proceed with the backfill ({} partitions)?'.format(len(partitions)) ): print_fn('Launching runs... ') backfill_id = make_new_backfill_id() run_tags = merge_dicts( PipelineRun.tags_for_backfill_id(backfill_id), get_tags_from_args(cli_args), ) for partition in partitions: run = instance.create_run_for_pipeline( pipeline_def=pipeline_def, mode=partition_set.mode, solids_to_execute=frozenset(partition_set.solid_selection) if partition_set and partition_set.solid_selection else None, environment_dict=partition_set.run_config_for_partition(partition), tags=merge_dicts(partition_set.tags_for_partition(partition), run_tags), ) instance.launch_run(run.run_id, external_pipeline) # Remove once we can handle synchronous execution... currently limited by sqlite time.sleep(0.1) print_fn('Launched backfill job `{}`'.format(backfill_id)) else: print_fn(' Aborted!')
def login(token, username, password): """Login to Polyaxon.""" polyaxon_client = PolyaxonClient() if username and not token: # Use user or email / password login if not password: password = click.prompt( "Please enter your password", type=str, hide_input=True ) password = password.strip() if not password: logger.info( "You entered an empty string. " "Please make sure you enter your password correctly." ) sys.exit(1) try: body = V1Credentials(username=username, password=password) access_auth = polyaxon_client.auth_v1.login(body=body) except (ApiException, HTTPError) as e: AuthConfigManager.purge() CliConfigManager.purge() handle_cli_error(e, message="Could not login.") sys.exit(1) if not access_auth.token: Printer.print_error("Failed to login") return else: if not token: token_url = "{}/app/token".format(polyaxon_client.config.host) click.confirm( "Authentication token page will now open in your browser. Continue?", abort=True, default=True, ) click.launch(token_url) logger.info("Please copy and paste the authentication token.") token = click.prompt( "This is an invisible field. Paste token and press ENTER", type=str, hide_input=True, ) if not token: logger.info( "Empty token received. " "Make sure your shell is handling the token appropriately." ) logger.info( "See docs for help: http://polyaxon.com/docs/polyaxon_cli/commands/auth" ) return access_auth = polyaxon_sdk.models.V1Auth(token=token.strip(" ")) # Set user try: AuthConfigManager.purge() polyaxon_client = PolyaxonClient(token=access_auth.token) user = polyaxon_client.users_v1.get_user() except (ApiException, HTTPError) as e: handle_cli_error(e, message="Could not load user info.") sys.exit(1) access_token = AccessTokenConfig(username=user.username, token=access_auth.token) AuthConfigManager.set_config(access_token) polyaxon_client.config.token = access_auth.token Printer.print_success("Login successful") # Reset current cli server_versions = get_server_versions(polyaxon_client=polyaxon_client) current_version = get_current_version() log_handler = get_log_handler(polyaxon_client=polyaxon_client) CliConfigManager.reset( check_count=0, current_version=current_version, server_versions=server_versions.to_dict(), log_handler=log_handler, )
def main(args=None):
    """Main commandline entrypoint."""
    if args is None:
        args = sys.argv[1:]

    parser = argparse.ArgumentParser(description="Find My iPhone CommandLine Tool")

    parser.add_argument(
        "--username",
        action="store",
        dest="username",
        default="",
        help="Apple ID to Use",
    )
    parser.add_argument(
        "--password",
        action="store",
        dest="password",
        default="",
        help=(
            "Apple ID Password to Use; if unspecified, password will be "
            "fetched from the system keyring."
        ),
    )
    parser.add_argument(
        "-n",
        "--non-interactive",
        action="store_false",
        dest="interactive",
        default=True,
        help="Disable interactive prompts.",
    )
    parser.add_argument(
        "--delete-from-keyring",
        action="store_true",
        dest="delete_from_keyring",
        default=False,
        help="Delete stored password in system keyring for this username.",
    )
    parser.add_argument(
        "--list",
        action="store_true",
        dest="list",
        default=False,
        help="Short Listings for Device(s) associated with account",
    )
    parser.add_argument(
        "--llist",
        action="store_true",
        dest="longlist",
        default=False,
        help="Detailed Listings for Device(s) associated with account",
    )
    parser.add_argument(
        "--locate",
        action="store_true",
        dest="locate",
        default=False,
        help="Retrieve Location for the iDevice (non-exclusive).",
    )

    # Restrict actions to a specific device's UID / DID
    parser.add_argument(
        "--device",
        action="store",
        dest="device_id",
        default=False,
        help="Only affect this device",
    )

    # Trigger Sound Alert
    parser.add_argument(
        "--sound",
        action="store_true",
        dest="sound",
        default=False,
        help="Play a sound on the device",
    )

    # Trigger Message w/Sound Alert
    parser.add_argument(
        "--message",
        action="store",
        dest="message",
        default=False,
        help="Optional Text Message to display with a sound",
    )

    # Trigger Message (without Sound) Alert
    parser.add_argument(
        "--silentmessage",
        action="store",
        dest="silentmessage",
        default=False,
        help="Optional Text Message to display with no sounds",
    )

    # Lost Mode
    parser.add_argument(
        "--lostmode",
        action="store_true",
        dest="lostmode",
        default=False,
        help="Enable Lost mode for the device",
    )
    parser.add_argument(
        "--lostphone",
        action="store",
        dest="lost_phone",
        default=False,
        help="Phone Number allowed to call when lost mode is enabled",
    )
    parser.add_argument(
        "--lostpassword",
        action="store",
        dest="lost_password",
        default=False,
        help="Forcibly activate this passcode on the idevice",
    )
    parser.add_argument(
        "--lostmessage",
        action="store",
        dest="lost_message",
        default="",
        help="Forcibly display this message when activating lost mode.",
    )

    # Output device data to a pickle file
    parser.add_argument(
        "--outputfile",
        action="store_true",
        dest="output_to_file",
        default="",
        help="Save device data to a file in the current directory.",
    )

    command_line = parser.parse_args(args)

    username = command_line.username
    password = command_line.password

    if username and command_line.delete_from_keyring:
        utils.delete_password_in_keyring(username)

    failure_count = 0
    while True:
        # Which password we use is determined by your username, so we
        # do need to check for this first and separately.
        if not username:
            parser.error("No username supplied")

        if not password:
            password = utils.get_password(
                username, interactive=command_line.interactive
            )

        if not password:
            parser.error("No password supplied")

        try:
            api = PyiCloudService(username.strip(), password.strip())
            if (
                not utils.password_exists_in_keyring(username)
                and command_line.interactive
                and confirm("Save password in keyring?")
            ):
                utils.store_password_in_keyring(username, password)

            if api.requires_2fa:
                # fmt: off
                print(
                    "\nTwo-step authentication required.",
                    "\nPlease enter validation code"
                )
                # fmt: on

                code = input("(string) --> ")
                if not api.validate_2fa_code(code):
                    print("Failed to verify verification code")
                    sys.exit(1)

                print("")

            elif api.requires_2sa:
                # fmt: off
                print(
                    "\nTwo-step authentication required.",
                    "\nYour trusted devices are:"
                )
                # fmt: on

                devices = api.trusted_devices
                for i, device in enumerate(devices):
                    print(
                        "  %s: %s" % (
                            i,
                            device.get(
                                "deviceName", "SMS to %s" % device.get("phoneNumber")
                            ),
                        )
                    )

                print("\nWhich device would you like to use?")
                device = int(input("(number) --> "))
                device = devices[device]
                if not api.send_verification_code(device):
                    print("Failed to send verification code")
                    sys.exit(1)

                print("\nPlease enter validation code")
                code = input("(string) --> ")
                if not api.validate_verification_code(device, code):
                    print("Failed to verify verification code")
                    sys.exit(1)

                print("")
            break
        except PyiCloudFailedLoginException:
            # If they have a stored password; we just used it and
            # it did not work; let's delete it if there is one.
            if utils.password_exists_in_keyring(username):
                utils.delete_password_in_keyring(username)

            message = "Bad username or password for {username}".format(
                username=username,
            )
            password = None

            failure_count += 1
            if failure_count >= 3:
                raise RuntimeError(message)

            print(message, file=sys.stderr)

    for dev in api.devices:
        if not command_line.device_id or (
            command_line.device_id.strip().lower()
            == dev.content["id"].strip().lower()
        ):
            # List device(s)
            if command_line.locate:
                dev.location()

            if command_line.output_to_file:
                create_pickled_data(
                    dev,
                    filename=(dev.content["name"].strip().lower() + ".fmip_snapshot"),
                )

            contents = dev.content
            if command_line.longlist:
                print("-" * 30)
                print(contents["name"])
                for key in contents:
                    print("%20s - %s" % (key, contents[key]))
            elif command_line.list:
                print("-" * 30)
                print("Name - %s" % contents["name"])
                print("Display Name  - %s" % contents["deviceDisplayName"])
                print("Location      - %s" % contents["location"])
                print("Battery Level - %s" % contents["batteryLevel"])
                print("Battery Status- %s" % contents["batteryStatus"])
                print("Device Class  - %s" % contents["deviceClass"])
                print("Device Model  - %s" % contents["deviceModel"])

            # Play a Sound on a device
            if command_line.sound:
                if command_line.device_id:
                    dev.play_sound()
                else:
                    raise RuntimeError(
                        "\n\n\t\t%s %s\n\n" % (
                            "Sounds can only be played on a singular device.",
                            DEVICE_ERROR,
                        )
                    )

            # Display a Message on the device
            if command_line.message:
                if command_line.device_id:
                    dev.display_message(
                        subject="A Message", message=command_line.message, sounds=True
                    )
                else:
                    raise RuntimeError(
                        "%s %s" % (
                            "Messages can only be played on a singular device.",
                            DEVICE_ERROR,
                        )
                    )

            # Display a Silent Message on the device
            if command_line.silentmessage:
                if command_line.device_id:
                    dev.display_message(
                        subject="A Silent Message",
                        message=command_line.silentmessage,
                        sounds=False,
                    )
                else:
                    raise RuntimeError(
                        "%s %s" % (
                            "Silent Messages can only be played "
                            "on a singular device.",
                            DEVICE_ERROR,
                        )
                    )

            # Enable Lost mode
            if command_line.lostmode:
                if command_line.device_id:
                    dev.lost_device(
                        number=command_line.lost_phone.strip(),
                        text=command_line.lost_message.strip(),
                        newpasscode=command_line.lost_password.strip(),
                    )
                else:
                    raise RuntimeError(
                        "%s %s" % (
                            "Lost Mode can only be activated on a singular device.",
                            DEVICE_ERROR,
                        )
                    )
    sys.exit(0)
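# Hedged sketch of the PyiCloudService calls main() exercises above, assuming
# the pyicloud package; the Apple ID and password are placeholders.
from pyicloud import PyiCloudService

api = PyiCloudService('user@example.com', 'hunter2')
for dev in api.devices:
    print(dev.content['name'], dev.location())
    # Other device actions used above: dev.play_sound(),
    # dev.display_message(subject=..., message=..., sounds=...),
    # dev.lost_device(number=..., text=..., newpasscode=...)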
def new(config, name, tags, readme_template, commands, clone, github, migrate, no_readme):
    """Create a new project"""
    clone_url = False

    # define vars project_name and project_dir
    project_name, project_dir = parse_project_data(name, config)

    # check if the project exists already and if that is the case, exit.
    if project_exists(project_name, project_dir, config):
        click.secho('Error! Project name is already used', bg='red', fg='white')
        sys.exit(0)

    # open the notepad if the user used the flag --commands
    if commands:
        INSTRUCTIONS = '# All the commands executed when a project is opened'
        code = click.edit(INSTRUCTIONS)
    else:
        code = ''

    # Add to database
    add_to_database(project_name, project_dir, tags, config, code)
    click.secho('Project added to the database', bg='green', fg='black')

    # print info
    click.echo(f'Project name: {project_name}')
    click.echo(f'Tags: {tags}')

    # Migrate files to a new folder if the user used the migrate option, then exit.
    if migrate:
        print(f'All the files from {migrate} will be migrated to the new dir')
        # the copytree function creates the new directory for us
        shutil.copytree(migrate, project_dir)
        exit(0)
    else:
        # Create a project folder, since the migrate option was not used!
        os.mkdir(project_dir)
        click.echo('Project directory created!')

    # Logic for cloning from github or with the --clone command.
    if github:
        github_repos = f'https://api.github.com/users/{github[0]}/repos'

        # Try to get data from github
        try:
            r = requests.get(github_repos)
        except requests.exceptions.RequestException as e:
            print(e)
            sys.exit(1)

        # reading json data
        data = loads(r.text)
        repo_name = github[1]

        # Create a list with the names of all the repos.
        user_repos = []
        for repo in data:
            try:
                user_repos.append(repo['name'])
            except TypeError:
                # Failed to load data from user
                click.secho(
                    f'Failed to load repository data for user "{github[0]}"',
                    bg='red', fg='white')
                exit(0)

        while True:
            # Was a valid repo picked?
            if repo_name in user_repos:
                # find the clone_url.
                for repo in data:
                    if repo_name == repo['name']:
                        clone_url = repo['clone_url']
                        break
                break
            # if no repo with that name exists in the list
            else:
                # Alert the user!
                click.secho(f'{github[0]} has no repo named "{repo_name}"!',
                            bg='red', fg='white')
                # Ask if they want to try again
                if not click.confirm('Would you like to choose from the existing repositories? ' \
                        '(the attempt to clone will otherwise be aborted)'):
                    exit()
                # If the user said no, exit; otherwise move on:

                # The user chose to continue...
                # list all the repos:
                click.echo(f'Listing all repos belonging to {github[0]}')
                for name in user_repos:
                    click.echo(name)

                repo_name = click.prompt('Enter name')

    elif clone:
        clone_url = click.prompt('Git URL')

    # If a clone_url was specified in the code above, try to git clone.
    if clone_url:
        os.chdir(project_dir)
        os.system(f'git clone {clone_url} .')
        click.echo(f'Project directory cloned from {clone_url}!')

    # If the user chose not to clone, a readme is created
    # (unless the user used the no_readme flag)
    elif not no_readme:
        create_readme(readme_template, project_dir, project_name)
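# Minimal sketch of the GitHub lookup new() performs: list a user's public
# repositories and resolve one repo name to its clone URL. The username and
# repo name below are hypothetical examples.
import requests

def find_clone_url(user, repo_name):
    data = requests.get(f'https://api.github.com/users/{user}/repos').json()
    # The API returns a list of repo objects; an error payload (e.g. unknown
    # user) is a dict instead, which new() above detects via the TypeError
    # raised when indexing its string keys.
    for repo in data:
        if repo['name'] == repo_name:
            return repo['clone_url']
    return None

print(find_clone_url('octocat', 'hello-world'))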
def command_cluster_destroy(ctx, profile):
    """
    Destroys the named OpenShift cluster.

    If the named OpenShift cluster is currently active, it will be
    stopped before being destroyed.

    """
    if profile not in profile_names(ctx):
        click.echo('Invalid: %s' % profile)
        ctx.exit(1)

    click.confirm('Destroy profile "%s"?' % profile, abort=True)

    # If the profile to be destroyed is the current active one then we
    # need to make sure it is stopped before removing anything.
    if profile == active_profile(ctx):
        click.echo('Stopping')

        result = execute('oc cluster down')

        cleanup_profile(ctx)

        if result.returncode == 0:
            click.echo('Stopped')
        else:
            click.echo('Failed: The "oc cluster down" command failed.')
            ctx.exit(result.returncode)

    # Now remove any images which were built by the cluster for this
    # profile, using the label attached to the images.
    click.echo('Cleaning')

    label = 'powershift-profile=%s' % profile
    command = 'docker images --filter label=%s -q' % label

    try:
        images = execute_and_capture(command)

        for image in images.strip().split():
            command = 'docker rmi %s' % image
            result = execute(command)

            if result.returncode != 0:
                click.echo('Warning: Unable to delete image %s.' % image)
    except Exception:
        click.echo('Warning: Unable to query images for profile.')

    # Now remove any profile directory inside of the container.
    container_profiles_dir = '/var/lib/powershift/profiles'
    container_profile_dir = posixpath.join(container_profiles_dir, profile)

    command = []
    command.append('docker run --rm -v /var:/var busybox rm -rf')
    command.append(container_profile_dir)
    command = ' '.join(command)

    result = execute(command)

    if result.returncode != 0:
        click.echo('Failed: Cannot delete container profile directory.')

    # Remove the profile directory. There may be a risk this will not
    # completely work if files were created in a volume which had
    # ownership or permissions that prevent removal.
    profiles = ctx.obj['PROFILES']
    directory = os.path.join(profiles, profile)

    click.echo('Removing: %s' % directory)

    shutil.rmtree(directory)
def confirm(self, prompt): self.e("") click.confirm(prompt, default=True, abort=True, prompt_suffix=" ")
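# Behavior sketch for the helper above: with abort=True, click.confirm raises
# click.Abort when the answer is "n", so callers never have to inspect a
# return value; a bare Enter accepts because default=True.
import click

try:
    click.confirm("Proceed?", default=True, abort=True, prompt_suffix=" ")
    print("accepted")
except click.Abort:
    print("aborted")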
def edit(config, name, alternative):
    """Edit a project"""
    # Look for data in the database
    config.c.execute("SELECT * FROM projects WHERE name=:name", {'name': name})
    collected_data = config.c.fetchone()

    # Check if such a project does exist, if not close the program.
    if not collected_data:
        click.echo("No project with that name exists!")
        exit(0)

    # Format the data using the format_projects function which creates an object
    # Note, format_projects needs a list.
    project = format_projects([collected_data])[0]

    if alternative == 'name':
        user_input = click.prompt('Input a new name').strip()
        new_name, new_dir = parse_project_data(user_input, config)
        if project_exists(new_name, new_dir, config):
            click.echo('That project name is already used!')
            exit(0)
        if click.confirm('Do you really want to confirm this update?'):
            with config.conn:
                config.c.execute(
                    "UPDATE projects SET name = ? WHERE name = ? ",
                    (new_name, project.name))
                config.c.execute(
                    "UPDATE projects SET path = ? WHERE name = ? ",
                    (new_dir, new_name))
            os.rename(project.dir, new_dir)
            click.echo('The changes have been made!')

    elif alternative == 'commands':
        code = click.edit(project.code)
        if click.confirm('Do you really want to confirm this update?'):
            with config.conn:
                config.c.execute(
                    "UPDATE projects SET code = ? WHERE name = ? ",
                    (code, project.name))
            click.echo('The changes have been made!')
        else:
            click.echo('Ok. No changes have been made!')
            exit(0)

    # Update tags
    elif alternative == 'tags':
        click.echo(f'Current tags for "{project.name}": {project.tags}')
        click.echo('Write a list (separated with commas followed by blank spaces) '
                   'of all the tags that should be associated with the project.')
        new_tags = click.prompt('Input tags')
        if click.confirm(
                f'Do you really want to update the tags for "{project.name}"'):
            with config.conn:
                config.c.execute(
                    "UPDATE projects SET tags = ? WHERE name = ? ",
                    (new_tags, name))
            click.echo('The changes have been made!')
        else:
            click.echo('Ok. No changes have been made!')
            exit(0)
def add(port):
    """Add a new NAT rule.

    Optionally, you can specify a port when you invoke this command, which
    will bypass the first prompt for a port number. Port *must* be an
    integer in the range 1024 <= port < 65535.
    """
    mgr = Manager()
    while True:
        if port is None:
            port = click.prompt('Enter the port number', type=int)
        else:
            click.echo('Adding a rule for port {}'.format(port))
        simplefilter('ignore', ExpiredRuleMatchWarning)
        with catch_warnings(record=True) as w:
            simplefilter('always', ExpiredRuleMatchWarning)
            if mgr.existing_port(port):
                mgr.print_rules(simple=True, current_only=True)
                click.echo(
                    " ** I'm sorry, that port has already been taken. Choose one that's not listed above."
                )
                port = None
                continue
            # If w (a list) has any items in it, we got a warning about an existing rule using the specified port
            if len(w):
                mgr.print_rules(simple=True, single_port=port)
                click.echo(w.pop().message)
                if not click.confirm(
                        '\nAre you sure you want to add a rule for port {}?'.format(port)):
                    port = None
                    continue
        if port < 1024 or port >= 65535:
            click.echo(
                ' ** Invalid port number. Must be in range 1024 <= port < 65535. Please try again.'
            )
            port = None
            continue
        break
    name = click.prompt("Enter the requester's name")
    email = click.prompt("Enter the requester's email")
    ip, dest_port = None, None
    while ip is None:
        ip, dest_port = _parse_ip_input(
            click.prompt(
                'Enter the IP address of destination machine (port optional)'))
    if dest_port is None:
        dest_port = click.prompt('Enter the port on the destination machine',
                                 type=int)
    expires = get_valid_expiration_date()
    rule = dict(in_port=port,
                dest_ip=ip,
                dest_port=dest_port,
                requested_by=name,
                email=email,
                expires=expires)
    try:
        mgr.add_rule(rule)
    except AssertionError:
        click.echo(
            'Something went very wrong. Please contact the project maintainers with the following info:\n'
            'Operation: add_rule\nValue: {}'.format(rule))
        return
    mgr.save_rules()
    mgr.rewrite_script()
    enforce_rules_now()
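# Self-contained sketch of the stdlib warnings pattern used in add():
# record a specific warning category and branch on whether one was emitted.
# This local ExpiredRuleMatchWarning stands in for the project's own class.
from warnings import catch_warnings, simplefilter, warn

class ExpiredRuleMatchWarning(Warning):
    pass

with catch_warnings(record=True) as w:
    simplefilter('always', ExpiredRuleMatchWarning)
    warn('port 8080 matches an expired rule', ExpiredRuleMatchWarning)

if len(w):
    # Each recorded entry is a WarningMessage; .message is the instance.
    print(w.pop().message)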
def whamem(number, country, track, delay): click.clear() click.secho(intro, bg="magenta", fg="green") nexmo_client = nexmo.Client( application_id=os.environ["NEXMO_APPLICATION_ID"], private_key=os.environ["NEXMO_PRIVATE_KEY"], key=os.environ["NEXMO_API_KEY"], secret=os.environ["NEXMO_API_SECRET"], ) e164_number = False wtf_e164_message = click.style( "View information on WTF E.164 is?", bg="magenta", fg="white" ) try_number_again_message = click.style( "Want to try entering the number again?", bg="magenta", fg="white" ) while e164_number == False: insight_response = nexmo_client.get_basic_number_insight(number=number) if insight_response["status"] == 3: insight_response = nexmo_client.get_basic_number_insight( number=number, country=country ) if insight_response["status"] != 0: click.clear() click.secho(intro, bg="magenta", fg="green") click.secho( f"{number} does not appear to be a valid telephone number", bg="magenta", fg="white", ) click.secho( "It might work if you enter it in the E.164 format", bg="magenta", fg="white", ) if click.confirm(wtf_e164_message): click.launch( "https://developer.nexmo.com/concepts/guides/glossary#e-164-format" ) if click.confirm(try_number_again_message): number = click.prompt("Ok, give it to me in E.164 this time") else: raise click.BadArgumentUsage( click.style( f"{number} does not appear to be a valid number. Try entering it in the E.164 format", bg="red", fg="white", bold=True, ) ) else: e164_number = insight_response["international_format_number"] # We have a valid target number, let's get the track spotify_client_credentials_manager = SpotifyClientCredentials( client_id=os.environ["SPOTIFY_CLIENT_ID"], client_secret=os.environ["SPOTIFY_CLIENT_SECRET"], ) spotify_client = spotipy.Spotify( client_credentials_manager=spotify_client_credentials_manager ) tracks = spotify_client.search(track, limit=1, type="track") if len(tracks["tracks"]["items"]) == 0: raise click.BadOptionUsage( track, click.style(f"Can't find track: {track}", bg="red", fg="white", bold=True), ) track = tracks["tracks"]["items"][0] # Start our local ngrok tunnel try: ngrok_tunnel = requests.post( "http://localhost:4040/api/tunnels", json={"addr": 8008, "proto": "http", "name": "pvpwham", "bind_tls": True}, ).json() except requests.exceptions.ConnectionError: raise click.UsageError( click.style( "Please make sure ngrok is running", bg="red", fg="white", bold=True ) ) click.secho("## Starting the Call", bg="blue", fg="white", bold=True) nexmo_client.create_call( { "to": [{"type": "phone", "number": e164_number}], "from": {"type": "phone", "number": os.environ["NEXMO_VIRTUAL_NUMBER"]}, "answer_url": [ngrok_tunnel["public_url"]], "event_url": [f"{ngrok_tunnel['public_url']}/events"], } ) def quit_cherry(): cherrypy.engine.exit() click.secho("## Exiting NCCO Server", bg="blue", fg="white", bold=True) requests.delete("http://localhost:4040/api/tunnels/pvpwham") click.secho("## Closing tunnel", bg="blue", fg="white", bold=True) def fetch_recording(): data = cherrypy.request.json click.secho("## Fetching Call Recording", bg="green", fg="black", bold=True) recording_response = nexmo_client.get_recording(data["recording_url"]) recordingfile = f"/tmp/{data['recording_uuid']}.mp3" os.makedirs(os.path.dirname(recordingfile), exist_ok=True) with open(recordingfile, "wb") as f: f.write(recording_response) click.secho("## Call Recording Saved", bg="green", fg="black", bold=True) if click.confirm( click.style( "## Listen to your friend's anguish now?", bg="magenta", fg="white" ) ): 
click.launch(recordingfile) cherrypy.tools.quitcherry = cherrypy.Tool("on_end_request", quit_cherry) cherrypy.tools.fetch_recording = cherrypy.Tool("on_end_request", fetch_recording) @attr.s class NCCO(object): preview_url = attr.ib() ngrok_tunnel = attr.ib() @cherrypy.expose @cherrypy.tools.json_out() def index(self, **params): ncco_file = [ { "action": "record", "eventUrl": [f"{self.ngrok_tunnel['public_url']}/recording"], } ] if delay == "short": ncco_file.append({"action": "talk", "text": "whamageddon"}) elif delay == "long": ncco_file.append( { "action": "talk", "text": "hang up your phone or prepare to enter Whamhalla", } ) ncco_file.append( {"action": "stream", "streamUrl": [f"{self.preview_url}?t=mp3"]} ) return ncco_file @cherrypy.expose @cherrypy.tools.json_in() @cherrypy.tools.quitcherry() @cherrypy.tools.fetch_recording() def recording(self): click.secho("## Recording Ready", bg="green", fg="black", bold=True) return "OK" @cherrypy.expose @cherrypy.tools.json_in() def events(self): data = cherrypy.request.json click.secho( f"## Status: {data['status']}", bg="blue", fg="white", bold=True ) return "OK" click.secho("## NCCO Server Ready", bg="blue", fg="white", bold=True) cherrypy.config.update({"server.socket_port": 8008, "environment": "embedded"}) cherrypy.quickstart( NCCO(preview_url=track["preview_url"], ngrok_tunnel=ngrok_tunnel) ) click.clear() click.secho(outro, bg="cyan", fg="green") click.secho( random.choice( [ "You're a horrible person. Whamming complete", "Well done Krampus, you've just ruined your friend's Christmas", "You're totally getting coal in your stocking, consider them Wham'd", "May your Christmas Turkey be dry for what you've done to your friend.", "Your friend has been wham'd and you're on the naughty list", ] ), bg="red", fg="white", bold=True, )
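# Hedged sketch of the ngrok local-agent API calls whamem() makes above,
# assuming a running ngrok agent on its default inspection address; the
# tunnel options are the ones used in the function.
import requests

tunnel = requests.post(
    "http://localhost:4040/api/tunnels",
    json={"addr": 8008, "proto": "http", "name": "pvpwham", "bind_tls": True},
).json()
print(tunnel["public_url"])

# Tear the tunnel down by name when finished.
requests.delete("http://localhost:4040/api/tunnels/pvpwham")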
def load(filename, yes): """Import a previous saved config DB dump file.""" if not yes: click.confirm('Load config from the file %s?' % filename, abort=True) command = "{} -j {} --write-to-db".format(SONIC_CFGGEN_PATH, filename) run_command(command, display_cmd=True)
def find_best_firmware_version(
    client: "TrezorClient",
    version: Optional[str],
    beta: bool,
    bitcoin_only: bool,
) -> Tuple[str, str]:
    """Get the url from which to download the firmware and its expected fingerprint.

    When the version (X.Y.Z) is specified, checks for that specific release.
    Otherwise takes the latest one.

    If the specified version is not found, prints the closest available version
    (higher than the specified one, if existing).
    """

    def version_str(version: Iterable[int]) -> str:
        return ".".join(map(str, version))

    f = client.features

    releases = get_all_firmware_releases(bitcoin_only, beta, f.major_version)
    highest_version = releases[0]["version"]

    if version:
        want_version = [int(x) for x in version.split(".")]
        if len(want_version) != 3:
            click.echo("Please use the 'X.Y.Z' version format.")
        if want_version[0] != f.major_version:
            model = f.model or "1"
            click.echo(
                f"Warning: Trezor {model} firmware version should be "
                f"{f.major_version}.X.Y (requested: {version})"
            )
    else:
        want_version = highest_version
        click.echo(f"Best available version: {version_str(want_version)}")

    # Identifying the release we will install
    # It may happen that a different version will need to be installed first
    confirm_different_version = False
    while True:
        # want_version can be changed below, so its string form is recomputed
        want_version_str = version_str(want_version)
        try:
            release = next(r for r in releases if r["version"] == want_version)
        except StopIteration:
            click.echo(f"Version {want_version_str} not found for your device.")

            # look for versions starting with the lowest
            for release in reversed(releases):
                closest_version = release["version"]
                if closest_version > want_version:
                    # stop at the first one that is higher than the requested
                    break
            else:
                raise click.ClickException("No versions were found!")
            # if there was no break, the newest is used
            click.echo(f"Closest available version: {version_str(closest_version)}")
            if not beta and want_version > highest_version:
                click.echo("Hint: specify --beta to look for a beta release.")
            sys.exit(1)

        # It can be impossible to update from a very old version directly
        # to the newer one, in that case update to the minimal
        # compatible version first
        # Choosing the version key to compare based on (not) being in BL mode
        client_version = [f.major_version, f.minor_version, f.patch_version]
        if f.bootloader_mode:
            key_to_compare = "min_bootloader_version"
        else:
            key_to_compare = "min_firmware_version"

        if key_to_compare in release and release[key_to_compare] > client_version:
            need_version = release["min_firmware_version"]
            need_version_str = version_str(need_version)
            click.echo(
                f"Version {need_version_str} is required before upgrading to {want_version_str}."
            )
            want_version = need_version
            confirm_different_version = True
        else:
            break

    if confirm_different_version:
        installing_different = f"Installing version {want_version_str} instead."
        if version is None:
            click.echo(installing_different)
        else:
            ok = click.confirm(installing_different + " Continue?", default=True)
            if not ok:
                sys.exit(1)

    return get_url_and_fingerprint_from_release(release, bitcoin_only)
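# Why the [X, Y, Z] list representation works in find_best_firmware_version():
# Python compares lists element-wise, so semantic-version ordering falls out
# of the default comparison as long as every component is an int.
assert [2, 4, 3] > [2, 4, 2]
assert [2, 10, 0] > [2, 9, 9]  # string comparison would get this wrong
assert ".".join(map(str, [2, 4, 3])) == "2.4.3"  # the version_str() round-trip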