def run(): """Parses command line and dispatches the commands""" args = docopt(__doc__) configure_logging(args["--debug"]) config = read_config() env = args.get("--env") or config.get("disco_aws", "default_environment") vpc = DiscoVPC.fetch_environment(environment_name=env) if not vpc: print("Environment does not exist: {}".format(env)) sys.exit(1) if args['list']: format_string = "{0:<50} {1:33} {2}" print(format_string.format("ELB Name", "Availability Zones", "ELB Id"), file=sys.stderr) for elb_info in sorted(DiscoELB(vpc).list_for_display()): print( format_string.format(elb_info['elb_name'], elb_info['availability_zones'], elb_info["elb_id"])) elif args['update']: DiscoAWS(config, env).update_elb(args['--hostclass'])
def run(): """Parses command line and dispatches the commands""" config = read_config() parser = get_parser() args = parser.parse_args() configure_logging(args.debug) environment_name = args.env or config.get("disco_aws", "default_environment") aws = DiscoAWS(config, environment_name=environment_name) if args.mode == "create": aws.disco_storage.create_ebs_snapshot(args.hostclass, args.size) elif args.mode == "list": for snapshot in aws.disco_storage.get_snapshots(args.hostclasses): print("{0:26} {1:13} {2:9} {3} {4:4}".format( snapshot.tags['hostclass'], snapshot.id, snapshot.status, snapshot.start_time, snapshot.volume_size)) elif args.mode == "cleanup": aws.disco_storage.cleanup_ebs_snapshots(args.keep) elif args.mode == "capture": instances = instances_from_args(aws, args) if not instances: logging.warning("No instances found") for instance in instances: return_code, output = aws.remotecmd( instance, ["sudo /opt/wgen/bin/take_snapshot.sh"], user="******") if return_code: raise Exception("Failed to snapshot instance {0}:\n {1}\n".format(instance, output)) logging.info("Successfully snapshotted %s", instance) elif args.mode == "delete": for snapshot_id in args.snapshots: aws.disco_storage.delete_snapshot(snapshot_id) elif args.mode == "update": snapshot = aws.disco_storage.get_latest_snapshot(args.hostclass) aws.autoscale.update_snapshot(args.hostclass, snapshot.id, snapshot.volume_size)
def run(): """Parses command line and dispatches the commands""" config = read_config() parser = get_parser() args = parser.parse_args() configure_logging(args.debug, args.silent) bucket_name = args.bucket or DiscoVPC.get_credential_buckets_from_env_name( config, args.env)[0] s3_bucket = DiscoS3Bucket(bucket_name) if args.mode == "list": print("\n".join(s3_bucket.listkeys(args.prefix))) elif args.mode == "get": print(s3_bucket.get_key(args.key_name)) elif args.mode == "set": use_password = args.key_password key_value = args.key_value if use_password: key_value = getpass.getpass() elif key_value == "-": key_value = sys.stdin.read() s3_bucket.set_key(args.key_name, key_value) elif args.mode == "delete": s3_bucket.delete_key(args.key_name) elif args.mode == "setfile": key_value = s3_bucket.get_key(args.key_name) s3_bucket.get_contents_to_file(args.key_name, args.file_name)
def run_gracefully(main_function): """ Run a "main" function with standardized exception trapping, to make it easy to avoid certain unnecessary stack traces. If debug logging is switched on, stack traces will return. """ configure_logging(debug=False) try: main_function() except EasyExit as msg: logger.error(str(msg)) sys.exit(1) except EarlyExitException as non_error_msg: logger.info(str(non_error_msg)) except KeyboardInterrupt: # swallow the exception unless we turned on debugging, in which case # we might want to know what infinite loop we were stuck in if getLogger().isEnabledFor(DEBUG): raise sys.exit(1) except (EC2ResponseError, ClientError) as err: logger.error("EC2 Error response: %s", err.message) if getLogger().isEnabledFor(DEBUG): raise sys.exit(1)
def __init__(self, args): self.args = args self.config = read_config() self.env = self.args["--env"] or self.config.get("disco_aws", "default_environment") self.pick_instance = self.args['--first'] self.user = self.args.get("--user") configure_logging(args["--debug"])
def run(): """Parses command line and dispatches the commands""" args = docopt(__doc__) configure_logging(args["--debug"]) config = read_config() env = args.get("--env") or config.get("disco_aws", "default_environment") vpc = DiscoVPC.fetch_environment(environment_name=env) if not vpc: print("Environment does not exist: {}".format(env)) sys.exit(1) aws = DiscoAWS(config, env) disco_elasticache = DiscoElastiCache(vpc, aws=aws) if args['list']: for cluster in disco_elasticache.list(): size = 'N/A' if cluster['Status'] == 'available': size = len(cluster['NodeGroups'][0]['NodeGroupMembers']) print("{0:<25} {1:5} {2:>5}".format(cluster['Description'], cluster['Status'], size)) elif args['update']: if args['--cluster']: disco_elasticache.update(args['--cluster']) else: disco_elasticache.update_all() elif args['delete']: disco_elasticache.delete(args['--cluster'], wait=args['--wait'])
def run(): """Parses command line and dispatches the commands""" args = docopt(__doc__) configure_logging(args["--debug"]) config = read_config() env = args.get("--env") or config.get("disco_aws", "default_environment") vpc = DiscoVPC.fetch_environment(environment_name=env) if not vpc: print("Environment does not exist: {}".format(env)) sys.exit(1) aws = DiscoAWS(config, env) disco_elasticache = DiscoElastiCache(vpc, aws=aws) if args['list']: for cluster in disco_elasticache.list(): size = 'N/A' if cluster['Status'] == 'available': size = len(cluster['NodeGroups'][0]['NodeGroupMembers']) print("{0:<25} {1:5} {2:>5}".format(cluster['Description'], cluster['Status'], size)) elif args['update']: if args['--cluster']: disco_elasticache.update(args['--cluster']) else: disco_elasticache.update_all() elif args['delete']: disco_elasticache.delete(args['--cluster'], wait=args['--wait'])
def run(): """Parses command line and dispatches the commands""" config = read_config() parser = get_parser() args = parser.parse_args() configure_logging(args.debug) bucket_name = args.bucket or DiscoVPC.get_credential_buckets_from_env_name(config, args.env)[0] s3_bucket = DiscoS3Bucket(bucket_name) if args.mode == "list": print("\n".join(s3_bucket.listkeys(args.prefix))) elif args.mode == "get": print(s3_bucket.get_key(args.key_name)) elif args.mode == "set": use_password = args.key_password key_value = args.key_value if use_password: key_value = getpass.getpass() elif key_value == "-": key_value = sys.stdin.read() s3_bucket.set_key(args.key_name, key_value) elif args.mode == "delete": s3_bucket.delete_key(args.key_name) elif args.mode == "setfile": key_value = s3_bucket.get_key(args.key_name) s3_bucket.get_contents_to_file(args.key_name, args.file_name)
def run(): """Parses command line and dispatches the commands""" args = docopt(__doc__) configure_logging(args["--debug"]) config = read_config() dry_run = args.get("--dry-run") delete = args.get("--delete") hostclass = args.get("--hostclass") env = args.get("--env") or config.get("disco_aws", "default_environment") alarms_config = DiscoAlarmsConfig(env) if args["update_notifications"]: notifications = alarms_config.get_notifications() DiscoSNS().update_sns_with_notifications(notifications, env, delete=delete, dry_run=dry_run) elif args["update_metrics"]: if delete: DiscoAlarm().delete_hostclass_environment_alarms(env, hostclass) DiscoAWS(config, env).spinup_alarms([hostclass]) elif args["list"]: alarms = DiscoAlarm().get_alarms({ "env": env, "hostclass": hostclass } if hostclass else {"env": env}) for alarm in alarms: print(alarm) else: logging.error("No command specified. See --help") sys.exit(1)
def run(): """Parses command line and dispatches the commands""" args = docopt(__doc__) configure_logging(args["--debug"]) config = read_config() dry_run = args.get("--dry-run") delete = args.get("--delete") hostclass = args.get("--hostclass") env = args.get("--env") or config.get("disco_aws", "default_environment") alarms_config = DiscoAlarmsConfig(env) if args["update_notifications"]: notifications = alarms_config.get_notifications() DiscoSNS().update_sns_with_notifications(notifications, env, delete=delete, dry_run=dry_run) elif args["update_metrics"]: if delete: DiscoAlarm().delete_hostclass_environment_alarms(env, hostclass) DiscoAWS(config, env).spinup_alarms([hostclass]) elif args["list"]: alarms = DiscoAlarm().get_alarms( {"env": env, "hostclass": hostclass} if hostclass else {"env": env}) for alarm in alarms: print(alarm) else: logging.error("No command specified. See --help") sys.exit(1)
def run(): """Parses command line and dispatches the commands""" args = docopt(__doc__) configure_logging(args["--debug"]) config = read_config() env = args.get("--env") or config.get("disco_aws", "default_environment") disco_log_metrics = DiscoLogMetrics(env) if args["update"]: disco_log_metrics.update(args['--hostclass']) elif args["delete"]: disco_log_metrics.delete_metrics(args['--hostclass']) elif args["list-metrics"]: for metric_filter in disco_log_metrics.list_metric_filters( args['--hostclass']): for metric in metric_filter['metricTransformations']: print("{0:<40} {1:10}".format(metric['metricNamespace'], metric['metricName'])) elif args["list-groups"]: for group in disco_log_metrics.list_log_groups(args['--hostclass']): print("{0:<40} {1:10}".format( group['logGroupName'], format_bytes_to_mb(group['storedBytes'])))
def run(): """Parses command line and dispatches the commands""" args = docopt(__doc__) configure_logging(args["--debug"]) disco_route53 = DiscoRoute53() if args['list-zones']: for hosted_zone in disco_route53.list_zones(): is_private_zone = hosted_zone.config['PrivateZone'] print("{0:<20} {1:10} {2:5}".format(hosted_zone.name, hosted_zone.id, is_private_zone)) elif args['list-records']: for hosted_zone in disco_route53.list_zones(): # the Hosted Zone name is the domain name with a period appended to it # allow searching by either with or without the period if not args['--zone'] or hosted_zone.name in (args['--zone'], args['--zone'] + '.'): for record in disco_route53.list_records(hosted_zone.name): values = ','.join(record.resource_records) print("{0:<5} {1:20} {2:50}".format(record.type, record.name, values)) elif args['create-record']: disco_route53.create_record(args['<zone-name>'], args['<record-name>'], args['<type>'], args['<value>']) elif args['delete-record']: record_name = args['<record-name>'] # AWS appends a . to the end of the record name. # Add it here as a convenience if the argument is missing it if not record_name.endswith('.'): record_name += '.' disco_route53.delete_record(args['<zone-name>'], record_name, args['<type>'])
def run(): """Parses command line and dispatches the commands""" parser = get_parser() args = parser.parse_args() configure_logging(args.debug) iam = DiscoIAM() if args.mode == "listgroups": print("\n".join(sorted(iam.list_groups()))) elif args.mode == "listgrouppolicies": print("\n".join(sorted(iam.list_group_policies(args.group_name)))) elif args.mode == "getgrouppolicy": print(iam.get_group_policy(args.group_name, args.policy_name)) elif args.mode == "listusers": iam.print_users() elif args.mode == "listusergroups": print("\n".join(sorted(iam.list_user_groups(args.user_name)))) elif args.mode == "listkeys": key_fmt = "{0.user_name:<30}\t{0.access_key_id}\t{0.status:<8}\t{0.create_date}" keys = [key_fmt.format(key) for key in iam.list_access_keys(args.user_name)] print("\n".join(keys)) elif args.mode == "createkey": iam.create_access_key(args.user_name) elif args.mode == "removekey": iam.remove_access_key(args.user_name, args.access_key_id) elif args.mode == "activatekey": iam.activate_access_key(args.user_name, args.access_key_id) elif args.mode == "deactivatekey": iam.deactivate_access_key(args.user_name, args.access_key_id) elif args.mode == "listinstanceprofiles": print("\n".join(sorted(iam.listinstanceprofiles()))) elif args.mode == "listroles": for role in iam.listroles(): output = role.role_name if args.federation: is_federated = role.assume_role_policy_document.is_federated() output += "\t{0}".format( "federated" if is_federated else "unfederated" ) print(output) elif args.mode == "listrolepolicies": print("\n".join(iam.listrolepolicies(args.role_name))) elif args.mode == "decode": iam.decode_message(args.message) elif args.mode == "update": # We don't use saml for api level access, I'm not sure # if reloading providers as such is safe. Does policy expire # as soon as trust is removed, probably not? 
iam.set_environment(args.environment) iam.delete_saml_providers() iam.create_saml_provider() iam.reapply_user_policies() iam.reapply_group_members() iam.reapply_instance_policies() elif args.mode == "listproviders": for provider in iam.list_saml_providers(): print(provider.arn)
def __init__(self, args): self.args = args self.config = read_config() self.env = self.args["--env"] or self.config.get( "disco_aws", "default_environment") self.pick_instance = self.args['--first'] self.user = self.args.get("--user") configure_logging(args["--debug"])
def run(): """Parses command line and dispatches the commands""" config = read_config() parser = get_parser() args = parser.parse_args() configure_logging(args.debug) environment_name = args.env or config.get("disco_aws", "default_environment") aws = DiscoAWS(config, environment_name=environment_name) if args.mode == "create": product_line = aws.hostclass_option_default(args.hostclass, 'product_line', 'unknown') aws.disco_storage.create_ebs_snapshot(args.hostclass, args.size, product_line, not args.unencrypted) elif args.mode == "list": for snapshot in aws.disco_storage.get_snapshots(args.hostclasses): print("{0:26} {1:13} {2:9} {3} {4:4}".format( snapshot.tags['hostclass'], snapshot.id, snapshot.status, snapshot.start_time, snapshot.volume_size)) elif args.mode == "cleanup": aws.disco_storage.cleanup_ebs_snapshots(args.keep) elif args.mode == "capture": if args.volume_id: extra_snapshot_tags = None if args.tags: extra_snapshot_tags = dict( tag_item.split(':') for tag_item in args.tags) snapshot_id = aws.disco_storage.take_snapshot( args.volume_id, snapshot_tags=extra_snapshot_tags) print("Successfully created snapshot: {0}".format(snapshot_id)) else: instances = instances_from_args(aws, args) if not instances: print("No instances found") for instance in instances: return_code, output = aws.remotecmd( instance, ["sudo /opt/wgen/bin/take_snapshot.sh"], user="******") if return_code: raise Exception( "Failed to snapshot instance {0}:\n {1}\n".format( instance, output)) print("Successfully snapshotted {0}".format(instance)) elif args.mode == "delete": for snapshot_id in args.snapshots: aws.disco_storage.delete_snapshot(snapshot_id) elif args.mode == "update": if args.snapshot_id: snapshot = aws.disco_storage.get_snapshot_from_id(args.snapshot_id) else: snapshot = aws.disco_storage.get_latest_snapshot(args.hostclass) aws.discogroup.update_snapshot(snapshot.id, snapshot.volume_size, hostclass=args.hostclass)
def run(): """Parses command line and dispatches the commands""" parser = get_parser() args = parser.parse_args() configure_logging(args.debug) if args.mode == "bake": bakery = DiscoBake(use_local_ip=args.use_local_ip) bakery.bake_ami(args.hostclass, args.no_destroy, args.source_ami, args.stage) elif args.mode == "create": HostclassTemplating.create_hostclass(args.hostclass) elif args.mode == "promote": bakery = DiscoBake() ami = bakery.get_image(args.ami) bakery.promote_ami(ami, args.stage) if args.promote_to_prod: bakery.promote_ami_to_production(ami) elif args.mode == "hostclasspromote": bakery = DiscoBake() bakery.promote_latest_ami_to_production(args.hostclass) elif args.mode == "listrepo": bakery = DiscoBake() repo = bakery.repo_instance() if repo: print(repo.ip_address) elif args.mode == "listamis": ami_ids = [args.ami] if args.ami else None instance_ids = [args.instance] if args.instance else None bakery = DiscoBake() amis = sorted(bakery.list_amis(ami_ids, instance_ids, args.stage, args.product_line), key=bakery.ami_timestamp) now = datetime.utcnow() for ami in amis: bakery.pretty_print_ami(ami, now, in_prod=args.in_prod) if not amis: sys.exit(1) elif args.mode == "liststragglers": bakery = DiscoBake() for hostclass, image in bakery.list_stragglers(args.days).iteritems(): print("{0}\t{1}".format(hostclass, image.id if image else '-')) elif args.mode == "listlatestami": bakery = DiscoBake() ami = bakery.find_ami(args.stage, args.hostclass) if ami: bakery.pretty_print_ami(ami) else: sys.exit(1) elif args.mode == "deleteami": bakery = DiscoBake() bakery.delete_ami(args.ami) elif args.mode == "cleanupamis": bakery = DiscoBake() bakery.cleanup_amis(args.hostclass, args.product_line, args.stage, args.days, args.count, args.dryrun)
def run(): """Parses command line and dispatches the commands""" parser = get_parser() args = parser.parse_args() configure_logging(args.debug) env = args.env disco_es = DiscoElasticsearch(env) interactive_shell = sys.__stdin__.isatty() if args.mode == "list": entries = disco_es.list(include_endpoint=args.endpoint) headers = ["Elastic Search Domain Name", "Internal Name", "Route 53 Endpoint"] format_line = u"{0:<28} {1:<15} {2:<35}" if args.endpoint: format_line += u" {3:<80}" headers.append("Elastic Search Endpoint") print(format_line.format(*headers), file=sys.stderr) for entry in entries: values = [entry["elasticsearch_domain_name"], entry["internal_name"], entry["route_53_endpoint"]] if args.endpoint: values.append(entry["elasticsearch_endpoint"] or u"-") print(format_line.format(*values)) elif args.mode == "update": if args.names: for name in args.names: disco_es.update(name) else: disco_es.update() elif args.mode == "delete": prompt = "Deleting an ElasticSearch domain destroys all of its automated snapshots. Be careful!\n" if args.names: prompt += "Are you sure you want to delete ElasticSearch domains {}? (y/N)".format(args.names) if not interactive_shell or is_truthy(raw_input(prompt)): for name in args.names: disco_es.delete(name) else: scope = "all configured" if not args.delete_all else "*all*" prompt += "Are you sure you want to delete {} ElasticSearch domains? (y/N)".format(scope) if not interactive_shell or is_truthy(raw_input(prompt)): disco_es.delete(delete_all=args.delete_all) elif args.mode in ['archive', 'restore']: disco_es_archive = DiscoESArchive(env, args.cluster) if args.mode == 'archive': snap_states = disco_es_archive.archive(dry_run=args.dry_run) if args.dry_run: print("Snapshots to be taken: {0}".format(snap_states['SUCCESS'])) else: print("Snapshots state: {0}".format(snap_states)) if args.groom: disco_es_archive.groom(dry_run=args.dry_run) else: disco_es_archive.restore(args.begin_date, args.end_date, args.dry_run)
def run(): """Parses command line and dispatches the commands""" args = docopt(__doc__) configure_logging(args["--debug"]) if args["upload"]: metrics = DiscoMetrics(dummy=args['--dummy']) metrics.collect() if args["--jitter"]: sleep_time = random.randrange(0, int(args.get("--jitter"))) time.sleep(sleep_time) metrics.upload()
def run(): """ Parses command line and dispatches the commands disco_dynamodb.py list """ args = parse_args() configure_logging(args.debug) disco_ssm = DiscoSSM() if args.mode == "list-documents": list_documents(disco_ssm, args.headers) elif args.mode == "get-document": print_content(disco_ssm, args.name) elif args.mode == "update-documents": update_documents(disco_ssm, args.no_wait, args.dry_run)
def run(): """Parses command line and dispatches the commands""" args = parse_arguments() configure_logging(args.debug) if args.mode == "create": create_vpc_command(args) elif args.mode == "destroy": destroy_vpc_command(args) elif args.mode == "list": list_vpc_command(args) elif args.mode == 'peerings': proxy_peerings_command(args)
def run(): """ Parses command line and dispatches the commands disco_dynamodb.py list """ args = parse_args() configure_logging(args.debug) disco_ssm = DiscoSSM() if args.mode == "list-documents": list_documents(disco_ssm, args.headers) elif args.mode == "get-document": print_content(disco_ssm, args.name) elif args.mode == "update-documents": update_documents(disco_ssm, args.no_wait, args.dry_run)
def run(): """Parses command line and dispatches the commands""" args = parse_arguments() configure_logging(args.debug) if args.mode == "create": create_vpc_command(args) elif args.mode == "destroy": destroy_vpc_command(args) elif args.mode == "list": list_vpc_command(args) elif args.mode == 'peerings': proxy_peerings_command(args)
def run(): """Parses command line and dispatches the commands""" config = read_config() args = parse_arguments() configure_logging(args.debug) environment_name = args.env or config.get("disco_aws", "default_environment") autoscale = DiscoAutoscale(environment_name) # Autoscaling group commands if args.mode == "listgroups": format_str = "{0} {1:12} {2:3} {3:3} {4:3} {5:3}" groups = autoscale.get_groups() instances = autoscale.get_instances() if args.debug: print(format_str.format( "Name".ljust(26 + len(environment_name)), "AMI", "min", "des", "max", "cnt")) for group in groups: launch_cfg = list(autoscale.get_configs(names=[group.launch_config_name])) image_id = launch_cfg[0].image_id if len(launch_cfg) else "" group_str = group.name.ljust(26 + len(environment_name)) group_cnt = len([instance for instance in instances if instance.group_name == group.name]) print(format_str.format(group_str, image_id, group.min_size, group.desired_capacity, group.max_size, group_cnt)) elif args.mode == "cleangroups": autoscale.clean_groups() elif args.mode == "deletegroup": autoscale.delete_group(args.hostclass, args.force) # Launch Configuration commands elif args.mode == "listconfigs": for config in autoscale.get_configs(): print("{0:24} {1}".format(config.name, config.image_id)) elif args.mode == "cleanconfigs": autoscale.clean_configs() elif args.mode == "deleteconfig": autoscale.delete_config(args.config) # Scaling policy commands elif args.mode == "listpolicies": policies = autoscale.list_policies() for policy in policies: print("{0:30} {1}".format(policy.name, policy.policy_arn)) elif args.mode == "createpolicy": autoscale.create_policy(args.policy_name, args.group_name, args.adjustment, args.cooldown) elif args.mode == "deletepolicy": autoscale.delete_policy(args.policy_name, args.group_name) sys.exit(0)
def run(): """Parses command line and dispatches the commands""" config = read_config() parser = get_parser() args = parser.parse_args() configure_logging(args.debug) s3_bucket_name = args.bucket or DiscoVPC.get_credential_buckets_from_env_name(config, args.env)[0] app_auth_dir = args.dir or None env = args.env or s3_bucket_name.split('.')[-1] if args.mode == "update": app_auth = DiscoAppAuth(env, s3_bucket_name, app_auth_dir) app_auth.update(args.force)
def run(): """Parses command line and dispatches the commands""" config = read_config() parser = get_parser() args = parser.parse_args() configure_logging(args.debug) s3_bucket_name = args.bucket or DiscoVPC.get_credential_buckets_from_env_name(config, args.env)[0] app_auth_dir = args.dir or None env = args.env or s3_bucket_name.split('.')[-1] if args.mode == "update": app_auth = DiscoAppAuth(env, s3_bucket_name, app_auth_dir) app_auth.update(args.force)
def run(): """Parses command line and dispatches the commands""" config = read_config() parser = get_parser() args = parser.parse_args() configure_logging(args.debug) env_name = args.env or config.get("disco_aws", "default_environment") chaos = DiscoChaos(config, env_name, args.level, args.retainage) instances = chaos.get_instances_to_terminate() for inst in instances: print("{0:20} {1}".format(inst.tags.get('hostclass'), inst.id)) if not args.dryrun: chaos.terminate(instances)
def run(): """Parses command line and dispatches the commands""" config = read_config() parser = get_parser() args = parser.parse_args() configure_logging(args.debug) env_name = args.env or config.get("disco_aws", "default_environment") chaos = DiscoChaos(config, env_name, args.level, args.retainage) instances = chaos.get_instances_to_terminate() for inst in instances: print("{0:20} {1}".format(inst.tags.get('hostclass'), inst.id)) if not args.dryrun: chaos.terminate(instances)
def run(): """ Main """ args = docopt(__doc__) configure_logging(args["--debug"]) # If no options are set, we assume user wants all of 'em. arg_options = ["--stray-ami", "--no-metadata", "--old"] if not any([args[option] for option in arg_options if option in args]): for option in arg_options: args[option] = True _ignore, failed_to_purge = purge_snapshots(args) if failed_to_purge: sys.exit(1)
def run(): """Parses command line and dispatches the commands""" config = read_config() args = docopt(__doc__) configure_logging(args["--debug"]) if args["hashpassword"]: print(password_hash()) sys.exit(0) bucket_name = args.get( "--bucket") or DiscoVPC.get_credential_buckets_from_env_name( config, args["--env"])[0] s3_accounts = S3AccountBackend(DiscoS3Bucket(bucket_name)) if args["install"]: s3_accounts.install_all() elif args["adduser"]: username = args["--name"] or os.environ.get("USER") user_template = s3_accounts.new_user_config( password_hash(args["--password"])) group_config = s3_accounts.new_group_config() user_config = s3_accounts.edit_account_config(user_template) s3_accounts.add_account(username, user_config) s3_accounts.add_account(username, group_config) elif args["addgroup"]: group_config = s3_accounts.new_group_config() s3_accounts.add_account(args["--name"], group_config) elif args["edituser"]: username = args["--name"] or os.environ.get("USER") user_config = s3_accounts.get_user_config(username) kwargs = {"active": args["--active"]} if args["--active"] else {} user_config = s3_accounts.edit_account_config(user_config, **kwargs) s3_accounts.add_account(username, user_config) s3_accounts.refresh_groups() elif args["editgroup"]: # there is nothing to edit for a group.. but.. group_config = s3_accounts.get_group_config(args["--name"]) group_config = s3_accounts.edit_account_config(group_config) s3_accounts.add_account(args["--name"], group_config) elif args["listgroups"]: print("\n".join(s3_accounts.list_groups())) elif args["listusers"]: print("\n".join(s3_accounts.list_users()))
def run(): """Parses command line and dispatches the commands""" config = read_config() parser = get_parser() args = parser.parse_args() configure_logging(args.debug) environment_name = args.env or config.get("disco_aws", "default_environment") aws = DiscoAWS(config, environment_name=environment_name) if args.mode == "create": product_line = aws.hostclass_option_default(args.hostclass, 'product_line', 'unknown') aws.disco_storage.create_ebs_snapshot(args.hostclass, args.size, product_line, not args.unencrypted) elif args.mode == "list": for snapshot in aws.disco_storage.get_snapshots(args.hostclasses): print("{0:26} {1:13} {2:9} {3} {4:4}".format( snapshot.tags['hostclass'], snapshot.id, snapshot.status, snapshot.start_time, snapshot.volume_size)) elif args.mode == "cleanup": aws.disco_storage.cleanup_ebs_snapshots(args.keep) elif args.mode == "capture": if args.volume_id: extra_snapshot_tags = None if args.tags: extra_snapshot_tags = dict(tag_item.split(':') for tag_item in args.tags) snapshot_id = aws.disco_storage.take_snapshot(args.volume_id, snapshot_tags=extra_snapshot_tags) print("Successfully created snapshot: {0}".format(snapshot_id)) else: instances = instances_from_args(aws, args) if not instances: print("No instances found") for instance in instances: return_code, output = aws.remotecmd( instance, ["sudo /opt/wgen/bin/take_snapshot.sh"], user="******") if return_code: raise Exception("Failed to snapshot instance {0}:\n {1}\n".format(instance, output)) print("Successfully snapshotted {0}".format(instance)) elif args.mode == "delete": for snapshot_id in args.snapshots: aws.disco_storage.delete_snapshot(snapshot_id) elif args.mode == "update": if args.snapshot_id: snapshot = aws.disco_storage.get_snapshot_from_id(args.snapshot_id) else: snapshot = aws.disco_storage.get_latest_snapshot(args.hostclass) aws.discogroup.update_snapshot(snapshot.id, snapshot.volume_size, hostclass=args.hostclass)
def run(): """ Main """ args = docopt(__doc__) configure_logging(args["--debug"]) # If no options are set, we assume user wants all of 'em. arg_options = ["--stray-ami", "--no-metadata", "--old"] if not any([args[option] for option in arg_options if option in args]): for option in arg_options: args[option] = True _ignore, failed_to_purge = purge_snapshots(args) if failed_to_purge: sys.exit(1)
def run(): """Parses command line and dispatches the commands""" config = read_config() args = docopt(__doc__) configure_logging(args["--debug"]) if args["hashpassword"]: print(password_hash()) sys.exit(0) bucket_name = args.get("--bucket") or DiscoVPC.get_credential_buckets_from_env_name( config, args["--env"])[0] s3_accounts = S3AccountBackend(DiscoS3Bucket(bucket_name)) if args["install"]: s3_accounts.install_all() elif args["adduser"]: username = args["--name"] or os.environ.get("USER") user_template = s3_accounts.new_user_config(password_hash(args["--password"])) group_config = s3_accounts.new_group_config() user_config = s3_accounts.edit_account_config(user_template) s3_accounts.add_account(username, user_config) s3_accounts.add_account(username, group_config) elif args["addgroup"]: group_config = s3_accounts.new_group_config() s3_accounts.add_account(args["--name"], group_config) elif args["edituser"]: username = args["--name"] or os.environ.get("USER") user_config = s3_accounts.get_user_config(username) kwargs = {"active": args["--active"]} if args["--active"] else {} user_config = s3_accounts.edit_account_config(user_config, **kwargs) s3_accounts.add_account(username, user_config) s3_accounts.refresh_groups() elif args["editgroup"]: # there is nothing to edit for a group.. but.. group_config = s3_accounts.get_group_config(args["--name"]) group_config = s3_accounts.edit_account_config(group_config) s3_accounts.add_account(args["--name"], group_config) elif args["listgroups"]: print("\n".join(s3_accounts.list_groups())) elif args["listusers"]: print("\n".join(s3_accounts.list_users()))
def run(): """Parses command line and dispatches the commands""" args = docopt(__doc__) configure_logging(args["--debug"]) config = read_config() env = args.get("--env") or config.get("disco_aws", "default_environment") vpc = DiscoVPC.fetch_environment(environment_name=env) if not vpc: print("Environment does not exist: {}".format(env)) sys.exit(1) if args['list']: for elb in sorted(DiscoELB(vpc).list()): print("{0:<20} {1:25}".format(elb['LoadBalancerName'], ','.join(elb['AvailabilityZones']))) elif args['update']: DiscoAWS(config, env).update_elb(args['--hostclass'])
def run(): """Parses command line and dispatches the commands""" args = parse_arguments() configure_logging(args.debug) deip = DiscoEIP() if args.mode == "list": for eip in sorted(deip.list()): print("{0}\t{1}".format(eip.public_ip, eip.instance_id if eip.instance_id else "-")) elif args.mode == "allocate": eip = deip.allocate() print(eip.public_ip) elif args.mode == "release": if not deip.release(args.eip, args.force): sys.exit(1) sys.exit(0)
def run(): """ Main """ args = docopt(__doc__) configure_logging(args["--debug"]) # If no options are set, we assume user wants all of 'em. arg_options = [ "--stray-ami", "--no-metadata", "--keep-days", OLD_IMAGE_DAYS, "--max-per-day", None, "--keep-num", DEFAULT_KEEP_LAST ] if not any([args[option] for option in arg_options if option in args]): args = docopt(__doc__, argv=arg_options) _ignore, failed_to_purge = purge_snapshots(args) if failed_to_purge: sys.exit(1)
def run(): """ Main """ args = docopt(__doc__) configure_logging(args["--debug"]) # If no options are set, we assume user wants all of 'em. arg_options = ["--stray-ami", "--no-metadata", "--keep-days", OLD_IMAGE_DAYS, "--max-per-day", None, "--keep-num", DEFAULT_KEEP_LAST] if not any([args[option] for option in arg_options if option in args]): args = docopt(__doc__, argv=arg_options) _ignore, failed_to_purge = purge_snapshots(args) if failed_to_purge: sys.exit(1)
def run():
    """Parses command line and dispatches the commands"""
    args = docopt(__doc__)
    configure_logging(args["--debug"])
    config = read_config()
    env = args.get("--env") or config.get("disco_aws", "default_environment")
    vpc = DiscoVPC.fetch_environment(environment_name=env)
    if not vpc:
        print("Environment does not exist: {}".format(env))
        sys.exit(1)
    if args['list']:
        # Sort by load balancer name: sorting the raw dicts directly gives a
        # meaningless order on Python 2 and raises TypeError on Python 3.
        elbs = sorted(DiscoELB(vpc).list(), key=lambda elb: elb['LoadBalancerName'])
        for elb in elbs:
            print("{0:<20} {1:25}".format(elb['LoadBalancerName'],
                                          ','.join(elb['AvailabilityZones'])))
    elif args['update']:
        DiscoAWS(config, env).update_elb(args['--hostclass'])
def run():
    """Parses command line and dispatches the commands"""
    config = read_config()
    parser = get_parser()
    args = parser.parse_args()
    configure_logging(args.debug)
    # Fall back to the configured default environment when --env is omitted.
    environment_name = args.env or config.get("disco_aws", "default_environment")
    aws = DiscoAWS(config, environment_name=environment_name)
    if args.mode == "create":
        aws.disco_storage.create_ebs_snapshot(args.hostclass, args.size)
    elif args.mode == "list":
        # Fixed-width columns: hostclass, snapshot id, status, start time, size.
        for snapshot in aws.disco_storage.get_snapshots(args.hostclasses):
            print("{0:26} {1:13} {2:9} {3} {4:4}".format(
                snapshot.tags['hostclass'], snapshot.id, snapshot.status,
                snapshot.start_time, snapshot.volume_size))
    elif args.mode == "cleanup":
        aws.disco_storage.cleanup_ebs_snapshots(args.keep)
    elif args.mode == "capture":
        instances = instances_from_args(aws, args)
        if not instances:
            logging.warning("No instances found")
        for instance in instances:
            # Run the snapshot script on the instance itself over the
            # remote-command channel; (return_code, output) come back.
            return_code, output = aws.remotecmd(
                instance, ["sudo /opt/wgen/bin/take_snapshot.sh"],
                user="******")
            if return_code:
                # Abort on the first failing instance; output aids debugging.
                raise Exception(
                    "Failed to snapshot instance {0}:\n {1}\n".format(
                        instance, output))
            logging.info("Successfully snapshotted %s", instance)
    elif args.mode == "delete":
        for snapshot_id in args.snapshots:
            aws.disco_storage.delete_snapshot(snapshot_id)
    elif args.mode == "update":
        # Point the hostclass's autoscaling config at its newest snapshot.
        snapshot = aws.disco_storage.get_latest_snapshot(args.hostclass)
        aws.autoscale.update_snapshot(args.hostclass, snapshot.id, snapshot.volume_size)
def run():
    """Parses command line and dispatches the commands.

    Modes: list, update, delete, cleanup_snapshots, clone — all operate on
    the RDS clusters of the selected environment's VPC.
    """
    config = read_config()
    parser = get_parser()
    args = parser.parse_args()
    configure_logging(args.debug)
    # vars(args).get() tolerates a namespace without `env` (returns None) —
    # presumably some subcommands do not define that argument; verify.
    environment_name = vars(args).get('env') or config.get(
        "disco_aws", "default_environment")
    vpc = DiscoVPC.fetch_environment(environment_name=environment_name)
    if not vpc:
        print("Environment does not exist: {}".format(environment_name))
        sys.exit(1)
    rds = vpc.rds
    if args.mode == "list":
        instances = rds.get_db_instances()
        for instance in instances:
            # Identifier, allocated storage (GB) and status in fixed columns.
            line = "{:<20} {:>6}GB {:<12}".format(
                instance["DBInstanceIdentifier"], instance["AllocatedStorage"],
                instance["DBInstanceStatus"])
            if args.url:
                endpoint = instance["Endpoint"]
                # Guard: a falsy Endpoint renders as "-".
                url = "{}:{}".format(endpoint["Address"], endpoint["Port"]) if endpoint else "-"
                line += " {}".format(url)
            print(line)
    elif args.mode == "update":
        if args.cluster:
            rds.update_cluster_by_id(args.cluster)
        else:
            rds.update_all_clusters_in_vpc(parallel=args.parallel)
    elif args.mode == "delete":
        rds.delete_db_instance(args.cluster, skip_final_snapshot=args.skip_final_snapshot)
    elif args.mode == "cleanup_snapshots":
        rds.cleanup_snapshots(args.days)
    elif args.mode == "clone":
        rds.clone(args.source_env, args.source_db)
def run():
    """Parses command line and dispatches the commands"""
    args = docopt(__doc__)
    configure_logging(args["--debug"])
    config = read_config()
    env = args.get("--env") or config.get("disco_aws", "default_environment")
    vpc = DiscoVPC.fetch_environment(environment_name=env)
    if not vpc:
        print("Environment does not exist: {}".format(env))
        sys.exit(1)
    if args['list']:
        format_string = "{0:<50} {1:33} {2}"
        # Header goes to stderr so stdout stays machine-parseable.
        print(format_string.format("ELB Name", "Availability Zones", "ELB Id"),
              file=sys.stderr)
        # Sort by ELB name: sorting the raw dicts directly gives a meaningless
        # order on Python 2 and raises TypeError on Python 3.
        for elb_info in sorted(DiscoELB(vpc).list_for_display(),
                               key=lambda info: info['elb_name']):
            print(format_string.format(elb_info['elb_name'],
                                       elb_info['availability_zones'],
                                       elb_info["elb_id"]))
    elif args['update']:
        DiscoAWS(config, env).update_elb(args['--hostclass'])
def run():
    """Parse the command line and dispatch the requested log-metrics subcommand."""
    args = docopt(__doc__)
    configure_logging(args["--debug"])
    config = read_config()
    env = args.get("--env") or config.get("disco_aws", "default_environment")
    log_metrics = DiscoLogMetrics(env)
    hostclass = args['--hostclass']

    if args["update"]:
        log_metrics.update(hostclass)
    elif args["delete"]:
        log_metrics.delete_metrics(hostclass)
    elif args["list-metrics"]:
        # Each metric filter may define several transformations; print them all.
        for metric_filter in log_metrics.list_metric_filters(hostclass):
            for transformation in metric_filter['metricTransformations']:
                print("{0:<40} {1:10}".format(transformation['metricNamespace'],
                                              transformation['metricName']))
    elif args["list-groups"]:
        # Log group name plus its stored size, rendered in MB.
        for log_group in log_metrics.list_log_groups(hostclass):
            print("{0:<40} {1:10}".format(log_group['logGroupName'],
                                          format_bytes_to_mb(log_group['storedBytes'])))
def run():
    """Parses command line and dispatches the commands.

    Modes: list, update, delete, cleanup_snapshots, clone — all operate on
    the RDS clusters of the selected environment's VPC.
    """
    config = read_config()
    parser = get_parser()
    args = parser.parse_args()
    configure_logging(args.debug)
    # vars(args).get() tolerates a namespace without `env` (returns None) —
    # presumably some subcommands do not define that argument; verify.
    environment_name = vars(args).get('env') or config.get("disco_aws", "default_environment")
    vpc = DiscoVPC.fetch_environment(environment_name=environment_name)
    if not vpc:
        print("Environment does not exist: {}".format(environment_name))
        sys.exit(1)
    rds = vpc.rds
    if args.mode == "list":
        instances = rds.get_db_instances()
        for instance in instances:
            # Identifier, allocated storage (GB) and status in fixed columns.
            line = "{:<20} {:>6}GB {:<12}".format(
                instance["DBInstanceIdentifier"], instance["AllocatedStorage"],
                instance["DBInstanceStatus"])
            if args.url:
                endpoint = instance["Endpoint"]
                # Guard: a falsy Endpoint renders as "-".
                url = "{}:{}".format(endpoint["Address"], endpoint["Port"]) if endpoint else "-"
                line += " {}".format(url)
            print(line)
    elif args.mode == "update":
        if args.cluster:
            rds.update_cluster_by_id(args.cluster)
        else:
            rds.update_all_clusters_in_vpc(parallel=args.parallel)
    elif args.mode == "delete":
        rds.delete_db_instance(args.cluster, skip_final_snapshot=args.skip_final_snapshot)
    elif args.mode == "cleanup_snapshots":
        rds.cleanup_snapshots(args.days)
    elif args.mode == "clone":
        rds.clone(args.source_env, args.source_db)
def run():
    """Parses command line and dispatches the commands.

    AMI bakery CLI: bake/promote/list/delete AMIs and hostclass templates.
    """
    parser = get_parser()
    args = parser.parse_args()
    configure_logging(args.debug)
    if args.mode == "bake":
        # Tags arrive as "key:value" strings; split on the first ':' only so
        # values may themselves contain colons.
        extra_tags = OrderedDict(tag.split(':', 1) for tag in args.tags)
        bakery = DiscoBake(use_local_ip=args.use_local_ip)
        bakery.bake_ami(args.hostclass, args.no_destroy, args.source_ami, args.stage,
                        args.is_private, extra_tags=extra_tags)
    elif args.mode == "create":
        HostclassTemplating.create_hostclass(args.hostclass)
    elif args.mode == "promote":
        bakery = DiscoBake()
        ami = bakery.get_image(args.ami)
        bakery.promote_ami(ami, args.stage)
        if args.promote_to_prod:
            bakery.promote_ami_to_production(ami)
    elif args.mode == "hostclasspromote":
        bakery = DiscoBake()
        bakery.promote_latest_ami_to_production(args.hostclass)
    elif args.mode == "listrepo":
        bakery = DiscoBake()
        repo = bakery.repo_instance()
        if repo:
            print(repo.ip_address)
    elif args.mode == "listamis":
        # Wrap single optional filters into lists; None means "no filter".
        ami_ids = [args.ami] if args.ami else None
        instance_ids = [args.instance] if args.instance else None
        bakery = DiscoBake()
        amis = sorted(bakery.list_amis(ami_ids, instance_ids, args.stage,
                                       args.product_line, args.state, args.hostclass),
                      key=bakery.ami_timestamp)
        headers, output = bakery.tabilize_amis(amis=amis, in_prod=args.in_prod,
                                               show_tags=args.show_tags)
        print_table(headers=headers, rows=output)
        # Empty result is treated as a failure for scripting purposes.
        if not amis:
            sys.exit(1)
    elif args.mode == "liststragglers":
        bakery = DiscoBake()
        # iteritems(): this module targets Python 2.
        for hostclass, image in bakery.list_stragglers(args.days).iteritems():
            print("{0}\t{1}".format(hostclass, image.id if image else '-'))
    elif args.mode == "listlatestami":
        bakery = DiscoBake()
        ami = bakery.find_ami(args.stage, args.hostclass)
        if ami:
            headers, output = bakery.tabilize_amis(amis=[ami], in_prod=args.in_prod,
                                                   show_tags=args.show_tags)
            print_table(headers=headers, rows=output)
        else:
            sys.exit(1)
    elif args.mode == "deleteami":
        bakery = DiscoBake()
        bakery.delete_ami(args.ami)
    elif args.mode == "cleanupamis":
        bakery = DiscoBake()
        # NOTE(review): assumes --exclude-amis always has a string value
        # (split on None would raise) — presumably argparse supplies a
        # default; verify against the parser definition.
        exclude_amis = args.exclude_amis.split(',')
        bakery.cleanup_amis(args.hostclass, args.product_line, args.stage,
                            args.days, args.count, args.dryrun, exclude_amis)
def _print_ami_row(deploy, ami, missing):
    """Print one AMI row: id, hostclass name, and its integration test (or `missing`)."""
    hostclass = ami.name.split()[0]
    print("{} {:40} {}".format(ami.id, hostclass,
                               deploy.get_integration_test(hostclass) or missing))


def run():
    """Parses command line and dispatches the commands"""
    config = read_config()
    args = docopt(__doc__)
    configure_logging(args["--debug"])
    env = args["--environment"] or config.get("disco_aws", "default_environment")

    # Optional pipeline CSV: one hostclass definition per row.
    pipeline_definition = []
    if args["--pipeline"]:
        with open(args["--pipeline"], "r") as f:
            pipeline_definition = list(csv.DictReader(f))

    aws = DiscoAWS(config, env)
    # Tests may run in a dedicated environment; otherwise reuse the main one.
    if config.has_option('test', 'env'):
        test_env = config.get('test', 'env')
        test_aws = DiscoAWS(config, test_env)
    else:
        test_aws = aws

    deploy = DiscoDeploy(
        aws, test_aws, DiscoBake(config, aws.connection),
        pipeline_definition=pipeline_definition,
        test_hostclass=aws.config('hostclass', 'test'),
        test_user=aws.config('user', 'test'),
        test_command=aws.config('command', 'test'),
        ami=args.get("--ami"), hostclass=args.get("--hostclass"),
        allow_any_hostclass=args["--allow-any-hostclass"])

    if args["test"]:
        deploy.test(dry_run=args["--dry-run"])
    elif args["update"]:
        deploy.update(dry_run=args["--dry-run"])
    elif args["list"]:
        # Truthiness instead of len(); "-" marks "no test" only when a
        # pipeline was supplied.
        missing = "-" if pipeline_definition else ""
        if args["--tested"]:
            for _hostclass, ami in deploy.get_latest_tested_amis().iteritems():
                _print_ami_row(deploy, ami, missing)
        elif args["--untested"]:
            for _hostclass, ami in deploy.get_latest_untested_amis().iteritems():
                _print_ami_row(deploy, ami, missing)
        elif args["--failed"]:
            for _hostclass, ami in deploy.get_latest_failed_amis().iteritems():
                _print_ami_row(deploy, ami, missing)
        elif args["--testable"]:
            for ami in deploy.get_test_amis():
                _print_ami_row(deploy, ami, missing)
        elif args["--updatable"]:
            for ami in deploy.get_update_amis():
                _print_ami_row(deploy, ami, missing)
        elif args["--failures"]:
            failures = deploy.get_failed_amis()
            for ami in failures:
                _print_ami_row(deploy, ami, missing)
            # Non-zero exit when any AMI has failed testing.
            sys.exit(1 if failures else 0)
def __init__(self, args):
    """Store the parsed args, load config, and resolve the target environment."""
    configure_logging(args["--debug"])
    self.args = args
    self.config = read_config()
    # --env wins; otherwise fall back to the configured default environment.
    self.env = args["--env"] or self.config.get("disco_aws", "default_environment")
def run():
    """Parses command line and dispatches the commands.

    Deploy CLI: test/update AMIs through the deployment pipeline, or list
    AMIs by testing state.
    """
    config = read_config()
    args = docopt(__doc__)
    configure_logging(args["--debug"])
    env = args["--environment"] or config.get("disco_aws", "default_environment")
    # Tri-state: None means "not specified", otherwise a parsed boolean.
    force_deployable = None if args["--deployable"] is None else is_truthy(
        args["--deployable"])
    # Optional pipeline CSV: one hostclass definition per row.
    pipeline_definition = []
    if args["--pipeline"]:
        with open(args["--pipeline"], "r") as f:
            reader = csv.DictReader(f)
            pipeline_definition = [line for line in reader]
    aws = DiscoAWS(config, env)
    # Tests may run in a dedicated environment; otherwise reuse the main one.
    if config.has_option('test', 'env'):
        test_env = config.get('test', 'env')
        test_aws = DiscoAWS(config, test_env)
    else:
        test_aws = aws
    bake = DiscoBake(config, aws.connection)
    # Sanity check: when both are given, the AMI must belong to the hostclass.
    if args["--ami"] and args["--hostclass"]:
        image = bake.get_image(args["--ami"])
        if args["--hostclass"] != bake.ami_hostclass(image):
            logger.error('AMI %s does not belong to hostclass %s',
                         args["--ami"], args["--hostclass"])
            sys.exit(1)
    vpc = DiscoVPC.fetch_environment(environment_name=env)
    deploy = DiscoDeploy(aws, test_aws, bake, DiscoGroup(env), DiscoELB(vpc),
                         DiscoSSM(environment_name=env),
                         pipeline_definition=pipeline_definition,
                         ami=args.get("--ami"), hostclass=args.get("--hostclass"),
                         allow_any_hostclass=args["--allow-any-hostclass"])
    if args["test"]:
        try:
            deploy.test(dry_run=args["--dry-run"],
                        deployment_strategy=args["--strategy"],
                        ticket_id=args["--ticket"],
                        force_deployable=force_deployable)
        except RuntimeError as err:
            # Deployment errors surface as RuntimeError; report and fail.
            logger.error(str(err))
            sys.exit(1)
    elif args["update"]:
        try:
            deploy.update(dry_run=args["--dry-run"],
                          deployment_strategy=args["--strategy"],
                          ticket_id=args["--ticket"],
                          force_deployable=force_deployable)
        except RuntimeError as err:
            logger.error(str(err))
            sys.exit(1)
    elif args["list"]:
        # "-" marks "no integration test" only when a pipeline was supplied.
        missing = "-" if pipeline_definition else ""
        if args["--tested"]:
            for (_hostclass, ami) in deploy.get_latest_tested_amis().iteritems():
                print("{} {:40} {}".format(
                    ami.id, ami.name.split()[0],
                    deploy.get_integration_test(ami.name.split()[0]) or missing))
        elif args["--untested"]:
            for (_hostclass, ami) in deploy.get_latest_untested_amis().iteritems():
                print("{} {:40} {}".format(
                    ami.id, ami.name.split()[0],
                    deploy.get_integration_test(ami.name.split()[0]) or missing))
        elif args["--failed"]:
            for (_hostclass, ami) in deploy.get_latest_failed_amis().iteritems():
                print("{} {:40} {}".format(
                    ami.id, ami.name.split()[0],
                    deploy.get_integration_test(ami.name.split()[0]) or missing))
        elif args["--testable"]:
            for ami in deploy.get_test_amis():
                print("{} {:40} {}".format(
                    ami.id, ami.name.split()[0],
                    deploy.get_integration_test(ami.name.split()[0]) or missing))
        elif args["--updatable"]:
            for ami in deploy.get_update_amis():
                print("{} {:40} {}".format(
                    ami.id, ami.name.split()[0],
                    deploy.get_integration_test(ami.name.split()[0]) or missing))
        elif args["--failures"]:
            failures = deploy.get_failed_amis()
            for ami in failures:
                print("{} {:40} {}".format(
                    ami.id, ami.name.split()[0],
                    deploy.get_integration_test(ami.name.split()[0]) or missing))
            # Non-zero exit when any AMI has failed testing.
            sys.exit(1 if failures else 0)
def _emit_ami_line(deploy, ami, missing):
    """Print one AMI row: id, hostclass name, and its integration test (or `missing`)."""
    hostclass = ami.name.split()[0]
    print("{} {:40} {}".format(ami.id, hostclass,
                               deploy.get_integration_test(hostclass) or missing))


def run():
    """Parses command line and dispatches the commands"""
    config = read_config()
    args = docopt(__doc__)
    configure_logging(args["--debug"])
    env = args["--environment"] or config.get("disco_aws", "default_environment")

    # Optional pipeline CSV: one hostclass definition per row.
    pipeline_definition = []
    if args["--pipeline"]:
        with open(args["--pipeline"], "r") as f:
            pipeline_definition = list(csv.DictReader(f))

    aws = DiscoAWS(config, env)
    # Tests may run in a dedicated environment; otherwise reuse the main one.
    if config.has_option('test', 'env'):
        test_env = config.get('test', 'env')
        test_aws = DiscoAWS(config, test_env)
    else:
        test_aws = aws

    deploy = DiscoDeploy(
        aws, test_aws, DiscoBake(config, aws.connection),
        pipeline_definition=pipeline_definition,
        test_hostclass=aws.config('hostclass', 'test'),
        test_user=aws.config('user', 'test'),
        test_command=aws.config('command', 'test'),
        ami=args.get("--ami"), hostclass=args.get("--hostclass"),
        allow_any_hostclass=args["--allow-any-hostclass"])

    if args["test"]:
        deploy.test(dry_run=args["--dry-run"])
    elif args["update"]:
        deploy.update(dry_run=args["--dry-run"])
    elif args["list"]:
        # Truthiness instead of len(); "-" marks "no test" only when a
        # pipeline was supplied.
        missing = "-" if pipeline_definition else ""
        if args["--tested"]:
            for _hostclass, ami in deploy.get_latest_tested_amis().iteritems():
                _emit_ami_line(deploy, ami, missing)
        elif args["--untested"]:
            for _hostclass, ami in deploy.get_latest_untested_amis().iteritems():
                _emit_ami_line(deploy, ami, missing)
        elif args["--failed"]:
            for _hostclass, ami in deploy.get_latest_failed_amis().iteritems():
                _emit_ami_line(deploy, ami, missing)
        elif args["--testable"]:
            for ami in deploy.get_test_amis():
                _emit_ami_line(deploy, ami, missing)
        elif args["--updatable"]:
            for ami in deploy.get_update_amis():
                _emit_ami_line(deploy, ami, missing)
        elif args["--failures"]:
            failures = deploy.get_failed_amis()
            for ami in failures:
                _emit_ami_line(deploy, ami, missing)
            # Non-zero exit when any AMI has failed testing.
            sys.exit(1 if failures else 0)
def run():
    """Parses command line and dispatches the commands.

    Main disco_aws CLI: provision/list/terminate/stop instances, run remote
    or SSM commands, manage tags and pipeline spinup/spindown.
    """
    config = read_config()
    parser = get_parser()
    args = parser.parse_args()
    configure_logging(args.debug)
    environment_name = args.env or config.get("disco_aws", "default_environment")
    aws = DiscoAWS(config, environment_name=environment_name)
    if args.mode == "provision":
        # A one-entry pipeline built from the command-line options.
        hostclass_dicts = [{
            "sequence": 1,
            "hostclass": args.hostclass,
            "instance_type": args.instance_type,
            "extra_space": args.extra_space,
            "extra_disk": args.extra_disk,
            "iops": args.iops,
            "smoke_test": "no" if args.no_smoke else "yes",
            "ami": args.ami,
            "min_size": args.min_size,
            "desired_size": args.desired_size,
            "max_size": args.max_size,
            "chaos": "no" if args.no_chaos else None,
            "spotinst": args.spotinst,
            "spotinst_reserve": args.spotinst_reserve
        }]
        aws.spinup(hostclass_dicts, testing=args.testing)
    elif args.mode == "listhosts":
        instances = aws.instances_from_hostclass(args.hostclass) if args.hostclass else aws.instances()
        instances_filtered = [i for i in instances if i.state != u"terminated"]
        # Stable ordering: state, then hostclass, then hostname.
        instances_sorted = sorted(instances_filtered,
                                  key=lambda i: (i.state, i.tags.get("hostclass", "-"),
                                                 i.tags.get("hostname", "-")))
        instance_to_private_ip = {i.id: get_preferred_private_ip(i)
                                  for i in instances_sorted}
        most = args.all or args.most
        # bake/ami_dict/now are only needed (and only defined) when an
        # age/uptime column will be printed below — same conditions guard both.
        if args.ami_age or args.uptime or most:
            bake = DiscoBake(config, aws.connection)
            ami_dict = bake.list_amis_by_instance(instances)
            now = datetime.utcnow()
        for instance in instances_sorted:
            line = u"{0} {1:<30} {2:<15}".format(
                instance.id, instance.tags.get("hostclass", "-"),
                instance.ip_address or instance_to_private_ip[instance.id])
            if args.state or most:
                line += u" {0:<10}".format(instance.state)
            if args.hostname or most:
                line += u" {0:<1}".format("-" if instance.tags.get("hostname") is None else "y")
            if args.owner or most:
                line += u" {0:<11}".format(instance.tags.get("owner", u"-"))
            if args.instance_type or most:
                line += u" {0:<10}".format(instance.instance_type)
            if args.ami or most:
                line += u" {0:<12}".format(instance.image_id)
            if args.smoke or most:
                line += u" {0:<1}".format("-" if instance.tags.get("smoketest") is None else "y")
            if args.ami_age or most:
                creation_time = bake.get_ami_creation_time(ami_dict.get(instance.id))
                line += u" {0:<4}".format(DiscoBake.time_diff_in_hours(now, creation_time))
            if args.uptime or most:
                launch_time = dateutil_parser.parse(instance.launch_time)
                now_with_tz = now.replace(tzinfo=launch_time.tzinfo)  # use a timezone-aware `now`
                line += u" {0:<3}".format(DiscoBake.time_diff_in_hours(now_with_tz, launch_time))
            if args.private_ip or args.all:
                line += u" {0:<16}".format(instance_to_private_ip[instance.id])
            if args.availability_zone or args.all:
                line += u" {0:<12}".format(instance.placement)
            if args.productline or args.all:
                productline = instance.tags.get("productline", u"unknown")
                line += u" {0:<15}".format(productline if productline != u"unknown" else u"-")
            if args.securitygroup or args.all:
                line += u" {0:15}".format(instance.groups[0].name)
            print(line)
    elif args.mode == "terminate":
        instances = instances_from_args(aws, args)
        terminated_instances = aws.terminate(instances)
        print("Terminated: {0}".format(",".join([str(inst) for inst in terminated_instances])))
    elif args.mode == "stop":
        instances = instances_from_args(aws, args)
        stopped_instances = aws.stop(instances)
        print("Stopped: {0}".format(",".join([str(inst) for inst in stopped_instances])))
    elif args.mode == "exec":
        instances = instances_from_args(aws, args)
        exit_code = 0
        # Run on every instance; remember the last non-zero exit code.
        for instance in instances:
            _code, _stdout = aws.remotecmd(instance, [args.command], user=args.user, nothrow=True)
            sys.stdout.write(_stdout)
            exit_code = _code if _code else exit_code
        sys.exit(exit_code)
    elif args.mode == "exec-ssm":
        ssm = DiscoSSM(environment_name)
        if args.parameters:
            parsed_parameters = parse_ssm_parameters(args.parameters)
        else:
            parsed_parameters = None
        instances = [instance.id for instance in instances_from_args(aws, args)]
        # Exit code mirrors SSM execution success.
        if ssm.execute(instances, args.document,
                       parameters=parsed_parameters, comment=args.comment):
            sys.exit(0)
        else:
            sys.exit(1)
    elif args.mode == "isready":
        instances = instances_from_args(aws, args)
        if not instances:
            print("No instances found")
        ready_count = 0
        for instance in instances:
            name = "{0} {1}".format(instance.tags.get("hostname"), instance.id)
            print("Checking {0}...".format(name))
            try:
                aws.smoketest_once(instance)
                print("...{0} is ready".format(name))
                ready_count += 1
            except SmokeTestError:
                print("..{0} failed smoke test".format(name))
            except TimeoutError:
                print("...{0} is NOT ready".format(name))
        # Success only when every instance passed.
        sys.exit(0 if ready_count == len(instances) else 1)
    elif args.mode == "tag":
        # Remove the tag, then re-add it when a new value was supplied.
        for instance in aws.instances(instance_ids=args.instances):
            instance.remove_tag(args.key)
            if args.value:
                instance.add_tag(args.key, args.value)
    elif args.mode == "spinup":
        hostclass_dicts = read_pipeline_file(args.pipeline_definition_file)
        aws.spinup(hostclass_dicts, stage=args.stage, no_smoke=args.no_smoke, testing=args.testing)
    elif args.mode == "spindown":
        hostclasses = [line["hostclass"] for line in read_pipeline_file(args.pipeline_definition_file)]
        aws.spindown(hostclasses)
    elif args.mode == "spindownandup":
        hostclass_dicts = read_pipeline_file(args.pipeline_definition_file)
        hostclasses = [d["hostclass"] for d in hostclass_dicts]
        aws.spindown(hostclasses)
        aws.spinup(hostclass_dicts)
    elif args.mode == "gethostclassoption":
        try:
            print(aws.hostclass_option(args.hostclass, args.option))
        except NoOptionError:
            print("Hostclass %s doesn't have option %s." % (args.hostclass, args.option))
    elif args.mode == "promoterunning":
        # args.hours is converted to seconds for the promotion threshold.
        aws.promote_running_instances_to_prod(args.hours * 60 * 60)
def run():
    """Parses command line and dispatches the commands.

    Autoscaling CLI: manage groups, launch configurations and scaling policies.
    """
    config = read_config()
    args = parse_arguments()
    configure_logging(args.debug)
    environment_name = args.env or config.get("disco_aws", "default_environment")
    discogroup = DiscoGroup(environment_name)
    # Autoscaling group commands
    if args.mode == "listgroups":
        format_str = "{0} {1:21} {2:3} {3:3} {4:3} {5:3} {6:4} {7:10}"
        groups = discogroup.list_groups()
        if args.debug:
            print(
                format_str.format("Name".ljust(35 + len(environment_name)),
                                  "AMI", "min", "des", "max", "cnt", "type",
                                  "is_testing"))
        for group in groups:
            print(
                format_str.format(
                    group['name'].ljust(40 + len(environment_name)),
                    group['image_id'], group['min_size'],
                    group['desired_capacity'], group['max_size'],
                    group['group_cnt'], group['type'],
                    'y' if is_truthy(group['tags'].get('is_testing')) else 'n'))
    elif args.mode == "cleangroups":
        discogroup.delete_groups()
    elif args.mode == "deletegroup":
        discogroup.delete_groups(hostclass=args.hostclass, group_name=args.name, force=args.force)
    # Launch Configuration commands
    elif args.mode == "listconfigs":
        # Renamed from `config` — the original loop variable shadowed the
        # read_config() result above.
        for launch_config in discogroup.get_configs():
            print("{0:24} {1}".format(launch_config['LaunchConfigurationName'],
                                      launch_config['ImageId']))
    elif args.mode == "cleanconfigs":
        discogroup.clean_configs()
    elif args.mode == "deleteconfig":
        discogroup.delete_config(args.config)
    # Scaling policy commands
    elif args.mode == "listpolicies":
        policies = discogroup.list_policies(group_name=args.group_name,
                                            policy_types=args.policy_types,
                                            policy_names=args.policy_names)
        print_table(policies, headers=[
            'ASG', 'Name', 'Type', 'Adjustment Type', 'Scaling Adjustment',
            'Min Adjustment', 'Cooldown', 'Step Adjustments', 'Warmup',
            'Alarms'
        ])
    elif args.mode == "createpolicy":
        # Parse out the step adjustments, if provided.
        # Each --step-adjustment is "Key=Value,Key=Value,..." with keys
        # restricted to the AWS StepAdjustment fields.
        if args.step_adjustments:
            allowed_keys = [
                'MetricIntervalLowerBound', 'MetricIntervalUpperBound',
                'ScalingAdjustment'
            ]
            parsed_steps = []
            for step in args.step_adjustments:
                parsed_step = {}
                for entry in step.split(','):
                    key, value = entry.split('=', 1)
                    if key not in allowed_keys:
                        raise Exception(
                            'Unable to parse step {0}, key {1} not in {2}'.
                            format(step, key, allowed_keys))
                    parsed_step[key] = value
                parsed_steps.append(parsed_step)
        else:
            parsed_steps = []
        discogroup.create_policy(
            group_name=args.group_name,
            policy_name=args.policy_name,
            policy_type=args.policy_type,
            adjustment_type=args.adjustment_type,
            min_adjustment_magnitude=args.min_adjustment_magnitude,
            scaling_adjustment=args.scaling_adjustment,
            cooldown=args.cooldown,
            metric_aggregation_type=args.metric_aggregation_type,
            step_adjustments=parsed_steps,
            estimated_instance_warmup=args.estimated_instance_warmup)
    elif args.mode == "deletepolicy":
        discogroup.delete_policy(args.policy_name, args.group_name)
    sys.exit(0)
def run():
    """Parses command line and dispatches the commands.

    disco_aws CLI (older variant): provision/list/terminate/stop instances,
    run remote commands, manage tags and pipeline spinup/spindown.
    """
    config = read_config()
    parser = get_parser()
    args = parser.parse_args()
    configure_logging(args.debug)
    environment_name = args.env or config.get("disco_aws", "default_environment")
    aws = DiscoAWS(config, environment_name=environment_name)
    if args.mode == "provision":
        # A one-entry pipeline built from the command-line options.
        hostclass_dicts = [{
            "sequence": 1,
            "hostclass": args.hostclass,
            "instance_type": args.instance_type,
            "extra_space": args.extra_space,
            "extra_disk": args.extra_disk,
            "iops": args.iops,
            "smoke_test": "no" if args.no_smoke else "yes",
            "ami": args.ami,
            "min_size": args.min_size,
            "desired_size": args.desired_size,
            "max_size": args.max_size,
            "chaos": "no" if args.no_chaos else None
        }]
        aws.spinup(hostclass_dicts, testing=args.testing)
    elif args.mode == "listhosts":
        instances = aws.instances_from_hostclass(
            args.hostclass) if args.hostclass else aws.instances()
        instances_filtered = [i for i in instances if i.state != u"terminated"]
        # Stable ordering: state, then hostclass, then hostname.
        instances_sorted = sorted(instances_filtered,
                                  key=lambda i: (i.state, i.tags.get("hostclass", "-"),
                                                 i.tags.get("hostname", "-")))
        instance_to_private_ip = {
            i.id: get_preferred_private_ip(i)
            for i in instances_sorted
        }
        most = args.all or args.most
        # bake/ami_dict/now are only needed (and only defined) when an
        # age/uptime column will be printed below — same conditions guard both.
        if args.ami_age or args.uptime or most:
            bake = DiscoBake(config, aws.connection)
            ami_dict = bake.list_amis_by_instance(instances)
            now = datetime.utcnow()
        for instance in instances_sorted:
            line = u"{0} {1:<30} {2:<15}".format(
                instance.id, instance.tags.get("hostclass", "-"),
                instance.ip_address or instance_to_private_ip[instance.id])
            if args.state or most:
                line += u" {0:<10}".format(instance.state)
            if args.hostname or most:
                line += u" {0:<1}".format(
                    "-" if instance.tags.get("hostname") is None else "y")
            if args.owner or most:
                line += u" {0:<11}".format(instance.tags.get("owner", u"-"))
            if args.instance_type or most:
                line += u" {0:<10}".format(instance.instance_type)
            if args.ami or most:
                line += u" {0:<12}".format(instance.image_id)
            if args.smoke or most:
                line += u" {0:<1}".format(
                    "-" if instance.tags.get("smoketest") is None else "y")
            if args.ami_age or most:
                creation_time = bake.get_ami_creation_time(
                    ami_dict.get(instance.id))
                line += u" {0:<4}".format(
                    DiscoBake.time_diff_in_hours(now, creation_time))
            if args.uptime or most:
                launch_time = dateutil_parser.parse(instance.launch_time)
                now_with_tz = now.replace(
                    tzinfo=launch_time.tzinfo)  # use a timezone-aware `now`
                line += u" {0:<3}".format(
                    DiscoBake.time_diff_in_hours(now_with_tz, launch_time))
            if args.private_ip or args.all:
                line += u" {0:<16}".format(instance_to_private_ip[instance.id])
            if args.availability_zone or args.all:
                line += u" {0:<12}".format(instance.placement)
            if args.productline or args.all:
                productline = instance.tags.get("productline", u"unknown")
                line += u" {0:<15}".format(
                    productline if productline != u"unknown" else u"-")
            print(line)
    elif args.mode == "terminate":
        instances = instances_from_args(aws, args)
        terminated_instances = aws.terminate(instances)
        print("Terminated: {0}".format(",".join(
            [str(inst) for inst in terminated_instances])))
    elif args.mode == "stop":
        instances = instances_from_args(aws, args)
        stopped_instances = aws.stop(instances)
        print("Stopped: {0}".format(",".join(
            [str(inst) for inst in stopped_instances])))
    elif args.mode == "exec":
        instances = instances_from_args(aws, args)
        exit_code = 0
        # Run on every instance; remember the last non-zero exit code.
        for instance in instances:
            _code, _stdout = aws.remotecmd(instance, [args.command], user=args.user, nothrow=True)
            sys.stdout.write(_stdout)
            exit_code = _code if _code else exit_code
        sys.exit(exit_code)
    elif args.mode == "isready":
        instances = instances_from_args(aws, args)
        if not instances:
            print("No instances found")
        ready_count = 0
        for instance in instances:
            name = "{0} {1}".format(instance.tags.get("hostname"), instance.id)
            print("Checking {0}...".format(name))
            try:
                aws.smoketest_once(instance)
                print("...{0} is ready".format(name))
                ready_count += 1
            except SmokeTestError:
                print("..{0} failed smoke test".format(name))
            except TimeoutError:
                print("...{0} is NOT ready".format(name))
        # Success only when every instance passed.
        sys.exit(0 if ready_count == len(instances) else 1)
    elif args.mode == "tag":
        # Remove the tag, then re-add it when a new value was supplied.
        for instance in aws.instances(instance_ids=args.instances):
            instance.remove_tag(args.key)
            if args.value:
                instance.add_tag(args.key, args.value)
    elif args.mode == "spinup":
        with open(args.pipeline_definition_file, "r") as f:
            reader = csv.DictReader(f)
            hostclass_dicts = [line for line in reader]
        aws.spinup(hostclass_dicts, stage=args.stage, no_smoke=args.no_smoke, testing=args.testing)
    elif args.mode == "spindown":
        with open(args.pipeline_definition_file, "r") as f:
            reader = csv.DictReader(f)
            hostclasses = [line["hostclass"] for line in reader]
        aws.spindown(hostclasses)
    elif args.mode == "spindownandup":
        with open(args.pipeline_definition_file, "r") as f:
            reader = csv.DictReader(f)
            hostclass_dicts = [line for line in reader]
        hostclasses = [d["hostclass"] for d in hostclass_dicts]
        aws.spindown(hostclasses)
        aws.spinup(hostclass_dicts)
    elif args.mode == "gethostclassoption":
        try:
            print(aws.hostclass_option(args.hostclass, args.option))
        except NoOptionError:
            print("Hostclass %s doesn't have option %s." % (args.hostclass, args.option))
    elif args.mode == "promoterunning":
        # args.hours is converted to seconds for the promotion threshold.
        aws.promote_running_instances_to_prod(args.hours * 60 * 60)
def run():
    """Parses command line and dispatches the commands.

    Autoscaling CLI: manage groups, launch configurations and scaling policies.
    """
    config = read_config()
    args = parse_arguments()
    configure_logging(args.debug)
    environment_name = args.env or config.get("disco_aws", "default_environment")
    discogroup = DiscoGroup(environment_name)
    # Autoscaling group commands
    if args.mode == "listgroups":
        format_str = "{0} {1:21} {2:3} {3:3} {4:3} {5:3} {6:4} {7:10}"
        groups = discogroup.list_groups()
        if args.debug:
            print(
                format_str.format(
                    "Name".ljust(35 + len(environment_name)),
                    "AMI", "min", "des", "max", "cnt", "type", "is_testing"
                )
            )
        for group in groups:
            print(
                format_str.format(
                    group['name'].ljust(40 + len(environment_name)),
                    group['image_id'], group['min_size'],
                    group['desired_capacity'], group['max_size'],
                    group['group_cnt'], group['type'],
                    'y' if is_truthy(group['tags'].get('is_testing')) else 'n'
                )
            )
    elif args.mode == "cleangroups":
        discogroup.delete_groups()
    elif args.mode == "deletegroup":
        discogroup.delete_groups(hostclass=args.hostclass, group_name=args.name, force=args.force)
    # Launch Configuration commands
    elif args.mode == "listconfigs":
        # Renamed from `config` — the original loop variable shadowed the
        # read_config() result above.
        for launch_config in discogroup.get_configs():
            print("{0:24} {1}".format(launch_config.name, launch_config.image_id))
    elif args.mode == "cleanconfigs":
        discogroup.clean_configs()
    elif args.mode == "deleteconfig":
        discogroup.delete_config(args.config)
    # Scaling policy commands
    elif args.mode == "listpolicies":
        policies = discogroup.list_policies(
            group_name=args.group_name,
            policy_types=args.policy_types,
            policy_names=args.policy_names
        )
        print_table(
            policies,
            headers=[
                'ASG', 'Name', 'Type', 'Adjustment Type', 'Scaling Adjustment',
                'Min Adjustment', 'Cooldown', 'Step Adjustments', 'Warmup', 'Alarms'
            ]
        )
    elif args.mode == "createpolicy":
        # Parse out the step adjustments, if provided.
        # Each --step-adjustment is "Key=Value,Key=Value,..." with keys
        # restricted to the AWS StepAdjustment fields.
        if args.step_adjustments:
            allowed_keys = ['MetricIntervalLowerBound', 'MetricIntervalUpperBound',
                            'ScalingAdjustment']
            parsed_steps = []
            for step in args.step_adjustments:
                parsed_step = {}
                for entry in step.split(','):
                    key, value = entry.split('=', 1)
                    if key not in allowed_keys:
                        raise Exception(
                            'Unable to parse step {0}, key {1} not in {2}'.format(step, key, allowed_keys)
                        )
                    parsed_step[key] = value
                parsed_steps.append(parsed_step)
        else:
            parsed_steps = []
        discogroup.create_policy(
            group_name=args.group_name,
            policy_name=args.policy_name,
            policy_type=args.policy_type,
            adjustment_type=args.adjustment_type,
            min_adjustment_magnitude=args.min_adjustment_magnitude,
            scaling_adjustment=args.scaling_adjustment,
            cooldown=args.cooldown,
            metric_aggregation_type=args.metric_aggregation_type,
            step_adjustments=parsed_steps,
            estimated_instance_warmup=args.estimated_instance_warmup
        )
    elif args.mode == "deletepolicy":
        discogroup.delete_policy(args.policy_name, args.group_name)
    sys.exit(0)