def run():
    """Parses command line and dispatches the commands"""
    config = read_config()
    parser = get_parser()
    args = parser.parse_args()
    configure_logging(args.debug)
    # Fall back to the environment's first credential bucket when no bucket
    # is given explicitly.
    bucket_name = args.bucket or DiscoVPC.get_credential_buckets_from_env_name(config, args.env)[0]
    s3_bucket = DiscoS3Bucket(bucket_name)
    if args.mode == "list":
        print("\n".join(s3_bucket.listkeys(args.prefix)))
    elif args.mode == "get":
        print(s3_bucket.get_key(args.key_name))
    elif args.mode == "set":
        key_value = args.key_value
        if args.key_password:
            # Prompt interactively so the secret never lands in shell history.
            key_value = getpass.getpass()
        elif key_value == "-":
            # "-" means read the value from stdin.
            key_value = sys.stdin.read()
        s3_bucket.set_key(args.key_name, key_value)
    elif args.mode == "delete":
        s3_bucket.delete_key(args.key_name)
    elif args.mode == "setfile":
        # BUGFIX: removed a redundant `get_key()` call whose result was never
        # used; get_contents_to_file() fetches the key contents itself.
        s3_bucket.get_contents_to_file(args.key_name, args.file_name)
def run():
    """Parses command line and dispatches the commands"""
    config = read_config()
    args = get_parser().parse_args()
    configure_logging(args.debug)
    env = args.env or config.get("disco_aws", "default_environment")
    aws = DiscoAWS(config, environment_name=env)
    storage = aws.disco_storage
    if args.mode == "create":
        storage.create_ebs_snapshot(args.hostclass, args.size)
    elif args.mode == "list":
        # One row per snapshot: hostclass, id, status, start time, size (GB).
        for snap in storage.get_snapshots(args.hostclasses):
            print("{0:26} {1:13} {2:9} {3} {4:4}".format(
                snap.tags['hostclass'], snap.id, snap.status,
                snap.start_time, snap.volume_size))
    elif args.mode == "cleanup":
        storage.cleanup_ebs_snapshots(args.keep)
    elif args.mode == "capture":
        instances = instances_from_args(aws, args)
        if not instances:
            logging.warning("No instances found")
        for instance in instances:
            # Trigger the on-host snapshot script remotely.
            return_code, output = aws.remotecmd(
                instance, ["sudo /opt/wgen/bin/take_snapshot.sh"], user="******")
            if return_code:
                raise Exception("Failed to snapshot instance {0}:\n {1}\n".format(instance, output))
            logging.info("Successfully snapshotted %s", instance)
    elif args.mode == "delete":
        for snapshot_id in args.snapshots:
            storage.delete_snapshot(snapshot_id)
    elif args.mode == "update":
        latest = storage.get_latest_snapshot(args.hostclass)
        aws.autoscale.update_snapshot(args.hostclass, latest.id, latest.volume_size)
def run():
    """Parses command line and dispatches the commands"""
    args = docopt(__doc__)
    configure_logging(args["--debug"])
    config = read_config()
    env = args.get("--env") or config.get("disco_aws", "default_environment")
    vpc = DiscoVPC.fetch_environment(environment_name=env)
    if not vpc:
        print("Environment does not exist: {}".format(env))
        sys.exit(1)
    cache = DiscoElastiCache(vpc, aws=DiscoAWS(config, env))
    if args['list']:
        for cluster in cache.list():
            # Node count is only meaningful once the cluster is available.
            if cluster['Status'] == 'available':
                size = len(cluster['NodeGroups'][0]['NodeGroupMembers'])
            else:
                size = 'N/A'
            print("{0:<25} {1:5} {2:>5}".format(cluster['Description'], cluster['Status'], size))
    elif args['update']:
        if args['--cluster']:
            cache.update(args['--cluster'])
        else:
            cache.update_all()
    elif args['delete']:
        cache.delete(args['--cluster'], wait=args['--wait'])
def run():
    """Parses command line and dispatches the commands"""
    args = docopt(__doc__)
    configure_logging(args["--debug"])
    config = read_config()
    env = args.get("--env") or config.get("disco_aws", "default_environment")
    metrics = DiscoLogMetrics(env)
    hostclass = args['--hostclass']
    if args["update"]:
        metrics.update(hostclass)
    elif args["delete"]:
        metrics.delete_metrics(hostclass)
    elif args["list-metrics"]:
        # Each filter may define several metric transformations; list them all.
        for metric_filter in metrics.list_metric_filters(hostclass):
            for metric in metric_filter['metricTransformations']:
                print("{0:<40} {1:10}".format(metric['metricNamespace'], metric['metricName']))
    elif args["list-groups"]:
        for group in metrics.list_log_groups(hostclass):
            print("{0:<40} {1:10}".format(group['logGroupName'],
                                          format_bytes_to_mb(group['storedBytes'])))
def run():
    """Parses command line and dispatches the commands"""
    args = docopt(__doc__)
    configure_logging(args["--debug"])
    config = read_config()
    dry_run = args.get("--dry-run")
    delete = args.get("--delete")
    hostclass = args.get("--hostclass")
    env = args.get("--env") or config.get("disco_aws", "default_environment")
    alarms_config = DiscoAlarmsConfig(env)
    if args["update_notifications"]:
        DiscoSNS().update_sns_with_notifications(
            alarms_config.get_notifications(), env, delete=delete, dry_run=dry_run)
    elif args["update_metrics"]:
        if delete:
            # Clear out the existing alarms before re-creating them.
            DiscoAlarm().delete_hostclass_environment_alarms(env, hostclass)
        DiscoAWS(config, env).spinup_alarms([hostclass])
    elif args["list"]:
        # Narrow the query to one hostclass only when one was requested.
        filters = {"env": env}
        if hostclass:
            filters["hostclass"] = hostclass
        for alarm in DiscoAlarm().get_alarms(filters):
            print(alarm)
    else:
        logging.error("No command specified. See --help")
        sys.exit(1)
def run():
    """Parses command line and dispatches the commands"""
    args = docopt(__doc__)
    configure_logging(args["--debug"])
    config = read_config()
    dry_run = args.get("--dry-run")
    delete = args.get("--delete")
    hostclass = args.get("--hostclass")
    env = args.get("--env") or config.get("disco_aws", "default_environment")
    alarms_config = DiscoAlarmsConfig(env)
    if args["update_notifications"]:
        notifications = alarms_config.get_notifications()
        DiscoSNS().update_sns_with_notifications(notifications, env, delete=delete, dry_run=dry_run)
        return
    if args["update_metrics"]:
        if delete:
            # Remove stale alarms before spinning up fresh ones.
            DiscoAlarm().delete_hostclass_environment_alarms(env, hostclass)
        DiscoAWS(config, env).spinup_alarms([hostclass])
        return
    if args["list"]:
        query = {"env": env, "hostclass": hostclass} if hostclass else {"env": env}
        for alarm in DiscoAlarm().get_alarms(query):
            print(alarm)
        return
    logging.error("No command specified. See --help")
    sys.exit(1)
def run():
    """Parses command line and dispatches the commands"""
    config = read_config()
    args = parse_arguments()
    configure_logging(args.debug)
    environment_name = args.env or config.get("disco_aws", "default_environment")
    autoscale = DiscoAutoscale(environment_name)
    # Autoscaling group commands
    if args.mode == "listgroups":
        format_str = "{0} {1:12} {2:3} {3:3} {4:3} {5:3}"
        groups = autoscale.get_groups()
        instances = autoscale.get_instances()
        if args.debug:
            # Header row is only printed in debug mode (preserves existing behavior).
            print(format_str.format(
                "Name".ljust(26 + len(environment_name)), "AMI", "min", "des", "max", "cnt"))
        for group in groups:
            launch_cfg = list(autoscale.get_configs(names=[group.launch_config_name]))
            image_id = launch_cfg[0].image_id if len(launch_cfg) else ""
            group_str = group.name.ljust(26 + len(environment_name))
            group_cnt = len([instance for instance in instances
                             if instance.group_name == group.name])
            print(format_str.format(group_str, image_id, group.min_size,
                                    group.desired_capacity, group.max_size, group_cnt))
    elif args.mode == "cleangroups":
        autoscale.clean_groups()
    elif args.mode == "deletegroup":
        autoscale.delete_group(args.hostclass, args.force)
    # Launch Configuration commands
    elif args.mode == "listconfigs":
        # BUGFIX: the loop variable was named `config`, shadowing the
        # configuration object returned by read_config() above; renamed.
        for launch_config in autoscale.get_configs():
            print("{0:24} {1}".format(launch_config.name, launch_config.image_id))
    elif args.mode == "cleanconfigs":
        autoscale.clean_configs()
    elif args.mode == "deleteconfig":
        autoscale.delete_config(args.config)
    # Scaling policy commands
    elif args.mode == "listpolicies":
        for policy in autoscale.list_policies():
            print("{0:30} {1}".format(policy.name, policy.policy_arn))
    elif args.mode == "createpolicy":
        autoscale.create_policy(args.policy_name, args.group_name, args.adjustment, args.cooldown)
    elif args.mode == "deletepolicy":
        autoscale.delete_policy(args.policy_name, args.group_name)
    sys.exit(0)
def run():
    """Parses command line and dispatches the commands"""
    config = read_config()
    args = get_parser().parse_args()
    configure_logging(args.debug)
    # Default to the environment's first credential bucket if none was given.
    bucket = args.bucket or DiscoVPC.get_credential_buckets_from_env_name(config, args.env)[0]
    auth_dir = args.dir or None
    # The bucket name ends with the environment name; use it as a fallback.
    env = args.env or bucket.split('.')[-1]
    if args.mode == "update":
        DiscoAppAuth(env, bucket, auth_dir).update(args.force)
def run():
    """Parses command line and dispatches the commands"""
    config = read_config()
    args = get_parser().parse_args()
    configure_logging(args.debug)
    env_name = args.env or config.get("disco_aws", "default_environment")
    chaos = DiscoChaos(config, env_name, args.level, args.retainage)
    doomed = chaos.get_instances_to_terminate()
    # Always show what would be terminated, even on a dry run.
    for inst in doomed:
        print("{0:20} {1}".format(inst.tags.get('hostclass'), inst.id))
    if not args.dryrun:
        chaos.terminate(doomed)
def run():
    """Parses command line and dispatches the commands"""
    config = read_config()
    args = docopt(__doc__)
    configure_logging(args["--debug"])
    if args["hashpassword"]:
        # Stand-alone utility mode: print a password hash and quit.
        print(password_hash())
        sys.exit(0)
    bucket_name = args.get("--bucket") or \
        DiscoVPC.get_credential_buckets_from_env_name(config, args["--env"])[0]
    s3_accounts = S3AccountBackend(DiscoS3Bucket(bucket_name))
    if args["install"]:
        s3_accounts.install_all()
    elif args["adduser"]:
        username = args["--name"] or os.environ.get("USER")
        template = s3_accounts.new_user_config(password_hash(args["--password"]))
        group_config = s3_accounts.new_group_config()
        # Store the (possibly interactively edited) user config plus a
        # same-named group for the new account.
        s3_accounts.add_account(username, s3_accounts.edit_account_config(template))
        s3_accounts.add_account(username, group_config)
    elif args["addgroup"]:
        s3_accounts.add_account(args["--name"], s3_accounts.new_group_config())
    elif args["edituser"]:
        username = args["--name"] or os.environ.get("USER")
        user_config = s3_accounts.get_user_config(username)
        kwargs = {"active": args["--active"]} if args["--active"] else {}
        s3_accounts.add_account(username, s3_accounts.edit_account_config(user_config, **kwargs))
        s3_accounts.refresh_groups()
    elif args["editgroup"]:
        # there is nothing to edit for a group.. but..
        group_config = s3_accounts.edit_account_config(s3_accounts.get_group_config(args["--name"]))
        s3_accounts.add_account(args["--name"], group_config)
    elif args["listgroups"]:
        print("\n".join(s3_accounts.list_groups()))
    elif args["listusers"]:
        print("\n".join(s3_accounts.list_users()))
def run():
    """Parses command line and dispatches the commands"""
    config = read_config()
    args = docopt(__doc__)
    configure_logging(args["--debug"])
    if args["hashpassword"]:
        # Utility mode: emit a password hash and exit immediately.
        print(password_hash())
        sys.exit(0)
    bucket_name = args.get("--bucket") or DiscoVPC.get_credential_buckets_from_env_name(
        config, args["--env"])[0]
    backend = S3AccountBackend(DiscoS3Bucket(bucket_name))
    if args["install"]:
        backend.install_all()
    elif args["adduser"]:
        username = args["--name"] or os.environ.get("USER")
        user_template = backend.new_user_config(password_hash(args["--password"]))
        group_config = backend.new_group_config()
        user_config = backend.edit_account_config(user_template)
        backend.add_account(username, user_config)
        backend.add_account(username, group_config)
    elif args["addgroup"]:
        group_config = backend.new_group_config()
        backend.add_account(args["--name"], group_config)
    elif args["edituser"]:
        username = args["--name"] or os.environ.get("USER")
        user_config = backend.get_user_config(username)
        kwargs = {"active": args["--active"]} if args["--active"] else {}
        user_config = backend.edit_account_config(user_config, **kwargs)
        backend.add_account(username, user_config)
        backend.refresh_groups()
    elif args["editgroup"]:
        # there is nothing to edit for a group.. but..
        group_config = backend.get_group_config(args["--name"])
        group_config = backend.edit_account_config(group_config)
        backend.add_account(args["--name"], group_config)
    elif args["listgroups"]:
        print("\n".join(backend.list_groups()))
    elif args["listusers"]:
        print("\n".join(backend.list_users()))
def run():
    """Parses command line and dispatches the commands"""
    args = docopt(__doc__)
    configure_logging(args["--debug"])
    config = read_config()
    env = args.get("--env") or config.get("disco_aws", "default_environment")
    vpc = DiscoVPC.fetch_environment(environment_name=env)
    if not vpc:
        print("Environment does not exist: {}".format(env))
        sys.exit(1)
    if args['list']:
        for elb in sorted(DiscoELB(vpc).list()):
            zones = ','.join(elb['AvailabilityZones'])
            print("{0:<20} {1:25}".format(elb['LoadBalancerName'], zones))
    elif args['update']:
        DiscoAWS(config, env).update_elb(args['--hostclass'])
def run():
    """
    Parses command line and dispatches the commands

    disco_dynamodb.py list
    """
    config = read_config()
    args = get_parser().parse_args()
    # Some subcommands may not define --env; fall back to the configured default.
    environment_name = getattr(args, "env", None) or config.get("disco_aws", "default_environment")
    dynamodb = DiscoDynamoDB(environment_name=environment_name)
    if args.mode == "list":
        list_tables(dynamodb, args.header)
    elif args.mode == "create":
        create_table(dynamodb, args.config, args.wait)
    elif args.mode == "update":
        update_table(dynamodb, args.table, args.config, args.wait)
    elif args.mode == "delete":
        delete_table(dynamodb, args.table, args.wait)
    elif args.mode == "describe":
        describe_table(dynamodb, args.table)
def run():
    """Parses command line and dispatches the commands"""
    config = read_config()
    parser = get_parser()
    args = parser.parse_args()
    configure_logging(args.debug)
    env_name = args.env or config.get("disco_aws", "default_environment")
    aws = DiscoAWS(config, environment_name=env_name)
    mode = args.mode
    if mode == "create":
        aws.disco_storage.create_ebs_snapshot(args.hostclass, args.size)
    elif mode == "list":
        for snapshot in aws.disco_storage.get_snapshots(args.hostclasses):
            print("{0:26} {1:13} {2:9} {3} {4:4}".format(
                snapshot.tags['hostclass'], snapshot.id, snapshot.status,
                snapshot.start_time, snapshot.volume_size))
    elif mode == "cleanup":
        aws.disco_storage.cleanup_ebs_snapshots(args.keep)
    elif mode == "capture":
        targets = instances_from_args(aws, args)
        if not targets:
            logging.warning("No instances found")
        for target in targets:
            # Run the host's snapshot script over the remote channel.
            return_code, output = aws.remotecmd(
                target, ["sudo /opt/wgen/bin/take_snapshot.sh"], user="******")
            if return_code:
                raise Exception("Failed to snapshot instance {0}:\n {1}\n".format(target, output))
            logging.info("Successfully snapshotted %s", target)
    elif mode == "delete":
        for snap_id in args.snapshots:
            aws.disco_storage.delete_snapshot(snap_id)
    elif mode == "update":
        newest = aws.disco_storage.get_latest_snapshot(args.hostclass)
        aws.autoscale.update_snapshot(args.hostclass, newest.id, newest.volume_size)
def run():
    """Parses command line and dispatches the commands"""
    config = read_config()
    args = get_parser().parse_args()
    configure_logging(args.debug)
    env = vars(args).get('env') or config.get("disco_aws", "default_environment")
    vpc = DiscoVPC.fetch_environment(environment_name=env)
    if not vpc:
        print("Environment does not exist: {}".format(env))
        sys.exit(1)
    rds = vpc.rds
    if args.mode == "list":
        for instance in rds.get_db_instances():
            line = "{:<20} {:>6}GB {:<12}".format(
                instance["DBInstanceIdentifier"], instance["AllocatedStorage"],
                instance["DBInstanceStatus"])
            if args.url:
                # Append host:port when the endpoint exists (it may not while
                # the instance is still being created).
                endpoint = instance["Endpoint"]
                url = "{}:{}".format(endpoint["Address"], endpoint["Port"]) if endpoint else "-"
                line += " {}".format(url)
            print(line)
    elif args.mode == "update":
        if args.cluster:
            rds.update_cluster(args.cluster)
        else:
            rds.update_all_clusters_in_vpc()
    elif args.mode == "delete":
        rds.delete_db_instance(args.cluster, skip_final_snapshot=args.skip_final_snapshot)
    elif args.mode == 'cleanup_snapshots':
        rds.cleanup_snapshots(args.days)
def run():
    """Parses command line and dispatches the commands"""
    args = docopt(__doc__)
    configure_logging(args["--debug"])
    config = read_config()
    env = args.get("--env") or config.get("disco_aws", "default_environment")
    log_metrics = DiscoLogMetrics(env)
    if args["update"]:
        log_metrics.update(args['--hostclass'])
    elif args["delete"]:
        log_metrics.delete_metrics(args['--hostclass'])
    elif args["list-metrics"]:
        for mfilter in log_metrics.list_metric_filters(args['--hostclass']):
            # A single filter can carry multiple transformations.
            for transform in mfilter['metricTransformations']:
                print("{0:<40} {1:10}".format(transform['metricNamespace'],
                                              transform['metricName']))
    elif args["list-groups"]:
        for log_group in log_metrics.list_log_groups(args['--hostclass']):
            print("{0:<40} {1:10}".format(log_group['logGroupName'],
                                          format_bytes_to_mb(log_group['storedBytes'])))
def __init__(self, args):
    """Stash the parsed docopt args, load config, and set up logging."""
    self.args = args
    self.config = read_config()
    # Fall back to the configured default when --env was not supplied.
    self.env = args["--env"] or self.config.get("disco_aws", "default_environment")
    configure_logging(args["--debug"])
def run():
    """Parses command line and dispatches the commands"""
    config = read_config()
    parser = get_parser()
    args = parser.parse_args()
    configure_logging(args.debug)
    environment_name = args.env or config.get("disco_aws", "default_environment")
    aws = DiscoAWS(config, environment_name=environment_name)
    if args.mode == "provision":
        hostclass_dicts = [{
            "sequence": 1,
            "hostclass": args.hostclass,
            "instance_type": args.instance_type,
            "extra_space": args.extra_space,
            "extra_disk": args.extra_disk,
            "iops": args.iops,
            "smoke_test": "no" if args.no_smoke else "yes",
            "ami": args.ami,
            "min_size": args.min_size,
            "desired_size": args.desired_size,
            "max_size": args.max_size,
            "chaos": "no" if args.no_chaos else None,
        }]
        aws.spinup(hostclass_dicts, testing=args.testing)
    elif args.mode == "listhosts":
        instances = aws.instances_from_hostclass(args.hostclass) if args.hostclass else aws.instances()
        live = [i for i in instances if i.state != u"terminated"]
        ordered = sorted(live, key=lambda i: (i.state, i.tags.get("hostclass", "-"),
                                              i.tags.get("hostname", "-")))
        private_ips = {i.id: get_preferred_private_ip(i) for i in ordered}
        most = args.all or args.most
        if args.ami_age or args.uptime or most:
            # Only pay for the bake/AMI lookups when a column needs them.
            bake = DiscoBake(config, aws.connection)
            ami_dict = bake.list_amis_by_instance(instances)
            now = datetime.utcnow()
        for instance in ordered:
            line = u"{0} {1:<30} {2:<15}".format(
                instance.id, instance.tags.get("hostclass", "-"),
                instance.ip_address or private_ips[instance.id])
            if args.state or most:
                line += u" {0:<10}".format(instance.state)
            if args.hostname or most:
                line += u" {0:<1}".format("-" if instance.tags.get("hostname") is None else "y")
            if args.owner or most:
                line += u" {0:<11}".format(instance.tags.get("owner", u"-"))
            if args.instance_type or most:
                line += u" {0:<10}".format(instance.instance_type)
            if args.ami or most:
                line += u" {0:<12}".format(instance.image_id)
            if args.smoke or most:
                line += u" {0:<1}".format("-" if instance.tags.get("smoketest") is None else "y")
            if args.ami_age or most:
                creation_time = bake.get_ami_creation_time(ami_dict.get(instance.id))
                line += u" {0:<4}".format(DiscoBake.time_diff_in_hours(now, creation_time))
            if args.uptime or most:
                launch_time = dateutil_parser.parse(instance.launch_time)
                now_with_tz = now.replace(tzinfo=launch_time.tzinfo)  # use a timezone-aware `now`
                line += u" {0:<3}".format(DiscoBake.time_diff_in_hours(now_with_tz, launch_time))
            if args.private_ip or args.all:
                line += u" {0:<16}".format(private_ips[instance.id])
            if args.availability_zone or args.all:
                line += u" {0:<12}".format(instance.placement)
            if args.productline or args.all:
                productline = instance.tags.get("productline", u"unknown")
                line += u" {0:<15}".format(productline if productline != u"unknown" else u"-")
            print(line)
    elif args.mode == "terminate":
        terminated = aws.terminate(instances_from_args(aws, args))
        print("Terminated: {0}".format(",".join([str(inst) for inst in terminated])))
    elif args.mode == "stop":
        stopped = aws.stop(instances_from_args(aws, args))
        print("Stopped: {0}".format(",".join([str(inst) for inst in stopped])))
    elif args.mode == "exec":
        exit_code = 0
        for instance in instances_from_args(aws, args):
            _code, _stdout = aws.remotecmd(instance, [args.command], user=args.user, nothrow=True)
            sys.stdout.write(_stdout)
            # Remember the last non-zero exit code.
            exit_code = _code if _code else exit_code
        sys.exit(exit_code)
    elif args.mode == "isready":
        instances = instances_from_args(aws, args)
        if not instances:
            print("No instances found")
        ready_count = 0
        for instance in instances:
            name = "{0} {1}".format(instance.tags.get("hostname"), instance.id)
            print("Checking {0}...".format(name))
            try:
                aws.smoketest_once(instance)
                print("...{0} is ready".format(name))
                ready_count += 1
            except SmokeTestError:
                print("..{0} failed smoke test".format(name))
            except TimeoutError:
                print("...{0} is NOT ready".format(name))
        sys.exit(0 if ready_count == len(instances) else 1)
    elif args.mode == "tag":
        for instance in aws.instances(instance_ids=args.instances):
            instance.remove_tag(args.key)
            if args.value:
                instance.add_tag(args.key, args.value)
    elif args.mode == "spinup":
        with open(args.pipeline_definition_file, "r") as f:
            hostclass_dicts = [line for line in csv.DictReader(f)]
        aws.spinup(hostclass_dicts, stage=args.stage, no_smoke=args.no_smoke, testing=args.testing)
    elif args.mode == "spindown":
        with open(args.pipeline_definition_file, "r") as f:
            hostclasses = [line["hostclass"] for line in csv.DictReader(f)]
        aws.spindown(hostclasses)
    elif args.mode == "spindownandup":
        with open(args.pipeline_definition_file, "r") as f:
            hostclass_dicts = [line for line in csv.DictReader(f)]
        aws.spindown([d["hostclass"] for d in hostclass_dicts])
        aws.spinup(hostclass_dicts)
    elif args.mode == "gethostclassoption":
        try:
            print(aws.hostclass_option(args.hostclass, args.option))
        except NoOptionError:
            print("Hostclass %s doesn't have option %s." % (args.hostclass, args.option))
    elif args.mode == "promoterunning":
        aws.promote_running_instances_to_prod(args.hours * 60 * 60)
def run():
    """Parses command line and dispatches the commands"""
    config = read_config()
    args = docopt(__doc__)
    configure_logging(args["--debug"])
    env = args["--environment"] or config.get("disco_aws", "default_environment")
    pipeline_definition = []
    if args["--pipeline"]:
        with open(args["--pipeline"], "r") as f:
            pipeline_definition = [line for line in csv.DictReader(f)]
    aws = DiscoAWS(config, env)
    # Use a dedicated test environment when one is configured.
    if config.has_option('test', 'env'):
        test_aws = DiscoAWS(config, config.get('test', 'env'))
    else:
        test_aws = aws
    deploy = DiscoDeploy(
        aws, test_aws, DiscoBake(config, aws.connection),
        pipeline_definition=pipeline_definition,
        test_hostclass=aws.config('hostclass', 'test'),
        test_user=aws.config('user', 'test'),
        test_command=aws.config('command', 'test'),
        ami=args.get("--ami"),
        hostclass=args.get("--hostclass"),
        allow_any_hostclass=args["--allow-any-hostclass"])
    if args["test"]:
        deploy.test(dry_run=args["--dry-run"])
    elif args["update"]:
        deploy.update(dry_run=args["--dry-run"])
    elif args["list"]:
        missing = "-" if len(pipeline_definition) else ""

        def show(ami):
            # Print one AMI row: id, hostclass, integration test (or filler).
            hostclass = ami.name.split()[0]
            print("{} {:40} {}".format(ami.id, hostclass,
                                       deploy.get_integration_test(hostclass) or missing))

        if args["--tested"]:
            for _hostclass, ami in deploy.get_latest_tested_amis().iteritems():
                show(ami)
        elif args["--untested"]:
            for _hostclass, ami in deploy.get_latest_untested_amis().iteritems():
                show(ami)
        elif args["--failed"]:
            for _hostclass, ami in deploy.get_latest_failed_amis().iteritems():
                show(ami)
        elif args["--testable"]:
            for ami in deploy.get_test_amis():
                show(ami)
        elif args["--updatable"]:
            for ami in deploy.get_update_amis():
                show(ami)
        elif args["--failures"]:
            failures = deploy.get_failed_amis()
            for ami in failures:
                show(ami)
            sys.exit(1 if len(failures) else 0)
def run():
    """Parses command line and dispatches the commands"""
    config = read_config()
    parser = get_parser()
    args = parser.parse_args()
    configure_logging(args.debug)
    environment_name = args.env or config.get("disco_aws", "default_environment")
    aws = DiscoAWS(config, environment_name=environment_name)
    mode = args.mode
    if mode == "provision":
        spec = {
            "sequence": 1,
            "hostclass": args.hostclass,
            "instance_type": args.instance_type,
            "extra_space": args.extra_space,
            "extra_disk": args.extra_disk,
            "iops": args.iops,
            "smoke_test": "no" if args.no_smoke else "yes",
            "ami": args.ami,
            "min_size": args.min_size,
            "desired_size": args.desired_size,
            "max_size": args.max_size,
            "chaos": "no" if args.no_chaos else None,
        }
        aws.spinup([spec], testing=args.testing)
    elif mode == "listhosts":
        if args.hostclass:
            instances = aws.instances_from_hostclass(args.hostclass)
        else:
            instances = aws.instances()
        alive = [inst for inst in instances if inst.state != u"terminated"]
        alive.sort(key=lambda inst: (inst.state, inst.tags.get("hostclass", "-"),
                                     inst.tags.get("hostname", "-")))
        ip_by_id = {inst.id: get_preferred_private_ip(inst) for inst in alive}
        most = args.all or args.most
        if args.ami_age or args.uptime or most:
            # AMI metadata lookups are only needed for the age/uptime columns.
            bake = DiscoBake(config, aws.connection)
            ami_dict = bake.list_amis_by_instance(instances)
            now = datetime.utcnow()
        for inst in alive:
            # Build each output row as a list of fixed-width column fragments.
            parts = [u"{0} {1:<30} {2:<15}".format(
                inst.id, inst.tags.get("hostclass", "-"),
                inst.ip_address or ip_by_id[inst.id])]
            if args.state or most:
                parts.append(u" {0:<10}".format(inst.state))
            if args.hostname or most:
                parts.append(u" {0:<1}".format("-" if inst.tags.get("hostname") is None else "y"))
            if args.owner or most:
                parts.append(u" {0:<11}".format(inst.tags.get("owner", u"-")))
            if args.instance_type or most:
                parts.append(u" {0:<10}".format(inst.instance_type))
            if args.ami or most:
                parts.append(u" {0:<12}".format(inst.image_id))
            if args.smoke or most:
                parts.append(u" {0:<1}".format("-" if inst.tags.get("smoketest") is None else "y"))
            if args.ami_age or most:
                created = bake.get_ami_creation_time(ami_dict.get(inst.id))
                parts.append(u" {0:<4}".format(DiscoBake.time_diff_in_hours(now, created)))
            if args.uptime or most:
                launched = dateutil_parser.parse(inst.launch_time)
                aware_now = now.replace(tzinfo=launched.tzinfo)  # use a timezone-aware `now`
                parts.append(u" {0:<3}".format(DiscoBake.time_diff_in_hours(aware_now, launched)))
            if args.private_ip or args.all:
                parts.append(u" {0:<16}".format(ip_by_id[inst.id]))
            if args.availability_zone or args.all:
                parts.append(u" {0:<12}".format(inst.placement))
            if args.productline or args.all:
                pline = inst.tags.get("productline", u"unknown")
                parts.append(u" {0:<15}".format(pline if pline != u"unknown" else u"-"))
            print(u"".join(parts))
    elif mode == "terminate":
        gone = aws.terminate(instances_from_args(aws, args))
        print("Terminated: {0}".format(",".join([str(inst) for inst in gone])))
    elif mode == "stop":
        halted = aws.stop(instances_from_args(aws, args))
        print("Stopped: {0}".format(",".join([str(inst) for inst in halted])))
    elif mode == "exec":
        exit_code = 0
        for inst in instances_from_args(aws, args):
            rc, out = aws.remotecmd(inst, [args.command], user=args.user, nothrow=True)
            sys.stdout.write(out)
            # Keep the last failing exit code so one failure fails the run.
            exit_code = rc if rc else exit_code
        sys.exit(exit_code)
    elif mode == "isready":
        targets = instances_from_args(aws, args)
        if not targets:
            print("No instances found")
        ready = 0
        for inst in targets:
            name = "{0} {1}".format(inst.tags.get("hostname"), inst.id)
            print("Checking {0}...".format(name))
            try:
                aws.smoketest_once(inst)
                print("...{0} is ready".format(name))
                ready += 1
            except SmokeTestError:
                print("..{0} failed smoke test".format(name))
            except TimeoutError:
                print("...{0} is NOT ready".format(name))
        sys.exit(0 if ready == len(targets) else 1)
    elif mode == "tag":
        for inst in aws.instances(instance_ids=args.instances):
            inst.remove_tag(args.key)
            if args.value:
                inst.add_tag(args.key, args.value)
    elif mode == "spinup":
        with open(args.pipeline_definition_file, "r") as handle:
            dicts = list(csv.DictReader(handle))
        aws.spinup(dicts, stage=args.stage, no_smoke=args.no_smoke, testing=args.testing)
    elif mode == "spindown":
        with open(args.pipeline_definition_file, "r") as handle:
            names = [row["hostclass"] for row in csv.DictReader(handle)]
        aws.spindown(names)
    elif mode == "spindownandup":
        with open(args.pipeline_definition_file, "r") as handle:
            dicts = list(csv.DictReader(handle))
        aws.spindown([d["hostclass"] for d in dicts])
        aws.spinup(dicts)
    elif mode == "gethostclassoption":
        try:
            print(aws.hostclass_option(args.hostclass, args.option))
        except NoOptionError:
            print("Hostclass %s doesn't have option %s." % (args.hostclass, args.option))
    elif mode == "promoterunning":
        aws.promote_running_instances_to_prod(args.hours * 60 * 60)
def run():
    """Parses command line and dispatches the commands"""
    config = read_config()
    args = docopt(__doc__)
    configure_logging(args["--debug"])
    env = args["--environment"] or config.get("disco_aws", "default_environment")
    pipeline_definition = []
    if args["--pipeline"]:
        with open(args["--pipeline"], "r") as pipeline_file:
            pipeline_definition = list(csv.DictReader(pipeline_file))
    aws = DiscoAWS(config, env)
    # Prefer a dedicated test environment when one is configured.
    test_aws = DiscoAWS(config, config.get('test', 'env')) if config.has_option('test', 'env') else aws
    deploy = DiscoDeploy(
        aws, test_aws, DiscoBake(config, aws.connection),
        pipeline_definition=pipeline_definition,
        test_hostclass=aws.config('hostclass', 'test'),
        test_user=aws.config('user', 'test'),
        test_command=aws.config('command', 'test'),
        ami=args.get("--ami"),
        hostclass=args.get("--hostclass"),
        allow_any_hostclass=args["--allow-any-hostclass"])
    if args["test"]:
        deploy.test(dry_run=args["--dry-run"])
    elif args["update"]:
        deploy.update(dry_run=args["--dry-run"])
    elif args["list"]:
        missing = "-" if len(pipeline_definition) else ""
        # Resolve the selected filter to a flat list of AMIs, then print them
        # all in one uniform format.
        if args["--tested"]:
            amis = [ami for _hostclass, ami in deploy.get_latest_tested_amis().iteritems()]
        elif args["--untested"]:
            amis = [ami for _hostclass, ami in deploy.get_latest_untested_amis().iteritems()]
        elif args["--failed"]:
            amis = [ami for _hostclass, ami in deploy.get_latest_failed_amis().iteritems()]
        elif args["--testable"]:
            amis = deploy.get_test_amis()
        elif args["--updatable"]:
            amis = deploy.get_update_amis()
        elif args["--failures"]:
            amis = deploy.get_failed_amis()
        else:
            amis = []
        for ami in amis:
            print("{} {:40} {}".format(
                ami.id, ami.name.split()[0],
                deploy.get_integration_test(ami.name.split()[0]) or missing))
        if args["--failures"]:
            # Non-zero exit signals that failed AMIs exist.
            sys.exit(1 if len(amis) else 0)