def run():
    """Entry point: parse the command line and dispatch the ElastiCache subcommand."""
    opts = docopt(__doc__)
    configure_logging(opts["--debug"])
    cfg = read_config()
    environment = opts.get("--env") or cfg.get("disco_aws", "default_environment")
    environment_vpc = DiscoVPC.fetch_environment(environment_name=environment)
    if not environment_vpc:
        print("Environment does not exist: {}".format(environment))
        sys.exit(1)
    cache = DiscoElastiCache(environment_vpc, aws=DiscoAWS(cfg, environment))
    if opts['list']:
        for cluster_info in cache.list():
            # Node count is only known once the cluster is available.
            if cluster_info['Status'] == 'available':
                node_count = len(cluster_info['NodeGroups'][0]['NodeGroupMembers'])
            else:
                node_count = 'N/A'
            print("{0:<25} {1:5} {2:>5}".format(
                cluster_info['Description'], cluster_info['Status'], node_count))
    elif opts['update']:
        if opts['--cluster']:
            cache.update(opts['--cluster'])
        else:
            cache.update_all()
    elif opts['delete']:
        cache.delete(opts['--cluster'], wait=opts['--wait'])
def test__arglist__expected_behavior(self, configparser_constructor):
    "Unnamed argument list for read_config works"
    configparser_constructor.return_value = fake_parser = Mock()
    result = disco_config.read_config("foo", "bar")
    self.assertIs(result, fake_parser)
    fake_parser.read.assert_called_once_with("FAKE_CONFIG_DIR/foo/bar")
def run():
    """Parses command line and dispatches the commands"""
    config = read_config()
    parser = get_parser()
    args = parser.parse_args()
    configure_logging(args.debug, args.silent)
    # Default to the environment's first credential bucket when --bucket is absent.
    bucket_name = args.bucket or DiscoVPC.get_credential_buckets_from_env_name(config, args.env)[0]
    s3_bucket = DiscoS3Bucket(bucket_name)
    if args.mode == "list":
        print("\n".join(s3_bucket.listkeys(args.prefix)))
    elif args.mode == "get":
        print(s3_bucket.get_key(args.key_name))
    elif args.mode == "set":
        use_password = args.key_password
        key_value = args.key_value
        if use_password:
            key_value = getpass.getpass()
        elif key_value == "-":
            # "-" means: read the value from stdin.
            key_value = sys.stdin.read()
        s3_bucket.set_key(args.key_name, key_value)
    elif args.mode == "delete":
        s3_bucket.delete_key(args.key_name)
    elif args.mode == "setfile":
        # FIX: previously this branch first fetched the key's contents into an
        # unused local (an extra S3 GET whose result was discarded) before
        # downloading. Download straight to the target file instead.
        s3_bucket.get_contents_to_file(args.key_name, args.file_name)
def test__arg_combo__named_arg_last(self, configparser_constructor):
    "Combined keyword and listed args for read_config work"
    configparser_constructor.return_value = fake_parser = Mock()
    result = disco_config.read_config("foo", "bar", config_file="baz.ini")
    self.assertIs(result, fake_parser)
    fake_parser.read.assert_called_once_with("FAKE_CONFIG_DIR/foo/bar/baz.ini")
def __init__(self, args):
    """Record parsed docopt options, load config, and set up logging."""
    self.args = args
    self.config = read_config()
    # Fall back to the configured default environment when --env is absent.
    self.env = args["--env"] or self.config.get("disco_aws", "default_environment")
    self.user = args.get("--user")
    self.pick_instance = args['--first']
    configure_logging(args["--debug"])
def test__no_arg__default_behavior(self, configparser_constructor):
    "Default argument for read_config works"
    configparser_constructor.return_value = fake_parser = Mock()
    result = disco_config.read_config()
    self.assertIs(result, fake_parser)
    fake_parser.read.assert_called_once_with("FAKE_CONFIG_DIR/disco_aws.ini")
def test__named_arg__expected_behavior(self, configparser_constructor):
    "Keyword argument for read_config works"
    configparser_constructor.return_value = fake_parser = Mock()
    result = disco_config.read_config(config_file="Foobar")
    self.assertIs(result, fake_parser)
    fake_parser.read.assert_called_once_with("FAKE_CONFIG_DIR/Foobar")
def run():
    """Parses command line and dispatches the commands"""
    config = read_config()
    parser = get_parser()
    args = parser.parse_args()
    configure_logging(args.debug, args.silent)
    # Default to the environment's first credential bucket when --bucket is absent.
    bucket_name = args.bucket or DiscoVPC.get_credential_buckets_from_env_name(
        config, args.env)[0]
    s3_bucket = DiscoS3Bucket(bucket_name)
    if args.mode == "list":
        print("\n".join(s3_bucket.listkeys(args.prefix)))
    elif args.mode == "get":
        print(s3_bucket.get_key(args.key_name))
    elif args.mode == "set":
        use_password = args.key_password
        key_value = args.key_value
        if use_password:
            key_value = getpass.getpass()
        elif key_value == "-":
            # "-" means: read the value from stdin.
            key_value = sys.stdin.read()
        s3_bucket.set_key(args.key_name, key_value)
    elif args.mode == "delete":
        s3_bucket.delete_key(args.key_name)
    elif args.mode == "setfile":
        # FIX: previously this branch first fetched the key's contents into an
        # unused local (an extra S3 GET whose result was discarded) before
        # downloading. Download straight to the target file instead.
        s3_bucket.get_contents_to_file(args.key_name, args.file_name)
def test__arglist__expected_behavior(self, configparser_constructor):
    "Unnamed argument list for read_config works"
    configparser_constructor.return_value = fake_parser = Mock()
    result = disco_config.read_config("foo", "bar")
    self.assertIs(result, fake_parser)
    fake_parser.read.assert_called_once_with("FAKE_CONFIG_DIR/foo/bar")
def test__arg_combo__named_arg_last(self, configparser_constructor):
    "Combined keyword and listed args for read_config work"
    configparser_constructor.return_value = fake_parser = Mock()
    result = disco_config.read_config("foo", "bar", config_file="baz.ini")
    self.assertIs(result, fake_parser)
    fake_parser.read.assert_called_once_with("FAKE_CONFIG_DIR/foo/bar/baz.ini")
def test__named_arg__expected_behavior(self, configparser_constructor):
    "Keyword argument for read_config works"
    configparser_constructor.return_value = fake_parser = Mock()
    result = disco_config.read_config(config_file="Foobar")
    self.assertIs(result, fake_parser)
    fake_parser.read.assert_called_once_with("FAKE_CONFIG_DIR/Foobar")
def test__no_arg__default_behavior(self, configparser_constructor):
    "Default argument for read_config works"
    configparser_constructor.return_value = fake_parser = Mock()
    result = disco_config.read_config()
    self.assertIs(result, fake_parser)
    fake_parser.read.assert_called_once_with("FAKE_CONFIG_DIR/disco_aws.ini")
def run():
    """Entry point: parse the command line and dispatch the ELB subcommand."""
    opts = docopt(__doc__)
    configure_logging(opts["--debug"])
    cfg = read_config()
    environment = opts.get("--env") or cfg.get("disco_aws", "default_environment")
    environment_vpc = DiscoVPC.fetch_environment(environment_name=environment)
    if not environment_vpc:
        print("Environment does not exist: {}".format(environment))
        sys.exit(1)
    if opts['list']:
        row_template = "{0:<50} {1:33} {2}"
        # The header row is written to stderr; data rows go to stdout.
        print(row_template.format("ELB Name", "Availability Zones", "ELB Id"), file=sys.stderr)
        for record in sorted(DiscoELB(environment_vpc).list_for_display()):
            print(row_template.format(
                record['elb_name'], record['availability_zones'], record["elb_id"]))
    elif opts['update']:
        DiscoAWS(cfg, environment).update_elb(opts['--hostclass'])
def run():
    """Entry point: parse the command line and dispatch the ElastiCache subcommand."""
    opts = docopt(__doc__)
    configure_logging(opts["--debug"])
    cfg = read_config()
    environment = opts.get("--env") or cfg.get("disco_aws", "default_environment")
    environment_vpc = DiscoVPC.fetch_environment(environment_name=environment)
    if not environment_vpc:
        print("Environment does not exist: {}".format(environment))
        sys.exit(1)
    cache = DiscoElastiCache(environment_vpc, aws=DiscoAWS(cfg, environment))
    if opts['list']:
        for cluster_info in cache.list():
            # Node count is only known once the cluster is available.
            if cluster_info['Status'] == 'available':
                node_count = len(cluster_info['NodeGroups'][0]['NodeGroupMembers'])
            else:
                node_count = 'N/A'
            print("{0:<25} {1:5} {2:>5}".format(
                cluster_info['Description'], cluster_info['Status'], node_count))
    elif opts['update']:
        if opts['--cluster']:
            cache.update(opts['--cluster'])
        else:
            cache.update_all()
    elif opts['delete']:
        cache.delete(opts['--cluster'], wait=opts['--wait'])
def __init__(self, args):
    """Record parsed docopt options, load config, and set up logging."""
    self.args = args
    self.config = read_config()
    # Fall back to the configured default environment when --env is absent.
    self.env = args["--env"] or self.config.get("disco_aws", "default_environment")
    self.user = args.get("--user")
    self.pick_instance = args['--first']
    configure_logging(args["--debug"])
def run():
    """Parses command line and dispatches the commands"""
    config = read_config()
    parser = get_parser()
    args = parser.parse_args()
    configure_logging(args.debug)
    # Fall back to the configured default environment when --env is absent.
    environment_name = args.env or config.get("disco_aws", "default_environment")
    aws = DiscoAWS(config, environment_name=environment_name)
    if args.mode == "create":
        # Look up the hostclass's product line, defaulting to 'unknown'.
        product_line = aws.hostclass_option_default(args.hostclass, 'product_line', 'unknown')
        aws.disco_storage.create_ebs_snapshot(args.hostclass, args.size, product_line,
                                              not args.unencrypted)
    elif args.mode == "list":
        # One row per snapshot for the requested hostclasses.
        for snapshot in aws.disco_storage.get_snapshots(args.hostclasses):
            print("{0:26} {1:13} {2:9} {3} {4:4}".format(
                snapshot.tags['hostclass'], snapshot.id, snapshot.status,
                snapshot.start_time, snapshot.volume_size))
    elif args.mode == "cleanup":
        # Prune EBS snapshots; `args.keep` controls retention
        # (exact semantics live in cleanup_ebs_snapshots).
        aws.disco_storage.cleanup_ebs_snapshots(args.keep)
    elif args.mode == "capture":
        if args.volume_id:
            # Snapshot a specific EBS volume, with optional "key:value" tags.
            extra_snapshot_tags = None
            if args.tags:
                extra_snapshot_tags = dict(tag_item.split(':') for tag_item in args.tags)
            snapshot_id = aws.disco_storage.take_snapshot(args.volume_id,
                                                          snapshot_tags=extra_snapshot_tags)
            print("Successfully created snapshot: {0}".format(snapshot_id))
        else:
            # No volume id: run the snapshot script remotely on each selected instance.
            instances = instances_from_args(aws, args)
            if not instances:
                print("No instances found")
            for instance in instances:
                # NOTE(review): the remote user looks redacted ("******") —
                # confirm the intended value.
                return_code, output = aws.remotecmd(
                    instance, ["sudo /opt/wgen/bin/take_snapshot.sh"], user="******")
                if return_code:
                    raise Exception("Failed to snapshot instance {0}:\n {1}\n".format(
                        instance, output))
                print("Successfully snapshotted {0}".format(instance))
    elif args.mode == "delete":
        for snapshot_id in args.snapshots:
            aws.disco_storage.delete_snapshot(snapshot_id)
    elif args.mode == "update":
        # Pick the snapshot by explicit id, or the latest for the hostclass,
        # then point the hostclass's group at it.
        if args.snapshot_id:
            snapshot = aws.disco_storage.get_snapshot_from_id(args.snapshot_id)
        else:
            snapshot = aws.disco_storage.get_latest_snapshot(args.hostclass)
        aws.discogroup.update_snapshot(snapshot.id, snapshot.volume_size,
                                       hostclass=args.hostclass)
def run():
    """Entry point: parse the command line and dispatch the app-auth subcommand."""
    cfg = read_config()
    options = get_parser().parse_args()
    configure_logging(options.debug)
    bucket = options.bucket or DiscoVPC.get_credential_buckets_from_env_name(cfg, options.env)[0]
    auth_dir = options.dir or None
    # Fall back to the bucket-name suffix when no environment was supplied.
    environment = options.env or bucket.split('.')[-1]
    if options.mode == "update":
        DiscoAppAuth(environment, bucket, auth_dir).update(options.force)
def run():
    """Entry point: report and (unless dry-run) terminate chaos-selected instances."""
    cfg = read_config()
    options = get_parser().parse_args()
    configure_logging(options.debug)
    environment = options.env or cfg.get("disco_aws", "default_environment")
    chaos_runner = DiscoChaos(cfg, environment, options.level, options.retainage)
    doomed = chaos_runner.get_instances_to_terminate()
    for victim in doomed:
        print("{0:20} {1}".format(victim.tags.get('hostclass'), victim.id))
    # Only actually terminate when this is not a dry run.
    if not options.dryrun:
        chaos_runner.terminate(doomed)
def run():
    """Entry point: report and (unless dry-run) terminate chaos-selected instances."""
    cfg = read_config()
    options = get_parser().parse_args()
    configure_logging(options.debug)
    environment = options.env or cfg.get("disco_aws", "default_environment")
    chaos_runner = DiscoChaos(cfg, environment, options.level, options.retainage)
    doomed = chaos_runner.get_instances_to_terminate()
    for victim in doomed:
        print("{0:20} {1}".format(victim.tags.get('hostclass'), victim.id))
    # Only actually terminate when this is not a dry run.
    if not options.dryrun:
        chaos_runner.terminate(doomed)
def run():
    """Entry point: parse the command line and dispatch the app-auth subcommand."""
    cfg = read_config()
    options = get_parser().parse_args()
    configure_logging(options.debug)
    bucket = options.bucket or DiscoVPC.get_credential_buckets_from_env_name(cfg, options.env)[0]
    auth_dir = options.dir or None
    # Fall back to the bucket-name suffix when no environment was supplied.
    environment = options.env or bucket.split('.')[-1]
    if options.mode == "update":
        DiscoAppAuth(environment, bucket, auth_dir).update(options.force)
def run():
    """Parses command line and dispatches the commands"""
    config = read_config()
    parser = get_parser()
    args = parser.parse_args()
    configure_logging(args.debug)
    # Fall back to the configured default environment when --env is absent.
    environment_name = args.env or config.get("disco_aws", "default_environment")
    aws = DiscoAWS(config, environment_name=environment_name)
    if args.mode == "create":
        # Look up the hostclass's product line, defaulting to 'unknown'.
        product_line = aws.hostclass_option_default(args.hostclass, 'product_line', 'unknown')
        aws.disco_storage.create_ebs_snapshot(args.hostclass, args.size, product_line,
                                              not args.unencrypted)
    elif args.mode == "list":
        # One row per snapshot for the requested hostclasses.
        for snapshot in aws.disco_storage.get_snapshots(args.hostclasses):
            print("{0:26} {1:13} {2:9} {3} {4:4}".format(
                snapshot.tags['hostclass'], snapshot.id, snapshot.status,
                snapshot.start_time, snapshot.volume_size))
    elif args.mode == "cleanup":
        # Prune EBS snapshots; `args.keep` controls retention
        # (exact semantics live in cleanup_ebs_snapshots).
        aws.disco_storage.cleanup_ebs_snapshots(args.keep)
    elif args.mode == "capture":
        if args.volume_id:
            # Snapshot a specific EBS volume, with optional "key:value" tags.
            extra_snapshot_tags = None
            if args.tags:
                extra_snapshot_tags = dict(tag_item.split(':') for tag_item in args.tags)
            snapshot_id = aws.disco_storage.take_snapshot(args.volume_id,
                                                          snapshot_tags=extra_snapshot_tags)
            print("Successfully created snapshot: {0}".format(snapshot_id))
        else:
            # No volume id: run the snapshot script remotely on each selected instance.
            instances = instances_from_args(aws, args)
            if not instances:
                print("No instances found")
            for instance in instances:
                # NOTE(review): the remote user looks redacted ("******") —
                # confirm the intended value.
                return_code, output = aws.remotecmd(
                    instance, ["sudo /opt/wgen/bin/take_snapshot.sh"], user="******")
                if return_code:
                    raise Exception("Failed to snapshot instance {0}:\n {1}\n".format(
                        instance, output))
                print("Successfully snapshotted {0}".format(instance))
    elif args.mode == "delete":
        for snapshot_id in args.snapshots:
            aws.disco_storage.delete_snapshot(snapshot_id)
    elif args.mode == "update":
        # Pick the snapshot by explicit id, or the latest for the hostclass,
        # then point the hostclass's group at it.
        if args.snapshot_id:
            snapshot = aws.disco_storage.get_snapshot_from_id(args.snapshot_id)
        else:
            snapshot = aws.disco_storage.get_latest_snapshot(args.hostclass)
        aws.discogroup.update_snapshot(snapshot.id, snapshot.volume_size,
                                       hostclass=args.hostclass)
def run():
    """Parses command line and dispatches the commands"""
    config = read_config()
    args = docopt(__doc__)
    configure_logging(args["--debug"])
    if args["hashpassword"]:
        # Standalone mode: print a password hash and exit immediately.
        print(password_hash())
        sys.exit(0)
    # Default to the environment's first credential bucket when --bucket is absent.
    bucket_name = args.get(
        "--bucket") or DiscoVPC.get_credential_buckets_from_env_name(
            config, args["--env"])[0]
    s3_accounts = S3AccountBackend(DiscoS3Bucket(bucket_name))
    if args["install"]:
        s3_accounts.install_all()
    elif args["adduser"]:
        # Default to the current OS user when --name is not given.
        username = args["--name"] or os.environ.get("USER")
        user_template = s3_accounts.new_user_config(
            password_hash(args["--password"]))
        group_config = s3_accounts.new_group_config()
        user_config = s3_accounts.edit_account_config(user_template)
        s3_accounts.add_account(username, user_config)
        # NOTE(review): the freshly created group config is also stored under
        # the username — confirm this pairing is intentional.
        s3_accounts.add_account(username, group_config)
    elif args["addgroup"]:
        group_config = s3_accounts.new_group_config()
        s3_accounts.add_account(args["--name"], group_config)
    elif args["edituser"]:
        username = args["--name"] or os.environ.get("USER")
        user_config = s3_accounts.get_user_config(username)
        # Only pass "active" through when the flag was actually supplied.
        kwargs = {"active": args["--active"]} if args["--active"] else {}
        user_config = s3_accounts.edit_account_config(user_config, **kwargs)
        s3_accounts.add_account(username, user_config)
    elif args["editgroup"]:
        # there is nothing to edit for a group.. but..
        group_config = s3_accounts.get_group_config(args["--name"])
        group_config = s3_accounts.edit_account_config(group_config)
        s3_accounts.add_account(args["--name"], group_config)
    elif args["listgroups"]:
        print("\n".join(s3_accounts.list_groups()))
    elif args["listusers"]:
        print("\n".join(s3_accounts.list_users()))
def run():
    """Parses command line and dispatches the commands"""
    config = read_config()
    args = docopt(__doc__)
    configure_logging(args["--debug"])
    if args["hashpassword"]:
        # Standalone mode: print a password hash and exit immediately.
        print(password_hash())
        sys.exit(0)
    # Default to the environment's first credential bucket when --bucket is absent.
    bucket_name = args.get("--bucket") or DiscoVPC.get_credential_buckets_from_env_name(
        config, args["--env"])[0]
    s3_accounts = S3AccountBackend(DiscoS3Bucket(bucket_name))
    if args["install"]:
        s3_accounts.install_all()
    elif args["adduser"]:
        # Default to the current OS user when --name is not given.
        username = args["--name"] or os.environ.get("USER")
        user_template = s3_accounts.new_user_config(password_hash(args["--password"]))
        group_config = s3_accounts.new_group_config()
        user_config = s3_accounts.edit_account_config(user_template)
        s3_accounts.add_account(username, user_config)
        # NOTE(review): the freshly created group config is also stored under
        # the username — confirm this pairing is intentional.
        s3_accounts.add_account(username, group_config)
    elif args["addgroup"]:
        group_config = s3_accounts.new_group_config()
        s3_accounts.add_account(args["--name"], group_config)
    elif args["edituser"]:
        username = args["--name"] or os.environ.get("USER")
        user_config = s3_accounts.get_user_config(username)
        # Only pass "active" through when the flag was actually supplied.
        kwargs = {"active": args["--active"]} if args["--active"] else {}
        user_config = s3_accounts.edit_account_config(user_config, **kwargs)
        s3_accounts.add_account(username, user_config)
    elif args["editgroup"]:
        # there is nothing to edit for a group.. but..
        group_config = s3_accounts.get_group_config(args["--name"])
        group_config = s3_accounts.edit_account_config(group_config)
        s3_accounts.add_account(args["--name"], group_config)
    elif args["listgroups"]:
        print("\n".join(s3_accounts.list_groups()))
    elif args["listusers"]:
        print("\n".join(s3_accounts.list_users()))
def run():
    """ Parses command line and dispatches the commands disco_dynamodb.py list """
    cfg = read_config()
    options = get_parser().parse_args()
    # Use the configured default environment unless a truthy env was supplied
    # (getattr tolerates subcommands that define no `env` argument).
    environment = getattr(options, "env", None) or cfg.get("disco_aws", "default_environment")
    dynamodb = DiscoDynamoDB(environment_name=environment)
    mode = options.mode
    if mode == "list":
        list_tables(dynamodb, options.header)
    elif mode == "create":
        create_table(dynamodb, options.config, options.wait)
    elif mode == "update":
        update_table(dynamodb, options.table, options.config, options.wait)
    elif mode == "delete":
        delete_table(dynamodb, options.table, options.wait)
    elif mode == "describe":
        describe_table(dynamodb, options.table)
def run():
    """Parses command line and dispatches the commands"""
    config = read_config()
    parser = get_parser()
    args = parser.parse_args()
    configure_logging(args.debug)
    # vars(args).get('env') tolerates subcommands that define no `env` argument.
    environment_name = vars(args).get('env') or config.get(
        "disco_aws", "default_environment")
    vpc = DiscoVPC.fetch_environment(environment_name=environment_name)
    if not vpc:
        print("Environment does not exist: {}".format(environment_name))
        sys.exit(1)
    rds = vpc.rds
    if args.mode == "list":
        instances = rds.get_db_instances()
        for instance in instances:
            line = "{:<20} {:>6}GB {:<12}".format(
                instance["DBInstanceIdentifier"], instance["AllocatedStorage"],
                instance["DBInstanceStatus"])
            if args.url:
                # Endpoint can be falsy; print "-" in that case.
                endpoint = instance["Endpoint"]
                url = "{}:{}".format(endpoint["Address"], endpoint["Port"]) if endpoint else "-"
                line += " {}".format(url)
            print(line)
    elif args.mode == "update":
        # A specific cluster id limits the update; otherwise update all
        # clusters in the VPC (optionally in parallel).
        if args.cluster:
            rds.update_cluster_by_id(args.cluster)
        else:
            rds.update_all_clusters_in_vpc(parallel=args.parallel)
    elif args.mode == "delete":
        rds.delete_db_instance(args.cluster, skip_final_snapshot=args.skip_final_snapshot)
    elif args.mode == "cleanup_snapshots":
        rds.cleanup_snapshots(args.days)
    elif args.mode == "clone":
        rds.clone(args.source_env, args.source_db)
def run():
    """Entry point: parse the command line and dispatch the ELB subcommand."""
    opts = docopt(__doc__)
    configure_logging(opts["--debug"])
    cfg = read_config()
    environment = opts.get("--env") or cfg.get("disco_aws", "default_environment")
    environment_vpc = DiscoVPC.fetch_environment(environment_name=environment)
    if not environment_vpc:
        print("Environment does not exist: {}".format(environment))
        sys.exit(1)
    if opts['list']:
        row_template = "{0:<50} {1:33} {2}"
        # The header row is written to stderr; data rows go to stdout.
        print(row_template.format("ELB Name", "Availability Zones", "ELB Id"), file=sys.stderr)
        for record in sorted(DiscoELB(environment_vpc).list_for_display()):
            print(row_template.format(
                record['elb_name'], record['availability_zones'], record["elb_id"]))
    elif opts['update']:
        DiscoAWS(cfg, environment).update_elb(opts['--hostclass'])
def run():
    """Entry point: parse the command line and dispatch the log-metrics subcommand."""
    options = docopt(__doc__)
    configure_logging(options["--debug"])
    cfg = read_config()
    environment = options.get("--env") or cfg.get("disco_aws", "default_environment")
    log_metrics = DiscoLogMetrics(environment)
    if options["update"]:
        log_metrics.update(options['--hostclass'])
    elif options["delete"]:
        log_metrics.delete_metrics(options['--hostclass'])
    elif options["list-metrics"]:
        # Each filter may define several metric transformations; print them all.
        for filter_entry in log_metrics.list_metric_filters(options['--hostclass']):
            for transformation in filter_entry['metricTransformations']:
                print("{0:<40} {1:10}".format(
                    transformation['metricNamespace'], transformation['metricName']))
    elif options["list-groups"]:
        for log_group in log_metrics.list_log_groups(options['--hostclass']):
            print("{0:<40} {1:10}".format(
                log_group['logGroupName'], format_bytes_to_mb(log_group['storedBytes'])))
def run(): """Parses command line and dispatches the commands""" config = read_config() parser = get_parser() args = parser.parse_args() configure_logging(args.debug) environment_name = vars(args).get('env') or config.get("disco_aws", "default_environment") vpc = DiscoVPC.fetch_environment(environment_name=environment_name) if not vpc: print("Environment does not exist: {}".format(environment_name)) sys.exit(1) rds = vpc.rds if args.mode == "list": instances = rds.get_db_instances() for instance in instances: line = "{:<20} {:>6}GB {:<12}".format( instance["DBInstanceIdentifier"], instance["AllocatedStorage"], instance["DBInstanceStatus"]) if args.url: endpoint = instance["Endpoint"] url = "{}:{}".format(endpoint["Address"], endpoint["Port"]) if endpoint else "-" line += " {}".format(url) print(line) elif args.mode == "update": if args.cluster: rds.update_cluster_by_id(args.cluster) else: rds.update_all_clusters_in_vpc(parallel=args.parallel) elif args.mode == "delete": rds.delete_db_instance(args.cluster, skip_final_snapshot=args.skip_final_snapshot) elif args.mode == "cleanup_snapshots": rds.cleanup_snapshots(args.days) elif args.mode == "clone": rds.clone(args.source_env, args.source_db)
def run():
    """Parses command line and dispatches the commands"""
    config = read_config()
    args = parse_arguments()
    configure_logging(args.debug)
    # Fall back to the configured default environment when --env is absent.
    environment_name = args.env or config.get("disco_aws", "default_environment")
    discogroup = DiscoGroup(environment_name)
    # Autoscaling group commands
    if args.mode == "listgroups":
        format_str = "{0} {1:21} {2:3} {3:3} {4:3} {5:3} {6:4} {7:10}"
        groups = discogroup.list_groups()
        # Header row is only printed in debug mode.
        # NOTE(review): header pads Name to 35+len(env) but rows pad to
        # 40+len(env) — confirm the intended column alignment.
        if args.debug:
            print(
                format_str.format(
                    "Name".ljust(35 + len(environment_name)),
                    "AMI", "min", "des", "max", "cnt", "type", "is_testing"
                )
            )
        for group in groups:
            print(
                format_str.format(
                    group['name'].ljust(40 + len(environment_name)),
                    group['image_id'],
                    group['min_size'],
                    group['desired_capacity'],
                    group['max_size'],
                    group['group_cnt'],
                    group['type'],
                    'y' if is_truthy(group['tags'].get('is_testing')) else 'n'
                )
            )
    elif args.mode == "cleangroups":
        discogroup.delete_groups()
    elif args.mode == "deletegroup":
        discogroup.delete_groups(hostclass=args.hostclass, group_name=args.name, force=args.force)
    # Launch Configuration commands
    elif args.mode == "listconfigs":
        # NOTE(review): this loop variable shadows the `config` read at the top
        # of the function (harmless here since config is not used afterwards).
        for config in discogroup.get_configs():
            print("{0:24} {1}".format(config.name, config.image_id))
    elif args.mode == "cleanconfigs":
        discogroup.clean_configs()
    elif args.mode == "deleteconfig":
        discogroup.delete_config(args.config)
    # Scaling policy commands
    elif args.mode == "listpolicies":
        policies = discogroup.list_policies(
            group_name=args.group_name,
            policy_types=args.policy_types,
            policy_names=args.policy_names
        )
        print_table(
            policies,
            headers=[
                'ASG', 'Name', 'Type', 'Adjustment Type', 'Scaling Adjustment',
                'Min Adjustment', 'Cooldown', 'Step Adjustments', 'Warmup', 'Alarms'
            ]
        )
    elif args.mode == "createpolicy":
        # Parse out the step adjustments, if provided.
        # Each step is a comma-separated list of key=value entries and only the
        # three AWS step-adjustment keys are accepted.
        if args.step_adjustments:
            allowed_keys = ['MetricIntervalLowerBound', 'MetricIntervalUpperBound',
                            'ScalingAdjustment']
            parsed_steps = []
            for step in args.step_adjustments:
                parsed_step = {}
                for entry in step.split(','):
                    key, value = entry.split('=', 1)
                    if key not in allowed_keys:
                        raise Exception(
                            'Unable to parse step {0}, key {1} not in {2}'.format(step, key,
                                                                                  allowed_keys)
                        )
                    parsed_step[key] = value
                parsed_steps.append(parsed_step)
        else:
            parsed_steps = []
        discogroup.create_policy(
            group_name=args.group_name,
            policy_name=args.policy_name,
            policy_type=args.policy_type,
            adjustment_type=args.adjustment_type,
            min_adjustment_magnitude=args.min_adjustment_magnitude,
            scaling_adjustment=args.scaling_adjustment,
            cooldown=args.cooldown,
            metric_aggregation_type=args.metric_aggregation_type,
            step_adjustments=parsed_steps,
            estimated_instance_warmup=args.estimated_instance_warmup
        )
    elif args.mode == "deletepolicy":
        discogroup.delete_policy(args.policy_name, args.group_name)
    # Always exit 0 after a successful dispatch.
    sys.exit(0)
def run():
    """Parses command line and dispatches the commands"""
    config = read_config()
    args = docopt(__doc__)
    configure_logging(args["--debug"])
    env = args["--environment"] or config.get("disco_aws", "default_environment")
    # None means "not specified"; otherwise coerce the flag value to a bool.
    force_deployable = None if args["--deployable"] is None else is_truthy(
        args["--deployable"])
    pipeline_definition = []
    if args["--pipeline"]:
        # The pipeline definition is a CSV file; each row becomes a dict.
        with open(args["--pipeline"], "r") as f:
            reader = csv.DictReader(f)
            pipeline_definition = [line for line in reader]
    aws = DiscoAWS(config, env)
    # Tests may run in a dedicated environment; default to the main one.
    if config.has_option('test', 'env'):
        test_env = config.get('test', 'env')
        test_aws = DiscoAWS(config, test_env)
    else:
        test_aws = aws
    bake = DiscoBake(config, aws.connection)
    # When both an AMI and a hostclass are given, they must agree.
    if args["--ami"] and args["--hostclass"]:
        image = bake.get_image(args["--ami"])
        if args["--hostclass"] != bake.ami_hostclass(image):
            logger.error('AMI %s does not belong to hostclass %s',
                         args["--ami"], args["--hostclass"])
            sys.exit(1)
    vpc = DiscoVPC.fetch_environment(environment_name=env)
    deploy = DiscoDeploy(
        aws, test_aws, bake, DiscoGroup(env), DiscoELB(vpc), DiscoSSM(environment_name=env),
        pipeline_definition=pipeline_definition,
        ami=args.get("--ami"),
        hostclass=args.get("--hostclass"),
        allow_any_hostclass=args["--allow-any-hostclass"])
    if args["test"]:
        try:
            deploy.test(dry_run=args["--dry-run"],
                        deployment_strategy=args["--strategy"],
                        ticket_id=args["--ticket"],
                        force_deployable=force_deployable)
        except RuntimeError as err:
            logger.error(str(err))
            sys.exit(1)
    elif args["update"]:
        try:
            deploy.update(dry_run=args["--dry-run"],
                          deployment_strategy=args["--strategy"],
                          ticket_id=args["--ticket"],
                          force_deployable=force_deployable)
        except RuntimeError as err:
            logger.error(str(err))
            sys.exit(1)
    elif args["list"]:
        # Placeholder shown when an AMI has no integration test configured.
        missing = "-" if pipeline_definition else ""
        # NOTE(review): `.iteritems()` is Python 2 only — confirm the target
        # interpreter before porting this module to Python 3.
        if args["--tested"]:
            for (_hostclass, ami) in deploy.get_latest_tested_amis().iteritems():
                print("{} {:40} {}".format(
                    ami.id, ami.name.split()[0],
                    deploy.get_integration_test(ami.name.split()[0]) or missing))
        elif args["--untested"]:
            for (_hostclass, ami) in deploy.get_latest_untested_amis().iteritems():
                print("{} {:40} {}".format(
                    ami.id, ami.name.split()[0],
                    deploy.get_integration_test(ami.name.split()[0]) or missing))
        elif args["--failed"]:
            for (_hostclass, ami) in deploy.get_latest_failed_amis().iteritems():
                print("{} {:40} {}".format(
                    ami.id, ami.name.split()[0],
                    deploy.get_integration_test(ami.name.split()[0]) or missing))
        elif args["--testable"]:
            for ami in deploy.get_test_amis():
                print("{} {:40} {}".format(
                    ami.id, ami.name.split()[0],
                    deploy.get_integration_test(ami.name.split()[0]) or missing))
        elif args["--updatable"]:
            for ami in deploy.get_update_amis():
                print("{} {:40} {}".format(
                    ami.id, ami.name.split()[0],
                    deploy.get_integration_test(ami.name.split()[0]) or missing))
        elif args["--failures"]:
            failures = deploy.get_failed_amis()
            for ami in failures:
                print("{} {:40} {}".format(
                    ami.id, ami.name.split()[0],
                    deploy.get_integration_test(ami.name.split()[0]) or missing))
            # Non-zero exit signals that failed AMIs exist.
            sys.exit(1 if failures else 0)
def run():
    """Parses command line and dispatches the commands"""
    config = read_config()
    parser = get_parser()
    args = parser.parse_args()
    configure_logging(args.debug)
    # Fall back to the configured default environment when --env is absent.
    environment_name = args.env or config.get("disco_aws", "default_environment")
    aws = DiscoAWS(config, environment_name=environment_name)
    if args.mode == "provision":
        # Build a one-entry pipeline describing the hostclass to spin up.
        hostclass_dicts = [{
            "sequence": 1,
            "hostclass": args.hostclass,
            "instance_type": args.instance_type,
            "extra_space": args.extra_space,
            "extra_disk": args.extra_disk,
            "iops": args.iops,
            "smoke_test": "no" if args.no_smoke else "yes",
            "ami": args.ami,
            "min_size": args.min_size,
            "desired_size": args.desired_size,
            "max_size": args.max_size,
            "chaos": "no" if args.no_chaos else None,
            "spotinst": args.spotinst,
            "spotinst_reserve": args.spotinst_reserve
        }]
        aws.spinup(hostclass_dicts, testing=args.testing)
    elif args.mode == "listhosts":
        instances = aws.instances_from_hostclass(
            args.hostclass) if args.hostclass else aws.instances()
        # Hide terminated instances, then sort by state/hostclass/hostname.
        instances_filtered = [i for i in instances if i.state != u"terminated"]
        instances_sorted = sorted(instances_filtered,
                                  key=lambda i: (i.state, i.tags.get("hostclass", "-"),
                                                 i.tags.get("hostname", "-")))
        instance_to_private_ip = {
            i.id: get_preferred_private_ip(i)
            for i in instances_sorted
        }
        most = args.all or args.most
        # AMI/bake data is only needed for the age and uptime columns.
        if args.ami_age or args.uptime or most:
            bake = DiscoBake(config, aws.connection)
            ami_dict = bake.list_amis_by_instance(instances)
            now = datetime.utcnow()
        for instance in instances_sorted:
            # Base columns: id, hostclass, public IP (private as fallback).
            line = u"{0} {1:<30} {2:<15}".format(
                instance.id, instance.tags.get("hostclass", "-"),
                instance.ip_address or instance_to_private_ip[instance.id])
            # Each flag (or --most/--all) appends an optional column.
            if args.state or most:
                line += u" {0:<10}".format(instance.state)
            if args.hostname or most:
                line += u" {0:<1}".format(
                    "-" if instance.tags.get("hostname") is None else "y")
            if args.owner or most:
                line += u" {0:<11}".format(instance.tags.get("owner", u"-"))
            if args.instance_type or most:
                line += u" {0:<10}".format(instance.instance_type)
            if args.ami or most:
                line += u" {0:<12}".format(instance.image_id)
            if args.smoke or most:
                line += u" {0:<1}".format(
                    "-" if instance.tags.get("smoketest") is None else "y")
            if args.ami_age or most:
                creation_time = bake.get_ami_creation_time(
                    ami_dict.get(instance.id))
                line += u" {0:<4}".format(
                    DiscoBake.time_diff_in_hours(now, creation_time))
            if args.uptime or most:
                launch_time = dateutil_parser.parse(instance.launch_time)
                now_with_tz = now.replace(
                    tzinfo=launch_time.tzinfo)  # use a timezone-aware `now`
                line += u" {0:<3}".format(
                    DiscoBake.time_diff_in_hours(now_with_tz, launch_time))
            if args.private_ip or args.all:
                line += u" {0:<16}".format(instance_to_private_ip[instance.id])
            if args.availability_zone or args.all:
                line += u" {0:<12}".format(instance.placement)
            if args.productline or args.all:
                productline = instance.tags.get("productline", u"unknown")
                line += u" {0:<15}".format(
                    productline if productline != u"unknown" else u"-")
            if args.securitygroup or args.all:
                line += u" {0:15}".format(instance.groups[0].name)
            print(line)
    elif args.mode == "terminate":
        instances = instances_from_args(aws, args)
        terminated_instances = aws.terminate(instances)
        print("Terminated: {0}".format(",".join(
            [str(inst) for inst in terminated_instances])))
    elif args.mode == "stop":
        instances = instances_from_args(aws, args)
        stopped_instances = aws.stop(instances)
        print("Stopped: {0}".format(",".join(
            [str(inst) for inst in stopped_instances])))
    elif args.mode == "exec":
        # Run a command on every selected instance; exit with the last
        # non-zero remote exit code (0 if all succeeded).
        instances = instances_from_args(aws, args)
        exit_code = 0
        for instance in instances:
            _code, _stdout = aws.remotecmd(instance, [args.command],
                                           user=args.user,
                                           nothrow=True)
            sys.stdout.write(_stdout)
            exit_code = _code if _code else exit_code
        sys.exit(exit_code)
    elif args.mode == "exec-ssm":
        # Run an SSM document against the selected instances.
        ssm = DiscoSSM(environment_name)
        if args.parameters:
            parsed_parameters = parse_ssm_parameters(args.parameters)
        else:
            parsed_parameters = None
        instances = [
            instance.id for instance in instances_from_args(aws, args)
        ]
        if ssm.execute(instances, args.document,
                       parameters=parsed_parameters, comment=args.comment):
            sys.exit(0)
        else:
            sys.exit(1)
    elif args.mode == "isready":
        # Smoke-test each instance once; exit 0 only if every one is ready.
        instances = instances_from_args(aws, args)
        if not instances:
            print("No instances found")
        ready_count = 0
        for instance in instances:
            name = "{0} {1}".format(instance.tags.get("hostname"), instance.id)
            print("Checking {0}...".format(name))
            try:
                aws.smoketest_once(instance)
                print("...{0} is ready".format(name))
                ready_count += 1
            except SmokeTestError:
                print("..{0} failed smoke test".format(name))
            except TimeoutError:
                print("...{0} is NOT ready".format(name))
        sys.exit(0 if ready_count == len(instances) else 1)
    elif args.mode == "tag":
        # Replace (or, without a value, just remove) the tag on each instance.
        for instance in aws.instances(instance_ids=args.instances):
            instance.remove_tag(args.key)
            if args.value:
                instance.add_tag(args.key, args.value)
    elif args.mode == "spinup":
        hostclass_dicts = read_pipeline_file(args.pipeline_definition_file)
        aws.spinup(hostclass_dicts,
                   stage=args.stage,
                   no_smoke=args.no_smoke,
                   testing=args.testing)
    elif args.mode == "spindown":
        hostclasses = [
            line["hostclass"]
            for line in read_pipeline_file(args.pipeline_definition_file)
        ]
        aws.spindown(hostclasses)
    elif args.mode == "spindownandup":
        hostclass_dicts = read_pipeline_file(args.pipeline_definition_file)
        hostclasses = [d["hostclass"] for d in hostclass_dicts]
        aws.spindown(hostclasses)
        aws.spinup(hostclass_dicts)
    elif args.mode == "gethostclassoption":
        try:
            print(aws.hostclass_option(args.hostclass, args.option))
        except NoOptionError:
            print("Hostclass %s doesn't have option %s." % (args.hostclass,
                                                            args.option))
    elif args.mode == "promoterunning":
        # args.hours is converted to seconds for the promotion API.
        aws.promote_running_instances_to_prod(args.hours * 60 * 60)
def run():
    """Parse the command line and dispatch the requested disco_aws command.

    Reads the project configuration, builds a DiscoAWS facade for the
    selected environment, then executes exactly one sub-command chosen by
    ``args.mode``. Several modes terminate the process via ``sys.exit``
    with a status reflecting success or failure.
    """
    config = read_config()
    parser = get_parser()
    args = parser.parse_args()
    configure_logging(args.debug)
    # Fall back to the configured default environment when --env is absent.
    environment_name = args.env or config.get("disco_aws", "default_environment")
    aws = DiscoAWS(config, environment_name=environment_name)
    if args.mode == "provision":
        # Build a one-entry pipeline definition from the CLI flags.
        hostclass_dicts = [{
            "sequence": 1,
            "hostclass": args.hostclass,
            "instance_type": args.instance_type,
            "extra_space": args.extra_space,
            "extra_disk": args.extra_disk,
            "iops": args.iops,
            "smoke_test": "no" if args.no_smoke else "yes",
            "ami": args.ami,
            "min_size": args.min_size,
            "desired_size": args.desired_size,
            "max_size": args.max_size,
            "chaos": "no" if args.no_chaos else None,
            "spotinst": args.spotinst,
            "spotinst_reserve": args.spotinst_reserve
        }]
        aws.spinup(hostclass_dicts, testing=args.testing)
    elif args.mode == "listhosts":
        instances = aws.instances_from_hostclass(args.hostclass) if args.hostclass else aws.instances()
        instances_filtered = [i for i in instances if i.state != u"terminated"]
        instances_sorted = sorted(instances_filtered,
                                  key=lambda i: (i.state, i.tags.get("hostclass", "-"),
                                                 i.tags.get("hostname", "-")))
        instance_to_private_ip = {i.id: get_preferred_private_ip(i) for i in instances_sorted}
        most = args.all or args.most
        # AMI/uptime data is comparatively expensive to fetch; only do so
        # when a column that needs it was requested.
        if args.ami_age or args.uptime or most:
            bake = DiscoBake(config, aws.connection)
            ami_dict = bake.list_amis_by_instance(instances)
            now = datetime.utcnow()
        for instance in instances_sorted:
            line = u"{0} {1:<30} {2:<15}".format(
                instance.id, instance.tags.get("hostclass", "-"),
                instance.ip_address or instance_to_private_ip[instance.id])
            if args.state or most:
                line += u" {0:<10}".format(instance.state)
            if args.hostname or most:
                line += u" {0:<1}".format("-" if instance.tags.get("hostname") is None else "y")
            if args.owner or most:
                line += u" {0:<11}".format(instance.tags.get("owner", u"-"))
            if args.instance_type or most:
                line += u" {0:<10}".format(instance.instance_type)
            if args.ami or most:
                line += u" {0:<12}".format(instance.image_id)
            if args.smoke or most:
                line += u" {0:<1}".format("-" if instance.tags.get("smoketest") is None else "y")
            if args.ami_age or most:
                creation_time = bake.get_ami_creation_time(ami_dict.get(instance.id))
                line += u" {0:<4}".format(DiscoBake.time_diff_in_hours(now, creation_time))
            if args.uptime or most:
                launch_time = dateutil_parser.parse(instance.launch_time)
                now_with_tz = now.replace(tzinfo=launch_time.tzinfo)  # use a timezone-aware `now`
                line += u" {0:<3}".format(DiscoBake.time_diff_in_hours(now_with_tz, launch_time))
            if args.private_ip or args.all:
                line += u" {0:<16}".format(instance_to_private_ip[instance.id])
            if args.availability_zone or args.all:
                line += u" {0:<12}".format(instance.placement)
            if args.productline or args.all:
                productline = instance.tags.get("productline", u"unknown")
                line += u" {0:<15}".format(productline if productline != u"unknown" else u"-")
            if args.securitygroup or args.all:
                line += u" {0:15}".format(instance.groups[0].name)
            print(line)
    elif args.mode == "terminate":
        instances = instances_from_args(aws, args)
        terminated_instances = aws.terminate(instances)
        print("Terminated: {0}".format(",".join([str(inst) for inst in terminated_instances])))
    elif args.mode == "stop":
        instances = instances_from_args(aws, args)
        stopped_instances = aws.stop(instances)
        print("Stopped: {0}".format(",".join([str(inst) for inst in stopped_instances])))
    elif args.mode == "exec":
        instances = instances_from_args(aws, args)
        exit_code = 0
        for instance in instances:
            _code, _stdout = aws.remotecmd(instance, [args.command], user=args.user, nothrow=True)
            sys.stdout.write(_stdout)
            # Remember the last non-zero code so a single failure fails the run.
            exit_code = _code if _code else exit_code
        sys.exit(exit_code)
    elif args.mode == "exec-ssm":
        ssm = DiscoSSM(environment_name)
        if args.parameters:
            parsed_parameters = parse_ssm_parameters(args.parameters)
        else:
            parsed_parameters = None
        instances = [instance.id for instance in instances_from_args(aws, args)]
        if ssm.execute(instances, args.document,
                       parameters=parsed_parameters, comment=args.comment):
            sys.exit(0)
        else:
            sys.exit(1)
    elif args.mode == "isready":
        instances = instances_from_args(aws, args)
        if not instances:
            print("No instances found")
        ready_count = 0
        for instance in instances:
            name = "{0} {1}".format(instance.tags.get("hostname"), instance.id)
            print("Checking {0}...".format(name))
            try:
                aws.smoketest_once(instance)
                print("...{0} is ready".format(name))
                ready_count += 1
            except SmokeTestError:
                # Fixed: message previously used ".." — now consistent with
                # the "..." prefix of the sibling status messages.
                print("...{0} failed smoke test".format(name))
            except TimeoutError:
                print("...{0} is NOT ready".format(name))
        # Exit 0 only when every instance (possibly zero) passed.
        sys.exit(0 if ready_count == len(instances) else 1)
    elif args.mode == "tag":
        for instance in aws.instances(instance_ids=args.instances):
            # Remove-then-add so an existing value is replaced rather than kept.
            instance.remove_tag(args.key)
            if args.value:
                instance.add_tag(args.key, args.value)
    elif args.mode == "spinup":
        hostclass_dicts = read_pipeline_file(args.pipeline_definition_file)
        aws.spinup(hostclass_dicts, stage=args.stage, no_smoke=args.no_smoke, testing=args.testing)
    elif args.mode == "spindown":
        hostclasses = [line["hostclass"] for line in read_pipeline_file(args.pipeline_definition_file)]
        aws.spindown(hostclasses)
    elif args.mode == "spindownandup":
        hostclass_dicts = read_pipeline_file(args.pipeline_definition_file)
        hostclasses = [d["hostclass"] for d in hostclass_dicts]
        aws.spindown(hostclasses)
        aws.spinup(hostclass_dicts)
    elif args.mode == "gethostclassoption":
        try:
            print(aws.hostclass_option(args.hostclass, args.option))
        except NoOptionError:
            print("Hostclass %s doesn't have option %s." % (args.hostclass, args.option))
    elif args.mode == "promoterunning":
        # args.hours is converted to seconds for the promotion API.
        aws.promote_running_instances_to_prod(args.hours * 60 * 60)
def _parse_step_adjustments(steps):
    """Convert ``Key=value[,Key=value...]`` step strings into dicts.

    Each element of *steps* is one step adjustment expressed as a
    comma-separated list of ``key=value`` pairs. Only the AWS
    StepAdjustment field names are accepted.

    Returns:
        list[dict]: one dict per input step string.

    Raises:
        Exception: if an entry uses a key outside the allowed set.
    """
    allowed_keys = ['MetricIntervalLowerBound', 'MetricIntervalUpperBound', 'ScalingAdjustment']
    parsed_steps = []
    for step in steps:
        parsed_step = {}
        for entry in step.split(','):
            key, value = entry.split('=', 1)
            if key not in allowed_keys:
                raise Exception(
                    'Unable to parse step {0}, key {1} not in {2}'.format(step, key, allowed_keys))
            parsed_step[key] = value
        parsed_steps.append(parsed_step)
    return parsed_steps


def run():
    """Parse the command line and dispatch the autoscaling commands.

    Builds a DiscoGroup for the selected environment and executes exactly
    one sub-command chosen by ``args.mode``, then exits with status 0.
    """
    config = read_config()
    args = parse_arguments()
    configure_logging(args.debug)
    # Fall back to the configured default environment when --env is absent.
    environment_name = args.env or config.get("disco_aws", "default_environment")
    discogroup = DiscoGroup(environment_name)

    # Autoscaling group commands
    if args.mode == "listgroups":
        format_str = "{0} {1:21} {2:3} {3:3} {4:3} {5:3} {6:4} {7:10}"
        groups = discogroup.list_groups()
        if args.debug:
            # Header row is only shown in debug mode.
            print(format_str.format("Name".ljust(35 + len(environment_name)),
                                    "AMI", "min", "des", "max", "cnt", "type", "is_testing"))
        for group in groups:
            print(format_str.format(
                group['name'].ljust(40 + len(environment_name)),
                group['image_id'],
                group['min_size'],
                group['desired_capacity'],
                group['max_size'],
                group['group_cnt'],
                group['type'],
                'y' if is_truthy(group['tags'].get('is_testing')) else 'n'))
    elif args.mode == "cleangroups":
        discogroup.delete_groups()
    elif args.mode == "deletegroup":
        discogroup.delete_groups(hostclass=args.hostclass, group_name=args.name, force=args.force)

    # Launch Configuration commands
    elif args.mode == "listconfigs":
        # Renamed from `config` to avoid shadowing the module config read above.
        for launch_config in discogroup.get_configs():
            print("{0:24} {1}".format(launch_config['LaunchConfigurationName'],
                                      launch_config['ImageId']))
    elif args.mode == "cleanconfigs":
        discogroup.clean_configs()
    elif args.mode == "deleteconfig":
        discogroup.delete_config(args.config)

    # Scaling policy commands
    elif args.mode == "listpolicies":
        policies = discogroup.list_policies(group_name=args.group_name,
                                            policy_types=args.policy_types,
                                            policy_names=args.policy_names)
        print_table(policies, headers=[
            'ASG', 'Name', 'Type', 'Adjustment Type', 'Scaling Adjustment',
            'Min Adjustment', 'Cooldown', 'Step Adjustments', 'Warmup', 'Alarms'
        ])
    elif args.mode == "createpolicy":
        # Parse out the step adjustments, if provided.
        parsed_steps = _parse_step_adjustments(args.step_adjustments) if args.step_adjustments else []
        discogroup.create_policy(
            group_name=args.group_name,
            policy_name=args.policy_name,
            policy_type=args.policy_type,
            adjustment_type=args.adjustment_type,
            min_adjustment_magnitude=args.min_adjustment_magnitude,
            scaling_adjustment=args.scaling_adjustment,
            cooldown=args.cooldown,
            metric_aggregation_type=args.metric_aggregation_type,
            step_adjustments=parsed_steps,
            estimated_instance_warmup=args.estimated_instance_warmup)
    elif args.mode == "deletepolicy":
        discogroup.delete_policy(args.policy_name, args.group_name)

    sys.exit(0)