def test_list_kms_keys(monkeypatch):
    boto3 = MagicMock()
    boto3.list_keys.return_value = {'Keys': [{'KeyId': 'key_a'}, {'KeyId': 'key_b'}]}
    boto3.list_aliases.return_value = {'Aliases': [{'AliasName': 'a', 'TargetKeyId': 'key_a'}]}
    boto3.describe_key.return_value = {'KeyMetadata': {'Description': 'This is key a'}}
    monkeypatch.setattr('boto3.client', MagicMock(return_value=boto3))
    assert len(list_kms_keys(region=None, details=True)) == 2
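
# The test above only asserts the shape of the result, so here is a minimal
# sketch of a list_kms_keys() helper that is consistent with the mocked boto3
# calls (list_keys, list_aliases, describe_key). It is an assumption
# reconstructed from the test and from how the gather_user_variables()
# functions below consume k['aliases'], k['KeyId'], k['Description'] and
# k['Arn']; it is not the original implementation, and pagination is ignored.
import boto3


def list_kms_keys(region=None, details=True):
    kms = boto3.client('kms', region)
    keys = kms.list_keys()['Keys']
    aliases = kms.list_aliases()['Aliases']
    for key in keys:
        # attach the alias names that point at this key
        key['aliases'] = [a['AliasName'] for a in aliases if a.get('TargetKeyId') == key['KeyId']]
        if details:
            # merge KeyMetadata (Description, Arn, ...) into the key dict
            key.update(kms.describe_key(KeyId=key['KeyId'])['KeyMetadata'])
    return keys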


def gather_user_variables(variables, region, account_info):
    defaults = set_default_variables(dict())

    if click.confirm('Do you want to set the docker image now? [No]'):
        prompt(variables, 'docker_image', 'Docker Image Version', default=get_latest_image())

    prompt(variables, 'wal_s3_bucket', 'Postgres WAL S3 bucket to use',
           default='{}-{}-spilo-app'.format(get_account_alias(), region))
    prompt(variables, 'instance_type', 'EC2 instance type', default='t2.medium')

    variables['hosted_zone'] = account_info.Domain or defaults['hosted_zone']
    if variables['hosted_zone'][-1:] != '.':
        variables['hosted_zone'] += '.'
    prompt(variables, 'discovery_domain', 'ETCD Discovery Domain',
           default='postgres.' + variables['hosted_zone'][:-1])

    variables['add_replica_loadbalancer'] = click.confirm('Do you want a replica ELB?', default=False)
    prompt(variables, 'elb_access_cidr', 'Which network should be allowed to access the ELBs? (default=vpc)',
           default=get_vpc_attribute(region=region, vpc_id=account_info.VpcID, attribute='cidr_block'))

    odd_sg_name = 'Odd (SSH Bastion Host)'
    odd_sg = get_security_group(region, odd_sg_name)
    if odd_sg and click.confirm('Do you want to allow access to the Spilo nodes from {}?'.format(odd_sg_name),
                                default=True):
        variables['odd_sg_id'] = odd_sg.group_id

    # Find all Security Groups attached to the zmon worker with 'zmon' in their name
    ec2 = boto3.client('ec2', region)
    filters = [{'Name': 'tag-key', 'Values': ['StackName']},
               {'Name': 'tag-value', 'Values': ['zmon-appliance']}]
    zmon_sgs = list()
    for reservation in ec2.describe_instances(Filters=filters).get('Reservations', []):
        for instance in reservation.get('Instances', []):
            zmon_sgs += [sg['GroupId'] for sg in instance.get('SecurityGroups', [])
                         if 'zmon' in sg['GroupName']]

    if len(zmon_sgs) == 0:
        warning('Could not find zmon security group, do you have the zmon-appliance deployed?')
    else:
        click.confirm('Do you want to allow access to the Spilo nodes from zmon?', default=True)
        if len(zmon_sgs) > 1:
            prompt(variables, 'zmon_sg_id', 'Which Security Group should we allow access from? {}'.format(zmon_sgs))
        else:
            variables['zmon_sg_id'] = zmon_sgs[0]

    if variables['instance_type'].lower().split('.')[0] in ('c3', 'g2', 'hi1', 'i2', 'm3', 'r3'):
        variables['use_ebs'] = click.confirm('Do you want database data directory on external (EBS) storage? [Yes]',
                                             default=defaults['use_ebs'])
    else:
        variables['use_ebs'] = True

    if variables['use_ebs']:
        prompt(variables, 'volume_size', 'Database volume size (GB, 10 or more)', default=defaults['volume_size'])
        prompt(variables, 'volume_type', 'Database volume type (gp2, io1 or standard)',
               default=defaults['volume_type'])
        if variables['volume_type'] == 'io1':
            pio_max = variables['volume_size'] * 30
            prompt(variables, 'volume_iops', 'Provisioned I/O operations per second (100 - {0})'.format(pio_max),
                   default=str(pio_max))
        prompt(variables, 'snapshot_id', 'ID of the snapshot to populate EBS volume from', default='')
        if ebs_optimized_supported(variables['instance_type']):
            variables['ebs_optimized'] = True

    prompt(variables, 'fstype', 'Filesystem for the data partition', default=defaults['fstype'])
    prompt(variables, 'fsoptions', 'Filesystem mount options (comma-separated)', default=defaults['fsoptions'])
    prompt(variables, 'scalyr_account_key', 'Account key for your scalyr account', '')

    prompt(variables, 'pgpassword_superuser', 'Password for PostgreSQL superuser [random]', show_default=False,
           default=generate_random_password, hide_input=True, confirmation_prompt=True)
    prompt(variables, 'pgpassword_standby', 'Password for PostgreSQL user standby [random]', show_default=False,
           default=generate_random_password, hide_input=True, confirmation_prompt=True)
    prompt(variables, 'pgpassword_admin', 'Password for PostgreSQL user admin', show_default=True,
           default=defaults['pgpassword_admin'], hide_input=True, confirmation_prompt=True)

    if click.confirm('Do you wish to encrypt these passwords using KMS?', default=False):
        kms_keys = [k for k in list_kms_keys(region) if 'alias/aws/ebs' not in k['aliases']]
        if len(kms_keys) == 0:
            raise click.UsageError('No KMS key is available for encrypting and decrypting. '
                                   'Ensure you have at least 1 key available.')
        options = ['{}: {}'.format(k['KeyId'], k['Description']) for k in kms_keys]
        kms_key = choice(prompt='Please select the encryption key', options=options)
        kms_keyid = kms_key.split(':')[0]
        variables['kms_arn'] = [k['Arn'] for k in kms_keys if k['KeyId'] == kms_keyid][0]

        for key in [k for k in variables if k.startswith('pgpassword_') or k == 'scalyr_account_key']:
            if variables[key]:
                encrypted = encrypt(region=region, KeyId=kms_keyid, Plaintext=variables[key], b64encode=True)
                variables[key] = 'aws:kms:{}'.format(encrypted)

    set_default_variables(variables)
    check_s3_bucket(variables['wal_s3_bucket'], region)

    return variables
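
# gather_user_variables() relies on a prompt() wrapper that is not shown here.
# A plausible sketch, assuming it only asks interactively for values that were
# not already supplied in `variables` and otherwise delegates to click.prompt();
# the callable-default handling mirrors the default=generate_random_password
# calls above and is an assumption, not the original helper.
import click


def prompt(variables, var_name, text, *args, **kwargs):
    if variables.get(var_name) is None:
        if callable(kwargs.get('default')):
            # e.g. default=generate_random_password is passed as a callable
            kwargs['default'] = kwargs['default']()
        variables[var_name] = click.prompt(text, *args, **kwargs)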


def gather_user_variables(variables, region, account_info):
    defaults = set_default_variables(dict())

    if click.confirm('Do you want to set the docker image now? [No]'):
        prompt(variables, 'docker_image', 'Docker Image Version', default=get_latest_image())

    prompt(variables, 'wal_s3_bucket', 'Postgres WAL S3 bucket to use',
           default='{}-{}-spilo-app'.format(get_account_alias(), region))
    prompt(variables, 'instance_type', 'EC2 instance type', default='t2.medium')

    variables['hosted_zone'] = account_info.Domain or defaults['hosted_zone']
    if variables['hosted_zone'][-1:] != '.':
        variables['hosted_zone'] += '.'
    prompt(variables, 'discovery_domain', 'ETCD Discovery Domain',
           default='postgres.' + variables['hosted_zone'][:-1])

    variables['add_replica_loadbalancer'] = click.confirm('Do you want a replica ELB?', default=False)
    prompt(variables, 'elb_access_cidr', 'Which network should be allowed to access the ELBs? (default=vpc)',
           default=get_vpc_attribute(region=region, vpc_id=account_info.VpcID, attribute='cidr_block'))

    odd_sg_name = 'Odd (SSH Bastion Host)'
    odd_sg = get_security_group(region, odd_sg_name)
    if odd_sg and click.confirm('Do you want to allow access to the Spilo nodes from {}?'.format(odd_sg_name),
                                default=True):
        variables['odd_sg_id'] = odd_sg.group_id

    # Find all Security Groups attached to the zmon worker with 'zmon' in their name
    ec2 = boto3.client('ec2', region)
    filters = [{'Name': 'tag-key', 'Values': ['StackName']},
               {'Name': 'tag-value', 'Values': ['zmon-worker']}]
    zmon_sgs = list()
    for reservation in ec2.describe_instances(Filters=filters).get('Reservations', []):
        for instance in reservation.get('Instances', []):
            zmon_sgs += [sg['GroupId'] for sg in instance.get('SecurityGroups', [])
                         if 'zmon' in sg['GroupName']]

    if len(zmon_sgs) == 0:
        warning('Could not find zmon security group')
    else:
        click.confirm('Do you want to allow access to the Spilo nodes from zmon?', default=True)
        if len(zmon_sgs) > 1:
            prompt(variables, 'zmon_sg_id', 'Which Security Group should we allow access from? {}'.format(zmon_sgs))
        else:
            variables['zmon_sg_id'] = zmon_sgs[0]

    if variables['instance_type'].lower().split('.')[0] in ('c3', 'g2', 'hi1', 'i2', 'm3', 'r3'):
        variables['use_ebs'] = click.confirm('Do you want database data directory on external (EBS) storage? [Yes]',
                                             default=defaults['use_ebs'])
    else:
        variables['use_ebs'] = True

    if variables['use_ebs']:
        prompt(variables, 'volume_size', 'Database volume size (GB, 10 or more)', default=defaults['volume_size'])
        prompt(variables, 'volume_type', 'Database volume type (gp2, io1 or standard)',
               default=defaults['volume_type'])
        if variables['volume_type'] == 'io1':
            pio_max = variables['volume_size'] * 30
            prompt(variables, 'volume_iops', 'Provisioned I/O operations per second (100 - {0})'.format(pio_max),
                   default=str(pio_max))
        prompt(variables, 'snapshot_id', 'ID of the snapshot to populate EBS volume from', default='')
        if ebs_optimized_supported(variables['instance_type']):
            variables['ebs_optimized'] = True

    prompt(variables, 'fstype', 'Filesystem for the data partition', default=defaults['fstype'])
    prompt(variables, 'fsoptions', 'Filesystem mount options (comma-separated)', default=defaults['fsoptions'])
    prompt(variables, 'scalyr_account_key', 'Account key for your scalyr account', '')

    prompt(variables, 'pgpassword_superuser', 'Password for PostgreSQL superuser [random]', show_default=False,
           default=generate_random_password, hide_input=True, confirmation_prompt=True)
    prompt(variables, 'pgpassword_standby', 'Password for PostgreSQL user standby [random]', show_default=False,
           default=generate_random_password, hide_input=True, confirmation_prompt=True)
    prompt(variables, 'pgpassword_admin', 'Password for PostgreSQL user admin', show_default=True,
           default=defaults['pgpassword_admin'], hide_input=True, confirmation_prompt=True)

    if click.confirm('Do you wish to encrypt these passwords using KMS?', default=False):
        kms_keys = [k for k in list_kms_keys(region) if 'alias/aws/ebs' not in k['aliases']]
        if len(kms_keys) == 0:
            raise click.UsageError('No KMS key is available for encrypting and decrypting. '
                                   'Ensure you have at least 1 key available.')
        options = ['{}: {}'.format(k['KeyId'], k['Description']) for k in kms_keys]
        kms_key = choice(prompt='Please select the encryption key', options=options)
        kms_keyid = kms_key.split(':')[0]
        variables['kms_arn'] = [k['Arn'] for k in kms_keys if k['KeyId'] == kms_keyid][0]

        for key in [k for k in variables if k.startswith('pgpassword_') or k == 'scalyr_account_key']:
            encrypted = encrypt(region=region, KeyId=kms_keyid, Plaintext=variables[key], b64encode=True)
            variables[key] = 'aws:kms:{}'.format(encrypted)

    set_default_variables(variables)
    check_s3_bucket(variables['wal_s3_bucket'], region)

    return variables


def gather_user_variables(variables, region, account_info):
    defaults = set_default_variables(dict())

    if click.confirm("Do you want to set the docker image now? [No]"):
        prompt(variables, "docker_image", "Docker Image Version", default=get_latest_spilo_image())

    prompt(
        variables,
        "wal_s3_bucket",
        "Postgres WAL S3 bucket to use",
        default="{}-{}-spilo-app".format(get_account_alias(), region),
    )
    prompt(variables, "instance_type", "EC2 instance type", default="t2.micro")

    variables["hosted_zone"] = account_info.Domain or defaults["hosted_zone"]
    if variables["hosted_zone"][-1:] != ".":
        variables["hosted_zone"] += "."
    prompt(variables, "discovery_domain", "ETCD Discovery Domain", default="postgres." + variables["hosted_zone"][:-1])

    variables["add_replica_loadbalancer"] = click.confirm("Do you want a replica ELB?", default=False)
    prompt(
        variables,
        "elb_access_cidr",
        "Which network should be allowed to access the ELBs? (default=vpc)",
        default=get_vpc_attribute(region=region, vpc_id=account_info.VpcID, attribute="cidr_block"),
    )

    odd_sg_name = "Odd (SSH Bastion Host)"
    odd_sg = get_security_group(region, odd_sg_name)
    if odd_sg and click.confirm(
        "Do you want to allow access to the Spilo nodes from {}?".format(odd_sg_name), default=True
    ):
        variables["odd_sg_id"] = odd_sg.group_id

    # Find all Security Groups attached to the zmon worker with 'zmon' in their name
    ec2 = boto3.client("ec2", region)
    filters = [{"Name": "tag-key", "Values": ["StackName"]}, {"Name": "tag-value", "Values": ["zmon-worker"]}]
    zmon_sgs = list()
    for reservation in ec2.describe_instances(Filters=filters).get("Reservations", []):
        for instance in reservation.get("Instances", []):
            zmon_sgs += [sg["GroupId"] for sg in instance.get("SecurityGroups", []) if "zmon" in sg["GroupName"]]

    if len(zmon_sgs) == 0:
        warning("Could not find zmon security group")
    else:
        click.confirm("Do you want to allow access to the Spilo nodes from zmon?", default=True)
        if len(zmon_sgs) > 1:
            prompt(variables, "zmon_sg_id", "Which Security Group should we allow access from? {}".format(zmon_sgs))
        else:
            variables["zmon_sg_id"] = zmon_sgs[0]

    if variables["instance_type"].lower().split(".")[0] in ("c3", "g2", "hi1", "i2", "m3", "r3"):
        variables["use_ebs"] = click.confirm(
            "Do you want database data directory on external (EBS) storage? [Yes]", default=defaults["use_ebs"]
        )
    else:
        variables["use_ebs"] = True

    if variables["use_ebs"]:
        prompt(variables, "volume_size", "Database volume size (GB, 10 or more)", default=defaults["volume_size"])
        prompt(variables, "volume_type", "Database volume type (gp2, io1 or standard)", default=defaults["volume_type"])
        if variables["volume_type"] == "io1":
            pio_max = variables["volume_size"] * 30
            prompt(
                variables,
                "volume_iops",
                "Provisioned I/O operations per second (100 - {0})".format(pio_max),
                default=str(pio_max),
            )
        prompt(variables, "snapshot_id", "ID of the snapshot to populate EBS volume from", default="")
        if ebs_optimized_supported(variables["instance_type"]):
            variables["ebs_optimized"] = True

    prompt(variables, "fstype", "Filesystem for the data partition", default=defaults["fstype"])
    prompt(variables, "fsoptions", "Filesystem mount options (comma-separated)", default=defaults["fsoptions"])
    prompt(variables, "scalyr_account_key", "Account key for your scalyr account", "")

    prompt(
        variables,
        "pgpassword_superuser",
        "Password for PostgreSQL superuser [random]",
        show_default=False,
        default=generate_random_password,
        hide_input=True,
        confirmation_prompt=True,
    )
    prompt(
        variables,
        "pgpassword_standby",
        "Password for PostgreSQL user standby [random]",
        show_default=False,
        default=generate_random_password,
        hide_input=True,
        confirmation_prompt=True,
    )
    prompt(
        variables,
        "pgpassword_admin",
        "Password for PostgreSQL user admin",
        show_default=True,
        default=defaults["pgpassword_admin"],
        hide_input=True,
        confirmation_prompt=True,
    )

    if click.confirm("Do you wish to encrypt these passwords using KMS?", default=False):
        kms_keys = [k for k in list_kms_keys(region) if "alias/aws/ebs" not in k["aliases"]]
        if len(kms_keys) == 0:
            raise click.UsageError(
                "No KMS key is available for encrypting and decrypting. Ensure you have at least 1 key available."
            )
        options = ["{}: {}".format(k["KeyId"], k["Description"]) for k in kms_keys]
        kms_key = choice(prompt="Please select the encryption key", options=options)
        kms_keyid = kms_key.split(":")[0]
        variables["kms_arn"] = [k["Arn"] for k in kms_keys if k["KeyId"] == kms_keyid][0]

        for key in [k for k in variables if k.startswith("pgpassword_")]:
            encrypted = encrypt(region=region, KeyId=kms_keyid, Plaintext=variables[key], b64encode=True)
            variables[key] = "aws:kms:{}".format(encrypted)

    set_default_variables(variables)
    check_s3_bucket(variables["wal_s3_bucket"], region)

    return variables
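
# ebs_optimized_supported() is called with the chosen EC2 instance type in all
# of the variants above. A hypothetical sketch: the instance types listed here
# are an illustrative guess at older generations where EBS optimization has to
# be enabled explicitly, not an authoritative AWS list.
def ebs_optimized_supported(instance_type):
    return instance_type in ('c1.xlarge', 'c3.xlarge', 'c3.2xlarge', 'c3.4xlarge',
                             'i2.xlarge', 'i2.2xlarge', 'i2.4xlarge',
                             'm1.large', 'm1.xlarge', 'm2.2xlarge', 'm2.4xlarge',
                             'm3.xlarge', 'm3.2xlarge', 'r3.xlarge', 'r3.2xlarge')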


def gather_user_variables(variables, region, account_info):
    defaults = set_default_variables(dict())

    if click.confirm('Do you want to set the docker image now? [No]'):
        prompt(variables, 'docker_image', 'Docker Image Version', default=get_latest_spilo_image())

    prompt(variables, 'wal_s3_bucket', 'Postgres WAL S3 bucket to use',
           default='{}-{}-spilo-app'.format(get_account_alias(), region))
    prompt(variables, 'instance_type', 'EC2 instance type', default='t2.micro')

    variables['hosted_zone'] = account_info.Domain or defaults['hosted_zone']
    if variables['hosted_zone'][-1:] != '.':
        variables['hosted_zone'] += '.'
    prompt(variables, 'discovery_domain', 'ETCD Discovery Domain',
           default='postgres.' + variables['hosted_zone'][:-1])

    if variables['instance_type'].lower().split('.')[0] in ('c3', 'g2', 'hi1', 'i2', 'm3', 'r3'):
        variables['use_ebs'] = click.confirm('Do you want database data directory on external (EBS) storage? [Yes]',
                                             default=defaults['use_ebs'])
    else:
        variables['use_ebs'] = True

    if variables['use_ebs']:
        prompt(variables, 'volume_size', 'Database volume size (GB, 10 or more)', default=defaults['volume_size'])
        prompt(variables, 'volume_type', 'Database volume type (gp2, io1 or standard)',
               default=defaults['volume_type'])
        if variables['volume_type'] == 'io1':
            pio_max = variables['volume_size'] * 30
            prompt(variables, 'volume_iops', 'Provisioned I/O operations per second (100 - {0})'.format(pio_max),
                   default=str(pio_max))
        prompt(variables, 'snapshot_id', 'ID of the snapshot to populate EBS volume from', default='')
        if ebs_optimized_supported(variables['instance_type']):
            variables['ebs_optimized'] = True

    prompt(variables, 'fstype', 'Filesystem for the data partition', default=defaults['fstype'])
    prompt(variables, 'fsoptions', 'Filesystem mount options (comma-separated)', default=defaults['fsoptions'])
    prompt(variables, 'scalyr_account_key', 'Account key for your scalyr account', '')

    prompt(variables, 'pgpassword_superuser', 'Password for PostgreSQL superuser [random]', show_default=False,
           default=generate_random_password, hide_input=True, confirmation_prompt=True)
    prompt(variables, 'pgpassword_standby', 'Password for PostgreSQL user standby [random]', show_default=False,
           default=generate_random_password, hide_input=True, confirmation_prompt=True)
    prompt(variables, 'pgpassword_admin', 'Password for PostgreSQL user admin', show_default=True,
           default=defaults['pgpassword_admin'], hide_input=True, confirmation_prompt=True)

    if click.confirm('Do you wish to encrypt these passwords using KMS?', default=False):
        kms_keys = [k for k in list_kms_keys(region) if 'alias/aws/ebs' not in k['aliases']]
        if len(kms_keys) == 0:
            raise click.UsageError('No KMS key is available for encrypting and decrypting. '
                                   'Ensure you have at least 1 key available.')
        options = ['{}: {}'.format(k['KeyId'], k['Description']) for k in kms_keys]
        kms_key = choice(prompt='Please select the encryption key', options=options)
        kms_keyid = kms_key.split(':')[0]
        variables['kms_arn'] = [k['Arn'] for k in kms_keys if k['KeyId'] == kms_keyid][0]

        for key in [k for k in variables if k.startswith('pgpassword_')]:
            encrypted = encrypt(region=region, KeyId=kms_keyid, Plaintext=variables[key], b64encode=True)
            variables[key] = 'aws:kms:{}'.format(encrypted)

    set_default_variables(variables)

    sg_name = 'app-spilo'
    rules_missing = check_security_group(sg_name, [('tcp', 22), ('tcp', POSTGRES_PORT), ('tcp', HEALTHCHECK_PORT)],
                                         region, allow_from_self=True)
    if ('tcp', 22) in rules_missing:
        warning('Security group {} does not allow SSH access, you will not be able to ssh into your servers'.format(
            sg_name))
    if ('tcp', POSTGRES_PORT) in rules_missing:
        error('Security group {} does not allow inbound TCP traffic on the default postgres port ({})'.format(
            sg_name, POSTGRES_PORT))
    if ('tcp', HEALTHCHECK_PORT) in rules_missing:
        error('Security group {} does not allow inbound TCP traffic on the default health check port ({})'.format(
            sg_name, HEALTHCHECK_PORT))
    variables['spilo_sg_id'] = get_security_group(region, sg_name).id

    check_s3_bucket(variables['wal_s3_bucket'], region)

    return variables


def gather_user_variables(variables, account_info, region):
    set_default_variables(variables)

    missing = []
    for required in ('team_name', 'team_region', 'team_gateway_zone', 'hosted_zone'):
        if not variables.get(required):
            missing.append(required)
    if len(missing) > 0:
        fatal_error("Missing values for the following variables: {0}".format(', '.join(missing)))

    # verify the region matches the user input
    if variables['team_region'] != region.Region:
        fatal_error("Current region {0} does not match the requested region {1}\n"
                    "Change the current region with the --region option or set the AWS_DEFAULT_REGION variable.".
                    format(region.Region, variables['team_region']))

    variables['wal_s3_bucket'] = '{}-{}-spilo-dbaas'.format(get_account_alias(), region.Region)

    for name in ('team_gateway_zone', 'hosted_zone'):
        if variables[name][-1] != '.':
            variables[name] += '.'

    # split the ldap url into the URL and suffix (path component)
    if variables['ldap_url']:
        url = urlparse(variables['ldap_url'])
        if url.path and url.path[0] == '/':
            variables['ldap_suffix'] = url.path[1:]

    # if the master DNS name is specified but not the replica one, derive the replica name from the master
    if variables['master_dns_name'] and not variables['replica_dns_name']:
        replica_dns_components = variables['master_dns_name'].split('.')
        replica_dns_components[0] += '-repl'
        variables['replica_dns_name'] = '.'.join(replica_dns_components)

    # make sure all DNS names belong to the hosted zone
    for v in ('master_dns_name', 'replica_dns_name'):
        if variables[v] and not check_dns_name(variables[v], variables['hosted_zone'][:-1]):
            fatal_error("{0} should end with {1}".format(v.replace('_', ' '), variables['hosted_zone'][:-1]))

    if variables['ldap_url'] and not variables['ldap_suffix']:
        fatal_error("LDAP URL is missing the suffix: should be in the format "
                    "ldap[s]://example.com[:port]/ou=people,dc=example,dc=com")

    # pick up the proper etcd address depending on the region
    variables['discovery_domain'] = detect_etcd_discovery_domain_for_region(variables['hosted_zone'],
                                                                            region.Region)

    # get the IP addresses of the NAT gateways to access a given ELB
    variables['nat_gateway_addresses'] = detect_eu_team_nat_gateways(variables['team_gateway_zone'])
    variables['odd_instance_addresses'] = detect_eu_team_odd_instances(variables['team_gateway_zone'])
    variables['spilo_security_group_ingress_rules_block'] = \
        generate_spilo_master_security_group_ingress(variables['nat_gateway_addresses'] +
                                                     variables['odd_instance_addresses'])

    if variables['postgresqlconf']:
        variables['postgresqlconf'] = generate_postgresql_configuration(variables['postgresqlconf'])

    odd_sg = get_security_group(region.Region, ODD_SG_NAME)
    variables['odd_sg_id'] = odd_sg.group_id

    # Find all Security Groups attached to the zmon worker with 'zmon' in their name
    variables['zmon_sg_id'] = detect_zmon_security_group(region.Region)

    if variables['volume_type'] == 'io1' and not variables['volume_iops']:
        pio_max = variables['volume_size'] * 30
        variables['volume_iops'] = str(pio_max)
    variables['ebs_optimized'] = ebs_optimized_supported(variables['instance_type'])

    # pick up the first key with a description containing spilo
    kms_keys = [k for k in list_kms_keys(region.Region)
                if 'alias/aws/ebs' not in k['aliases'] and 'spilo' in k['Description'].lower()]
    if len(kms_keys) == 0:
        fatal_error('No KMS key is available for encrypting and decrypting. '
                    'Ensure you have at least 1 key available.')

    kms_key = kms_keys[0]
    kms_keyid = kms_key['KeyId']
    variables['kms_arn'] = kms_key['Arn']

    for key in [k for k in variables if k.startswith('pgpassword_')] + \
            (['scalyr_account_key'] if variables.get('scalyr_account_key') else []):
        encrypted = encrypt(region=region.Region, KeyId=kms_keyid, Plaintext=variables[key], b64encode=True)
        variables[key] = 'aws:kms:{}'.format(encrypted)

    check_s3_bucket(variables['wal_s3_bucket'], region.Region)

    return variables
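
# The encrypt() helper used for the pgpassword_* and scalyr_account_key values
# is not shown. A minimal sketch, assuming it wraps the boto3 KMS Encrypt call
# and optionally base64-encodes the ciphertext so it fits into the
# 'aws:kms:<ciphertext>' strings built above; the parameter names follow the
# call sites, the body itself is an assumption.
import base64
import boto3


def encrypt(region=None, KeyId=None, Plaintext=None, b64encode=False):
    kms = boto3.client('kms', region)
    if isinstance(Plaintext, str):
        # KMS expects bytes for the Plaintext blob
        Plaintext = Plaintext.encode('utf-8')
    ciphertext = kms.encrypt(KeyId=KeyId, Plaintext=Plaintext)['CiphertextBlob']
    return base64.b64encode(ciphertext).decode('utf-8') if b64encode else ciphertext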