def check_security_group(sg_name, rules, region, allow_from_self=False):
    """Verify that security group *sg_name* in *region* contains the ingress *rules*.

    :param sg_name: name of the EC2 security group
    :param rules: iterable of (protocol, port) tuples, e.g. ('tcp', 22)
    :param region: AWS region name
    :param allow_from_self: when creating the group, also allow TCP 0-65535
                            from members of the group itself
    :return: set of (protocol, port) tuples NOT covered by the group.
             If the group does not exist, the user may let Senza create it
             (ports opened to 0.0.0.0/0), in which case an empty set is
             returned.
    """
    rules_missing = set(rules)
    with Action('Checking security group {}..'.format(sg_name)):
        sg = get_security_group(region, sg_name)
        if sg:
            for rule in sg.rules:
                # NOTE: boto object has port as string!
                for proto, port in rules:
                    if rule.ip_protocol == proto and rule.from_port == str(port):
                        # discard() instead of remove(): several SG rules can
                        # share the same protocol/from_port (different CIDRs or
                        # port ranges) — a second match must not raise KeyError
                        rules_missing.discard((proto, port))
    if sg:
        return rules_missing
    else:
        create_sg = click.confirm('Security group {} does not exist. Do you want Senza to create it now?'.format(
            sg_name), default=True)
        if create_sg:
            vpc_conn = boto.vpc.connect_to_region(region)
            vpcs = vpc_conn.get_all_vpcs()
            ec2_conn = boto.ec2.connect_to_region(region)
            # NOTE(review): blindly uses the first VPC of the account
            sg = ec2_conn.create_security_group(sg_name, 'Application security group',
                                                vpc_id=vpcs[0].id)
            sg.add_tags({'Name': sg_name})
            for proto, port in rules:
                sg.authorize(ip_protocol=proto, from_port=port, to_port=port, cidr_ip='0.0.0.0/0')
            if allow_from_self:
                sg.authorize(ip_protocol='tcp', from_port=0, to_port=65535, src_group=sg)
        # NOTE(review): returns an empty set even when the user declined to
        # create the group — callers then see "no rules missing"
        return set()
def gather_user_variables(variables, region):
    """Interactively collect missing deployment parameters.

    Mutates *variables* in place and returns it.  Side effects: verifies
    (and possibly creates) the 'app-spilo' security group and checks the
    WAL S3 bucket.
    """
    prompt(variables, 'wal_s3_bucket', 'Postgres WAL S3 bucket to use', default='zalando-spilo-app')
    prompt(variables, 'instance_type', 'EC2 instance type', default='t2.micro')
    prompt(variables, 'hosted_zone', 'Hosted Zone', default=get_default_zone(region) or 'example.com')
    if variables['hosted_zone'][-1:] != '.':
        # DNS zone names are absolute: make sure it ends with a dot
        variables['hosted_zone'] += '.'
    prompt(variables, 'discovery_url', 'ETCD Discovery URL',
           default='postgres.' + variables['hosted_zone'][:-1])
    variables['postgres_port'] = POSTGRES_PORT
    variables['healthcheck_port'] = HEALTHCHECK_PORT
    sg_name = 'app-spilo'
    rules_missing = check_security_group(sg_name,
                                         [('tcp', 22), ('tcp', POSTGRES_PORT), ('tcp', HEALTHCHECK_PORT)],
                                         region, allow_from_self=True)
    if ('tcp', 22) in rules_missing:
        warning('Security group {} does not allow SSH access, you will not be able to ssh into your servers'.format(
            sg_name))
    if ('tcp', POSTGRES_PORT) in rules_missing:
        error('Security group {} does not allow inbound TCP traffic on the default postgres port ({})'.format(
            sg_name, POSTGRES_PORT
        ))
    if ('tcp', HEALTHCHECK_PORT) in rules_missing:
        error('Security group {} does not allow inbound TCP traffic on the default health check port ({})'.format(
            sg_name, HEALTHCHECK_PORT
        ))
    # Resolve the group id only AFTER check_security_group() had a chance to
    # create the group: doing it before crashed with AttributeError
    # (get_security_group() returns None) when the group did not exist yet.
    variables['spilo_sg_id'] = get_security_group(region, sg_name).id
    check_s3_bucket(variables['wal_s3_bucket'], region)
    return variables
def check_security_group(sg_name, rules, region, allow_from_self=False):
    """Verify that security group *sg_name* contains the ingress *rules*.

    :param rules: iterable of (protocol, port) tuples, e.g. ("tcp", 22)
    :return: set of (protocol, port) tuples missing from the group.
             If the group does not exist, the user may let Senza create it
             (ports opened to 0.0.0.0/0, plus all intra-group traffic when
             *allow_from_self*), in which case an empty set is returned.
    """
    rules_missing = set(rules)
    with Action("Checking security group {}..".format(sg_name)):
        sg = get_security_group(region, sg_name)
        if sg:
            for rule in sg.ip_permissions:
                # boto3 reports FromPort as an integer (all-traffic "-1"
                # permissions carry no FromPort, but they never match the
                # protocol test first, so the key is only read when safe)
                for proto, port in rules:
                    if rule["IpProtocol"] == proto and rule["FromPort"] == int(port):
                        # discard() instead of remove(): several permissions can
                        # share the same protocol/FromPort (different port ranges
                        # or CIDRs) — a second match must not raise KeyError
                        rules_missing.discard((proto, port))
    if sg:
        return rules_missing
    else:
        create_sg = click.confirm(
            "Security group {} does not exist. Do you want Senza to create it now?"
            .format(sg_name),
            default=True,
        )
        if create_sg:
            ec2c = BotoClientProxy("ec2", region)
            # FIXME which vpc?  (blindly takes the first VPC of the account)
            vpc = ec2c.describe_vpcs()["Vpcs"][0]
            sg = ec2c.create_security_group(
                GroupName=sg_name,
                Description="Application security group",
                VpcId=vpc["VpcId"],
            )
            ec2c.create_tags(Resources=[sg["GroupId"]],
                             Tags=[{"Key": "Name", "Value": sg_name}])
            ip_permissions = []
            for proto, port in rules:
                ip_permissions.append({
                    "IpProtocol": proto,
                    "FromPort": port,
                    "ToPort": port,
                    "IpRanges": [{"CidrIp": "0.0.0.0/0"}],
                })
            if allow_from_self:
                # all protocols/ports from members of this very group
                ip_permissions.append({
                    "IpProtocol": "-1",
                    "UserIdGroupPairs": [{"GroupId": sg["GroupId"]}],
                })
            ec2c.authorize_security_group_ingress(GroupId=sg["GroupId"],
                                                  IpPermissions=ip_permissions)
        return set()
def check_security_group(sg_name, rules, region, allow_from_self=False):
    """Verify that security group *sg_name* contains the ingress *rules*.

    :param rules: iterable of (protocol, port) tuples, e.g. ('tcp', 22)
    :return: set of (protocol, port) tuples missing from the group.
             If the group does not exist, the user may let Senza create it
             (ports opened to 0.0.0.0/0, plus all intra-group traffic when
             *allow_from_self*), in which case an empty set is returned.
    """
    rules_missing = set(rules)
    with Action('Checking security group {}..'.format(sg_name)):
        sg = get_security_group(region, sg_name)
        if sg:
            for rule in sg.ip_permissions:
                # boto3 reports FromPort as an integer (all-traffic '-1'
                # permissions carry no FromPort, but the protocol test fails
                # first for them, so the key is only read when present)
                for proto, port in rules:
                    if rule['IpProtocol'] == proto and rule['FromPort'] == int(port):
                        # discard() instead of remove(): several permissions can
                        # share the same protocol/FromPort (different port ranges
                        # or CIDRs) — a second match must not raise KeyError
                        rules_missing.discard((proto, port))
    if sg:
        return rules_missing
    else:
        create_sg = click.confirm(
            'Security group {} does not exist. Do you want Senza to create it now?'
            .format(sg_name), default=True)
        if create_sg:
            ec2c = BotoClientProxy('ec2', region)
            # FIXME which vpc?  (blindly takes the first VPC of the account)
            vpc = ec2c.describe_vpcs()['Vpcs'][0]
            sg = ec2c.create_security_group(
                GroupName=sg_name,
                Description='Application security group',
                VpcId=vpc['VpcId'])
            ec2c.create_tags(Resources=[sg['GroupId']],
                             Tags=[{'Key': 'Name', 'Value': sg_name}])
            ip_permissions = []
            for proto, port in rules:
                ip_permissions.append({
                    'IpProtocol': proto,
                    'FromPort': port,
                    'ToPort': port,
                    'IpRanges': [{'CidrIp': '0.0.0.0/0'}]
                })
            if allow_from_self:
                # all protocols/ports from members of this very group
                ip_permissions.append({
                    'IpProtocol': '-1',
                    'UserIdGroupPairs': [{'GroupId': sg['GroupId']}]
                })
            ec2c.authorize_security_group_ingress(GroupId=sg['GroupId'],
                                                  IpPermissions=ip_permissions)
        return set()
def gather_user_variables(variables, region):
    """Interactively collect missing deployment parameters.

    Mutates *variables* in place and returns it.  Side effects: verifies
    (and possibly creates) the 'app-spilo' security group and checks the
    WAL S3 bucket.
    """
    if click.confirm('Do you want to set the docker image now? [No]'):
        prompt(variables, "docker_image", "Docker Image Version", default=get_latest_spilo_image())
    prompt(variables, 'wal_s3_bucket', 'Postgres WAL S3 bucket to use', default='zalando-spilo-app')
    prompt(variables, 'instance_type', 'EC2 instance type', default='t2.micro')
    prompt(variables, 'hosted_zone', 'Hosted Zone', default=get_default_zone(region) or 'example.com')
    # DNS zone names are absolute: make sure it ends with a dot
    if (variables['hosted_zone'][-1:] != '.'):
        variables['hosted_zone'] += '.'
    prompt(variables, 'discovery_domain', 'ETCD Discovery Domain',
           default='postgres.'+variables['hosted_zone'][:-1])
    # Instance families with local (instance-store) disks get a choice;
    # everything else must use EBS
    if variables['instance_type'].lower().split('.')[0] in ('c3', 'g2', 'hi1', 'i2', 'm3', 'r3'):
        variables['use_ebs'] = click.confirm('Do you want database data directory on external (EBS) storage? [Yes]',
                                             default=True)
    else:
        variables['use_ebs'] = True
    if variables['use_ebs']:
        prompt(variables, 'volume_size', 'Database volume size (GB, 10 or more)', default=10)
        prompt(variables, 'volume_type', 'Database volume type (gp2, io1 or standard)', default='gp2')
        if variables['volume_type'] == 'io1':
            # io1 volumes allow up to 30 provisioned IOPS per GB
            pio_max = variables['volume_size'] * 30
            prompt(variables, "volume_iops", 'Provisioned I/O operations per second (100 - {0})'.format(pio_max),
                   default=str(pio_max))
        prompt(variables, "snapshot_id", "ID of the snapshot to populate EBS volume from", default="")
        if ebs_optimized_supported(variables['instance_type']):
            variables['ebs_optimized'] = True
    prompt(variables, "fstype", "Filesystem for the data partition", default="ext4")
    prompt(variables, "fsoptions", "Filesystem mount options (comma-separated)",
           default="noatime,nodiratime,nobarrier")
    prompt(variables, "scalyr_account_key", "Account key for your scalyr account", "")
    variables['postgres_port'] = POSTGRES_PORT
    variables['healthcheck_port'] = HEALTHCHECK_PORT
    sg_name = 'app-spilo'
    rules_missing = check_security_group(sg_name,
                                         [('tcp', 22), ('tcp', POSTGRES_PORT), ('tcp', HEALTHCHECK_PORT)],
                                         region, allow_from_self=True)
    if ('tcp', 22) in rules_missing:
        warning('Security group {} does not allow SSH access, you will not be able to ssh into your servers'.
                format(sg_name))
    if ('tcp', POSTGRES_PORT) in rules_missing:
        error('Security group {} does not allow inbound TCP traffic on the default postgres port ({})'.format(
            sg_name, POSTGRES_PORT
        ))
    if ('tcp', HEALTHCHECK_PORT) in rules_missing:
        error('Security group {} does not allow inbound TCP traffic on the default health check port ({})'.
              format(sg_name, HEALTHCHECK_PORT))
    # looked up after check_security_group() so a freshly created group is found
    variables['spilo_sg_id'] = get_security_group(region, sg_name).id
    check_s3_bucket(variables['wal_s3_bucket'], region)
    return variables
def test_get_security_group_by_tag_name(monkeypatch):
    """get_security_group() must resolve a group via the 'tag:Name' filter."""
    def fake_filter(Filters):
        # Answer only the expected tag:Name lookup for 'my-sg'
        tag_filter = Filters[0]
        if tag_filter['Name'] == 'tag:Name' and tag_filter['Values'] == ['my-sg']:
            group = MagicMock()
            group.id = 'sg-123'
            return [group]
    fake_ec2 = MagicMock()
    fake_ec2.security_groups.filter = fake_filter
    monkeypatch.setattr('boto3.resource', MagicMock(return_value=fake_ec2))
    assert get_security_group('myregion', 'my-sg').id == 'sg-123'
def check_security_group(sg_name, rules, region, allow_from_self=False):
    """Verify that security group *sg_name* in *region* contains the ingress *rules*.

    :param rules: iterable of (protocol, port) tuples, e.g. ('tcp', 22)
    :param allow_from_self: when creating the group, also allow TCP 0-65535
                            from members of the group itself
    :return: set of (protocol, port) tuples NOT covered by the group.
             If the group does not exist, the user may let Senza create it
             (ports opened to 0.0.0.0/0), in which case an empty set is
             returned.
    """
    rules_missing = set(rules)
    with Action('Checking security group {}..'.format(sg_name)):
        sg = get_security_group(region, sg_name)
        if sg:
            for rule in sg.rules:
                # NOTE: boto object has port as string!
                for proto, port in rules:
                    if rule.ip_protocol == proto and rule.from_port == str(port):
                        # discard() instead of remove(): several SG rules can
                        # share the same protocol/from_port (different CIDRs or
                        # port ranges) — a second match must not raise KeyError
                        rules_missing.discard((proto, port))
    if sg:
        return rules_missing
    else:
        create_sg = click.confirm(
            'Security group {} does not exist. Do you want Senza to create it now?'
            .format(sg_name), default=True)
        if create_sg:
            vpc_conn = boto.vpc.connect_to_region(region)
            vpcs = vpc_conn.get_all_vpcs()
            ec2_conn = boto.ec2.connect_to_region(region)
            # NOTE(review): blindly uses the first VPC of the account
            sg = ec2_conn.create_security_group(sg_name,
                                                'Application security group',
                                                vpc_id=vpcs[0].id)
            sg.add_tags({'Name': sg_name})
            for proto, port in rules:
                sg.authorize(ip_protocol=proto, from_port=port, to_port=port,
                             cidr_ip='0.0.0.0/0')
            if allow_from_self:
                sg.authorize(ip_protocol='tcp', from_port=0, to_port=65535,
                             src_group=sg)
        return set()
def check_security_group(sg_name, rules, region, allow_from_self=False):
    """Verify that security group *sg_name* contains the ingress *rules*.

    :param rules: iterable of (protocol, port) tuples, e.g. ('tcp', 22)
    :return: set of (protocol, port) tuples missing from the group.
             If the group does not exist, the user may let Senza create it
             (ports opened to 0.0.0.0/0, plus all intra-group traffic when
             *allow_from_self*), in which case an empty set is returned.
    """
    rules_missing = set(rules)
    with Action('Checking security group {}..'.format(sg_name)):
        sg = get_security_group(region, sg_name)
        if sg:
            for rule in sg.ip_permissions:
                # boto3 reports FromPort as an integer (all-traffic '-1'
                # permissions carry no FromPort, but the protocol test fails
                # first for them, so the key is only read when present)
                for proto, port in rules:
                    if rule['IpProtocol'] == proto and rule['FromPort'] == int(port):
                        # discard() instead of remove(): several permissions can
                        # share the same protocol/FromPort (different port ranges
                        # or CIDRs) — a second match must not raise KeyError
                        rules_missing.discard((proto, port))
    if sg:
        return rules_missing
    else:
        create_sg = click.confirm('Security group {} does not exist. Do you want Senza to create it now?'.format(
            sg_name), default=True)
        if create_sg:
            ec2c = boto3.client('ec2', region)
            # FIXME which vpc?  (blindly takes the first VPC of the account)
            vpc = ec2c.describe_vpcs()['Vpcs'][0]
            sg = ec2c.create_security_group(GroupName=sg_name,
                                            Description='Application security group',
                                            VpcId=vpc['VpcId'])
            ec2c.create_tags(Resources=[sg['GroupId']],
                             Tags=[{'Key': 'Name', 'Value': sg_name}])
            ip_permissions = []
            for proto, port in rules:
                ip_permissions.append({'IpProtocol': proto,
                                       'FromPort': port,
                                       'ToPort': port,
                                       'IpRanges': [{'CidrIp': '0.0.0.0/0'}]})
            if allow_from_self:
                # all protocols/ports from members of this very group
                ip_permissions.append({'IpProtocol': '-1',
                                       'UserIdGroupPairs': [{'GroupId': sg['GroupId']}]})
            ec2c.authorize_security_group_ingress(GroupId=sg['GroupId'],
                                                  IpPermissions=ip_permissions)
        return set()
def test_get_security_group(monkeypatch):
    """An unknown group name must make get_security_group() return None."""
    stub_ec2 = MagicMock()
    monkeypatch.setattr('boto3.resource', MagicMock(return_value=stub_ec2))
    expected = None
    assert expected == get_security_group('myregion', 'group_inexistant')
def gather_user_variables(variables, region, account_info):
    """Interactively collect missing deployment parameters.

    Mutates *variables* in place and returns it.  Side effects: verifies
    (and possibly creates) the 'app-spilo' security group and checks the
    WAL S3 bucket.
    """
    if click.confirm('Do you want to set the docker image now? [No]'):
        prompt(variables, "docker_image", "Docker Image Version", default=get_latest_spilo_image())
    else:
        variables['docker_image'] = None
    prompt(variables, 'wal_s3_bucket', 'Postgres WAL S3 bucket to use',
           default='{}-{}-spilo-app'.format(get_account_alias(), region))
    prompt(variables, 'instance_type', 'EC2 instance type', default='t2.micro')
    variables['hosted_zone'] = account_info.Domain or 'example.com'
    # DNS zone names are absolute: make sure it ends with a dot
    if (variables['hosted_zone'][-1:] != '.'):
        variables['hosted_zone'] += '.'
    prompt(variables, 'discovery_domain', 'ETCD Discovery Domain',
           default='postgres.' + variables['hosted_zone'][:-1])
    # Instance families with local (instance-store) disks get a choice;
    # everything else must use EBS
    if variables['instance_type'].lower().split('.')[0] in ('c3', 'g2', 'hi1', 'i2', 'm3', 'r3'):
        variables['use_ebs'] = click.confirm(
            'Do you want database data directory on external (EBS) storage? [Yes]',
            default=True)
    else:
        variables['use_ebs'] = True
    # defaults so the template always finds these keys, even without EBS
    variables['ebs_optimized'] = None
    variables['volume_iops'] = None
    variables['snapshot_id'] = None
    if variables['use_ebs']:
        prompt(variables, 'volume_size', 'Database volume size (GB, 10 or more)', default=10)
        prompt(variables, 'volume_type', 'Database volume type (gp2, io1 or standard)', default='gp2')
        if variables['volume_type'] == 'io1':
            # io1 volumes allow up to 30 provisioned IOPS per GB
            pio_max = variables['volume_size'] * 30
            prompt(variables, "volume_iops", 'Provisioned I/O operations per second (100 - {0})'.format(
                pio_max), default=str(pio_max))
        prompt(variables, "snapshot_id", "ID of the snapshot to populate EBS volume from", default="")
        if ebs_optimized_supported(variables['instance_type']):
            variables['ebs_optimized'] = True
    prompt(variables, "fstype", "Filesystem for the data partition", default="ext4")
    prompt(variables, "fsoptions", "Filesystem mount options (comma-separated)",
           default="noatime,nodiratime,nobarrier")
    prompt(variables, "scalyr_account_key", "Account key for your scalyr account", "")
    variables['postgres_port'] = POSTGRES_PORT
    variables['healthcheck_port'] = HEALTHCHECK_PORT
    sg_name = 'app-spilo'
    rules_missing = check_security_group(sg_name,
                                         [('tcp', 22), ('tcp', POSTGRES_PORT), ('tcp', HEALTHCHECK_PORT)],
                                         region, allow_from_self=True)
    if ('tcp', 22) in rules_missing:
        warning(
            'Security group {} does not allow SSH access, you will not be able to ssh into your servers'
            .format(sg_name))
    if ('tcp', POSTGRES_PORT) in rules_missing:
        error(
            'Security group {} does not allow inbound TCP traffic on the default postgres port ({})'
            .format(sg_name, POSTGRES_PORT))
    if ('tcp', HEALTHCHECK_PORT) in rules_missing:
        error(
            'Security group {} does not allow inbound TCP traffic on the default health check port ({})'
            .format(sg_name, HEALTHCHECK_PORT))
    # looked up after check_security_group() so a freshly created group is found
    variables['spilo_sg_id'] = get_security_group(region, sg_name).id
    check_s3_bucket(variables['wal_s3_bucket'], region)
    return variables
def gather_user_variables(variables, region, account_info):
    """Interactively collect missing deployment parameters.

    Mutates *variables* in place and returns it.  Side effects: queries EC2
    for the odd bastion and zmon-appliance security groups, optionally
    encrypts passwords via KMS, and checks the WAL S3 bucket.
    """
    defaults = set_default_variables(dict())
    if click.confirm('Do you want to set the docker image now? [No]'):
        prompt(variables, "docker_image", "Docker Image Version", default=get_latest_image())
    prompt(variables, 'wal_s3_bucket', 'Postgres WAL S3 bucket to use',
           default='{}-{}-spilo-app'.format(get_account_alias(), region))
    prompt(variables, 'instance_type', 'EC2 instance type', default='t2.medium')
    variables['hosted_zone'] = account_info.Domain or defaults['hosted_zone']
    # DNS zone names are absolute: make sure it ends with a dot
    if (variables['hosted_zone'][-1:] != '.'):
        variables['hosted_zone'] += '.'
    prompt(variables, 'discovery_domain', 'ETCD Discovery Domain',
           default='postgres.' + variables['hosted_zone'][:-1])
    variables['add_replica_loadbalancer'] = click.confirm('Do you want a replica ELB?', default=False)
    # note: adjacent string literals — the prompt reads "... the ELBs? ..."
    prompt(variables, 'elb_access_cidr', 'Which network should be allowed to access the ELB''s? (default=vpc)',
           default=get_vpc_attribute(region=region, vpc_id=account_info.VpcID, attribute='cidr_block'))
    odd_sg_name = 'Odd (SSH Bastion Host)'
    odd_sg = get_security_group(region, odd_sg_name)
    if odd_sg and click.confirm('Do you want to allow access to the Spilo nodes from {}?'.format(odd_sg_name),
                                default=True):
        variables['odd_sg_id'] = odd_sg.group_id
    # Find all Security Groups attached to the zmon worker with 'zmon' in their name
    ec2 = boto3.client('ec2', region)
    filters = [{'Name': 'tag-key', 'Values': ['StackName']},
               {'Name': 'tag-value', 'Values': ['zmon-appliance']}]
    zmon_sgs = list()
    for reservation in ec2.describe_instances(Filters=filters).get('Reservations', []):
        for instance in reservation.get('Instances', []):
            zmon_sgs += [sg['GroupId'] for sg in instance.get('SecurityGroups', []) if 'zmon' in sg['GroupName']]
    if len(zmon_sgs) == 0:
        warning('Could not find zmon security group, do you have the zmon-appliance deployed?')
    else:
        # NOTE(review): the answer of this confirmation is discarded — zmon
        # access is configured below regardless of the user's reply; verify
        # whether the assignment should be gated on the confirm result
        click.confirm('Do you want to allow access to the Spilo nodes from zmon?', default=True)
        if len(zmon_sgs) > 1:
            prompt(variables, 'zmon_sg_id', 'Which Security Group should we allow access from? {}'.format(zmon_sgs))
        else:
            variables['zmon_sg_id'] = zmon_sgs[0]
    # Instance families with local (instance-store) disks get a choice;
    # everything else must use EBS
    if variables['instance_type'].lower().split('.')[0] in ('c3', 'g2', 'hi1', 'i2', 'm3', 'r3'):
        variables['use_ebs'] = click.confirm('Do you want database data directory on external (EBS) storage? [Yes]',
                                             default=defaults['use_ebs'])
    else:
        variables['use_ebs'] = True
    if variables['use_ebs']:
        prompt(variables, 'volume_size', 'Database volume size (GB, 10 or more)', default=defaults['volume_size'])
        prompt(variables, 'volume_type', 'Database volume type (gp2, io1 or standard)',
               default=defaults['volume_type'])
        if variables['volume_type'] == 'io1':
            # io1 volumes allow up to 30 provisioned IOPS per GB
            pio_max = variables['volume_size'] * 30
            prompt(variables, "volume_iops", 'Provisioned I/O operations per second (100 - {0})'.format(pio_max),
                   default=str(pio_max))
        prompt(variables, "snapshot_id", "ID of the snapshot to populate EBS volume from", default="")
        if ebs_optimized_supported(variables['instance_type']):
            variables['ebs_optimized'] = True
    prompt(variables, "fstype", "Filesystem for the data partition", default=defaults['fstype'])
    prompt(variables, "fsoptions", "Filesystem mount options (comma-separated)", default=defaults['fsoptions'])
    prompt(variables, "scalyr_account_key", "Account key for your scalyr account", "")
    prompt(variables, 'pgpassword_superuser', "Password for PostgreSQL superuser [random]", show_default=False,
           default=generate_random_password, hide_input=True, confirmation_prompt=True)
    prompt(variables, 'pgpassword_standby', "Password for PostgreSQL user standby [random]", show_default=False,
           default=generate_random_password, hide_input=True, confirmation_prompt=True)
    prompt(variables, 'pgpassword_admin', "Password for PostgreSQL user admin", show_default=True,
           default=defaults['pgpassword_admin'], hide_input=True, confirmation_prompt=True)
    if click.confirm('Do you wish to encrypt these passwords using KMS?', default=False):
        # the default EBS key cannot be used for custom encrypt/decrypt calls
        kms_keys = [k for k in list_kms_keys(region) if 'alias/aws/ebs' not in k['aliases']]
        if len(kms_keys) == 0:
            raise click.UsageError('No KMS key is available for encrypting and decrypting. '
                                   'Ensure you have at least 1 key available.')
        options = ['{}: {}'.format(k['KeyId'], k['Description']) for k in kms_keys]
        kms_key = choice(prompt='Please select the encryption key', options=options)
        kms_keyid = kms_key.split(':')[0]
        variables['kms_arn'] = [k['Arn'] for k in kms_keys if k['KeyId'] == kms_keyid][0]
        # encrypt all passwords and the (non-empty) scalyr key in place
        for key in [k for k in variables if k.startswith('pgpassword_') or k == 'scalyr_account_key']:
            if variables[key]:
                encrypted = encrypt(region=region, KeyId=kms_keyid, Plaintext=variables[key], b64encode=True)
                variables[key] = 'aws:kms:{}'.format(encrypted)
    set_default_variables(variables)
    check_s3_bucket(variables['wal_s3_bucket'], region)
    return variables
def gather_user_variables(variables, region, account_info):
    """Interactively collect missing deployment parameters.

    Mutates *variables* in place and returns it.  Side effects: queries EC2
    for the odd bastion and zmon-worker security groups, optionally
    encrypts passwords via KMS, and checks the WAL S3 bucket.
    """
    defaults = set_default_variables(dict())
    if click.confirm('Do you want to set the docker image now? [No]'):
        prompt(variables, "docker_image", "Docker Image Version", default=get_latest_image())
    prompt(variables, 'wal_s3_bucket', 'Postgres WAL S3 bucket to use',
           default='{}-{}-spilo-app'.format(get_account_alias(), region))
    prompt(variables, 'instance_type', 'EC2 instance type', default='t2.medium')
    variables['hosted_zone'] = account_info.Domain or defaults['hosted_zone']
    # DNS zone names are absolute: make sure it ends with a dot
    if (variables['hosted_zone'][-1:] != '.'):
        variables['hosted_zone'] += '.'
    prompt(variables, 'discovery_domain', 'ETCD Discovery Domain',
           default='postgres.' + variables['hosted_zone'][:-1])
    variables['add_replica_loadbalancer'] = click.confirm(
        'Do you want a replica ELB?', default=False)
    # note: adjacent string literals — the prompt reads "... the ELBs? ..."
    prompt(variables, 'elb_access_cidr',
           'Which network should be allowed to access the ELB'
           's? (default=vpc)',
           default=get_vpc_attribute(region=region,
                                     vpc_id=account_info.VpcID,
                                     attribute='cidr_block'))
    odd_sg_name = 'Odd (SSH Bastion Host)'
    odd_sg = get_security_group(region, odd_sg_name)
    if odd_sg and click.confirm(
            'Do you want to allow access to the Spilo nodes from {}?'.format(
                odd_sg_name), default=True):
        variables['odd_sg_id'] = odd_sg.group_id
    # Find all Security Groups attached to the zmon worker with 'zmon' in their name
    ec2 = boto3.client('ec2', region)
    filters = [{
        'Name': 'tag-key',
        'Values': ['StackName']
    }, {
        'Name': 'tag-value',
        'Values': ['zmon-worker']
    }]
    zmon_sgs = list()
    for reservation in ec2.describe_instances(Filters=filters).get(
            'Reservations', []):
        for instance in reservation.get('Instances', []):
            zmon_sgs += [
                sg['GroupId'] for sg in instance.get('SecurityGroups', [])
                if 'zmon' in sg['GroupName']
            ]
    if len(zmon_sgs) == 0:
        warning('Could not find zmon security group')
    else:
        # NOTE(review): the answer of this confirmation is discarded — zmon
        # access is configured below regardless of the user's reply; verify
        # whether the assignment should be gated on the confirm result
        click.confirm(
            'Do you want to allow access to the Spilo nodes from zmon?',
            default=True)
        if len(zmon_sgs) > 1:
            prompt(
                variables, 'zmon_sg_id',
                'Which Security Group should we allow access from? {}'.format(
                    zmon_sgs))
        else:
            variables['zmon_sg_id'] = zmon_sgs[0]
    # Instance families with local (instance-store) disks get a choice;
    # everything else must use EBS
    if variables['instance_type'].lower().split('.')[0] in ('c3', 'g2', 'hi1', 'i2', 'm3', 'r3'):
        variables['use_ebs'] = click.confirm(
            'Do you want database data directory on external (EBS) storage? [Yes]',
            default=defaults['use_ebs'])
    else:
        variables['use_ebs'] = True
    if variables['use_ebs']:
        prompt(variables, 'volume_size', 'Database volume size (GB, 10 or more)',
               default=defaults['volume_size'])
        prompt(variables, 'volume_type', 'Database volume type (gp2, io1 or standard)',
               default=defaults['volume_type'])
        if variables['volume_type'] == 'io1':
            # io1 volumes allow up to 30 provisioned IOPS per GB
            pio_max = variables['volume_size'] * 30
            prompt(variables, "volume_iops",
                   'Provisioned I/O operations per second (100 - {0})'.format(pio_max),
                   default=str(pio_max))
        prompt(variables, "snapshot_id", "ID of the snapshot to populate EBS volume from", default="")
        if ebs_optimized_supported(variables['instance_type']):
            variables['ebs_optimized'] = True
    prompt(variables, "fstype", "Filesystem for the data partition", default=defaults['fstype'])
    prompt(variables, "fsoptions", "Filesystem mount options (comma-separated)", default=defaults['fsoptions'])
    prompt(variables, "scalyr_account_key", "Account key for your scalyr account", "")
    prompt(variables, 'pgpassword_superuser', "Password for PostgreSQL superuser [random]", show_default=False,
           default=generate_random_password, hide_input=True, confirmation_prompt=True)
    prompt(variables, 'pgpassword_standby', "Password for PostgreSQL user standby [random]", show_default=False,
           default=generate_random_password, hide_input=True, confirmation_prompt=True)
    prompt(variables, 'pgpassword_admin', "Password for PostgreSQL user admin", show_default=True,
           default=defaults['pgpassword_admin'], hide_input=True, confirmation_prompt=True)
    if click.confirm('Do you wish to encrypt these passwords using KMS?', default=False):
        # the default EBS key cannot be used for custom encrypt/decrypt calls
        kms_keys = [
            k for k in list_kms_keys(region) if 'alias/aws/ebs' not in k['aliases']
        ]
        if len(kms_keys) == 0:
            raise click.UsageError(
                'No KMS key is available for encrypting and decrypting. '
                'Ensure you have at least 1 key available.')
        options = [
            '{}: {}'.format(k['KeyId'], k['Description']) for k in kms_keys
        ]
        kms_key = choice(prompt='Please select the encryption key', options=options)
        kms_keyid = kms_key.split(':')[0]
        variables['kms_arn'] = [
            k['Arn'] for k in kms_keys if k['KeyId'] == kms_keyid
        ][0]
        # NOTE(review): unlike the sibling variant of this function, there is
        # no truthiness guard here — an empty scalyr_account_key would also be
        # passed to encrypt(); verify this is intended
        for key in [
                k for k in variables
                if k.startswith('pgpassword_') or k == 'scalyr_account_key'
        ]:
            encrypted = encrypt(region=region, KeyId=kms_keyid,
                                Plaintext=variables[key], b64encode=True)
            variables[key] = 'aws:kms:{}'.format(encrypted)
    set_default_variables(variables)
    check_s3_bucket(variables['wal_s3_bucket'], region)
    return variables
def gather_user_variables(variables, region):
    """Interactively collect missing deployment parameters.

    Mutates *variables* in place and returns it.  Side effects: verifies
    (and possibly creates) the 'app-spilo' security group and checks the
    WAL S3 bucket.
    """
    if click.confirm("Do you want to set the docker image now? [No]"):
        prompt(variables, "docker_image", "Docker Image Version", default=get_latest_spilo_image())
    prompt(variables, "wal_s3_bucket", "Postgres WAL S3 bucket to use", default="zalando-spilo-app")
    prompt(variables, "instance_type", "EC2 instance type", default="t2.micro")
    prompt(variables, "hosted_zone", "Hosted Zone", default=get_default_zone(region) or "example.com")
    # DNS zone names are absolute: make sure it ends with a dot
    if variables["hosted_zone"][-1:] != ".":
        variables["hosted_zone"] += "."
    prompt(variables, "discovery_domain", "ETCD Discovery Domain",
           default="postgres." + variables["hosted_zone"][:-1])
    # Instance families with local (instance-store) disks get a choice;
    # everything else must use EBS
    if variables["instance_type"].lower().split(".")[0] in ("c3", "g2", "hi1", "i2", "m3", "r3"):
        variables["use_ebs"] = click.confirm(
            "Do you want database data directory on external (EBS) storage? [Yes]", default=True
        )
    else:
        variables["use_ebs"] = True
    if variables["use_ebs"]:
        prompt(variables, "volume_size", "Database volume size (GB, 10 or more)", default=10)
        prompt(variables, "volume_type", "Database volume type (gp2, io1 or standard)", default="gp2")
        if variables["volume_type"] == "io1":
            # io1 volumes allow up to 30 provisioned IOPS per GB
            pio_max = variables["volume_size"] * 30
            prompt(
                variables,
                "volume_iops",
                "Provisioned I/O operations per second (100 - {0})".format(pio_max),
                default=str(pio_max),
            )
        prompt(variables, "snapshot_id", "ID of the snapshot to populate EBS volume from", default="")
        if ebs_optimized_supported(variables["instance_type"]):
            variables["ebs_optimized"] = True
    prompt(variables, "fstype", "Filesystem for the data partition", default="ext4")
    prompt(variables, "fsoptions", "Filesystem mount options (comma-separated)",
           default="noatime,nodiratime,nobarrier")
    prompt(variables, "scalyr_account_key", "Account key for your scalyr account", "")
    variables["postgres_port"] = POSTGRES_PORT
    variables["healthcheck_port"] = HEALTHCHECK_PORT
    sg_name = "app-spilo"
    rules_missing = check_security_group(
        sg_name, [("tcp", 22), ("tcp", POSTGRES_PORT), ("tcp", HEALTHCHECK_PORT)], region, allow_from_self=True
    )
    if ("tcp", 22) in rules_missing:
        warning(
            "Security group {} does not allow SSH access, you will not be able to ssh into your servers".format(sg_name)
        )
    if ("tcp", POSTGRES_PORT) in rules_missing:
        error(
            "Security group {} does not allow inbound TCP traffic on the default postgres port ({})".format(
                sg_name, POSTGRES_PORT
            )
        )
    if ("tcp", HEALTHCHECK_PORT) in rules_missing:
        error(
            "Security group {} does not allow inbound TCP traffic on the default health check port ({})".format(
                sg_name, HEALTHCHECK_PORT
            )
        )
    # looked up after check_security_group() so a freshly created group is found
    variables["spilo_sg_id"] = get_security_group(region, sg_name).id
    check_s3_bucket(variables["wal_s3_bucket"], region)
    return variables
def gather_user_variables(variables, region, account_info):
    """Interactively collect missing deployment parameters.

    Mutates *variables* in place and returns it.  Side effects: queries EC2
    for the odd bastion and zmon-worker security groups, optionally
    encrypts passwords via KMS, and checks the WAL S3 bucket.
    """
    defaults = set_default_variables(dict())
    if click.confirm("Do you want to set the docker image now? [No]"):
        prompt(variables, "docker_image", "Docker Image Version", default=get_latest_spilo_image())
    prompt(
        variables,
        "wal_s3_bucket",
        "Postgres WAL S3 bucket to use",
        default="{}-{}-spilo-app".format(get_account_alias(), region),
    )
    prompt(variables, "instance_type", "EC2 instance type", default="t2.micro")
    variables["hosted_zone"] = account_info.Domain or defaults["hosted_zone"]
    # DNS zone names are absolute: make sure it ends with a dot
    if variables["hosted_zone"][-1:] != ".":
        variables["hosted_zone"] += "."
    prompt(variables, "discovery_domain", "ETCD Discovery Domain",
           default="postgres." + variables["hosted_zone"][:-1])
    variables["add_replica_loadbalancer"] = click.confirm("Do you want a replica ELB?", default=False)
    # note: adjacent string literals — the prompt reads "... the ELBs? ..."
    prompt(
        variables,
        "elb_access_cidr",
        "Which network should be allowed to access the ELB" "s? (default=vpc)",
        default=get_vpc_attribute(region=region, vpc_id=account_info.VpcID, attribute="cidr_block"),
    )
    odd_sg_name = "Odd (SSH Bastion Host)"
    odd_sg = get_security_group(region, odd_sg_name)
    if odd_sg and click.confirm(
        "Do you want to allow access to the Spilo nodes from {}?".format(odd_sg_name), default=True
    ):
        variables["odd_sg_id"] = odd_sg.group_id
    # Find all Security Groups attached to the zmon worker with 'zmon' in their name
    ec2 = boto3.client("ec2", region)
    filters = [{"Name": "tag-key", "Values": ["StackName"]},
               {"Name": "tag-value", "Values": ["zmon-worker"]}]
    zmon_sgs = list()
    for reservation in ec2.describe_instances(Filters=filters).get("Reservations", []):
        for instance in reservation.get("Instances", []):
            zmon_sgs += [sg["GroupId"] for sg in instance.get("SecurityGroups", []) if "zmon" in sg["GroupName"]]
    if len(zmon_sgs) == 0:
        warning("Could not find zmon security group")
    else:
        # NOTE(review): the answer of this confirmation is discarded — zmon
        # access is configured below regardless of the user's reply; verify
        # whether the assignment should be gated on the confirm result
        click.confirm("Do you want to allow access to the Spilo nodes from zmon?", default=True)
        if len(zmon_sgs) > 1:
            prompt(variables, "zmon_sg_id", "Which Security Group should we allow access from? {}".format(zmon_sgs))
        else:
            variables["zmon_sg_id"] = zmon_sgs[0]
    # Instance families with local (instance-store) disks get a choice;
    # everything else must use EBS
    if variables["instance_type"].lower().split(".")[0] in ("c3", "g2", "hi1", "i2", "m3", "r3"):
        variables["use_ebs"] = click.confirm(
            "Do you want database data directory on external (EBS) storage? [Yes]", default=defaults["use_ebs"]
        )
    else:
        variables["use_ebs"] = True
    if variables["use_ebs"]:
        prompt(variables, "volume_size", "Database volume size (GB, 10 or more)", default=defaults["volume_size"])
        prompt(variables, "volume_type", "Database volume type (gp2, io1 or standard)",
               default=defaults["volume_type"])
        if variables["volume_type"] == "io1":
            # io1 volumes allow up to 30 provisioned IOPS per GB
            pio_max = variables["volume_size"] * 30
            prompt(
                variables,
                "volume_iops",
                "Provisioned I/O operations per second (100 - {0})".format(pio_max),
                default=str(pio_max),
            )
        prompt(variables, "snapshot_id", "ID of the snapshot to populate EBS volume from", default="")
        if ebs_optimized_supported(variables["instance_type"]):
            variables["ebs_optimized"] = True
    prompt(variables, "fstype", "Filesystem for the data partition", default=defaults["fstype"])
    prompt(variables, "fsoptions", "Filesystem mount options (comma-separated)", default=defaults["fsoptions"])
    prompt(variables, "scalyr_account_key", "Account key for your scalyr account", "")
    prompt(
        variables,
        "pgpassword_superuser",
        "Password for PostgreSQL superuser [random]",
        show_default=False,
        default=generate_random_password,
        hide_input=True,
        confirmation_prompt=True,
    )
    prompt(
        variables,
        "pgpassword_standby",
        "Password for PostgreSQL user standby [random]",
        show_default=False,
        default=generate_random_password,
        hide_input=True,
        confirmation_prompt=True,
    )
    prompt(
        variables,
        "pgpassword_admin",
        "Password for PostgreSQL user admin",
        show_default=True,
        default=defaults["pgpassword_admin"],
        hide_input=True,
        confirmation_prompt=True,
    )
    if click.confirm("Do you wish to encrypt these passwords using KMS?", default=False):
        # the default EBS key cannot be used for custom encrypt/decrypt calls
        kms_keys = [k for k in list_kms_keys(region) if "alias/aws/ebs" not in k["aliases"]]
        if len(kms_keys) == 0:
            raise click.UsageError(
                "No KMS key is available for encrypting and decrypting. "
                "Ensure you have at least 1 key available."
            )
        options = ["{}: {}".format(k["KeyId"], k["Description"]) for k in kms_keys]
        kms_key = choice(prompt="Please select the encryption key", options=options)
        kms_keyid = kms_key.split(":")[0]
        variables["kms_arn"] = [k["Arn"] for k in kms_keys if k["KeyId"] == kms_keyid][0]
        # encrypt the password values in place (scalyr_account_key is NOT
        # encrypted in this variant, unlike some sibling versions)
        for key in [k for k in variables if k.startswith("pgpassword_")]:
            encrypted = encrypt(region=region, KeyId=kms_keyid, Plaintext=variables[key], b64encode=True)
            variables[key] = "aws:kms:{}".format(encrypted)
    set_default_variables(variables)
    check_s3_bucket(variables["wal_s3_bucket"], region)
    return variables
def gather_user_variables(variables, region, account_info):
    """Interactively collect all deployment parameters for a Spilo stack.

    Uses click prompts to fill any values missing from *variables*,
    optionally encrypts the collected passwords via KMS, ensures the
    'app-spilo' security group exists with the expected ingress rules,
    and checks the WAL S3 bucket before returning the completed dict.
    """
    defaults = set_default_variables(dict())

    if click.confirm('Do you want to set the docker image now? [No]'):
        prompt(variables, "docker_image", "Docker Image Version", default=get_latest_spilo_image())

    prompt(variables, 'wal_s3_bucket', 'Postgres WAL S3 bucket to use',
           default='{}-{}-spilo-app'.format(get_account_alias(), region))
    prompt(variables, 'instance_type', 'EC2 instance type', default='t2.micro')

    # Hosted zone comes from the account; normalize to the dot-terminated form.
    zone = account_info.Domain or defaults['hosted_zone']
    if not zone.endswith('.'):
        zone += '.'
    variables['hosted_zone'] = zone
    prompt(variables, 'discovery_domain', 'ETCD Discovery Domain',
           default='postgres.' + zone[:-1])

    # Only these instance families get a choice; everyone else is put on EBS.
    family = variables['instance_type'].lower().split('.')[0]
    if family in ('c3', 'g2', 'hi1', 'i2', 'm3', 'r3'):
        variables['use_ebs'] = click.confirm(
            'Do you want database data directory on external (EBS) storage? [Yes]',
            default=defaults['use_ebs'])
    else:
        variables['use_ebs'] = True

    if variables['use_ebs']:
        prompt(variables, 'volume_size', 'Database volume size (GB, 10 or more)',
               default=defaults['volume_size'])
        prompt(variables, 'volume_type', 'Database volume type (gp2, io1 or standard)',
               default=defaults['volume_type'])
        if variables['volume_type'] == 'io1':
            # assumes volume_size is numeric here (30 IOPS per GB) — TODO confirm prompt() coerces it
            iops_ceiling = variables['volume_size'] * 30
            prompt(variables, "volume_iops",
                   'Provisioned I/O operations per second (100 - {0})'.format(iops_ceiling),
                   default=str(iops_ceiling))
        prompt(variables, "snapshot_id", "ID of the snapshot to populate EBS volume from", default="")
        if ebs_optimized_supported(variables['instance_type']):
            variables['ebs_optimized'] = True

    prompt(variables, "fstype", "Filesystem for the data partition", default=defaults['fstype'])
    prompt(variables, "fsoptions", "Filesystem mount options (comma-separated)", default=defaults['fsoptions'])
    prompt(variables, "scalyr_account_key", "Account key for your scalyr account", "")

    # Passwords default to random values and are prompted without echo.
    for password_key, label in (('pgpassword_superuser', "Password for PostgreSQL superuser [random]"),
                                ('pgpassword_standby', "Password for PostgreSQL user standby [random]")):
        prompt(variables, password_key, label, show_default=False, default=generate_random_password,
               hide_input=True, confirmation_prompt=True)
    prompt(variables, 'pgpassword_admin', "Password for PostgreSQL user admin", show_default=True,
           default=defaults['pgpassword_admin'], hide_input=True, confirmation_prompt=True)

    if click.confirm('Do you wish to encrypt these passwords using KMS?', default=False):
        usable_keys = [k for k in list_kms_keys(region) if 'alias/aws/ebs' not in k['aliases']]
        if not usable_keys:
            raise click.UsageError('No KMS key is available for encrypting and decrypting. '
                                   'Ensure you have at least 1 key available.')
        key_labels = ['{}: {}'.format(k['KeyId'], k['Description']) for k in usable_keys]
        selected = choice(prompt='Please select the encryption key', options=key_labels)
        selected_id = selected.split(':')[0]
        variables['kms_arn'] = [k['Arn'] for k in usable_keys if k['KeyId'] == selected_id][0]
        for name in [k for k in variables if k.startswith('pgpassword_')]:
            ciphertext = encrypt(region=region, KeyId=selected_id, Plaintext=variables[name], b64encode=True)
            variables[name] = 'aws:kms:{}'.format(ciphertext)

    set_default_variables(variables)

    # Make sure the shared application security group exists (created on demand by
    # check_security_group) and opens the ports Spilo needs; report anything missing.
    sg_name = 'app-spilo'
    rules_missing = check_security_group(sg_name,
                                         [('tcp', 22), ('tcp', POSTGRES_PORT), ('tcp', HEALTHCHECK_PORT)],
                                         region, allow_from_self=True)
    if ('tcp', 22) in rules_missing:
        warning('Security group {} does not allow SSH access, you will not be able to ssh into your servers'.
                format(sg_name))
    if ('tcp', POSTGRES_PORT) in rules_missing:
        error('Security group {} does not allow inbound TCP traffic on the default postgres port ({})'.format(
            sg_name, POSTGRES_PORT))
    if ('tcp', HEALTHCHECK_PORT) in rules_missing:
        error('Security group {} does not allow inbound TCP traffic on the default health check port ({})'.
              format(sg_name, HEALTHCHECK_PORT))
    variables['spilo_sg_id'] = get_security_group(region, sg_name).id

    check_s3_bucket(variables['wal_s3_bucket'], region)
    return variables
def gather_user_variables(variables, account_info, region):
    """Validate and complete the pre-supplied variables for a DBaaS Spilo stack.

    Unlike the interactive variant, all values are expected in *variables*
    already; this function validates them and derives the rest (S3 bucket,
    discovery domain, security groups, KMS-encrypted credentials), aborting
    via fatal_error() on any inconsistency.

    Returns the completed *variables* dict.
    """
    set_default_variables(variables)

    # These have no sensible defaults and must be supplied by the caller.
    missing = [required for required in ('team_name', 'team_region', 'team_gateway_zone', 'hosted_zone')
               if not variables.get(required)]
    if missing:
        fatal_error("Missing values for the following variables: {0}".format(', '.join(missing)))

    # redefine the region per the user input
    if variables['team_region'] != region.Region:
        fatal_error("Current region {0} does not match the requested region {1}\n"
                    "Change the current region with --region option or set AWS_DEFAULT_REGION variable.".
                    format(region.Region, variables['team_region']))

    variables['wal_s3_bucket'] = '{}-{}-spilo-dbaas'.format(get_account_alias(), region.Region)

    # normalize both DNS zones to their canonical (dot-terminated) form
    for name in ('team_gateway_zone', 'hosted_zone'):
        if variables[name][-1] != '.':
            variables[name] += '.'

    # split the ldap url into the URL and suffix (path component)
    if variables['ldap_url']:
        url = urlparse(variables['ldap_url'])
        if url.path and url.path[0] == '/':
            variables['ldap_suffix'] = url.path[1:]

    # if master DNS name is specified but not the replica one - derive the replica name from the master
    if variables['master_dns_name'] and not variables['replica_dns_name']:
        replica_dns_components = variables['master_dns_name'].split('.')
        replica_dns_components[0] += '-repl'
        variables['replica_dns_name'] = '.'.join(replica_dns_components)

    # make sure all DNS names belong to the hosted zone
    for v in ('master_dns_name', 'replica_dns_name'):
        if variables[v] and not check_dns_name(variables[v], variables['hosted_zone'][:-1]):
            fatal_error("{0} should end with {1}".format(v.replace('_', ' '), variables['hosted_zone'][:-1]))

    if variables['ldap_url'] and not variables['ldap_suffix']:
        fatal_error("LDAP URL is missing the suffix: should be in a format: "
                    "ldap[s]://example.com[:port]/ou=people,dc=example,dc=com")

    # pick up the proper etcd address depending on the region
    variables['discovery_domain'] = detect_etcd_discovery_domain_for_region(variables['hosted_zone'],
                                                                            region.Region)

    # get the IP addresses of the NAT gateways to access a given ELB.
    variables['nat_gateway_addresses'] = detect_eu_team_nat_gateways(variables['team_gateway_zone'])
    variables['odd_instance_addresses'] = detect_eu_team_odd_instances(variables['team_gateway_zone'])
    variables['spilo_security_group_ingress_rules_block'] = \
        generate_spilo_master_security_group_ingress(variables['nat_gateway_addresses'] +
                                                     variables['odd_instance_addresses'])

    if variables['postgresqlconf']:
        variables['postgresqlconf'] = generate_postgresql_configuration(variables['postgresqlconf'])

    odd_sg = get_security_group(region.Region, ODD_SG_NAME)
    variables['odd_sg_id'] = odd_sg.group_id

    # Find all Security Groups attached to the zmon worker with 'zmon' in their name
    variables['zmon_sg_id'] = detect_zmon_security_group(region.Region)

    # io1 volumes need explicit IOPS; default to the 30-IOPS-per-GB maximum
    if variables['volume_type'] == 'io1' and not variables['volume_iops']:
        pio_max = variables['volume_size'] * 30
        variables['volume_iops'] = str(pio_max)
    variables['ebs_optimized'] = ebs_optimized_supported(variables['instance_type'])

    # pick up the first key with a description containing spilo
    kms_keys = [k for k in list_kms_keys(region.Region)
                if 'alias/aws/ebs' not in k['aliases'] and 'spilo' in k['Description'].lower()]
    if not kms_keys:
        # FIX: was `raise fatal_error(...)`. fatal_error() is invoked bare everywhere
        # else in this function; raising its return value would fail with a TypeError
        # if it returns None instead of an exception instance.
        fatal_error('No KMS key is available for encrypting and decrypting. '
                    'Ensure you have at least 1 key available.')

    kms_key = kms_keys[0]
    kms_keyid = kms_key['KeyId']
    variables['kms_arn'] = kms_key['Arn']

    # encrypt every password (and the scalyr key, when present) with the chosen KMS key
    for key in ([k for k in variables if k.startswith('pgpassword_')] +
                (['scalyr_account_key'] if variables.get('scalyr_account_key') else [])):
        encrypted = encrypt(region=region.Region, KeyId=kms_keyid, Plaintext=variables[key], b64encode=True)
        variables[key] = 'aws:kms:{}'.format(encrypted)

    check_s3_bucket(variables['wal_s3_bucket'], region.Region)
    return variables