Example #1
def gather_user_variables(variables, region, account_info):  # pylint: disable=locally-disabled, unused-argument
    """
    Gather all the variables needed to create the redis node
    """
    # at most 32 characters because of the load balancer name
    prompt(variables,
           'application_id',
           'Application ID',
           default='hello-world',
           value_proc=check_value(18, '^[a-zA-Z][-a-zA-Z0-9]*$'))
    prompt(variables,
           'instance_type',
           'EC2 instance type',
           default='cache.t2.small')

    sg_name = 'redis-{}'.format(variables['application_id'])

    rules_missing = check_security_group(sg_name, [('tcp', 6379)],
                                         region,
                                         allow_from_self=True)
    if ('tcp', 6379) in rules_missing:
        warning('Security group {} does not allow tcp/6379 access, '
                'you will not be able to access your redis'.format(sg_name))

    return variables
Example #2
def update_security_group(region_name: str, security_group: str, trusted_addresses: set):
    networks = trusted_addresses
    prefixlen = 31
    # FIXME: the network count depends on the existing entries and the port count!
    while len(networks) > 50:
        networks = consolidate_networks(networks, prefixlen)
        prefixlen -= 1
    info("{}/{} Prefixlen: {}, {} networks: {}".format(region_name, security_group, prefixlen, len(networks), networks))
    conn = boto.ec2.connect_to_region(region_name)
    for sg in conn.get_all_security_groups():
        if security_group in sg.name:
            for rule in sg.rules:
                info(
                    "Entrys from {}: {} {} {} {}".format(
                        sg.name, rule.ip_protocol, rule.from_port, rule.to_port, rule.grants
                    )
                )
                ipgrants = [IPNetwork("{}".format(grant)) for grant in rule.grants]
                for grant in ipgrants:
                    if grant not in networks:
                        warning("Remove {} from security group {}".format(grant, sg.name))
                        sg.revoke(
                            ip_protocol=rule.ip_protocol, from_port=rule.from_port, to_port=rule.to_port, cidr_ip=grant
                        )
            with Action("Updating security group {}..".format(sg.name)) as act:
                for cidr in sorted(networks):
                    try:
                        sg.authorize(ip_protocol="tcp", from_port=443, to_port=443, cidr_ip=cidr)
                    except boto.exception.EC2ResponseError as e:
                        if "already exists" not in e.message:
                            raise
                    act.progress()
Example #3
def test_tsv_out(capsys):
    with OutputFormat('tsv'):
        warning('this is a warning')
        print_table('a b'.split(), [{"a": 1}, {"b": 2}])
    out, err = capsys.readouterr()
    assert 'a\tb\n1\t\n\t2\n' == out
    assert 'this is a warning\n' == err
Example #4
def test_json_out(capsys):
    with OutputFormat('json'):
        warning('this is a warning')
        print_table('a b'.split(), [{}, {}])
    out, err = capsys.readouterr()
    assert '[{"a": null, "b": null}, {"a": null, "b": null}]\n' == out
    assert 'this is a warning\n' == err
Example #5
def test_yaml_out(capsys):
    with OutputFormat('yaml'):
        warning('this is a warning')
        print_table('a b'.split(), [{}, {}])
    out, err = capsys.readouterr()
    assert 'a: null\nb: null\n---\na: null\nb: null\n\n' == out
    assert 'this is a warning\n' == err
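The three output-format tests above all check the same split: print_table() writes the table to stdout in the selected format, while warning() keeps writing to stderr regardless of the format. A minimal sketch of that behaviour (the top-level clickclick import path is an assumption here):

from clickclick import OutputFormat, print_table, warning

with OutputFormat('json'):
    warning('this is a warning')           # goes to stderr in every format
    print_table(['a', 'b'], [{'a': 1}])    # goes to stdout, here as JSON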
Example #6
def gather_user_variables(variables, region):
    prompt(
        variables,
        "application_id",
        "Application ID",
        default="hello-world",
        value_proc=check_value(60, "^[a-zA-Z][-a-zA-Z0-9]*$"),
    )
    prompt(
        variables,
        "docker_image",
        'Docker image without tag/version (e.g. "pierone.example.org/myteam/myapp")',
        default="stups/hello-world",
    )
    prompt(variables, "instance_type", "EC2 instance type", default="t2.micro")
    if "pierone" in variables["docker_image"] or confirm("Did you need OAuth-Credentials from Mint?"):
        prompt(variables, "mint_bucket", "Mint S3 bucket name", default=lambda: get_mint_bucket_name(region))
    else:
        variables["mint_bucket"] = None

    sg_name = "app-{}".format(variables["application_id"])
    rules_missing = check_security_group(sg_name, [("tcp", 22)], region, allow_from_self=True)

    if ("tcp", 22) in rules_missing:
        warning(
            "Security group {} does not allow SSH access, you will not be able to ssh into your servers".format(sg_name)
        )

    check_iam_role(variables["application_id"], variables["mint_bucket"], region)

    return variables
Example #7
def output(output):
    '''Example of all possible echo formats.

    You only see the messages if the output format is TEXT.
    '''
    with OutputFormat(output):
        action('This is a ok:')
        ok()
        action('This is a ok with message:')
        ok('all is fine')
        action('This is a warning:')
        warning('please check this')
        with Action('Start with working..') as act:
            # save_the_world()
            act.progress()
            act.progress()
            act.progress()
            act.progress()
        print_table('id name'.split(), [{
            'id': 1,
            'name': 'Test #1'
        }, {
            'id': 2,
            'name': 'Test #2'
        }])
        info('Only FYI')
        action('This is a error:')
        error('this is wrong, please fix')
        action('This is a fatal error:')
        fatal_error('this is a fuckup')
        info('I\'am not printed, the process a dead')
Example #8
def gather_user_variables(variables, region):
    # at most 32 characters because of the load balancer name
    prompt(variables,
           'application_id',
           'Application ID',
           default='hello-world',
           value_proc=check_value(18, '^[a-zA-Z][-a-zA-Z0-9]*$'))
    prompt(variables,
           'instance_type',
           'EC2 instance type',
           default='cache.m3.medium')
    prompt(variables,
           'number_of_nodes',
           'Number of nodes in cluster',
           default='2',
           value_proc=check_value(1, '^[2-5]$'))

    sg_name = 'redis-{}'.format(variables['application_id'])

    rules_missing = check_security_group(sg_name, [('tcp', 6379)],
                                         region,
                                         allow_from_self=True)
    if ('tcp', 6379) in rules_missing:
        warning(
            'Security group {} does not allow tcp/6379 access yet, you will not be able to access redis'
            .format(sg_name))

    return variables
Example #9
def test_json_out(capsys):
    with OutputFormat('json'):
        warning('this is a warning')
        print_table('a b'.split(), [{}, {}])
    out, err = capsys.readouterr()
    assert '[{"a": null, "b": null}, {"a": null, "b": null}]\n' == out
    assert 'this is a warning\n' == err
Example #10
def test_text_out(capsys):
    with OutputFormat('text'):
        warning('this is a warning')
        print_table('a b'.split(), [{}, {}])
    out, err = capsys.readouterr()
    assert 'A│B\n    \n    \n' == out
    assert 'this is a warning\n' == err
Example #11
def output(output):
    '''Example of all possible echo formats.

    You only see the messages if the output format is TEXT.
    '''
    with OutputFormat(output):
        action('This is a ok:')
        ok()
        action('This is a ok with message:')
        ok('all is fine')
        action('This is a warning:')
        warning('please check this')
        with Action('Start with working..') as act:
            # save_the_world()
            act.progress()
            act.progress()
            act.progress()
            act.progress()
        print_table('id name'.split(), [{'id': 1, 'name': 'Test #1'}, {'id': 2, 'name': 'Test #2'}])
        info('Only FYI')
        action('This is a error:')
        error('this is wrong, please fix')
        action('This is a fatal error:')
        fatal_error('this is a fuckup')
        info('I\'am not printed, the process a dead')
Example #12
def test_text_out(capsys):
    with OutputFormat('text'):
        warning('this is a warning')
        print_table('a b'.split(), [{}, {}])
    out, err = capsys.readouterr()
    assert u'A│B\n    \n    \n' == out
    assert 'this is a warning\n' == err
Example #13
def gather_user_variables(variables, region):
    prompt(variables, 'application_id', 'Application ID', default='hello-world')
    prompt(variables, 'docker_image', 'Docker image without tag/version (e.g. "pierone.example.org/myteam/myapp")',
           default='stups/hello-world')
    prompt(variables, 'http_port', 'HTTP port', default=8080, type=int)
    prompt(variables, 'http_health_check_path', 'HTTP health check path', default='/')
    prompt(variables, 'instance_type', 'EC2 instance type', default='t2.micro')
    prompt(variables, 'mint_bucket', 'Mint S3 bucket name', default=lambda: get_mint_bucket_name(region))

    http_port = variables['http_port']

    sg_name = 'app-{}'.format(variables['application_id'])
    rules_missing = check_security_group(sg_name, [('tcp', 22), ('tcp', http_port)], region, allow_from_self=True)

    if ('tcp', 22) in rules_missing:
        warning('Security group {} does not allow SSH access, you will not be able to ssh into your servers'.format(
            sg_name))

    if ('tcp', http_port) in rules_missing:
        error('Security group {} does not allow inbound TCP traffic on the specified HTTP port ({})'.format(
            sg_name, http_port
        ))

    rules_missing = check_security_group(sg_name + '-lb', [('tcp', 443)], region)

    if rules_missing:
        error('Load balancer security group {} does not allow inbound HTTPS traffic'.format(sg_name))

    check_iam_role(variables['application_id'], variables['mint_bucket'], region)

    return variables
Example #14
def gather_user_variables(variables, region):
    prompt(variables,
           'application_id',
           'Application ID',
           default='hello-world',
           value_proc=check_value(60, '^[a-zA-Z][-a-zA-Z0-9]*$'))
    prompt(
        variables,
        'docker_image',
        'Docker image without tag/version (e.g. "pierone.example.org/myteam/myapp")',
        default='stups/hello-world')
    prompt(variables, 'instance_type', 'EC2 instance type', default='t2.micro')
    if 'pierone' in variables['docker_image'] or confirm(
            'Did you need OAuth-Credentials from Mint?'):
        prompt(variables,
               'mint_bucket',
               'Mint S3 bucket name',
               default=lambda: get_mint_bucket_name(region))
    else:
        variables['mint_bucket'] = None

    sg_name = 'app-{}'.format(variables['application_id'])
    rules_missing = check_security_group(sg_name, [('tcp', 22)],
                                         region,
                                         allow_from_self=True)

    if ('tcp', 22) in rules_missing:
        warning(
            'Security group {} does not allow SSH access, you will not be able to ssh into your servers'
            .format(sg_name))

    check_iam_role(variables['application_id'], variables['mint_bucket'],
                   region)

    return variables
Example #15
def get_config_data():
    fn = os.path.expanduser(DEFAULT_CONFIG_FILE)
    data = {}
    try:
        if os.path.exists(fn):
            with open(fn) as fd:
                data = yaml.safe_load(fd)

            if 'password' in data:
                keyring.set_password("zmon-cli", data['user'], data['password'])
                del data['password']
                with open(fn, mode='w') as fd:
                    yaml.dump(data, fd, default_flow_style=False,
                              allow_unicode=True,
                              encoding='utf-8')
        else:
            clickclick.warning("No configuration file found at [{}]".format(DEFAULT_CONFIG_FILE))
            data['url'] = click.prompt("ZMon Base URL (e.g. https://zmon2.local/rest/api/v1)")
            data['user'] = click.prompt("ZMon username", default=os.environ['USER'])

            with open(fn, mode='w') as fd:
                yaml.dump(data, fd, default_flow_style=False,
                          allow_unicode=True,
                          encoding='utf-8')
    except Exception as e:
        error(e)

    return validate_config(data)
Example #16
def test_yaml_out(capsys):
    with OutputFormat('yaml'):
        warning('this is a warning')
        print_table('a b'.split(), [{}, {}])
    out, err = capsys.readouterr()
    assert 'a: null\nb: null\n---\na: null\nb: null\n\n' == out
    assert 'this is a warning\n' == err
Example #17
def gather_user_variables(variables, region):
    prompt(variables, 'wal_s3_bucket', 'Postgres WAL S3 bucket to use', default='zalando-spilo-app')
    prompt(variables, 'instance_type', 'EC2 instance type', default='t2.micro')
    prompt(variables, 'hosted_zone', 'Hosted Zone', default=get_default_zone(region) or 'example.com')
    if (variables['hosted_zone'][-1:] != '.'):
        variables['hosted_zone'] += '.'
    prompt(variables, 'discovery_url', 'ETCD Discovery URL', default='postgres.'+variables['hosted_zone'][:-1])

    variables['postgres_port'] = POSTGRES_PORT
    variables['healthcheck_port'] = HEALTHCHECK_PORT

    sg_name = 'app-spilo'
    variables['spilo_sg_id'] = get_security_group(region, sg_name).id
    rules_missing = check_security_group(sg_name, [('tcp', 22), ('tcp', POSTGRES_PORT), ('tcp', HEALTHCHECK_PORT)],
                                         region, allow_from_self=True)

    if ('tcp', 22) in rules_missing:
        warning('Security group {} does not allow SSH access, you will not be able to ssh into your servers'.format(
            sg_name))

    if ('tcp', POSTGRES_PORT) in rules_missing:
        error('Security group {} does not allow inbound TCP traffic on the default postgres port ({})'.format(
            sg_name, POSTGRES_PORT
        ))

    if ('tcp', HEALTHCHECK_PORT) in rules_missing:
        error('Security group {} does not allow inbound TCP traffic on the default health check port ({})'.format(
            sg_name, HEALTHCHECK_PORT
        ))

    check_s3_bucket(variables['wal_s3_bucket'], region)

    return variables
Example #18
def test_tsv_out(capsys):
    with OutputFormat('tsv'):
        warning('this is a warning')
        print_table('a b'.split(), [{"a": 1}, {"b": 2}])
    out, err = capsys.readouterr()
    assert 'a\tb\n1\t\n\t2\n' == out
    assert 'this is a warning\n' == err
Example #19
def get_config_data(config_file=DEFAULT_CONFIG_FILE):
    fn = os.path.expanduser(config_file)
    data = {}

    try:
        if os.path.exists(fn):
            with open(fn) as fd:
                data = yaml.safe_load(fd)
        else:
            clickclick.warning(
                'No configuration file found at [{}]'.format(config_file))

            data['url'] = click.prompt(
                'ZMON Base URL (e.g. https://zmon.example.org/api/v1)')

            # TODO: either ask for fixed token or Zign
            data['user'] = click.prompt('ZMON username',
                                        default=os.environ['USER'])

            with open(fn, mode='w') as fd:
                yaml.dump(data,
                          fd,
                          default_flow_style=False,
                          allow_unicode=True,
                          encoding='utf-8')
    except Exception as e:
        error(e)

    return validate_config(data)
Example #20
    def get_stacks(self) -> list:
        header = make_header(self.access_token)
        request = self.stacks_url.get(headers=header, verify=False)
        lizzy_version = request.headers.get('X-Lizzy-Version')
        if lizzy_version and lizzy_version != TARGET_VERSION:
            warning("Version Mismatch (Client: {}, Server: {})".format(TARGET_VERSION, lizzy_version))
        request.raise_for_status()
        return request.json()
Example #21
    def get_stacks(self) -> list:
        header = make_header(self.access_token)
        request = self.stacks_url.get(headers=header, verify=False)
        lizzy_version = request.headers.get('X-Lizzy-Version')
        if lizzy_version and lizzy_version != TARGET_VERSION:
            warning("Version Mismatch (Client: {}, Server: {})".format(TARGET_VERSION, lizzy_version))
        request.raise_for_status()
        return request.json()
Example #22
    def delete(self, stack_id: str):
        url = self.stacks_url / stack_id

        header = make_header(self.access_token)
        request = url.delete(headers=header, verify=False)
        lizzy_version = request.headers.get('X-Lizzy-Version')
        if lizzy_version and lizzy_version != TARGET_VERSION:
            warning("Version Mismatch (Client: {}, Server: {})".format(TARGET_VERSION, lizzy_version))
        request.raise_for_status()
Example #23
def gather_user_variables(variables, region):
    # at most 32 characters because of the load balancer name
    prompt(variables,
           'application_id',
           'Application ID',
           default='hello-world',
           value_proc=check_value(32, '^[a-zA-Z][-a-zA-Z0-9]*$'))
    prompt(
        variables,
        'docker_image',
        'Docker image without tag/version (e.g. "pierone.example.org/myteam/myapp")',
        default='stups/hello-world')
    prompt(variables, 'http_port', 'HTTP port', default=8080, type=int)
    prompt(variables,
           'http_health_check_path',
           'HTTP health check path',
           default='/')
    prompt(variables, 'instance_type', 'EC2 instance type', default='t2.micro')
    if 'pierone' in variables['docker_image'] or confirm(
            'Did you need OAuth-Credentials from Mint?'):
        prompt(variables,
               'mint_bucket',
               'Mint S3 bucket name',
               default=lambda: get_mint_bucket_name(region))
    else:
        variables['mint_bucket'] = None

    http_port = variables['http_port']

    sg_name = 'app-{}'.format(variables['application_id'])
    rules_missing = check_security_group(sg_name, [('tcp', 22),
                                                   ('tcp', http_port)],
                                         region,
                                         allow_from_self=True)

    if ('tcp', 22) in rules_missing:
        warning(
            'Security group {} does not allow SSH access, you will not be able to ssh into your servers'
            .format(sg_name))

    if ('tcp', http_port) in rules_missing:
        error(
            'Security group {} does not allow inbound TCP traffic on the specified HTTP port ({})'
            .format(sg_name, http_port))

    rules_missing = check_security_group(sg_name + '-lb', [('tcp', 443)],
                                         region)

    if rules_missing:
        error(
            'Load balancer security group {} does not allow inbound HTTPS traffic'
            .format(sg_name))

    check_iam_role(variables['application_id'], variables['mint_bucket'],
                   region)

    return variables
Example #24
    def delete(self, stack_id: str):
        url = self.stacks_url / stack_id

        header = make_header(self.access_token)
        request = url.delete(headers=header, verify=False)
        lizzy_version = request.headers.get('X-Lizzy-Version')
        if lizzy_version and lizzy_version != TARGET_VERSION:
            warning("Version Mismatch (Client: {}, Server: {})".format(TARGET_VERSION, lizzy_version))
        request.raise_for_status()
Example #25
def gather_user_variables(variables, region):
    if click.confirm('Do you want to set the docker image now? [No]'):
        prompt(variables, "docker_image", "Docker Image Version", default=get_latest_spilo_image())
    prompt(variables, 'wal_s3_bucket', 'Postgres WAL S3 bucket to use', default='zalando-spilo-app')
    prompt(variables, 'instance_type', 'EC2 instance type', default='t2.micro')
    prompt(variables, 'hosted_zone', 'Hosted Zone', default=get_default_zone(region) or 'example.com')
    if (variables['hosted_zone'][-1:] != '.'):
        variables['hosted_zone'] += '.'
    prompt(variables, 'discovery_domain', 'ETCD Discovery Domain',
           default='postgres.'+variables['hosted_zone'][:-1])
    if variables['instance_type'].lower().split('.')[0] in ('c3', 'g2', 'hi1', 'i2', 'm3', 'r3'):
        variables['use_ebs'] = click.confirm('Do you want database data directory on external (EBS) storage? [Yes]',
                                             default=True)
    else:
        variables['use_ebs'] = True
    if variables['use_ebs']:
        prompt(variables, 'volume_size', 'Database volume size (GB, 10 or more)', default=10)
        prompt(variables, 'volume_type', 'Database volume type (gp2, io1 or standard)', default='gp2')
        if variables['volume_type'] == 'io1':
            pio_max = variables['volume_size'] * 30
            prompt(variables, "volume_iops", 'Provisioned I/O operations per second (100 - {0})'.
                   format(pio_max), default=str(pio_max))
        prompt(variables, "snapshot_id", "ID of the snapshot to populate EBS volume from", default="")
        if ebs_optimized_supported(variables['instance_type']):
            variables['ebs_optimized'] = True
    prompt(variables, "fstype", "Filesystem for the data partition", default="ext4")
    prompt(variables, "fsoptions", "Filesystem mount options (comma-separated)",
           default="noatime,nodiratime,nobarrier")
    prompt(variables, "scalyr_account_key", "Account key for your scalyr account", "")

    variables['postgres_port'] = POSTGRES_PORT
    variables['healthcheck_port'] = HEALTHCHECK_PORT

    sg_name = 'app-spilo'
    rules_missing = check_security_group(sg_name,
                                         [('tcp', 22), ('tcp', POSTGRES_PORT), ('tcp', HEALTHCHECK_PORT)],
                                         region, allow_from_self=True)

    if ('tcp', 22) in rules_missing:
        warning('Security group {} does not allow SSH access, you will not be able to ssh into your servers'.
                format(sg_name))

    if ('tcp', POSTGRES_PORT) in rules_missing:
        error('Security group {} does not allow inbound TCP traffic on the default postgres port ({})'.format(
            sg_name, POSTGRES_PORT
        ))

    if ('tcp', HEALTHCHECK_PORT) in rules_missing:
        error('Security group {} does not allow inbound TCP traffic on the default health check port ({})'.
              format(sg_name, HEALTHCHECK_PORT))
    variables['spilo_sg_id'] = get_security_group(region, sg_name).id

    check_s3_bucket(variables['wal_s3_bucket'], region)

    return variables
Example #26
def get_k8s_nodes(api_server: str, token: str) -> list:
    headers = {"Authorization": "Bearer {}".format(token)}
    try:
        response = requests.get(api_server + "/api/v1/nodes",
                                headers=headers,
                                timeout=5)
        response.raise_for_status()
        return response.json()['items']
    except Exception as e:
        warning('Failed to query API server for nodes: {}'.format(e))
        return []
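A hedged usage sketch for get_k8s_nodes above; the API server URL and the bearer token are placeholders, and the function is assumed to be in scope as defined in this example:

nodes = get_k8s_nodes('https://kube-api.example.org', 'REPLACE_ME_TOKEN')
for node in nodes:
    # each item is a Kubernetes v1 Node object
    print(node['metadata']['name'])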
Example #27
def configure(file, account_name_pattern, saml_user, saml_password, dry_run):
    '''Configure one or more AWS account(s) matching the provided pattern'''
    config = yaml.safe_load(file)
    accounts = config.get('accounts', {})

    account_names = sorted(fnmatch.filter(accounts.keys(), account_name_pattern))

    if not account_names:
        error('No configuration found for account {}'.format(account_name_pattern))
        return

    trusted_addresses = None

    global_cfg = config.get('global', {})

    for account_name in account_names:
        cfg = accounts.get(account_name) or {}
        for key, val in global_cfg.items():
            if key not in cfg:
                cfg[key] = val

        saml_url = cfg.get('saml_identity_provider_url')
        saml_role = cfg.get('saml_admin_login_role')

        if saml_user and saml_url and saml_role:
            if not saml_password:
                saml_password = keyring.get_password('sevenseconds', saml_user)
            if not saml_password:
                saml_password = click.prompt('Please enter your SAML password', hide_input=True)

            with Action('Authenticating against {}..'.format(saml_url)):
                saml_xml, roles = authenticate(saml_url, saml_user, saml_password)
            keyring.set_password('sevenseconds', saml_user, saml_password)

            account_alias = cfg.get('alias', account_name).format(account_name=account_name)
            matching_roles = [(parn, rarn, aname)
                              for parn, rarn, aname in roles if aname == account_alias and rarn.endswith(saml_role)]
            if not matching_roles:
                error('No matching role found for account {}: {}'.format(account_name, roles))
                warning('Skipping account configuration of {} due to missing credentials'.format(account_name))
                continue
            role = matching_roles[0]
            with Action('Assuming role {}..'.format(role)):
                key_id, secret, session_token = assume_role(saml_xml, role[0], role[1])
            write_aws_credentials('default', key_id, secret, session_token)

        if not trusted_addresses:
            trusted_addresses = get_trusted_addresses(config)

        try:
            configure_account(account_name, cfg, trusted_addresses, dry_run)
        except Exception:
            error('Error while configuring {}: {}'.format(account_name, traceback.format_exc()))
Example #28
def compensate(
    calculation_error,
    compensations,
    identifier,
    new_record_weights,
    partial_count,
    percentage,
    identifier_versions,
):
    """
    Compensate for rounding errors, as well as for the fact that we neither
    allow weights to drop below the minimal possible value, so as not to
    disable traffic for minimally configured versions (1), nor allow adding
    any weight to already disabled versions (0).
    """
    # distribute the error over the versions other than the current one
    assert partial_count
    part = calculation_error / partial_count
    if part > 0:
        part = int(max(1, part))
    else:
        part = int(min(-1, part))
    # avoid changing the older version distributions
    for i in sorted(new_record_weights.keys(),
                    key=lambda x: identifier_versions[x],
                    reverse=True):
        if i == identifier:
            continue
        new_weight = new_record_weights[i] + part
        if new_weight <= 0:
            # do not remove the traffic from the minimal traffic versions
            continue
        new_record_weights[i] = new_weight
        calculation_error -= part
        compensations[i] = part
        if calculation_error == 0:
            break
    if calculation_error != 0:
        adjusted_percentage = percentage + calculation_error
        compensations[identifier] = calculation_error
        calculation_error = 0
        warning((
            "Changing given percentage from {} to {} " +
            "because all other versions are already getting the possible minimum traffic"
        ).format(
            percentage / PERCENT_RESOLUTION,
            adjusted_percentage / PERCENT_RESOLUTION,
        ))
        percentage = adjusted_percentage
        new_record_weights[identifier] = percentage
    assert calculation_error == 0
    return percentage
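To make the error distribution concrete, here is a hedged sketch of a single call to the compensate function above; the weights, version numbers and the partial_count of 2 are made-up inputs for illustration:

new_record_weights = {'v1': 5, 'v2': 5, 'v3': 90}
identifier_versions = {'v1': 1, 'v2': 2, 'v3': 3}
compensations = {}

# Spread a rounding error of 2 over the two other versions, one unit each,
# leaving the current identifier 'v3' untouched.
percentage = compensate(
    calculation_error=2,
    compensations=compensations,
    identifier='v3',
    new_record_weights=new_record_weights,
    partial_count=2,
    percentage=90,
    identifier_versions=identifier_versions,
)
# new_record_weights is now {'v1': 6, 'v2': 6, 'v3': 90},
# compensations is {'v2': 1, 'v1': 1} and the returned percentage stays 90.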
Example #29
def gather_user_variables(variables, region):
    # at most 32 characters because of the load balancer name
    prompt(variables, 'application_id', 'Application ID', default='hello-world',
           value_proc=check_value(18, '^[a-zA-Z][-a-zA-Z0-9]*$'))
    prompt(variables, 'instance_type', 'EC2 instance type', default='cache.t2.small')

    sg_name = 'redis-{}'.format(variables['application_id'])

    rules_missing = check_security_group(sg_name, [('tcp', 6379)], region, allow_from_self=True)
    if ('tcp', 6379) in rules_missing:
        warning('Security group {} does not allow tcp/6379 access, you will not be able to access your redis'.format(
            sg_name))

    return variables
Example #30
def _request_access(even_url, cacert, username, hostname, reason, remote_host,
                    lifetime, clip, connect, tunnel):
    data = {'username': username, 'hostname': hostname, 'reason': reason}
    host_via = hostname
    if remote_host:
        data['remote_host'] = remote_host
        host_via = '{} via {}'.format(remote_host, hostname)
    if lifetime:
        data['lifetime_minutes'] = lifetime
    try:
        access_token = zign.api.get_token("piu", ['uid'])
    except zign.api.ServerError as e:
        click.secho('{}'.format(e), fg='red', bold=True)
        return 500

    click.secho('Requesting access to host {host_via} for {username}..'.format(host_via=host_via, username=username),
                bold=True)
    r = requests.post(even_url, headers={'Content-Type': 'application/json',
                                         'Authorization': 'Bearer {}'.format(access_token)},
                      data=json.dumps(data),
                      verify=cacert)
    if r.status_code == 200:
        click.secho(r.text, fg='green', bold=True)
        ssh_command = ''
        if remote_host:
            ssh_command = 'ssh -o StrictHostKeyChecking=no {username}@{remote_host}'.format(**vars())
            if tunnel:
                ports = tunnel.split(':')
                ssh_command = '-L {local_port}:{remote_host}:{remote_port}'.format(
                    local_port=ports[0], remote_host=remote_host, remote_port=ports[1])
        command = 'ssh -tA {username}@{hostname} {ssh_command}'.format(
                  username=username, hostname=hostname, ssh_command=ssh_command)
        if connect or tunnel:
            subprocess.call(command.split())

        if not ssh_keys_added():
            warning("No SSH identities found. Please add one using ssh-add, for example:")
            warning('ssh-add ~/.ssh/id_rsa')

        click.secho('You can access your server with the following command:')
        click.secho(command)

        if clip:
            click.secho('\nOr just check your clipboard and run ctrl/command + v (requires package "xclip" on Linux)')
            if pyperclip is not None:
                pyperclip.copy(command)
    else:
        click.secho('Server returned status {code}: {text}'.format(code=r.status_code, text=r.text),
                    fg='red', bold=True)
    return r.status_code
Example #31
def test_echo():
    action('Action..')
    ok()

    action('Action..')
    error(' some error')

    action('Action..')
    with pytest.raises(SystemExit):
        fatal_error(' some fatal error')  # noqa

    action('Action..')
    warning(' some warning')

    info('Some info')
Example #32
    def scale(self, stack_id: str, new_scale: int,
              region: Optional[str]=None):
        url = self.stacks_url / stack_id
        data = {"new_scale": new_scale}
        if region:
            data['region'] = region

        header = make_header(self.access_token)
        response = url.patch(json=data, headers=header, verify=False)
        try:
            response.raise_for_status()
        except requests.RequestException:
            warning('Data Json:')
            print(json.dumps(data, indent=4))
            raise
Example #33
    def traffic(self, stack_id: str, percentage: int):
        url = self.stacks_url / stack_id
        data = {"new_traffic": percentage}

        header = make_header(self.access_token)
        request = url.patch(data=json.dumps(data), headers=header, verify=False)
        lizzy_version = request.headers.get('X-Lizzy-Version')
        if lizzy_version and lizzy_version != TARGET_VERSION:
            warning("Version Mismatch (Client: {}, Server: {})".format(TARGET_VERSION, lizzy_version))
        try:
            request.raise_for_status()
        except requests.RequestException:
            warning('Data Json:')
            print(json.dumps(data, indent=4))
            raise
Example #34
def test_echo():
    action('Action..')
    ok()

    action('Action..')
    error(' some error')

    action('Action..')
    with pytest.raises(SystemExit):
        fatal_error(' some fatal error')  # noqa

    action('Action..')
    warning(' some warning')

    info('Some info')
Example #35
    def traffic(self, stack_id: str, percentage: int):
        url = self.stacks_url / stack_id
        data = {"new_traffic": percentage}

        header = make_header(self.access_token)
        request = url.patch(data=json.dumps(data), headers=header, verify=False)
        lizzy_version = request.headers.get('X-Lizzy-Version')
        if lizzy_version and lizzy_version != TARGET_VERSION:
            warning("Version Mismatch (Client: {}, Server: {})".format(TARGET_VERSION, lizzy_version))
        try:
            request.raise_for_status()
        except requests.RequestException:
            warning('Data Json:')
            print(json.dumps(data, indent=4))
            raise
Example #36
def raise_for_status(response, elastigroup_id):
    try:
        response.raise_for_status()
    except HTTPError:

        status = response.json().get("response", {}).get("status")
        details = response.json().get("response", {}).get("errors")[0]

        error("HTTP Error: {}[{}]".format(status.get("message"),
                                          status.get("code")))
        error("{}[{}]".format(details.get("message"), details.get("code")))

        if details.get("code") == "DEPLOYMENT_ALREADY_IN_PROGRESS":
            warning(
                "An older deploy is still running, check on SpotInst console deployments tab for elastigroup: [{}]."
                .format(elastigroup_id))
Example #37
def gather_user_variables(variables, region):
    prompt(variables, 'application_id', 'Application ID', default='hello-world')
    prompt(variables, 'docker_image', 'Docker image without tag/version (e.g. "pierone.example.org/myteam/myapp")',
           default='stups/hello-world')
    prompt(variables, 'instance_type', 'EC2 instance type', default='t2.micro')
    prompt(variables, 'mint_bucket', 'Mint S3 bucket name', default=lambda: get_mint_bucket_name(region))

    sg_name = 'app-{}'.format(variables['application_id'])
    rules_missing = check_security_group(sg_name, [('tcp', 22)], region, allow_from_self=True)

    if ('tcp', 22) in rules_missing:
        warning('Security group {} does not allow SSH access, you will not be able to ssh into your servers'.format(
            sg_name))

    check_iam_role(variables['application_id'], variables['mint_bucket'], region)

    return variables
Example #38
def run_linter(spec_file):
    spec = yaml.safe_load(spec_file)
    spec = compatibility_layer(spec)
    try:
        resolver = validate_spec(spec)
    except Exception as e:
        error('Error during Swagger schema validation:\n{}'.format(e))
        return

    # collect all "rules" defined as functions starting with "lint_"
    rules = [f for name, f in globals().items() if name.startswith('lint_')]
    for func in rules:
        for issue in func(spec, resolver):
            if isinstance(issue, tuple):
                location, message = issue
            else:
                location = issue
                message = None
            warning('{}: {}{}'.format(location, message + ' ' if message else '', func.__doc__))
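run_linter picks up every function in its module's globals() whose name starts with "lint_", calls it with (spec, resolver), and expects it to yield either a location string or a (location, message) tuple; the rule's docstring is appended to the printed warning. A hedged sketch of a rule following that convention (the check itself is illustrative and not part of the original linter):

def lint_missing_info_title(spec, resolver):
    '''the Swagger spec should declare a non-empty info.title'''
    if not spec.get('info', {}).get('title'):
        yield '#/info'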
Example #39
def ssh_connection(clip,
                   connect,
                   hostname,
                   remote_host,
                   tunnel,
                   odd_user,
                   remote_user,
                   quick_login=False):
    if tunnel:
        ports = tunnel.split(":")
        ssh_command = "ssh {odd_user}@{hostname} -L {local_port}:{remote_host}:{remote_port}".format(
            odd_user=odd_user,
            hostname=hostname,
            local_port=ports[0],
            remote_host=remote_host,
            remote_port=ports[1])
    else:
        if remote_host:
            ssh_command = "ssh -J {odd_user}@{odd_host} {remote_user}@{remote_host}".format(
                odd_user=odd_user,
                odd_host=hostname,
                remote_user=remote_user,
                remote_host=remote_host)
        else:
            ssh_command = "ssh {odd_user}@{odd_host}".format(odd_user=odd_user,
                                                             odd_host=hostname)
    if connect or tunnel:
        subprocess.call(ssh_command.split())
    elif quick_login:
        click.secho("Please login within the next 60 seconds.")
    if not ssh_keys_added():
        warning(
            "No SSH identities found. Please add one using ssh-add, for example:"
        )
        warning("ssh-add ~/.ssh/id_rsa")
    click.secho("You can access your server with the following command:")
    click.secho(ssh_command)
    if clip:
        click.secho(
            '\nOr just check your clipboard and run ctrl/command + v (requires package "xclip" on Linux)'
        )
        if pyperclip is not None:
            pyperclip.copy(ssh_command)
Example #40
def gather_user_variables(variables, region, account_info):
    # at most 32 characters because of the load balancer name
    prompt(variables, 'application_id', 'Application ID', default='hello-world',
           value_proc=check_value(60, '^[a-zA-Z][-a-zA-Z0-9]*$'))
    prompt(variables, 'docker_image', 'Docker image without tag/version (e.g. "pierone.example.org/myteam/myapp")',
           default='stups/hello-world')
    prompt(variables, 'http_port', 'HTTP port', default=8080, type=int)
    prompt(variables, 'http_health_check_path', 'HTTP health check path', default='/')
    prompt(variables, 'instance_type', 'EC2 instance type', default='t2.micro')
    if 'pierone' in variables['docker_image'] or confirm('Did you need OAuth-Credentials from Mint?'):
        prompt(variables, 'mint_bucket', 'Mint S3 bucket name', default=lambda: get_mint_bucket_name(region))
    else:
        variables['mint_bucket'] = None
    choice(variables, 'loadbalancer_scheme',
           prompt='Please select the load balancer scheme',
           options=[('internal',
                     'internal: only accessible from the own VPC'),
                    ('internet-facing',
                     'internet-facing: accessible from the public internet')],
           default='internal')
    http_port = variables['http_port']

    sg_name = 'app-{}'.format(variables['application_id'])
    rules_missing = check_security_group(sg_name, [('tcp', 22), ('tcp', http_port)], region, allow_from_self=True)

    if ('tcp', 22) in rules_missing:
        warning('Security group {} does not allow SSH access, you will not be able to ssh into your servers'.format(
            sg_name))

    if ('tcp', http_port) in rules_missing:
        error('Security group {} does not allow inbound TCP traffic on the specified HTTP port ({})'.format(
            sg_name, http_port
        ))

    rules_missing = check_security_group(sg_name + '-lb', [('tcp', 443)], region)

    if rules_missing:
        error('Load balancer security group {} does not allow inbound HTTPS traffic'.format(sg_name))

    check_iam_role(variables['application_id'], variables['mint_bucket'], region)

    return variables
Example #41
def get_config_data(config_file=DEFAULT_CONFIG_FILE):
    fn = os.path.expanduser(config_file)
    data = {}

    try:
        if os.path.exists(fn):
            with open(fn) as fd:
                data = json.load(fd)
        else:
            warning('No configuration file found at [{}]'.format(config_file))

            data = set_config_file()

            if not data:
                error('Failed to configure ZMON SLR cli.')

    except Exception as e:
        fatal_error(e)

    return data
Example #42
def gather_user_variables(variables, region, account_info):
    prompt(variables, 'application_id', 'Application ID', default='hello-world',
           value_proc=check_value(60, '^[a-zA-Z][-a-zA-Z0-9]*$'))
    prompt(variables, 'docker_image', 'Docker image without tag/version (e.g. "pierone.example.org/myteam/myapp")',
           default='stups/hello-world')
    prompt(variables, 'instance_type', 'EC2 instance type', default='t2.micro')
    if 'pierone' in variables['docker_image'] or confirm('Did you need OAuth-Credentials from Mint?'):
        prompt(variables, 'mint_bucket', 'Mint S3 bucket name', default=lambda: get_mint_bucket_name(region))
    else:
        variables['mint_bucket'] = None

    sg_name = 'app-{}'.format(variables['application_id'])
    rules_missing = check_security_group(sg_name, [('tcp', 22)], region, allow_from_self=True)

    if ('tcp', 22) in rules_missing:
        warning('Security group {} does not allow SSH access, you will not be able to ssh into your servers'.format(
            sg_name))

    check_iam_role(variables['application_id'], variables['mint_bucket'], region)

    return variables
Example #43
def longest_grace_period(node_name: str, config: dict):
    """
    Find the longest grace period of any pods on the node.
    """
    headers = {
        "Authorization": "Bearer {}".format(config["worker_shared_secret"])
    }
    params = {"fieldSelector": "spec.nodeName={}".format(node_name)}
    resp = requests.get(config["api_server"] + "/api/v1/pods",
                        params=params,
                        headers=headers,
                        timeout=5)
    pods = resp.json()
    grace_period = 0
    if not pods:
        warning("Response does not contain valid json {}", resp)
        return grace_period
    for pod in pods["items"]:
        grace_period = max(pod["spec"]["terminationGracePeriodSeconds"],
                           grace_period)
    return grace_period
Example #44
def compensate(calculation_error, compensations, identifier, new_record_weights, partial_count,
               percentage, identifier_versions):
    """
    Compensate for rounding errors, as well as for the fact that we neither
    allow weights to drop below the minimal possible value, so as not to
    disable traffic for minimally configured versions (1), nor allow adding
    any weight to already disabled versions (0).
    """
    # distribute the error over the versions other than the current one
    assert partial_count
    part = calculation_error / partial_count
    if part > 0:
        part = int(max(1, part))
    else:
        part = int(min(-1, part))
    # avoid changing the older version distributions
    for i in sorted(new_record_weights.keys(), key=lambda x: identifier_versions[x], reverse=True):
        if i == identifier:
            continue
        new_weight = new_record_weights[i] + part
        if new_weight <= 0:
            # do not remove the traffic from the minimal traffic versions
            continue
        new_record_weights[i] = new_weight
        calculation_error -= part
        compensations[i] = part
        if calculation_error == 0:
            break
    if calculation_error != 0:
        adjusted_percentage = percentage + calculation_error
        compensations[identifier] = calculation_error
        calculation_error = 0
        warning(
            ("Changing given percentage from {} to {} " +
             "because all other versions are already getting the possible minimum traffic").format(
                 percentage / PERCENT_RESOLUTION, adjusted_percentage / PERCENT_RESOLUTION))
        percentage = adjusted_percentage
        new_record_weights[identifier] = percentage
    assert calculation_error == 0
    return percentage
Example #45
def gather_user_variables(variables, region, account_info):
    # at most 32 characters because of the load balancer name
    prompt(
        variables,
        "application_id",
        "Application ID",
        default="hello-world",
        value_proc=check_value(18, "^[a-zA-Z][-a-zA-Z0-9]*$"),
    )
    prompt(variables, "instance_type", "EC2 instance type", default="cache.t2.small")

    sg_name = "redis-{}".format(variables["application_id"])

    rules_missing = check_security_group(sg_name, [("tcp", 6379)], region, allow_from_self=True)
    if ("tcp", 6379) in rules_missing:
        warning(
            "Security group {} does not allow tcp/6379 access, you will not be able to access your redis".format(
                sg_name
            )
        )

    return variables
Example #46
    def new_stack(self, image_version: str, keep_stacks: str, new_traffic: int, senza_yaml_path: str,
                  application_version: Optional[str], parameters: List[str]) -> str:
        header = make_header(self.access_token)

        with open(senza_yaml_path) as senza_yaml_file:
            senza_yaml = senza_yaml_file.read()

        data = {'image_version': image_version,
                'keep_stacks': keep_stacks,
                'new_traffic': new_traffic,
                'parameters': parameters,
                'senza_yaml': senza_yaml}

        if application_version:
            data['application_version'] = application_version

        request = self.stacks_url.post(data=json.dumps(data), headers=header, verify=False)
        lizzy_version = request.headers.get('X-Lizzy-Version')
        if lizzy_version and lizzy_version != TARGET_VERSION:
            warning("Version Mismatch (Client: {}, Server: {})".format(TARGET_VERSION, lizzy_version))
        request.raise_for_status()
        stack_info = request.json()
        return stack_info['stack_id']
Example #47
def drain_node(node_name: str, config: dict, max_grace_period=60):
    """
    Drains a node for pods. Pods will be terminated with a grace period
    respecting the longest grace period of any pod on the node limited to
    max_grace_period. Default max_grace_period is 60s.
    """
    # respect the pod termination grace period
    grace_period = min(longest_grace_period(node_name, config),
                       max_grace_period)

    for i in range(3):
        try:
            subprocess.check_call([
                'kubectl', '--server', config["api_server"], '--token',
                config["worker_shared_secret"], 'drain', node_name, '--force',
                '--delete-local-data', '--ignore-daemonsets',
                '--grace-period={}'.format(grace_period)
            ])
            break
        except Exception as e:
            warning('Kubectl failed to drain node: {}'.format(e))
            time.sleep(grace_period)
    time.sleep(grace_period)
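A hedged sketch of calling drain_node above; the config keys are the ones read by longest_grace_period and drain_node, while the API server URL, the shared secret and the node name are placeholders:

config = {
    'api_server': 'https://kube-api.example.org',
    'worker_shared_secret': 'REPLACE_ME',
}
# Evict all pods from the node, capping the grace period at two minutes.
drain_node('ip-10-0-0-42.eu-west-1.compute.internal', config, max_grace_period=120)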
Example #48
    def new_stack(self,
                  image_version: str,
                  keep_stacks: int,
                  new_traffic: int,
                  senza_yaml_path: str,
                  stack_version: Optional[str],
                  application_version: Optional[str],
                  disable_rollback: bool,
                  parameters: List[str]) -> str:
        """
        Requests a new stack.
        """
        header = make_header(self.access_token)

        with open(senza_yaml_path) as senza_yaml_file:
            senza_yaml = senza_yaml_file.read()

        data = {'image_version': image_version,
                'disable_rollback': disable_rollback,
                'keep_stacks': keep_stacks,
                'new_traffic': new_traffic,
                'parameters': parameters,
                'senza_yaml': senza_yaml}

        if application_version:
            data['application_version'] = application_version

        if stack_version:
            data['stack_version'] = stack_version

        request = self.stacks_url.post(data=json.dumps(data, sort_keys=True), headers=header, verify=False)
        lizzy_version = request.headers.get('X-Lizzy-Version')
        if lizzy_version and lizzy_version != TARGET_VERSION:
            warning("Version Mismatch (Client: {}, Server: {})".format(TARGET_VERSION, lizzy_version))
        request.raise_for_status()
        stack_info = request.json()
        return stack_info['stack_id']
Example #49
def check_senza_version(current_version: str):
    """
    Checks if senza is updated and prints a warning with instructions to update
    if it's not.
    """
    if not sys.stdout.isatty():
        return
    current_version = LooseVersion(current_version)
    try:
        latest_version = get_latest_version()
    except Exception:
        if sentry is not None:
            sentry.captureException()
        return

    if latest_version is not None and current_version < latest_version:
        if __file__.startswith('/home'):
            # if it's installed in the user folder
            cmd = "pip install --upgrade stups-senza"
        else:
            cmd = "sudo pip install --upgrade stups-senza"
        warning("Your senza version ({current}) is outdated. "
                "Please install the new one using '{cmd}'".format(current=current_version,
                                                                  cmd=cmd))
Example #50
def role_ldif(configuration_file, account_name_pattern, saml_user, saml_password):
    '''Print Role-LDIF with Template in Configuration YAML'''
    config = yaml.safe_load(configuration_file)
    accounts = config.get('accounts', {})
    global_cfg = config.get('global', {})
    saml_url = global_cfg.get('saml_identity_provider_url')
    saml_role = global_cfg.get('saml_admin_login_role')
    account_names = []
    for pattern in account_name_pattern:
        account_names.extend(sorted(fnmatch.filter(accounts.keys(), pattern)))

    if os.environ.get('AWS_PROFILE'):
        account_names.append(os.environ.get('AWS_PROFILE'))

    if not account_names:
        print('# No Account set. Try "default"..', file=sys.stderr)
        account_names.append('default')

    print('Render LDIF for following accounts: {}'.format(', '.join(account_names)), file=sys.stderr)
    for account_name in account_names:
        os.environ['AWS_PROFILE'] = account_name
        cfg = accounts.get(account_name) or {}
        for key, val in global_cfg.items():
            if key not in cfg:
                cfg[key] = val
        saml_url = cfg.get('saml_identity_provider_url')
        saml_role = cfg.get('saml_admin_login_role')

        if saml_user and saml_url and saml_role:
            account_alias = cfg.get('alias', account_name).format(account_name=account_name)
            aws_profile = 'sevenseconds-{}'.format(account_name)
            if not get_aws_credentials(saml_user, saml_password, saml_url, saml_role, account_alias, aws_profile):
                warning('Skipping account configuration of {} due to missing credentials'.format(account_name))
                continue
            os.environ['AWS_PROFILE'] = aws_profile
        print(get_role_ldif(cfg))
Example #51
def check_senza_version(current_version: str):
    """
    Checks if senza is updated and prints a warning with instructions to update
    if it's not.
    """
    if not sys.stdout.isatty():
        return
    current_version = LooseVersion(current_version)
    try:
        latest_version = get_latest_version()
    except Exception:
        if sentry is not None:
            sentry.captureException()
        return

    if latest_version is not None and current_version < latest_version:
        if __file__.startswith('/home'):
            # if it's installed in the user folder
            cmd = "pip3 install --upgrade stups-senza"
        else:
            cmd = "sudo pip3 install --upgrade stups-senza"
        warning("Your senza version ({current}) is outdated. "
                "Please install the new one using '{cmd}'".format(current=current_version,
                                                                  cmd=cmd))
Example #52
def create(definition: dict, version: str, parameter: tuple,
           region: str,
           disable_rollback: bool,
           dry_run: bool,
           force: bool,
           tag: List[str],
           timeout: int,
           keep_stacks: Optional[int],
           traffic: int,
           verbose: bool,
           remote: str,
           parameter_file: Optional[str]
           ):
    """
    Create a new Cloud Formation stack from the given Senza definition file
    """
    lizzy = setup_lizzy_client(remote)
    parameter = list(parameter) or []
    if parameter_file:
        parameter.extend(read_parameter_file(parameter_file))

    if not force:  # pragma: no cover
        # supporting artifact checking would imply copying a large amount of code
        # from senza, so it should be considered out of scope until senza
        # and lizzy client are merged
        warning("WARNING: "
                "Artifact checking is still not supported by lizzy-client.")

    with Action('Requesting new stack..') as action:
        new_stack, output = lizzy.new_stack(keep_stacks, traffic,
                                            definition, version,
                                            disable_rollback, parameter,
                                            region=region,
                                            dry_run=dry_run,
                                            tags=tag)

    stack_id = '{stack_name}-{version}'.format_map(new_stack)
    print(output)

    info('Stack ID: {}'.format(stack_id))

    if dry_run:
        info("Post deployment steps skipped")
        exit(0)

    with Action('Waiting for new stack...') as action:
        if verbose:
            print()  # ensure that new states will not be printed on the same line as the action

        last_state = None
        for state in lizzy.wait_for_deployment(stack_id, region=region):
            if state != last_state and verbose:
                click.echo(' {}'.format(state))
            else:
                action.progress()
            last_state = state

        # TODO be prepared to handle all final AWS CF states
        if last_state == 'ROLLBACK_COMPLETE':
            fatal_error(
                'Stack was rollback after deployment. Check your application log for possible reasons.')
        elif last_state != 'CREATE_COMPLETE':
            fatal_error('Deployment failed: {}'.format(last_state))

    info('Deployment Successful')

    if traffic is not None:
        with Action('Requesting traffic change..'):
            try:
                lizzy.traffic(stack_id, traffic, region=region)
            except requests.ConnectionError as e:
                connection_error(e, fatal=False)
            except requests.HTTPError as e:
                agent_error(e, fatal=False)

    # TODO unit test this
    if keep_stacks is not None:
        versions_to_keep = keep_stacks + 1
        stacks_to_remove_counter = 1
        end_time = datetime.datetime.utcnow() + datetime.timedelta(seconds=timeout)
        while stacks_to_remove_counter > 0 and datetime.datetime.utcnow() <= end_time:
            try:
                all_stacks = lizzy.get_stacks([new_stack['stack_name']],
                                              region=region)
            except requests.ConnectionError as e:
                connection_error(e, fatal=False)
                error("Failed to fetch old stacks. "
                      "Old stacks WILL NOT BE DELETED")
                exit(1)
            except requests.HTTPError as e:
                agent_error(e, fatal=False)
                error("Failed to fetch old stacks. "
                      "Old stacks WILL NOT BE DELETED")
                exit(1)
            else:
                sorted_stacks = sorted(all_stacks,
                                       key=lambda stack: stack['creation_time'])
                stacks_to_remove = sorted_stacks[:-versions_to_keep]
                stacks_to_remove_counter = len(stacks_to_remove)
                with Action('Deleting old stacks..'):
                    print()
                    for old_stack in stacks_to_remove:
                        old_stack_id = '{stack_name}-{version}'.format_map(
                            old_stack)
                        if old_stack['status'] in COMPLETE_STATES:
                            click.echo(' {}'.format(old_stack_id))
                            try:
                                lizzy.delete(old_stack_id, region=region)
                                stacks_to_remove_counter -= 1
                            except requests.ConnectionError as e:
                                connection_error(e, fatal=False)
                            except requests.HTTPError as e:
                                agent_error(e, fatal=False)
                        else:
                            click.echo(' > {} current status is {}, trying '
                                       'again later'.format(old_stack_id,
                                                            old_stack['status']))
                if stacks_to_remove_counter > 0:
                    time.sleep(5)

        if datetime.datetime.utcnow() > end_time:
            click.echo('Timed out waiting for related stacks to be ready for deletion.')
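
The cleanup loop above keeps the newest `keep_stacks` versions plus the stack that was just deployed, and retries deletion until the counter reaches zero or the timeout expires. A minimal standalone sketch of that selection step, using hypothetical stack dicts instead of the Lizzy API, could look like this:

import datetime

def select_stacks_to_remove(stacks: list, keep_stacks: int) -> list:
    """Return the stacks that should be deleted, oldest first.

    `stacks` is a list of dicts with 'version' and 'creation_time' keys
    (a hypothetical shape mirroring the Lizzy response).
    """
    versions_to_keep = keep_stacks + 1  # keep N old versions plus the new one
    sorted_stacks = sorted(stacks, key=lambda stack: stack['creation_time'])
    return sorted_stacks[:-versions_to_keep]

# keep_stacks=1 keeps the two newest versions and marks the rest for removal
stacks = [{'version': 'v{}'.format(i),
           'creation_time': datetime.datetime(2016, 1, i + 1)} for i in range(4)]
print([s['version'] for s in select_stacks_to_remove(stacks, keep_stacks=1)])
# ['v0', 'v1']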
Beispiel #53
0
def configure_iam(account_name: str, dns_domain: str, cfg):
    # NOTE: hardcoded region as IAM is region-independent
    conn = boto.iam.connect_to_region("eu-west-1")
    iam = boto3.resource("iam")

    roles = cfg.get("roles", {})

    account_id = get_account_id()

    info("Account ID is {}".format(account_id))

    for role_name, role_cfg in sorted(roles.items()):
        with Action("Checking role {role_name}..", **vars()) as act:
            try:
                res = conn.get_role_policy(role_name, role_name)["get_role_policy_response"]["get_role_policy_result"]
                current_policy = json.loads(urllib.parse.unquote(res["policy_document"]))
                approved_policy = role_cfg["policy"]
                res = conn.list_role_policies(role_name)
                policy_names = res["list_role_policies_response"]["list_role_policies_result"]["policy_names"]
                if (
                    current_policy == approved_policy
                    and len(policy_names) == 1
                    and len(list(iam.Role(role_name).attached_policies.all())) == 0
                ):
                    continue
                else:
                    act.error("missmatch")
            except Exception:
                act.error("Failed")
        try:
            conn.get_role(role_name)
        except Exception:
            with Action("Creating role {role_name}..", **vars()):
                policy_document = json.dumps(role_cfg.get("assume_role_policy")).replace("{account_id}", account_id)
                conn.create_role(role_name, policy_document, "/")
        with Action("Updating policy for role {role_name}..", **vars()):
            conn.put_role_policy(role_name, role_name, json.dumps(role_cfg["policy"]))
        with Action("Removing invalid policies from role {role_name}..", **vars()):
            res = conn.list_role_policies(role_name)
            policy_names = res["list_role_policies_response"]["list_role_policies_result"]["policy_names"]
            for policy_name in policy_names:
                if policy_name != role_name:
                    warning("Delete {} from {}".format(policy_name, role_name))
                    conn.delete_role_policy(role_name, policy_name)
            for policy in iam.Role(role_name).attached_policies.all():
                warning("Detach {} from {}".format(policy.policy_name, role_name))
                policy.detach_role(RoleName=role_name)

    res = conn.list_saml_providers()["list_saml_providers_response"]["list_saml_providers_result"]
    saml_providers = res["saml_provider_list"]
    for name, url in cfg.get("saml_providers", {}).items():
        arn = "arn:aws:iam::{account_id}:saml-provider/{name}".format(account_id=account_id, name=name)
        found = False
        for provider in saml_providers:
            if provider["arn"] == arn:
                found = True
        if found:
            info("Found existing SAML provider {name}".format(name=name))
        else:
            with Action("Creating SAML provider {name}..", **vars()):
                r = requests.get(url)
                saml_metadata_document = r.text
                conn.create_saml_provider(saml_metadata_document, name)

    cert_name = dns_domain.replace(".", "-")
    certs = conn.list_server_certs()["list_server_certificates_response"]["list_server_certificates_result"]
    certs = certs["server_certificate_metadata_list"]
    cert_names = [d["server_certificate_name"] for d in certs]
    info("Found existing SSL certs: {}".format(", ".join(cert_names)))
    if cert_name not in cert_names:
        with Action("Uploading SSL server certificate..") as act:
            dir = os.environ.get("SSLDIR")
            if dir and os.path.isdir(dir):
                dir += "/"
            else:
                dir = ""
            file = dir + "_." + dns_domain
            try:
                with open(file + ".crt") as fd:
                    cert_body = fd.read()
                if os.path.isfile(file + ".key") and os.path.getsize(file + ".key") > 0:
                    with open(file + ".key") as fd:
                        private_key = fd.read()
                elif os.path.isfile(file + ".key.gpg") and os.path.getsize(file + ".key.gpg") > 0:
                    gpg = gnupg.GPG()
                    with open(file + ".key.gpg", "rb") as fd:
                        gpg_obj = gpg.decrypt_file(fd)
                    if gpg_obj.ok:
                        private_key = gpg_obj.data
                    else:
                        act.error("decryption error: {}".format(gpg_obj.stderr))
                        return
                with open(dir + "trusted_chain.pem") as fd:
                    cert_chain = fd.read()
                conn.upload_server_cert(cert_name, cert_body=cert_body, private_key=private_key, cert_chain=cert_chain)
            except FileNotFoundError as e:
                act.error("Could not upload SSL cert: {}".format(e))
Beispiel #54
0
def gather_user_variables(variables, region, account_info):
    defaults = set_default_variables(dict())

    if click.confirm('Do you want to set the docker image now? [No]'):
        prompt(variables,
               "docker_image",
               "Docker Image Version",
               default=get_latest_image())

    prompt(variables,
           'wal_s3_bucket',
           'Postgres WAL S3 bucket to use',
           default='{}-{}-spilo-app'.format(get_account_alias(), region))

    prompt(variables,
           'instance_type',
           'EC2 instance type',
           default='t2.medium')

    variables['hosted_zone'] = account_info.Domain or defaults['hosted_zone']
    if (variables['hosted_zone'][-1:] != '.'):
        variables['hosted_zone'] += '.'
    prompt(variables,
           'discovery_domain',
           'ETCD Discovery Domain',
           default='postgres.' + variables['hosted_zone'][:-1])

    variables['add_replica_loadbalancer'] = click.confirm(
        'Do you want a replica ELB?', default=False)

    prompt(variables,
           'elb_access_cidr',
           'Which network should be allowed to access the ELB'
           's? (default=vpc)',
           default=get_vpc_attribute(region=region,
                                     vpc_id=account_info.VpcID,
                                     attribute='cidr_block'))

    odd_sg_name = 'Odd (SSH Bastion Host)'
    odd_sg = get_security_group(region, odd_sg_name)
    if odd_sg and click.confirm(
            'Do you want to allow access to the Spilo nodes from {}?'.format(
                odd_sg_name),
            default=True):
        variables['odd_sg_id'] = odd_sg.group_id

    # Find all Security Groups attached to the zmon worker with 'zmon' in their name
    ec2 = boto3.client('ec2', region)
    filters = [{
        'Name': 'tag-key',
        'Values': ['StackName']
    }, {
        'Name': 'tag-value',
        'Values': ['zmon-worker']
    }]
    zmon_sgs = list()
    for reservation in ec2.describe_instances(Filters=filters).get(
            'Reservations', []):
        for instance in reservation.get('Instances', []):
            zmon_sgs += [
                sg['GroupId'] for sg in instance.get('SecurityGroups', [])
                if 'zmon' in sg['GroupName']
            ]

    if len(zmon_sgs) == 0:
        warning('Could not find zmon security group')
    else:
        # only wire up the zmon security group if the user confirms
        if click.confirm(
                'Do you want to allow access to the Spilo nodes from zmon?',
                default=True):
            if len(zmon_sgs) > 1:
                prompt(
                    variables, 'zmon_sg_id',
                    'Which Security Group should we allow access from? {}'.format(
                        zmon_sgs))
            else:
                variables['zmon_sg_id'] = zmon_sgs[0]

    if variables['instance_type'].lower().split('.')[0] in ('c3', 'g2', 'hi1',
                                                            'i2', 'm3', 'r3'):
        variables['use_ebs'] = click.confirm(
            'Do you want the database data directory on external (EBS) storage? [Yes]',
            default=defaults['use_ebs'])
    else:
        variables['use_ebs'] = True

    if variables['use_ebs']:
        prompt(variables,
               'volume_size',
               'Database volume size (GB, 10 or more)',
               default=defaults['volume_size'])
        prompt(variables,
               'volume_type',
               'Database volume type (gp2, io1 or standard)',
               default=defaults['volume_type'])
        if variables['volume_type'] == 'io1':
            pio_max = variables['volume_size'] * 30
            prompt(variables,
                   "volume_iops",
                   'Provisioned I/O operations per second (100 - {0})'.format(
                       pio_max),
                   default=str(pio_max))
        prompt(variables,
               "snapshot_id",
               "ID of the snapshot to populate EBS volume from",
               default="")
        if ebs_optimized_supported(variables['instance_type']):
            variables['ebs_optimized'] = True
    prompt(variables,
           "fstype",
           "Filesystem for the data partition",
           default=defaults['fstype'])
    prompt(variables,
           "fsoptions",
           "Filesystem mount options (comma-separated)",
           default=defaults['fsoptions'])
    prompt(variables, "scalyr_account_key",
           "Account key for your scalyr account", "")

    prompt(variables,
           'pgpassword_superuser',
           "Password for PostgreSQL superuser [random]",
           show_default=False,
           default=generate_random_password,
           hide_input=True,
           confirmation_prompt=True)
    prompt(variables,
           'pgpassword_standby',
           "Password for PostgreSQL user standby [random]",
           show_default=False,
           default=generate_random_password,
           hide_input=True,
           confirmation_prompt=True)
    prompt(variables,
           'pgpassword_admin',
           "Password for PostgreSQL user admin",
           show_default=True,
           default=defaults['pgpassword_admin'],
           hide_input=True,
           confirmation_prompt=True)

    if click.confirm('Do you wish to encrypt these passwords using KMS?',
                     default=False):
        kms_keys = [
            k for k in list_kms_keys(region)
            if 'alias/aws/ebs' not in k['aliases']
        ]

        if len(kms_keys) == 0:
            raise click.UsageError(
                'No KMS key is available for encrypting and decrypting. '
                'Ensure you have at least 1 key available.')

        options = [
            '{}: {}'.format(k['KeyId'], k['Description']) for k in kms_keys
        ]
        kms_key = choice(prompt='Please select the encryption key',
                         options=options)
        kms_keyid = kms_key.split(':')[0]

        variables['kms_arn'] = [
            k['Arn'] for k in kms_keys if k['KeyId'] == kms_keyid
        ][0]

        for key in [
                k for k in variables
                if k.startswith('pgpassword_') or k == 'scalyr_account_key'
        ]:
            encrypted = encrypt(region=region,
                                KeyId=kms_keyid,
                                Plaintext=variables[key],
                                b64encode=True)
            variables[key] = 'aws:kms:{}'.format(encrypted)

    set_default_variables(variables)

    check_s3_bucket(variables['wal_s3_bucket'], region)

    return variables
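
The `encrypt` helper used for the `aws:kms:` wrapping above comes from the surrounding tooling. Roughly, producing an `aws:kms:<base64 ciphertext>` string (the prefix convention is taken from the code above) can be reproduced with plain boto3 as in this sketch:

import base64
import boto3

def kms_encrypt_value(region: str, key_id: str, plaintext: str) -> str:
    """Encrypt a secret with KMS and wrap it in the aws:kms: prefix convention."""
    kms = boto3.client('kms', region_name=region)
    result = kms.encrypt(KeyId=key_id, Plaintext=plaintext.encode('utf-8'))
    ciphertext = base64.b64encode(result['CiphertextBlob']).decode('ascii')
    return 'aws:kms:{}'.format(ciphertext)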
Beispiel #55
0
def _request_access(even_url, cacert, username, hostname, reason, remote_host,
                    lifetime, clip, connect, tunnel):
    data = {'username': username, 'hostname': hostname, 'reason': reason}
    host_via = hostname
    if remote_host:
        data['remote_host'] = remote_host
        host_via = '{} via {}'.format(remote_host, hostname)
    if lifetime:
        data['lifetime_minutes'] = lifetime
    try:
        access_token = zign.api.get_token("piu", ['uid'])
    except zign.api.ServerError as e:
        click.secho('{}'.format(e), fg='red', bold=True)
        return 500

    click.secho('Requesting access to host {host_via} for {username}..'.format(
        host_via=host_via, username=username),
                bold=True)
    r = requests.post(even_url,
                      headers={
                          'Content-Type': 'application/json',
                          'Authorization': 'Bearer {}'.format(access_token)
                      },
                      data=json.dumps(data),
                      verify=cacert)
    if r.status_code == 200:
        click.secho(r.text, fg='green', bold=True)
        ssh_command = ''
        if remote_host:
            ssh_command = 'ssh -o StrictHostKeyChecking=no {username}@{remote_host}'.format(
                **vars())
            if tunnel:
                ports = tunnel.split(':')
                ssh_command = '-L {local_port}:{remote_host}:{remote_port}'.format(
                    local_port=ports[0],
                    remote_host=remote_host,
                    remote_port=ports[1])
        command = 'ssh -tA {username}@{hostname} {ssh_command}'.format(
            username=username, hostname=hostname, ssh_command=ssh_command)
        if connect or tunnel:
            subprocess.call(command.split())

        if not ssh_keys_added():
            warning(
                "No SSH identities found. Please add one using ssh-add, for example:"
            )
            warning('ssh-add ~/.ssh/id_rsa')

        click.secho('You can access your server with the following command:')
        click.secho(command)

        if clip:
            click.secho(
                '\nOr just check your clipboard and run ctrl/command + v (requires package "xclip" on Linux)'
            )
            if pyperclip is not None:
                pyperclip.copy(command)
    else:
        click.secho('Server returned status {code}: {text}'.format(
            code=r.status_code, text=r.text),
                    fg='red',
                    bold=True)
    return r.status_code
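
To make the command construction above easier to follow, here is a small standalone sketch of how the final ssh invocation is assembled for the plain, jump-host and tunnel cases (same string logic as above, no network calls):

def build_ssh_command(username: str, hostname: str, remote_host=None, tunnel=None) -> str:
    """Assemble the ssh command printed by _request_access."""
    ssh_command = ''
    if remote_host:
        ssh_command = ('ssh -o StrictHostKeyChecking=no '
                       '{}@{}'.format(username, remote_host))
        if tunnel:
            local_port, remote_port = tunnel.split(':')
            # forward local_port through the bastion to remote_host:remote_port
            ssh_command = '-L {}:{}:{}'.format(local_port, remote_host, remote_port)
    return 'ssh -tA {}@{} {}'.format(username, hostname, ssh_command)

# e.g. build_ssh_command('jdoe', 'odd-eu-west-1.example.org',
#                        remote_host='10.0.0.5', tunnel='5432:5432')
# -> 'ssh -tA jdoe@odd-eu-west-1.example.org -L 5432:10.0.0.5:5432'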
Beispiel #56
0
def gather_user_variables(variables, region, account_info):
    if click.confirm('Do you want to set the docker image now? [No]'):
        prompt(variables,
               "docker_image",
               "Docker Image Version",
               default=get_latest_spilo_image())
    else:
        variables['docker_image'] = None
    prompt(variables,
           'wal_s3_bucket',
           'Postgres WAL S3 bucket to use',
           default='{}-{}-spilo-app'.format(get_account_alias(), region))
    prompt(variables, 'instance_type', 'EC2 instance type', default='t2.micro')
    variables['hosted_zone'] = account_info.Domain or 'example.com'
    if (variables['hosted_zone'][-1:] != '.'):
        variables['hosted_zone'] += '.'
    prompt(variables,
           'discovery_domain',
           'ETCD Discovery Domain',
           default='postgres.' + variables['hosted_zone'][:-1])
    if variables['instance_type'].lower().split('.')[0] in ('c3', 'g2', 'hi1',
                                                            'i2', 'm3', 'r3'):
        variables['use_ebs'] = click.confirm(
            'Do you want the database data directory on external (EBS) storage? [Yes]',
            default=True)
    else:
        variables['use_ebs'] = True
    variables['ebs_optimized'] = None
    variables['volume_iops'] = None
    variables['snapshot_id'] = None
    if variables['use_ebs']:
        prompt(variables,
               'volume_size',
               'Database volume size (GB, 10 or more)',
               default=10)
        prompt(variables,
               'volume_type',
               'Database volume type (gp2, io1 or standard)',
               default='gp2')
        if variables['volume_type'] == 'io1':
            pio_max = variables['volume_size'] * 30
            prompt(variables,
                   "volume_iops",
                   'Provisioned I/O operations per second (100 - {0})'.format(
                       pio_max),
                   default=str(pio_max))
        prompt(variables,
               "snapshot_id",
               "ID of the snapshot to populate EBS volume from",
               default="")
        if ebs_optimized_supported(variables['instance_type']):
            variables['ebs_optimized'] = True
    prompt(variables,
           "fstype",
           "Filesystem for the data partition",
           default="ext4")
    prompt(variables,
           "fsoptions",
           "Filesystem mount options (comma-separated)",
           default="noatime,nodiratime,nobarrier")
    prompt(variables, "scalyr_account_key",
           "Account key for your scalyr account", "")

    variables['postgres_port'] = POSTGRES_PORT
    variables['healthcheck_port'] = HEALTHCHECK_PORT

    sg_name = 'app-spilo'
    rules_missing = check_security_group(sg_name, [('tcp', 22),
                                                   ('tcp', POSTGRES_PORT),
                                                   ('tcp', HEALTHCHECK_PORT)],
                                         region,
                                         allow_from_self=True)

    if ('tcp', 22) in rules_missing:
        warning(
            'Security group {} does not allow SSH access, you will not be able to ssh into your servers'
            .format(sg_name))

    if ('tcp', POSTGRES_PORT) in rules_missing:
        error(
            'Security group {} does not allow inbound TCP traffic on the default postgres port ({})'
            .format(sg_name, POSTGRES_PORT))

    if ('tcp', HEALTHCHECK_PORT) in rules_missing:
        error(
            'Security group {} does not allow inbound TCP traffic on the default health check port ({})'
            .format(sg_name, HEALTHCHECK_PORT))
    variables['spilo_sg_id'] = get_security_group(region, sg_name).id

    check_s3_bucket(variables['wal_s3_bucket'], region)

    return variables
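
`check_security_group` is imported from the surrounding tooling and not shown here. A rough boto3-based stand-in for the "which (protocol, port) ingress rules are missing" check could look like the following sketch (helper name and return shape are assumptions for illustration):

import boto3

def missing_ingress_rules(sg_name: str, expected: list, region: str) -> set:
    """Return the (protocol, port) pairs in `expected` that the group does not allow."""
    ec2 = boto3.client('ec2', region_name=region)
    groups = ec2.describe_security_groups(
        Filters=[{'Name': 'group-name', 'Values': [sg_name]}])['SecurityGroups']
    allowed = set()
    for group in groups:
        for perm in group.get('IpPermissions', []):
            if perm.get('IpProtocol') == '-1':
                return set()  # group allows all traffic, nothing is missing
            from_port = perm.get('FromPort')
            to_port = perm.get('ToPort', from_port)
            if from_port is None:
                continue
            for port in range(from_port, to_port + 1):
                allowed.add((perm['IpProtocol'], port))
    return {rule for rule in expected if rule not in allowed}

# e.g. missing_ingress_rules('app-spilo', [('tcp', 22), ('tcp', 5432)], 'eu-west-1')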
Beispiel #57
0
def configure_bastion_host(account_name: str, dns_domain: str, ec2_conn, subnets: list, cfg: dict, vpc_net: IPNetwork):
    try:
        subnet = list(filter_subnets(subnets, "dmz"))[0]
    except IndexError:
        warning("No DMZ subnet found")
        return

    az_name = subnet.availability_zone
    sg_name = "Odd (SSH Bastion Host)"
    sg = [group for group in ec2_conn.get_all_security_groups() if group.name == sg_name]
    if not sg:
        sg = ec2_conn.create_security_group(sg_name, "Allow SSH access to the bastion host", vpc_id=subnet.vpc_id)
        # We are too fast for AWS (InvalidGroup.NotFound); give the new group a moment to propagate
        time.sleep(2)
        sg.add_tags({"Name": sg_name})

        sg.authorize(ip_protocol="tcp", from_port=22, to_port=22, cidr_ip="0.0.0.0/0")
        sg.authorize(ip_protocol="tcp", from_port=2222, to_port=2222, cidr_ip="0.0.0.0/0")
    else:
        sg = sg[0]

    instances = ec2_conn.get_only_instances(filters={"tag:Name": sg_name, "instance-state-name": "running"})
    re_deploy = cfg.get("re_deploy")
    if instances and re_deploy:
        for instance in instances:
            drop_bastionhost(instance)
        instances = None
    if instances:
        instance = instances[0]
        ip = instance.ip_address
    else:
        with Action("Launching SSH Bastion instance in {az_name}..", az_name=az_name) as act:
            config = substitute_template_vars(
                cfg.get("ami_config"), {"account_name": account_name, "vpc_net": str(vpc_net)}
            )
            user_data = "#taupage-ami-config\n{}".format(yaml.safe_dump(config))

            res = ec2_conn.run_instances(
                cfg.get("ami_id"),
                subnet_id=subnet.id,
                instance_type=cfg.get("instance_type", "t2.micro"),
                security_group_ids=[sg.id],
                user_data=user_data.encode("utf-8"),
                key_name=cfg.get("key_name"),
                disable_api_termination=True,
                monitoring_enabled=True,
            )
            instance = res.instances[0]

            status = instance.update()
            while status == "pending":
                time.sleep(5)
                status = instance.update()
                act.progress()

            if status == "running":
                instance.add_tag("Name", sg_name)

        with Action("Associating Elastic IP.."):
            addr = None
            for _addr in ec2_conn.get_all_addresses():
                if not _addr.instance_id:
                    # use existing Elastic IP (e.g. to re-use IP from previous bastion host)
                    addr = _addr
            if not addr:
                addr = ec2_conn.allocate_address("vpc")
            addr.associate(instance.id)
        ip = addr.public_ip
    info("SSH Bastion instance is running with public IP {}".format(ip))
    try:
        ec2_conn.revoke_security_group_egress(sg.id, -1, from_port=-1, to_port=-1, cidr_ip="0.0.0.0/0")
    except boto.exception.EC2ResponseError as e:
        if "rule does not exist" not in e.message:
            raise
    rules = [
        # allow ALL connections to our internal EC2 instances
        ("tcp", 0, 65535, vpc_net),
        # allow HTTPS to the internet (actually only needed for SSH access service)
        ("tcp", 443, 443, "0.0.0.0/0"),
        # allow pings
        ("icmp", -1, -1, "0.0.0.0/0"),
        # allow DNS
        ("udp", 53, 53, "0.0.0.0/0"),
        ("tcp", 53, 53, "0.0.0.0/0"),
    ]
    for proto, from_port, to_port, cidr in rules:
        try:
            ec2_conn.authorize_security_group_egress(
                sg.id, ip_protocol=proto, from_port=from_port, to_port=to_port, cidr_ip=cidr
            )
        except boto.exception.EC2ResponseError as e:
            if "already exists" not in e.message:
                raise
    dns = "odd-{}.{}.".format(az_name[:-1], dns_domain)
    with Action("Adding DNS record {}".format(dns)):
        dns_conn = boto.route53.connect_to_region("eu-west-1")
        zone = dns_conn.get_zone(dns_domain + ".")
        rr = zone.get_records()
        change = rr.add_change("UPSERT", dns, "A")
        change.add_value(ip)
        rr.commit()

    launch_time = dateutil.parser.parse(instance.launch_time)
    if (
        not wait_for_ssh_port(ip, 60)
        and datetime.timedelta(hours=1) < datetime.datetime.now(launch_time.tzinfo) - launch_time
    ):
        error("Bastion Host does not response. Drop Bastionhost and create new one")
        drop_bastionhost(instance)
        configure_bastion_host(account_name, dns_domain, ec2_conn, subnets, cfg, vpc_net)
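
`wait_for_ssh_port` is defined elsewhere; a plausible stdlib-only implementation, assumed here purely for illustration, polls TCP port 22 until it accepts a connection or the timeout expires:

import socket
import time

def wait_for_ssh_port(host: str, timeout: int, port: int = 22) -> bool:
    """Poll until host:port accepts a TCP connection; return False on timeout."""
    deadline = time.time() + timeout
    while time.time() < deadline:
        try:
            with socket.create_connection((host, port), timeout=5):
                return True
        except OSError:
            time.sleep(2)
    return False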
Beispiel #58
0
def component_elastic_load_balancer(definition, configuration, args, info,
                                    force, account_info):
    lb_name = configuration["Name"]

    # domains pointing to the load balancer
    main_zone = None
    for name, domain in configuration.get('Domains', {}).items():
        name = '{}{}'.format(lb_name, name)
        definition["Resources"][name] = {
            "Type": "AWS::Route53::RecordSet",
            "Properties": {
                "Type": "CNAME",
                "TTL": 20,
                "ResourceRecords": [{
                    "Fn::GetAtt": [lb_name, "DNSName"]
                }],
                "Name": "{0}.{1}".format(domain["Subdomain"], domain["Zone"]),
                "HostedZoneName": "{0}".format(domain["Zone"])
            },
        }

        if domain["Type"] == "weighted":
            definition["Resources"][name]["Properties"]['Weight'] = 0
            definition["Resources"][name]["Properties"][
                'SetIdentifier'] = "{0}-{1}".format(info["StackName"],
                                                    info["StackVersion"])
            main_zone = domain['Zone']

    ssl_cert = configuration.get('SSLCertificateId')

    pattern = None
    if not ssl_cert:
        if main_zone:
            pattern = main_zone.lower().rstrip('.').replace('.', '-')
        else:
            pattern = ''
    elif not ssl_cert.startswith('arn:'):
        pattern = ssl_cert

    if pattern is not None:
        ssl_cert = find_ssl_certificate_arn(args.region, pattern)

        if not ssl_cert:
            fatal_error(
                'Could not find any matching SSL certificate for "{}"'.format(
                    pattern))

    health_check_protocol = "HTTP"
    allowed_health_check_protocols = ("HTTP", "TCP", "UDP", "SSL")
    if "HealthCheckProtocol" in configuration:
        health_check_protocol = configuration["HealthCheckProtocol"]

    if health_check_protocol not in allowed_health_check_protocols:
        raise click.UsageError(
            'Protocol "{}" is not supported for LoadBalancer'.format(
                health_check_protocol))

    health_check_path = "/ui/"
    if "HealthCheckPath" in configuration:
        health_check_path = configuration["HealthCheckPath"]

    health_check_port = configuration["HTTPPort"]
    if "HealthCheckPort" in configuration:
        health_check_port = configuration["HealthCheckPort"]

    health_check_target = "{0}:{1}{2}".format(health_check_protocol,
                                              health_check_port,
                                              health_check_path)
    if configuration.get('NameSuffix'):
        loadbalancer_name = get_load_balancer_name(
            info["StackName"], '{}-{}'.format(info["StackVersion"],
                                              configuration['NameSuffix']))
        del (configuration['NameSuffix'])
    elif configuration.get('NameSufix'):
        # get rid of this branch (typo) as soon as possible
        # https://github.com/zalando/planb-revocation/issues/29
        warning(
            'The "NameSufix" property is deprecated. Use "NameSuffix" instead.'
        )
        loadbalancer_name = get_load_balancer_name(
            info["StackName"], '{}-{}'.format(info["StackVersion"],
                                              configuration['NameSufix']))
        del (configuration['NameSufix'])
    else:
        loadbalancer_name = get_load_balancer_name(info["StackName"],
                                                   info["StackVersion"])

    loadbalancer_scheme = "internal"
    allowed_loadbalancer_schemes = ("internet-facing", "internal")
    if "Scheme" in configuration:
        loadbalancer_scheme = configuration["Scheme"]
    else:
        configuration["Scheme"] = loadbalancer_scheme

    if loadbalancer_scheme == 'internet-facing':
        click.secho(
            'You are deploying an internet-facing ELB that will be publicly accessible! '
            + 'You should have OAUTH2 and HTTPS in place!',
            fg='red',
            bold=True,
            err=True)

    if loadbalancer_scheme not in allowed_loadbalancer_schemes:
        raise click.UsageError(
            'Scheme "{}" is not supported for LoadBalancer'.format(
                loadbalancer_scheme))

    if loadbalancer_scheme == "internal":
        loadbalancer_subnet_map = "LoadBalancerInternalSubnets"
    else:
        loadbalancer_subnet_map = "LoadBalancerSubnets"

    # load balancer
    definition["Resources"][lb_name] = {
        "Type": "AWS::ElasticLoadBalancing::LoadBalancer",
        "Properties": {
            "Subnets": {
                "Fn::FindInMap":
                [loadbalancer_subnet_map, {
                    "Ref": "AWS::Region"
                }, "Subnets"]
            },
            "HealthCheck": {
                "HealthyThreshold": "2",
                "UnhealthyThreshold": "2",
                "Interval": "10",
                "Timeout": "5",
                "Target": health_check_target
            },
            "Listeners": [{
                "PolicyNames": [],
                "SSLCertificateId": ssl_cert,
                "Protocol": "HTTPS",
                "InstancePort": configuration["HTTPPort"],
                "LoadBalancerPort": 443
            }],
            "ConnectionDrainingPolicy": {
                "Enabled": True,
                "Timeout": 60
            },
            "CrossZone":
            "true",
            "LoadBalancerName":
            loadbalancer_name,
            "SecurityGroups":
            resolve_security_groups(configuration["SecurityGroups"],
                                    args.region),
            "Tags": [
                # Tag "Name"
                {
                    "Key":
                    "Name",
                    "Value":
                    "{0}-{1}".format(info["StackName"], info["StackVersion"])
                },
                # Tag "StackName"
                {
                    "Key": "StackName",
                    "Value": info["StackName"],
                },
                # Tag "StackVersion"
                {
                    "Key": "StackVersion",
                    "Value": info["StackVersion"]
                }
            ]
        }
    }
    for key, val in configuration.items():
        # overwrite any specified properties, but
        # ignore our special Senza properties as they are not supported by CF
        if key not in SENZA_PROPERTIES:
            definition['Resources'][lb_name]['Properties'][key] = val

    return definition
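
`get_load_balancer_name` is imported from the surrounding code base. Since classic ELB names are limited to 32 characters, a reasonable approximation (an assumption, not the actual implementation) is to join stack name and version and truncate the stack name part if needed:

def get_load_balancer_name(stack_name: str, stack_version: str) -> str:
    """Build '<stack_name>-<stack_version>', keeping the total length at 32 chars."""
    # ELB names may be at most 32 characters; trim the stack name, not the version
    max_name_length = 32 - len(stack_version) - 1
    return '{}-{}'.format(stack_name[:max_name_length], stack_version)

# e.g. get_load_balancer_name('a-rather-long-application-name', 'v42')
# -> 'a-rather-long-application-na-v42'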