Example #1
def main():
    configure_logging()
    config = get_config()

    if not contains_valid_dockercfg(config):
        sys.exit(0)

    logging.info('Writing dockercfg')

    try:
        path = os.path.expanduser('~/.dockercfg')
        write_file(path, json.dumps(config.get('dockercfg')))

        directory = os.path.expanduser('~/.docker')
        if not os.path.exists(directory):
            os.makedirs(directory)
        path = os.path.expanduser('~/.docker/config.json')
        if os.path.exists(path):
            # load the existing Docker config
            with open(path) as f:
                data = json.load(f)
            existing = config.get('dockercfg', {})
            # merge the configured registry credentials into it
            data.setdefault('auths', {}).update(existing)
            # write it back
            write_file(path, json.dumps(data))
        else:
            write_file(path, json.dumps({
                'auths': config.get('dockercfg', {})
            }))

        logging.info('Successfully placed dockercfg')
    except Exception as e:
        logging.error('Failed to create dockercfg')
        logging.exception(e)
        sys.exit(1)
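
None of these examples show the shared helpers they call; purely as an illustration, here is one way write_file and contains_valid_dockercfg could look (assumed sketches, not the project's actual code):

def write_file(path, contents):
    # Assumed helper: write the given string to path.
    with open(path, 'w') as f:
        f.write(contents)


def contains_valid_dockercfg(config):
    # Assumed check: the config has a non-empty 'dockercfg' mapping.
    dockercfg = config.get('dockercfg')
    return isinstance(dockercfg, dict) and len(dockercfg) > 0
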
Example #2
def main():
    configure_logging()
    config = get_config()

    if not contains_cloudwatch_logs_config(config):
        logging.info('Cloudwatch Logs Agent disabled by configuration')
        sys.exit(0)

    logging.info('Configuring Cloudwatch Logs Agent')

    # identity = {'region': 'eu-west-1', 'accountId': 123456, 'instanceId': 'i-123'}
    identity = boto.utils.get_instance_identity()['document']

    environment = {
        'observed_log_files': config.get('cloudwatch_logs'),
        'application_id': config.get('application_id'),
        'application_version': config.get('application_version'),
        'region': identity['region'],
        'account_id': identity['accountId'],
        'instance_id': identity['instanceId']
    }

    try:
        write_file('/var/awslogs/etc/aws.conf', render_template(AWS_CONFIG_TEMPLATE, environment))
        write_file('/var/awslogs/etc/awslogs.conf', render_template(AWSLOGS_CONFIG_TEMPLATE, environment))

        start_awslogs_service()

        logging.info('Successfully configured and started Cloudwatch Logs Agent')
    except Exception as e:
        logging.error('Failed to configure Cloudwatch Logs Agent')
        logging.exception(e)
        sys.exit(1)
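
AWS_CONFIG_TEMPLATE, AWSLOGS_CONFIG_TEMPLATE and render_template are defined elsewhere in the project; a minimal sketch of the rendering step, assuming simple str.format-style placeholders (the real templates differ):

# Hypothetical template and renderer, for illustration only.
AWS_CONFIG_TEMPLATE = '[default]\nregion = {region}\n'


def render_template(template, environment):
    # Assumed behavior: substitute {key} placeholders from the environment dict.
    return template.format(**environment)


# e.g. render_template(AWS_CONFIG_TEMPLATE, {'region': 'eu-west-1'})
# -> '[default]\nregion = eu-west-1\n'
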
Example #3
def main():
    """Configure values for white-listed sysfs paths"""
    SYSFS_WHITELIST = ['/sys/kernel/mm/transparent_hugepage/enabled']

    configure_logging()
    config = get_config()

    sysfs = config.get('sysfs')

    if sysfs is None:
        sys.exit(0)

    disallowed_paths = set(sysfs.keys()) - set(SYSFS_WHITELIST)
    if disallowed_paths:
        logging.error('You are not allowed to edit the sysfs path(s) {}'.format(list(disallowed_paths)))

    # Sanitize our dict first
    clean_sysfs = {key: value for (key, value) in sysfs.items()
                   if key not in disallowed_paths}

    try:
        for key, value in clean_sysfs.items():
            with open(key, 'w') as file:
                file.write(value + '\n')
        logging.info('Successfully written allowed sysfs paths')
    except Exception as e:
        logging.error('Failed to write sysfs paths')
        logging.exception(e)
        sys.exit(1)
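
For reference, the 'sysfs' section this expects is a plain mapping from sysfs path to the value to write; a hypothetical input:

# Hypothetical Taupage configuration snippet (shown as the parsed Python dict):
config = {
    'sysfs': {
        '/sys/kernel/mm/transparent_hugepage/enabled': 'never',
    }
}
# With the whitelist above, the loop writes "never\n" to that path.
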
Example #4
def main():
    """Configure custom sysctl parameters

    If a sysctl section is present, adds the valid parameters to sysctl and reloads it.
    """
    CUSTOM_SYSCTL_CONF = '/etc/sysctl.d/99-custom.conf'

    configure_logging()
    config = get_config()

    sysctl = config.get('sysctl')

    if sysctl is None:
        sys.exit(0)

    try:
        sysctl_entries = ['{} = {}'.format(key, value) for key, value in sysctl.items()]
        with open(CUSTOM_SYSCTL_CONF, 'w') as file:
            file.write('\n'.join(sysctl_entries)+'\n')
        logging.info('Successfully written sysctl parameters')
    except Exception as e:
        logging.error('Failed to write sysctl parameters')
        logging.exception(e)
        sys.exit(1)

    try:
        exitcode = subprocess.call(['/sbin/sysctl', '-p', CUSTOM_SYSCTL_CONF])
        if exitcode != 0:
            logging.error('Reloading sysctl failed with exitcode {}'.format(exitcode))
            sys.exit(1)
        logging.info('Successfully reloaded sysctl parameters')
    except Exception as e:
        logging.error('Failed to reload sysctl')
        logging.exception(e)
        sys.exit(1)
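
Purely for illustration, a hypothetical 'sysctl' section and the file content this main() would generate:

sysctl = {'vm.swappiness': 1, 'fs.file-max': 200000}
print('\n'.join('{} = {}'.format(key, value) for key, value in sysctl.items()))
# vm.swappiness = 1
# fs.file-max = 200000
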
Example #5
def main():
    """Configure values for white-listed sysfs paths"""
    SYSFS_WHITELIST = ['/sys/kernel/mm/transparent_hugepage/enabled']

    configure_logging()
    config = get_config()

    sysfs = config.get('sysfs')

    if sysfs is None:
        sys.exit(0)

    disallowed_paths = set(sysfs.keys()) - set(SYSFS_WHITELIST)
    if disallowed_paths:
        logging.error(
            'You are not allowed to edit the sysfs path(s) {}'.format(
                list(disallowed_paths)))

    # Sanitize our dict first
    clean_sysfs = {
        key: value
        for (key, value) in sysfs.items() if key not in disallowed_paths
    }

    try:
        for key, value in clean_sysfs.items():
            with open(key, 'w') as file:
                file.write(value + '\n')
        logging.info('Successfully written allowed sysfs paths')
    except Exception as e:
        logging.error('Failed to write sysfs paths')
        logging.exception(e)
        sys.exit(1)
Example #6
def main():
    configure_logging()
    config = get_config()

    telegraf_config = get_telegraf_config(config)
    if not telegraf_config:
        logging.info('Telegraf disabled by configuration')
        sys.exit(0)

    logging.info('Configuring Telegraf')

    # identity = {'region': 'eu-west-1', 'accountId': 123456, 'instanceId': 'i-123'}
    identity = boto.utils.get_instance_identity()['document']

    environment = {
        'application_id': config.get('application_id'),
        'application_version': config.get('application_version'),
        'region': identity['region'],
        'instance_id': identity['instanceId']
    }

    environment.update(telegraf_config)

    try:
        write_file('/etc/telegraf/telegraf.conf',
                   render_template(CONFIG_TEMPLATE, environment))

        start_telegraf_service()

        logging.info('Successfully configured and started Telegraf')
        sys.exit(0)
    except Exception as e:
        logging.error('Failed to configure Telegraf')
        logging.exception(e)
        sys.exit(0)
Example #7
def main():
    configure_logging()
    config = get_config()

    telegraf_config = get_telegraf_config(config)
    if not telegraf_config:
        logging.info('Telegraf disabled by configuration')
        sys.exit(0)

    logging.info('Configuring Telegraf')

    # identity = {'region': 'eu-west-1', 'accountId': 123456, 'instanceId': 'i-123'}
    identity = boto.utils.get_instance_identity()['document']

    environment = {
        'application_id': config.get('application_id'),
        'application_version': config.get('application_version'),
        'region': identity['region'],
        'instance_id': identity['instanceId']
    }

    environment.update(telegraf_config)

    try:
        write_file('/etc/telegraf/telegraf.conf', render_template(CONFIG_TEMPLATE, environment))

        start_telegraf_service()

        logging.info('Successfully configured and started Telegraf')
        sys.exit(0)
    except Exception as e:
        logging.error('Failed to configure Telegraf')
        logging.exception(e)
        sys.exit(0)
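
get_telegraf_config is not shown in this listing; a plausible minimal version, assumed rather than taken from the project, would simply return the Telegraf section of the configuration:

def get_telegraf_config(config):
    # Assumed accessor: the 'telegraf' section of the Taupage config, or None.
    return config.get('telegraf')
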
Example #8
def main():
    configure_logging()

    config = get_config()

    instance_logs_url = config.get('instance_logs_url')

    if not instance_logs_url:
        logging.warning('No endpoint for instance logs configured.')
        return

    # identity = {'region': 'eu-west-1', 'accountId': 123456, 'instanceId': 'i-123'}
    identity = boto.utils.get_instance_identity()['document']

    region = identity['region']
    account_id = identity['accountId']
    instance_id = identity['instanceId']

    boot_time = get_boot_time()

    is_shutdown = False
    if len(sys.argv) > 1:
        is_shutdown = sys.argv[1] == '--shutdown'

    while True:
        for fn in glob.glob('/var/log/audit.log.*.gz'):
            push_audit_log(config, instance_logs_url, account_id, region, instance_id, boot_time, fn)
        if is_shutdown:
            for fn in glob.glob('/var/log/audit.log'):
                push_audit_log(config, instance_logs_url, account_id, region, instance_id, boot_time, fn, compress=True)
            return
        time.sleep(60)
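
get_boot_time is another helper defined elsewhere; one way it might be derived on Linux (a sketch only, and the real helper may return a differently formatted timestamp):

import time


def get_boot_time():
    # Assumed sketch: boot time = now minus uptime, read from /proc/uptime.
    with open('/proc/uptime') as f:
        uptime_seconds = float(f.read().split()[0])
    boot_timestamp = time.time() - uptime_seconds
    return time.strftime('%Y-%m-%dT%H:%M:%S', time.gmtime(boot_timestamp))
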
Example #9
def main():
    configure_logging()
    config = get_config()

    if not contains_valid_dockercfg(config):
        sys.exit(0)

    logging.info('Writing dockercfg')

    try:
        path = os.path.expanduser('~/.dockercfg')
        write_file(path, json.dumps(config.get('dockercfg')))

        directory = os.path.expanduser('~/.docker')
        if not os.path.exists(directory):
            os.makedirs(directory)
        path = os.path.expanduser('~/.docker/config.json')
        if os.path.exists(path):
            # load the existing Docker config
            with open(path) as f:
                data = json.load(f)
            existing = config.get('dockercfg', {})
            # merge the configured registry credentials into it
            data.setdefault('auths', {}).update(existing)
            # write it back
            write_file(path, json.dumps(data))
        else:
            write_file(path, json.dumps({
                'auths': config.get('dockercfg', {})
            }))

        logging.info('Successfully placed dockercfg')
    except Exception as e:
        logging.error('Failed to create dockercfg')
        logging.exception(e)
        sys.exit(1)
Example #10
def main():
    configure_logging()

    config = get_config()

    instance_logs_url = config.get('instance_logs_url')

    if not instance_logs_url:
        logging.warning('No endpoint for instance logs configured.')
        return

    # identity = {'region': 'eu-west-1', 'accountId': 123456, 'instanceId': 'i-123'}
    identity = boto.utils.get_instance_identity()['document']

    region = identity['region']
    account_id = identity['accountId']
    instance_id = identity['instanceId']

    boot_time = get_boot_time()

    is_shutdown = False
    if len(sys.argv) > 1:
        is_shutdown = sys.argv[1] == '--shutdown'

    while True:
        for fn in glob.glob('/var/log/audit.log.*.gz'):
            push_audit_log(config, instance_logs_url, account_id, region, instance_id, boot_time, fn)
        if is_shutdown:
            for fn in glob.glob('/var/log/audit.log'):
                push_audit_log(config, instance_logs_url, account_id, region, instance_id, boot_time, fn, compress=True)
            return
        rtime = random.randrange(60, 3000)
        time.sleep(rtime)
Example #11
def main():
    configure_logging()
    config = get_config()

    instance_logs_url = config.get('instance_logs_url')

    if instance_logs_url:
        userAndPass = b64encode(
            bytes('{}:{}'.format(config.get('logsink_username'),
                                 config.get('logsink_password')),
                  encoding='ascii')).decode("ascii") or ''

        # identity = {'region': 'eu-west-1', 'accountId': 123456, 'instanceId': 'i-123'}
        identity = boto.utils.get_instance_identity()['document']

        region = identity['region']
        account_id = identity['accountId']
        instance_id = identity['instanceId']

        boot_time = get_boot_time()

        # remove "sensitive" information from Taupage Config
        # (should be encrypted anyway, but better be sure..)
        masked_config = mask_dictionary(config)

        data = {'account_id': str(account_id),
                'region': region,
                'instance_boot_time': boot_time,
                'instance_id': instance_id,
                'log_data': codecs.encode(yaml.safe_dump(masked_config).encode('utf-8'), 'base64').decode('utf-8'),
                'log_type': 'USER_DATA'}
        logging.info('Pushing Taupage YAML to {}..'.format(instance_logs_url))
        try:
            # TODO: use OAuth credentials
            response = requests.post(instance_logs_url, data=json.dumps(data), timeout=5,
                                     headers={'Content-Type': 'application/json',
                                              'Authorization': 'Basic {}'.format(userAndPass)})
            if response.status_code != 201:
                logging.warning('Failed to push Taupage YAML: server returned HTTP status {}: {}'.format(
                    response.status_code, response.text))
        except Exception:
            logging.exception('Failed to push Taupage YAML')
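
mask_dictionary is also defined elsewhere in the project; as an assumption-laden sketch, it could recursively blank out values whose keys look secret:

SENSITIVE_KEY_HINTS = ('pass', 'secret', 'private', 'token')  # assumed heuristic


def mask_dictionary(d):
    # Sketch only: replace values of suspicious-looking keys with a fixed marker.
    masked = {}
    for key, value in d.items():
        if isinstance(value, dict):
            masked[key] = mask_dictionary(value)
        elif any(hint in str(key).lower() for hint in SENSITIVE_KEY_HINTS):
            masked[key] = 'MASKED'
        else:
            masked[key] = value
    return masked
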
Example #12
def main():
    """Configure custom sysctl parameters

    If a sysctl section is present, adds the valid parameters to sysctl and reloads it.

    As some kernel parameters may not be allowed to be tuned, only parameters
    on a whitelist are allowed to be specified.
    """
    SYSCTL_WHITELIST = [
        'fs.file-max', 'vm.dirty_background_ratio', 'vm.dirty_ratio',
        'vm.overcommit_memory', 'vm.overcommit_ratio', 'vm.swappiness'
    ]
    CUSTOM_SYSCTL_CONF = '/etc/sysctl.d/99-custom.conf'

    configure_logging()
    config = get_config()

    sysctl = config.get('sysctl')

    if sysctl is None:
        sys.exit(0)

    disallowed_keys = set(sysctl.keys()) - set(SYSCTL_WHITELIST)
    if disallowed_keys:
        logging.error(
            'You are not allowed to configure the sysctl parameters {}'.format(
                list(disallowed_keys)))

    try:
        sysctl_entries = [
            '{} = {}'.format(key, value) for key, value in sysctl.items()
            if key in SYSCTL_WHITELIST
        ]
        with open(CUSTOM_SYSCTL_CONF, 'w') as file:
            file.write('\n'.join(sysctl_entries) + '\n')
        logging.info('Successfully written sysctl parameters')
    except Exception as e:
        logging.error('Failed to write sysctl parameters')
        logging.exception(e)
        sys.exit(1)

    try:
        exitcode = subprocess.call(['/sbin/sysctl', '-p', CUSTOM_SYSCTL_CONF])
        if exitcode != 0:
            logging.error(
                'Reloading sysctl failed with exitcode {}'.format(exitcode))
            sys.exit(1)
        logging.info('Successfully reloaded sysctl parameters')
    except Exception as e:
        logging.error('Failed to reload sysctl')
        logging.exception(e)
        sys.exit(1)
Example #13
def main():
    """Configure custom sysctl parameters

    If a sysctl section is present, adds the valid parameters to sysctl and reloads it.

    As some kernel parameters may not be allowed to be tuned, only parameters
    on a whitelist are allowed to be specified.
    """
    SYSCTL_WHITELIST = ['fs.file-max',
                        'vm.dirty_background_ratio',
                        'vm.dirty_ratio',
                        'vm.max_map_count',
                        'vm.overcommit_memory',
                        'vm.overcommit_ratio',
                        'vm.swappiness',
                        'net.core.somaxconn']
    CUSTOM_SYSCTL_CONF = '/etc/sysctl.d/99-custom.conf'

    configure_logging()
    config = get_config()

    sysctl = config.get('sysctl')

    if sysctl is None:
        sys.exit(0)

    disallowed_keys = set(sysctl.keys()) - set(SYSCTL_WHITELIST)
    if disallowed_keys:
        logging.error('You are not allowed to configure the sysctl parameters {}'.format(list(disallowed_keys)))

    try:
        sysctl_entries = ['{} = {}'.format(key, value) for key, value in sysctl.items() if key in SYSCTL_WHITELIST]
        with open(CUSTOM_SYSCTL_CONF, 'w') as file:
            file.write('\n'.join(sysctl_entries)+'\n')
        logging.info('Successfully written sysctl parameters')
    except Exception as e:
        logging.error('Failed to write sysctl parameters')
        logging.exception(e)
        sys.exit(1)

    try:
        exitcode = subprocess.call(['/sbin/sysctl', '-p', CUSTOM_SYSCTL_CONF])
        if exitcode != 0:
            logging.error('Reloading sysctl failed with exitcode {}'.format(exitcode))
            sys.exit(1)
        logging.info('Successfully reloaded sysctl parameters')
    except Exception as e:
        logging.error('Failed to reload sysctl')
        logging.exception(e)
        sys.exit(1)
Example #15
def main():
    configure_logging()
    config = get_config()

    if not contains_local_monitor_config(config):
        sys.exit(0)

    try:
        logging.info('Starting local_monitor')
        start_local_monitor_service()
        sys.exit(0)
    except Exception as e:
        logging.exception(e)
        sys.exit(0)
Example #16
def main():
    # Process arguments
    args = process_arguments()
    if args.debug:
        configure_logging(logging.DEBUG)
    else:
        configure_logging(logging.INFO)

    # Load configuration from YAML file
    config = get_config(args.filename)

    if config.get("volumes"):
        handle_volumes(args, config)

    # Iterate over mount points
    iterate_mounts(config)
Example #18
def main():
    configure_logging()
    config = get_config()

    if not contains_valid_dockercfg(config):
        sys.exit(0)

    logging.info('Writing dockercfg')

    try:
        path = os.path.expanduser('~/.dockercfg')
        write_file(path, json.dumps(config.get('dockercfg')))
        logging.info('Successfully placed dockercfg')
    except Exception as e:
        logging.error('Failed to create dockercfg')
        logging.exception(e)
        sys.exit(1)
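
The 'dockercfg' section is serialized verbatim into ~/.dockercfg; a hypothetical example (registry URL and token are made up):

import json

dockercfg = {
    'https://registry.example.org': {
        'auth': 'BASE64-ENCODED-USER:TOKEN',
        'email': 'none',
    }
}
print(json.dumps(dockercfg))  # exactly what write_file() puts into ~/.dockercfg
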
Example #19
def main():
    configure_logging()
    config = get_config()

    instance_logs_url = config.get('instance_logs_url')

    if instance_logs_url:
        userAndPass = b64encode(bytes('{}:{}'.format(
                config.get('logsink_username'),
                config.get('logsink_password')),
                encoding='ascii')).decode("ascii") or ''

        # identity = {'region': 'eu-west-1', 'accountId': 123456, 'instanceId': 'i-123'}
        identity = boto.utils.get_instance_identity()['document']

        region = identity['region']
        account_id = identity['accountId']
        instance_id = identity['instanceId']

        boot_time = get_boot_time()

        # remove "sensitive" information from Taupage Config
        # (should be encrypted anyway, but better be sure..)
        masked_config = mask_dictionary(config)

        data = {'account_id': str(account_id),
                'region': region,
                'instance_boot_time': boot_time,
                'instance_id': instance_id,
                'log_data': codecs.encode(yaml.safe_dump(masked_config).encode('utf-8'), 'base64').decode('utf-8'),
                'log_type': 'USER_DATA'}
        logging.info('Pushing Taupage YAML to {}..'.format(instance_logs_url))
        try:
            # TODO: use OAuth credentials
            response = requests.post(instance_logs_url, data=json.dumps(data), timeout=5,
                                     headers={'Content-Type': 'application/json',
                                              'Authorization': 'Basic {}'.format(userAndPass)})
            if response.status_code != 201:
                logging.warning('Failed to push Taupage YAML: server returned HTTP status {}: {}'.format(
                    response.status_code,
                    response.text))
        except Exception:
            logging.exception('Failed to push Taupage YAML')
Example #20
def main():
    """Configure custom sysctl parameters

    If a sysctl section is present, adds the valid parameters to sysctl and reloads it.
    """
    CUSTOM_SYSCTL_CONF = '/etc/sysctl.d/99-custom.conf'

    configure_logging()
    config = get_config()

    sysctl = config.get('sysctl')

    if sysctl is None:
        sys.exit(0)

    try:
        sysctl_entries = [
            '{} = {}'.format(key, value) for key, value in sysctl.items()
        ]
        with open(CUSTOM_SYSCTL_CONF, 'w') as file:
            file.write('\n'.join(sysctl_entries) + '\n')
        logging.info('Successfully written sysctl parameters')
    except Exception as e:
        logging.error('Failed to write sysctl parameters')
        logging.exception(e)
        sys.exit(1)

    try:
        exitcode = subprocess.call(['/sbin/sysctl', '-p', CUSTOM_SYSCTL_CONF])
        if exitcode != 0:
            logging.error(
                'Reloading sysctl failed with exitcode {}'.format(exitcode))
            sys.exit(1)
        logging.info('Successfully reloaded sysctl parameters')
    except Exception as e:
        logging.error('Failed to reload sysctl')
        logging.exception(e)
        sys.exit(1)
Example #21
def __init__(self, region):
    configure_logging()
    self.logger = logging.getLogger(__name__)
    self.elb_client = elb.connect_to_region(region)
Example #22
def main():
    """Confugure custom routing if necessary"""

    configure_logging()
    config = get_config()

    nat_gateways = config.get('nat_gateways')

    if not nat_gateways or not isinstance(nat_gateways, dict):
        # nat_gateways must be a non-empty dictionary
        sys.exit(0)

    METADATA_URL = 'http://169.254.169.254/latest/meta-data/'
    try:
        r = requests.get(METADATA_URL + 'placement/availability-zone')
        region = r.text.strip()[:-1]
        logging.info('Region=%s', region)

        r = requests.get(METADATA_URL + 'mac')
        mac = r.text.strip()

        r = requests.get(METADATA_URL + 'network/interfaces/macs/' + mac +
                         '/subnet-id')
        subnet = r.text
        if subnet not in nat_gateways:
            logging.warning(
                'Can not find subnet %s in the nat_gateways mapping', subnet)
            sys.exit(0)

        logging.info('Will use %s nat gateway for outgoing https traffic',
                     nat_gateways[subnet])
    except Exception:
        logging.exception('Failed to read metadata')
        sys.exit(1)

    RT_TABLES = '/etc/iproute2/rt_tables'

    try:
        with open(RT_TABLES, 'a') as f:
            f.write('\n150 https\n')
        logging.info('Created new routing table for https traffic')
    except Exception:
        logging.exception('Failed to write into %s', RT_TABLES)
        sys.exit(1)

    iptables = ['iptables', '-w', '-t', 'mangle']

    subprocess_call(iptables + [
        '-A', 'OUTPUT', '-p', 'tcp', '!', '-d', '172.16.0.0/12', '--dport',
        '443', '-j', 'MARK', '--set-mark', '443'
    ])

    subprocess_call(iptables + [
        '-A', 'OUTPUT', '-p', 'udp', '--dport', '123', '-j', 'MARK',
        '--set-mark', '443'
    ])

    subprocess_call(['ip', 'rule', 'add', 'fwmark', '443', 'lookup', 'https'])

    subprocess_call([
        'ip', 'route', 'add', 'default', 'via', nat_gateways[subnet], 'table',
        'https'
    ])

    # S3 is an exception: it has its own endpoint inside the VPC
    ranges = []
    try:
        r = requests.get('https://ip-ranges.amazonaws.com/ip-ranges.json')
        ranges = [
            e['ip_prefix'] for e in r.json()['prefixes'] if
            e['service'] == 'S3' and e['region'] == region and 'ip_prefix' in e
        ]
    except Exception:
        logging.exception('Failed to load ip-ranges.json')

    # Don't mark outgoing traffic to S3
    for r in ranges:
        subprocess_call(iptables + ['-I', 'OUTPUT', '-d', r, '-j', 'ACCEPT'])
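
subprocess_call is a wrapper used throughout these routing examples but never shown; a minimal assumed version that runs the command and logs failures without aborting:

import logging
import subprocess


def subprocess_call(cmd):
    # Assumed wrapper: run the command, log a non-zero exit code, return it.
    exitcode = subprocess.call(cmd)
    if exitcode != 0:
        logging.error('Command %s exited with code %s', ' '.join(cmd), exitcode)
    return exitcode
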
Example #23
def main():
    args = process_arguments()

    # Setup logging
    if args.debug:
        configure_logging(logging.DEBUG)
    else:
        configure_logging(logging.INFO)

    current_region = args.region if args.region else get_region()

    # Load configuration from YAML file
    config = get_config(args.filename)

    if config.get("network_interfaces"):
        handle_network_interfaces(current_region, config)

        # The goal here is to be able to assign static IPs to instances
        # Within the Zalando AWS account setup we have a private subnet per
        # AZ. The idea is to create an ENI in each AZ in the private subnet where
        # you want a static IP. This means, your instance is going to have two
        # network interfaces on the same subnet, which causes some issues.
        #
        # The code below is based on the explanation at: https://goo.gl/2D8KrV
        # for handling two network interfaces in the same subnet.

        # Setting this to 1 allows you to have multiple network interfaces on the
        # same subnet and have the ARPs for each interface be answered based on
        # whether or not the kernel would route a packet from the ARP'd IP out
        # that interface
        with open("/proc/sys/net/ipv4/conf/all/arp_filter", "w") as all_arp_filter:
            all_arp_filter.write("1")
        network_interfaces = []
        default_gateway = netifaces.gateways()['default'][netifaces.AF_INET][0]

        for device_index in range(0, len(config.get("network_interfaces")) + 1):
            network_interfaces.append("eth{}".format(device_index))

        # Run dhclient on all newly created interfaces to enable them to get IPs
        # Note, we do not run dhclient on eth0 as this may affect network connectivity
        # of the instance
        for network_interface in network_interfaces[1:]:
            subprocess.check_call(["dhclient", str(network_interface)])
        route_tables = []

        # Here we implement source-based routing, according to the serverfault post linked above
        for device_index in range(0, len(config.get("network_interfaces")) + 1):
            route_tables.append("{} eth{}".format(
                device_index + 1, device_index))

        with open("/etc/iproute2/rt_tables", "w") as rt_tables:
            rt_tables.write("\n".join(route_tables))

        for network_interface in network_interfaces:
            interface = netifaces.ifaddresses(network_interface)[
                netifaces.AF_INET][0]
            ip = interface['addr']
            subnet_cidr = str(IPAddress(interface["netmask"]).netmask_bits())
            subprocess.check_call(["ip", "route", "add", "default", "via", default_gateway, "dev",
                                   network_interface, "table", network_interface])
            subprocess.check_call(["ip", "route", "add", subnet_cidr, "dev", network_interface,
                                   "src", ip, "table", network_interface])
            subprocess.check_call(["ip", "rule", "add", "from", ip, "table",
                                   network_interface])
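
To make the source-based routing step concrete: for a configuration with two extra network_interfaces, the loop above writes one routing table per interface into /etc/iproute2/rt_tables, for example:

network_interfaces = ['eth0', 'eth1', 'eth2']  # eth0 plus two configured ENIs
route_tables = ['{} {}'.format(index + 1, name)
                for index, name in enumerate(network_interfaces)]
print('\n'.join(route_tables))
# 1 eth0
# 2 eth1
# 3 eth2
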
Example #24
def main():
    """Confugure custom routing if necessary"""

    configure_logging()
    config = get_config()

    nat_gateways = config.get('nat_gateways')

    if not nat_gateways or not isinstance(nat_gateways, dict):  # nat_gateways must be a non-empty dictionary
        sys.exit(0)

    METADATA_URL = 'http://169.254.169.254/latest/meta-data/'
    try:
        r = requests.get(METADATA_URL + 'placement/availability-zone')
        region = r.text.strip()[:-1]
        logging.info('Region=%s', region)

        r = requests.get(METADATA_URL + 'mac')
        mac = r.text.strip()

        r = requests.get(METADATA_URL + 'network/interfaces/macs/' + mac + '/subnet-id')
        subnet = r.text
        if subnet not in nat_gateways:
            logging.warning('Can not find subnet %s in the nat_gateways mapping', subnet)
            sys.exit(0)

        logging.info('Will use %s nat gateway for outgoing https traffic', nat_gateways[subnet])
    except Exception:
        logging.exception('Failed to read metadata')
        sys.exit(1)

    RT_TABLES = '/etc/iproute2/rt_tables'

    try:
        with open(RT_TABLES, 'a') as f:
            f.write('\n150 https\n')
        logging.info('Created new routing table for https traffic')
    except Exception:
        logging.exception('Failed to write into %s', RT_TABLES)
        sys.exit(1)

    iptables = ['iptables', '-w', '-t', 'mangle']

    subprocess_call(iptables + ['-A', 'OUTPUT', '-p', 'tcp', '!', '-d', '172.16.0.0/12',
                                '--dport', '443', '-j', 'MARK', '--set-mark', '443'])

    subprocess_call(iptables + ['-A', 'OUTPUT', '-p', 'udp', '--dport', '123', '-j', 'MARK', '--set-mark', '443'])

    subprocess_call(['ip', 'rule', 'add', 'fwmark', '443', 'lookup', 'https'])

    subprocess_call(['ip', 'route', 'add', 'default', 'via', nat_gateways[subnet], 'table', 'https'])

    # S3 is an exception: it has its own endpoint inside the VPC
    ranges = []
    try:
        r = requests.get('https://ip-ranges.amazonaws.com/ip-ranges.json')
        ranges = [e['ip_prefix'] for e in r.json()['prefixes']
                  if e['service'] == 'S3' and e['region'] == region and 'ip_prefix' in e]
    except Exception:
        logging.exception('Failed to load ip-ranges.json')

    # Don't mark outgoing traffic to S3
    for r in ranges:
        subprocess_call(iptables + ['-I', 'OUTPUT', '-d', r, '-j', 'ACCEPT'])