def main():
    """Render the Cloudwatch Logs Agent config files and start the service.

    Exits 0 when the agent is disabled or configured successfully, 1 on
    configuration failure.
    """
    configure_logging()
    config = get_config()

    if not contains_cloudwatch_logs_config(config):
        logging.info('Cloudwatch Logs Agent disabled by configuration')
        sys.exit(0)

    logging.info('Configuring Cloudwatch Logs Agent')

    # identity document shape:
    # {'region': 'eu-west-1', 'accountId': 123456, 'instanceId': 'i-123'}
    doc = boto.utils.get_instance_identity()['document']

    template_vars = dict(
        observed_log_files=config.get('cloudwatch_logs'),
        application_id=config.get('application_id'),
        application_version=config.get('application_version'),
        region=doc['region'],
        account_id=doc['accountId'],
        instance_id=doc['instanceId'],
    )

    try:
        # Render both agent config files from their templates.
        targets = (('/var/awslogs/etc/aws.conf', AWS_CONFIG_TEMPLATE),
                   ('/var/awslogs/etc/awslogs.conf', AWSLOGS_CONFIG_TEMPLATE))
        for path, template in targets:
            write_file(path, render_template(template, template_vars))

        start_awslogs_service()

        logging.info('Successfully configured and started Cloudwatch Logs Agent')
    except Exception as e:
        logging.error('Failed to configure Cloudwatch Logs Agent')
        logging.exception(e)
        sys.exit(1)
Beispiel #2
0
def main():
    """Render the Telegraf config and start the service.

    Exits 0 when Telegraf is disabled or started successfully, 1 on failure.
    """
    configure_logging()
    config = get_config()

    telegraf_config = get_telegraf_config(config)
    if not telegraf_config:
        logging.info('Telegraf disabled by configuration')
        sys.exit(0)

    logging.info('Configuring Telegraf')

    # identity = {'region': 'eu-west-1', 'accountId': 123456, 'instanceId': 'i-123'}
    identity = boto.utils.get_instance_identity()['document']

    environment = {
        'application_id': config.get('application_id'),
        'application_version': config.get('application_version'),
        'region': identity['region'],
        'instance_id': identity['instanceId']
    }

    # Telegraf-specific settings extend/override the instance defaults.
    environment.update(telegraf_config)

    try:
        write_file('/etc/telegraf/telegraf.conf',
                   render_template(CONFIG_TEMPLATE, environment))

        start_telegraf_service()

        logging.info('Successfully configured and started Telegraf')
        sys.exit(0)
    except Exception as e:
        logging.error('Failed to configure Telegraf')
        logging.exception(e)
        # BUG FIX: was sys.exit(0), which masked configuration failures;
        # exit non-zero on error like the other agent-setup scripts do.
        sys.exit(1)
Beispiel #3
0
def main():
    """Continuously push rotated audit logs to the instance logs endpoint.

    When invoked with '--shutdown', also pushes the live audit.log once
    (compressed) and returns instead of looping.
    """
    configure_logging()

    config = get_config()

    instance_logs_url = config.get('instance_logs_url')

    if not instance_logs_url:
        # BUG FIX: logging.warn is a deprecated alias of logging.warning
        # (removed in newer Python versions).
        logging.warning('No endpoint for instance logs configured.')
        return

    # identity = {'region': 'eu-west-1', 'accountId': 123456, 'instanceId': 'i-123'}
    identity = boto.utils.get_instance_identity()['document']

    region = identity['region']
    account_id = identity['accountId']
    instance_id = identity['instanceId']

    boot_time = get_boot_time()

    # '--shutdown' as first CLI argument switches to one-shot shutdown mode.
    is_shutdown = False
    if len(sys.argv) > 1:
        is_shutdown = sys.argv[1] == '--shutdown'

    while True:
        # Push already-rotated (gzipped) audit logs.
        for fn in glob.glob('/var/log/audit.log.*.gz'):
            push_audit_log(config, instance_logs_url, account_id, region, instance_id, boot_time, fn)
        if is_shutdown:
            # On shutdown also push the live log, compressing it ourselves.
            for fn in glob.glob('/var/log/audit.log'):
                push_audit_log(config, instance_logs_url, account_id, region, instance_id, boot_time, fn, compress=True)
            return
        time.sleep(60)
Beispiel #4
0
def main():
    """Configure custom sysctl parameters

    If a sysctl section is present, add the valid parameters to sysctl and reloads.
    """
    CUSTOM_SYSCTL_CONF = '/etc/sysctl.d/99-custom.conf'

    configure_logging()
    sysctl = get_config().get('sysctl')

    if sysctl is None:
        sys.exit(0)

    try:
        # One "key = value" line per parameter.
        body = '\n'.join('{} = {}'.format(name, setting)
                         for name, setting in sysctl.items())
        with open(CUSTOM_SYSCTL_CONF, 'w') as conf:
            conf.write(body + '\n')
        logging.info('Successfully written sysctl parameters')
    except Exception as e:
        logging.error('Failed to write sysctl parameters')
        logging.exception(e)
        sys.exit(1)

    try:
        # Apply the new parameters immediately.
        returncode = subprocess.call(['/sbin/sysctl', '-p', CUSTOM_SYSCTL_CONF])
        if returncode != 0:
            logging.error('Reloading sysctl failed with exitcode {}'.format(returncode))
            sys.exit(1)
        logging.info('Successfully reloaded sysctl parameters')
    except Exception as e:
        logging.error('Failed to reload sysctl')
        logging.exception(e)
        sys.exit(1)
Beispiel #5
0
def main():
    """Configure values for white-listed sysfs paths"""
    SYSFS_WHITELIST = ['/sys/kernel/mm/transparent_hugepage/enabled']

    configure_logging()
    config = get_config()

    sysfs = config.get('sysfs')

    if sysfs is None:
        sys.exit(0)

    # Report (but do not fail on) any path outside the whitelist.
    forbidden = set(sysfs) - set(SYSFS_WHITELIST)
    if forbidden:
        logging.error(
            'You are not allowed to edit the sysfs path(s) {}'.format(
                list(forbidden)))

    try:
        # Only whitelisted paths are written.
        for path, content in sysfs.items():
            if path in forbidden:
                continue
            with open(path, 'w') as target:
                target.write(content + '\n')
        logging.info('Successfully written allowed sysfs paths')
    except Exception as e:
        logging.error('Failed to write sysfs paths')
        logging.exception(e)
        sys.exit(1)
def main():
    """Write Docker registry credentials from the Taupage config.

    Writes the legacy ~/.dockercfg and the newer ~/.docker/config.json,
    merging the configured registries into an existing config.json.
    Exits 0 when no valid dockercfg is configured, 1 on write failure.
    """
    configure_logging()
    config = get_config()

    if not contains_valid_dockercfg(config):
        sys.exit(0)

    logging.info('Writing dockercfg')

    try:
        path = os.path.expanduser('~/.dockercfg')
        write_file(path, json.dumps(config.get('dockercfg')))

        directory = os.path.expanduser('~/.docker')
        if not os.path.exists(directory):
            os.makedirs(directory)
        path = os.path.expanduser('~/.docker/config.json')
        existing = config.get('dockercfg', {})
        if os.path.exists(path):
            # load — BUG FIX: use a context manager so the handle is closed
            with open(path) as f:
                data = json.load(f)
            # merge — BUG FIX: tolerate a pre-existing config.json without
            # an 'auths' key (plain data['auths'] raised KeyError)
            data.setdefault('auths', {}).update(existing)
            # write
            write_file(path, json.dumps(data))
        else:
            write_file(path, json.dumps({
                'auths': existing
            }))

        logging.info('Successfully placed dockercfg')
    except Exception as e:
        logging.error('Failed to create dockercfg')
        logging.exception(e)
        sys.exit(1)
Beispiel #7
0
def main():
    """Write Docker registry credentials from the Taupage config.

    Writes the legacy ~/.dockercfg and the newer ~/.docker/config.json,
    merging the configured registries into an existing config.json.
    Exits 0 when no valid dockercfg is configured, 1 on write failure.
    """
    configure_logging()
    config = get_config()

    if not contains_valid_dockercfg(config):
        sys.exit(0)

    logging.info('Writing dockercfg')

    try:
        path = os.path.expanduser('~/.dockercfg')
        write_file(path, json.dumps(config.get('dockercfg')))

        directory = os.path.expanduser('~/.docker')
        if not os.path.exists(directory):
            os.makedirs(directory)
        path = os.path.expanduser('~/.docker/config.json')
        existing = config.get('dockercfg', {})
        if os.path.exists(path):
            # load — BUG FIX: use a context manager so the handle is closed
            with open(path) as f:
                data = json.load(f)
            # merge — BUG FIX: tolerate a pre-existing config.json without
            # an 'auths' key (plain data['auths'] raised KeyError)
            data.setdefault('auths', {}).update(existing)
            # write
            write_file(path, json.dumps(data))
        else:
            write_file(path, json.dumps({
                'auths': existing
            }))

        logging.info('Successfully placed dockercfg')
    except Exception as e:
        logging.error('Failed to create dockercfg')
        logging.exception(e)
        sys.exit(1)
Beispiel #8
0
def get_scalyr_api_key():
    ''' Read Scalyr API key from Taupage config and set in template file

    Returns the (possibly KMS-decrypted) key, or None when no key is
    configured. Raises SystemExit(1) when decryption fails.
    '''
    main_config = get_config()
    config = main_config.get('logging')
    if config:
        scalyr_api_key = config.get('scalyr_account_key', False)
    else:
        scalyr_api_key = False

    if scalyr_api_key:
        # If scalyr_api_key starts with "aws:kms:" then decrypt key
        if re.match(r'aws:kms:', scalyr_api_key, re.IGNORECASE):
            # BUG FIX: strip the prefix case-insensitively and anchored at the
            # start — detection was case-insensitive but removal was not, so
            # e.g. "AWS:KMS:..." was detected yet never stripped.
            scalyr_api_key = re.sub(r'^aws:kms:', '', scalyr_api_key,
                                    flags=re.IGNORECASE)
            try:
                scalyr_api_key = subprocess.check_output([
                    'python3', '/opt/taupage/bin/decrypt-kms.py',
                    scalyr_api_key
                ]).decode('UTF-8').strip()
            except Exception:
                logger.error('Failed to run /opt/taupage/bin/decrypt-kms.py')
                # BUG FIX: SystemExit() without an argument exits with
                # status 0; report the failure with a non-zero status.
                raise SystemExit(1)
        if scalyr_api_key == "Invalid KMS key.":
            logger.error('Failed to decrypt KMS Key')
            raise SystemExit(1)
        return scalyr_api_key
    # Implicitly returns None when no key is configured.
def main():
    """Render the Telegraf config and start the service.

    Exits 0 when Telegraf is disabled or started successfully, 1 on failure.
    """
    configure_logging()
    config = get_config()

    telegraf_config = get_telegraf_config(config)
    if not telegraf_config:
        logging.info('Telegraf disabled by configuration')
        sys.exit(0)

    logging.info('Configuring Telegraf')

    # identity = {'region': 'eu-west-1', 'accountId': 123456, 'instanceId': 'i-123'}
    identity = boto.utils.get_instance_identity()['document']

    environment = {
        'application_id': config.get('application_id'),
        'application_version': config.get('application_version'),
        'region': identity['region'],
        'instance_id': identity['instanceId']
    }

    # Telegraf-specific settings extend/override the instance defaults.
    environment.update(telegraf_config)

    try:
        write_file('/etc/telegraf/telegraf.conf', render_template(CONFIG_TEMPLATE, environment))

        start_telegraf_service()

        logging.info('Successfully configured and started Telegraf')
        sys.exit(0)
    except Exception as e:
        logging.error('Failed to configure Telegraf')
        logging.exception(e)
        # BUG FIX: was sys.exit(0), which masked configuration failures;
        # exit non-zero on error like the other agent-setup scripts do.
        sys.exit(1)
Beispiel #10
0
def main():
    """Continuously push rotated audit logs with a randomized interval.

    When invoked with '--shutdown', also pushes the live audit.log once
    (compressed) and returns instead of looping. The random sleep spreads
    pushes from many instances over time.
    """
    configure_logging()

    config = get_config()

    instance_logs_url = config.get('instance_logs_url')

    if not instance_logs_url:
        # BUG FIX: logging.warn is a deprecated alias of logging.warning
        # (removed in newer Python versions).
        logging.warning('No endpoint for instance logs configured.')
        return

    # identity = {'region': 'eu-west-1', 'accountId': 123456, 'instanceId': 'i-123'}
    identity = boto.utils.get_instance_identity()['document']

    region = identity['region']
    account_id = identity['accountId']
    instance_id = identity['instanceId']

    boot_time = get_boot_time()

    # '--shutdown' as first CLI argument switches to one-shot shutdown mode.
    is_shutdown = False
    if len(sys.argv) > 1:
        is_shutdown = sys.argv[1] == '--shutdown'

    while True:
        # Push already-rotated (gzipped) audit logs.
        for fn in glob.glob('/var/log/audit.log.*.gz'):
            push_audit_log(config, instance_logs_url, account_id, region, instance_id, boot_time, fn)
        if is_shutdown:
            # On shutdown also push the live log, compressing it ourselves.
            for fn in glob.glob('/var/log/audit.log'):
                push_audit_log(config, instance_logs_url, account_id, region, instance_id, boot_time, fn, compress=True)
            return
        # Jitter between pushes to avoid thundering-herd load on the endpoint.
        rtime = random.randrange(60, 3000)
        time.sleep(rtime)
Beispiel #11
0
def main():
    """Configure values for white-listed sysfs paths"""
    SYSFS_WHITELIST = ['/sys/kernel/mm/transparent_hugepage/enabled']

    configure_logging()
    sysfs = get_config().get('sysfs')

    if sysfs is None:
        sys.exit(0)

    # Complain about (but do not abort on) non-whitelisted paths.
    not_allowed = set(sysfs.keys()) - set(SYSFS_WHITELIST)
    if not_allowed:
        logging.error('You are not allowed to edit the sysfs path(s) {}'.format(list(not_allowed)))

    # Keep only whitelisted entries before touching the filesystem.
    permitted = {path: value
                 for path, value in sysfs.items()
                 if path not in not_allowed}

    try:
        for path, value in permitted.items():
            with open(path, 'w') as handle:
                handle.write(value + '\n')
        logging.info('Successfully written allowed sysfs paths')
    except Exception as e:
        logging.error('Failed to write sysfs paths')
        logging.exception(e)
        sys.exit(1)
Beispiel #12
0
def main():
    """Push the masked Taupage user-data YAML to the instance logs endpoint.

    Does nothing when no 'instance_logs_url' is configured. Failures are
    logged but never fatal (best effort).
    """
    configure_logging()
    config = get_config()

    instance_logs_url = config.get('instance_logs_url')

    if instance_logs_url:
        # HTTP Basic-auth credentials for the log sink.
        userAndPass = b64encode(
            bytes('{}:{}'.format(config.get('logsink_username'),
                                 config.get('logsink_password')),
                  encoding='ascii')).decode("ascii") or ''

        # identity = {'region': 'eu-west-1', 'accountId': 123456, 'instanceId': 'i-123'}
        identity = boto.utils.get_instance_identity()['document']

        region = identity['region']
        account_id = identity['accountId']
        instance_id = identity['instanceId']

        boot_time = get_boot_time()

        # remove "sensitive" information from Taupage Config
        # (should be encrypted anyway, but better be sure..)
        masked_config = mask_dictionary(config)

        data = {
            'account_id': str(account_id),
            'region': region,
            'instance_boot_time': boot_time,
            'instance_id': instance_id,
            # YAML-dump the masked config and base64-encode it for transport.
            'log_data': codecs.encode(
                yaml.safe_dump(masked_config).encode('utf-8'),
                'base64').decode('utf-8'),
            'log_type': 'USER_DATA'
        }
        logging.info('Pushing Taupage YAML to {}..'.format(instance_logs_url))
        try:
            # TODO: use OAuth credentials
            response = requests.post(instance_logs_url,
                                     data=json.dumps(data),
                                     timeout=5,
                                     headers={
                                         'Content-Type': 'application/json',
                                         'Authorization': 'Basic {}'.format(userAndPass)
                                     })
            if response.status_code != 201:
                # BUG FIX: logging.warn is a deprecated alias of
                # logging.warning (removed in newer Python versions).
                logging.warning(
                    'Failed to push Taupage YAML: server returned HTTP status {}: {}'
                    .format(response.status_code, response.text))
        except Exception:
            logging.exception('Failed to push Taupage YAML')
Beispiel #13
0
def main():
    """Configure custom sysctl parameters

    If a sysctl section is present, add the valid parameters to sysctl and reloads.

    As some kernel parameters may not be allowed to be tuned, only parameters
    on a whitelist are allowed to be specified.
    """
    SYSCTL_WHITELIST = [
        'fs.file-max', 'vm.dirty_background_ratio', 'vm.dirty_ratio',
        'vm.overcommit_memory', 'vm.overcommit_ratio', 'vm.swappiness'
    ]
    CUSTOM_SYSCTL_CONF = '/etc/sysctl.d/99-custom.conf'

    configure_logging()
    sysctl = get_config().get('sysctl')

    if sysctl is None:
        sys.exit(0)

    # Report (but do not fail on) parameters outside the whitelist.
    rejected = set(sysctl) - set(SYSCTL_WHITELIST)
    if rejected:
        logging.error(
            'You are not allowed to configure the sysctl parameters {}'.format(
                list(rejected)))

    try:
        # One "key = value" line per whitelisted parameter.
        accepted = ['{} = {}'.format(name, setting)
                    for name, setting in sysctl.items()
                    if name in SYSCTL_WHITELIST]
        with open(CUSTOM_SYSCTL_CONF, 'w') as conf:
            conf.write('\n'.join(accepted) + '\n')
        logging.info('Successfully written sysctl parameters')
    except Exception as e:
        logging.error('Failed to write sysctl parameters')
        logging.exception(e)
        sys.exit(1)

    try:
        # Apply the new parameters immediately.
        returncode = subprocess.call(['/sbin/sysctl', '-p', CUSTOM_SYSCTL_CONF])
        if returncode != 0:
            logging.error(
                'Reloading sysctl failed with exitcode {}'.format(returncode))
            sys.exit(1)
        logging.info('Successfully reloaded sysctl parameters')
    except Exception as e:
        logging.error('Failed to reload sysctl')
        logging.exception(e)
        sys.exit(1)
def main():
    """Start the local_monitor service when it is configured.

    Always exits 0 — monitoring is best effort and must not fail the boot.
    """
    configure_logging()
    if not contains_local_monitor_config(get_config()):
        sys.exit(0)

    try:
        logging.info('Starting local_monitor')
        start_local_monitor_service()
    except Exception as e:
        logging.exception(e)
    sys.exit(0)
Beispiel #15
0
def main():
    """Start the local_monitor service when it is configured.

    Always exits 0 — monitoring is best effort and must not fail the boot.
    """
    configure_logging()
    config = get_config()

    if not contains_local_monitor_config(config):
        sys.exit(0)

    try:
        logging.info('Starting local_monitor')
        start_local_monitor_service()
        sys.exit(0)
    except Exception as exc:
        logging.exception(exc)
        sys.exit(0)
Beispiel #16
0
def main():
    """Configure custom sysctl parameters

    If a sysctl section is present, add the valid parameters to sysctl and reloads.

    As some kernel parameters may not be allowed to be tuned, only parameters
    on a whitelist are allowed to be specified.
    """
    SYSCTL_WHITELIST = ['fs.file-max',
                        'vm.dirty_background_ratio',
                        'vm.dirty_ratio',
                        'vm.max_map_count',
                        'vm.overcommit_memory',
                        'vm.overcommit_ratio',
                        'vm.swappiness',
                        'net.core.somaxconn']
    CUSTOM_SYSCTL_CONF = '/etc/sysctl.d/99-custom.conf'

    configure_logging()
    sysctl = get_config().get('sysctl')

    if sysctl is None:
        sys.exit(0)

    # Complain about (but do not abort on) non-whitelisted parameters.
    unknown = set(sysctl) - set(SYSCTL_WHITELIST)
    if unknown:
        logging.error('You are not allowed to configure the sysctl parameters {}'.format(list(unknown)))

    try:
        # One "key = value" line per whitelisted parameter.
        entries = ['{} = {}'.format(name, setting)
                   for name, setting in sysctl.items()
                   if name in SYSCTL_WHITELIST]
        with open(CUSTOM_SYSCTL_CONF, 'w') as conf:
            conf.write('\n'.join(entries) + '\n')
        logging.info('Successfully written sysctl parameters')
    except Exception as e:
        logging.error('Failed to write sysctl parameters')
        logging.exception(e)
        sys.exit(1)

    try:
        # Apply the new parameters immediately.
        status = subprocess.call(['/sbin/sysctl', '-p', CUSTOM_SYSCTL_CONF])
        if status != 0:
            logging.error('Reloading sysctl failed with exitcode {}'.format(status))
            sys.exit(1)
        logging.info('Successfully reloaded sysctl parameters')
    except Exception as e:
        logging.error('Failed to reload sysctl')
        logging.exception(e)
        sys.exit(1)
Beispiel #17
0
def main():
    """Attach configured volumes and mount the configured mount points."""
    # Process arguments
    args = process_arguments()
    # Verbosity is controlled by the --debug flag.
    level = logging.DEBUG if args.debug else logging.INFO
    configure_logging(level)

    # Load configuration from YAML file
    config = get_config(args.filename)

    if config.get("volumes"):
        handle_volumes(args, config)

    # Iterate over mount points
    iterate_mounts(config)
Beispiel #18
0
def main():
    """Attach configured volumes and mount the configured mount points."""
    # Process arguments
    args = process_arguments()
    # Verbosity is controlled by the --debug flag.
    configure_logging(logging.DEBUG if args.debug else logging.INFO)

    # Load configuration from YAML file
    config = get_config(args.filename)

    if config.get("volumes"):
        handle_volumes(args, config)

    # Iterate over mount points
    iterate_mounts(config)
Beispiel #19
0
def main():
    """Write the legacy ~/.dockercfg credentials file from the Taupage config."""
    configure_logging()
    config = get_config()

    if not contains_valid_dockercfg(config):
        sys.exit(0)

    logging.info('Writing dockercfg')

    try:
        dockercfg_path = os.path.expanduser('~/.dockercfg')
        content = json.dumps(config.get('dockercfg'))
        write_file(dockercfg_path, content)
        logging.info('Successfully placed dockercfg')
    except Exception as exc:
        logging.error('Failed to create dockercfg')
        logging.exception(exc)
        sys.exit(1)
Beispiel #20
0
def main():
    """Push the masked Taupage user-data YAML to the instance logs endpoint.

    Does nothing when no 'instance_logs_url' is configured. Failures are
    logged but never fatal (best effort).
    """
    configure_logging()
    config = get_config()

    instance_logs_url = config.get('instance_logs_url')

    if instance_logs_url:
        # HTTP Basic-auth credentials for the log sink.
        userAndPass = b64encode(bytes('{}:{}'.format(
                config.get('logsink_username'),
                config.get('logsink_password')),
                encoding='ascii')).decode("ascii") or ''

        # identity = {'region': 'eu-west-1', 'accountId': 123456, 'instanceId': 'i-123'}
        identity = boto.utils.get_instance_identity()['document']

        region = identity['region']
        account_id = identity['accountId']
        instance_id = identity['instanceId']

        boot_time = get_boot_time()

        # remove "sensitive" information from Taupage Config
        # (should be encrypted anyway, but better be sure..)
        masked_config = mask_dictionary(config)

        data = {'account_id': str(account_id),
                'region': region,
                'instance_boot_time': boot_time,
                'instance_id': instance_id,
                # YAML-dump the masked config, base64-encoded for transport.
                'log_data': codecs.encode(yaml.safe_dump(masked_config).encode('utf-8'), 'base64').decode('utf-8'),
                'log_type': 'USER_DATA'}
        logging.info('Pushing Taupage YAML to {}..'.format(instance_logs_url))
        try:
            # TODO: use OAuth credentials
            response = requests.post(instance_logs_url, data=json.dumps(data), timeout=5,
                                     headers={'Content-Type': 'application/json',
                                              'Authorization': 'Basic {}'.format(userAndPass)})
            if response.status_code != 201:
                # BUG FIX: logging.warn is a deprecated alias of
                # logging.warning (removed in newer Python versions).
                logging.warning('Failed to push Taupage YAML: server returned HTTP status {}: {}'.format(
                    response.status_code,
                    response.text))
        # BUG FIX: a bare "except:" also swallows SystemExit and
        # KeyboardInterrupt; catch Exception instead.
        except Exception:
            logging.exception('Failed to push Taupage YAML')
Beispiel #21
0
def main():
    """Configure custom sysctl parameters

    If a sysctl section is present, add the valid parameters to sysctl and reloads.
    """
    CUSTOM_SYSCTL_CONF = '/etc/sysctl.d/99-custom.conf'

    configure_logging()
    config = get_config()

    sysctl = config.get('sysctl')

    if sysctl is None:
        sys.exit(0)

    try:
        # One "key = value" line per parameter, newline-terminated.
        lines = ['{} = {}'.format(name, setting) for name, setting in sysctl.items()]
        with open(CUSTOM_SYSCTL_CONF, 'w') as conf:
            conf.write('\n'.join(lines) + '\n')
        logging.info('Successfully written sysctl parameters')
    except Exception as exc:
        logging.error('Failed to write sysctl parameters')
        logging.exception(exc)
        sys.exit(1)

    try:
        # Apply the new parameters immediately.
        exitcode = subprocess.call(['/sbin/sysctl', '-p', CUSTOM_SYSCTL_CONF])
        if exitcode != 0:
            logging.error('Reloading sysctl failed with exitcode {}'.format(exitcode))
            sys.exit(1)
        logging.info('Successfully reloaded sysctl parameters')
    except Exception as exc:
        logging.error('Failed to reload sysctl')
        logging.exception(exc)
        sys.exit(1)
Beispiel #22
0
        self.logger.info("Waiting for instance {0} to become healthy in elb {1}".format(instance_id, elb_name))

        for i in range(0, max(int(self.TIMEOUT / self.INTERVAL), 1)):
            state = self._get_elb_instance_state(instance_id, elb_name)
            if state == 'InService':
                self.logger.info("instance in service")
                return True
            else:
                self.logger.debug('waiting for instance')
                sleep(self.INTERVAL)

        self.logger.warning("timeout for in-service check exceeded")
        return False


if __name__ == '__main__':
    # PERF FIX: fetch the identity document once instead of twice —
    # each get_instance_identity() call is an instance-metadata HTTP request.
    document = get_instance_identity()['document']
    region = document['region']
    instance_id = document['instanceId']

    config = get_config()
    loadbalancer_name = config['healthcheck']['loadbalancer_name']

    healthchecker = ElbHealthChecker(region)
    is_in_service = healthchecker.is_in_service_from_elb_perspective(instance_id, loadbalancer_name)

    # Exit 0 when the ELB reports the instance InService, 1 otherwise.
    sys.exit(0 if is_in_service else 1)
Beispiel #23
0
def main():
    """Attach configured secondary ENIs and set up source-based routing.

    Reads the 'network_interfaces' section of the Taupage YAML, attaches the
    interfaces, brings them up via DHCP and installs one routing table per
    interface so several NICs can coexist in the same subnet.

    NOTE(review): assumes interfaces are named eth0..ethN and that eth0 is
    already configured — TODO confirm against the AMI's base network setup.
    """
    args = process_arguments()

    # Setup logging
    if args.debug:
        configure_logging(logging.DEBUG)
    else:
        configure_logging(logging.INFO)

    # Prefer an explicitly passed --region; otherwise ask the environment.
    current_region = args.region if args.region else get_region()

    # Load configuration from YAML file
    config = get_config(args.filename)

    if config.get("network_interfaces"):
        handle_network_interfaces(current_region, config)

        # The goal here is to be able to assign static IPs to instances
        # Within the Zalando AWS account setup we have a private subnet per
        # AZ. The idea is to create an ENI in each AZ in the private subnet where
        # you want a static IP. This means, your instance is going to have two
        # network interfaces on the same subnet, which causes some issues.
        #
        # The below code is based of the explaination at: https://goo.gl/2D8KrV
        # for handling two network interfaces in the same subnet.

        # Setting this to 1 Allows you to have multiple network interfaces on the same
        # subnet, and have the ARPs for each interface be answered based
        # on whether or not the kernel would route a packet from the
        # the ARP'd IP out that interface
        with open("/proc/sys/net/ipv4/conf/all/arp_filter", "w") as all_arp_filter:
            all_arp_filter.write("1")
        network_interfaces = []
        default_gateway = netifaces.gateways()['default'][netifaces.AF_INET][0]

        # Build the eth0..ethN device name list (one extra for the primary NIC).
        for device_index in range(0, len(config.get("network_interfaces")) + 1):
            network_interfaces.append("eth{}".format(device_index))

        # Run dhclient on all newly created interfaces to enable them to get IPs
        # Note, we do not run dhclient on eth0 as this may affect network connectivity
        # of the instance
        for network_interface in network_interfaces[1:]:
            subprocess.check_call(["dhclient", str(network_interface)])
        route_tables = []

        # Here we implement source-based routing, according to the serverfault post linked above
        # Table IDs start at 1; table N+1 belongs to ethN.
        for device_index in range(0, len(config.get("network_interfaces")) + 1):
            route_tables.append("{} eth{}".format(
                device_index + 1, device_index))

        # NOTE(review): this overwrites (not appends to) the system rt_tables
        # file, dropping its default entries — confirm this is intentional.
        with open("/etc/iproute2/rt_tables", "w") as rt_tables:
            rt_tables.write("\n".join(route_tables))

        # Per interface: default route + subnet route in its own table,
        # plus a rule sending traffic sourced from its IP through that table.
        for network_interface in network_interfaces:
            interface = netifaces.ifaddresses(network_interface)[
                netifaces.AF_INET][0]
            ip = interface['addr']
            subnet_cidr = str(IPAddress(interface["netmask"]).netmask_bits())
            subprocess.check_call(["ip", "route", "add", "default", "via", default_gateway, "dev",
                                   network_interface, "table", network_interface])
            subprocess.check_call(["ip", "route", "add", subnet_cidr, "dev", network_interface,
                                   "src", ip, "table", network_interface])
            subprocess.check_call(["ip", "rule", "add", "from", ip, "table",
                                   network_interface])
Beispiel #24
0
def main():
    """Configure custom routing if necessary.

    Routes outgoing HTTPS (and NTP) traffic through the NAT gateway mapped
    to this instance's subnet, except traffic to S3 which keeps using the
    VPC endpoint.
    """
    configure_logging()
    config = get_config()

    nat_gateways = config.get('nat_gateways')

    # nat_gateways must be a non-empty dictionary (subnet-id -> gateway IP)
    if not nat_gateways or not isinstance(nat_gateways, dict):
        sys.exit(0)

    METADATA_URL = 'http://169.254.169.254/latest/meta-data/'
    try:
        r = requests.get(METADATA_URL + 'placement/availability-zone')
        # Region = availability zone minus its trailing letter.
        region = r.text.strip()[:-1]
        logging.info('Region=%s', region)

        r = requests.get(METADATA_URL + 'mac')
        mac = r.text.strip()

        r = requests.get(METADATA_URL + 'network/interfaces/macs/' + mac +
                         '/subnet-id')
        subnet = r.text
        if subnet not in nat_gateways:
            logging.warning(
                'Can not find subnet %s in the nat_gateways mapping', subnet)
            sys.exit(0)

        logging.info('Will use %s nat gateway for outgoing https traffic',
                     nat_gateways[subnet])
    except Exception:
        logging.exception('Failed to read metadata')
        sys.exit(1)

    RT_TABLES = '/etc/iproute2/rt_tables'

    try:
        with open(RT_TABLES, 'a') as f:
            f.write('\n150 https\n')
        logging.info('Created new routing table for https traffic')
    except Exception:
        logging.exception('Failed to write into %s', RT_TABLES)
        sys.exit(1)

    iptables = ['iptables', '-w', '-t', 'mangle']

    # Mark outgoing HTTPS traffic to non-private destinations with fwmark 443.
    subprocess_call(iptables + [
        '-A', 'OUTPUT', '-p', 'tcp', '!', '-d', '172.16.0.0/12', '--dport',
        '443', '-j', 'MARK', '--set-mark', '443'
    ])

    # NTP traffic gets the same mark so it also goes through the gateway.
    subprocess_call(iptables + [
        '-A', 'OUTPUT', '-p', 'udp', '--dport', '123', '-j', 'MARK',
        '--set-mark', '443'
    ])

    subprocess_call(['ip', 'rule', 'add', 'fwmark', '443', 'lookup', 'https'])

    subprocess_call([
        'ip', 'route', 'add', 'default', 'via', nat_gateways[subnet], 'table',
        'https'
    ])

    # S3 is exceptional, it has it's own endpoint in VPC
    # BUG FIX: initialize ranges so the loop below does not raise NameError
    # when fetching ip-ranges.json fails (the except handler only logs).
    ranges = []
    try:
        r = requests.get('https://ip-ranges.amazonaws.com/ip-ranges.json')
        ranges = [
            e['ip_prefix'] for e in r.json()['prefixes'] if
            e['service'] == 'S3' and e['region'] == region and 'ip_prefix' in e
        ]
    except Exception:
        logging.exception('Failed to load ip-ranges.json')

    # Don't mark outgoing traffic to S3
    for r in ranges:
        subprocess_call(iptables + ['-I', 'OUTPUT', '-d', r, '-j', 'ACCEPT'])
Beispiel #25
0
def update_configuration_from_template():
    ''' Update Jinja Template to create configuration file for Scalyr '''
    # Which fluentd output destinations the rendered template must enable.
    fluentd_destinations = dict(scalyr=False,
                                s3=False,
                                rsyslog=False,
                                scalyr_s3=False)
    config = get_config()
    # Default missing sections to empty dicts so the .get() chains below
    # cannot raise (matches the newer variant of this function in the file).
    logging_config = config.get('logging', {})
    application_id = config.get('application_id')
    application_version = config.get('application_version')
    stack = config.get('notify_cfn', {}).get('stack')
    source = config.get('source')
    # Docker image coordinates without the version tag.
    image = source.split(':', 1)[0]
    instance_data = boto.utils.get_instance_identity()['document']
    aws_region = instance_data['region']
    aws_account = instance_data['accountId']
    # Short hostname: strip the EC2-internal DNS suffix.
    hostname = boto.utils.get_instance_metadata()['local-hostname'].split(
        '.')[0]
    customlog = config.get('mount_custom_log')
    if config.get('rsyslog_aws_metadata'):
        scalyr_syslog_log_parser = 'systemLogMetadata'
    else:
        scalyr_syslog_log_parser = 'systemLog'
    scalyr_application_log_parser = logging_config.get(
        'scalyr_application_log_parser', 'slf4j')
    scalyr_custom_log_parser = logging_config.get('scalyr_custom_log_parser',
                                                  'slf4j')
    # Per-stream destinations fall back to the global log_destination.
    fluentd_log_destination = logging_config.get('log_destination', 'scalyr')
    fluentd_syslog_destination = logging_config.get('syslog_destination',
                                                    fluentd_log_destination)
    fluentd_applog_destination = logging_config.get('applog_destination',
                                                    fluentd_log_destination)
    fluentd_authlog_destination = logging_config.get('authlog_destination',
                                                     fluentd_log_destination)
    fluentd_customlog_destination = logging_config.get(
        'customlog_destination', fluentd_log_destination)
    fluentd_loglevel = logging_config.get('fluentd_loglevel', 'info')
    fluentd_s3_region = logging_config.get('s3_region', 'eu-central-1')
    fluentd_s3_bucket = logging_config.get('s3_bucket')
    fluentd_s3_timekey = logging_config.get('s3_timekey', '1m')
    fluentd_rsyslog_host = logging_config.get('rsyslog_host')
    fluentd_rsyslog_port = logging_config.get('rsyslog_port', '514')
    fluentd_rsyslog_protocol = logging_config.get('rsyslog_protocol', 'tcp')
    fluentd_rsyslog_severity = logging_config.get('rsyslog_severity', 'notice')
    fluentd_rsyslog_program = logging_config.get('rsyslog_program', 'fluentd')
    fluentd_rsyslog_hostname = logging_config.get('rsyslog_hostname', hostname)

    for destination in (fluentd_applog_destination,
                        fluentd_authlog_destination,
                        fluentd_customlog_destination,
                        fluentd_syslog_destination):
        fluentd_destinations[destination] = True

    # Fetch the Scalyr API key only when a Scalyr destination is enabled;
    # otherwise pass None to the template.  Bug fix: scalyr_api_key was
    # previously left unbound when neither 'scalyr' nor 'scalyr_s3' was
    # configured, making the render() call below raise NameError.
    if fluentd_destinations.get('scalyr') or fluentd_destinations.get(
            'scalyr_s3'):
        scalyr_api_key = get_scalyr_api_key()
    else:
        scalyr_api_key = None

    env = Environment(loader=FileSystemLoader(TD_AGENT_TEMPLATE_PATH),
                      trim_blocks=True)
    template_data = env.get_template(TPL_NAME).render(
        scalyr_api_key=scalyr_api_key,
        application_id=application_id,
        application_version=application_version,
        stack=stack,
        source=source,
        image=image,
        aws_region=aws_region,
        aws_account=aws_account,
        customlog=customlog,
        scalyr_application_log_parser=scalyr_application_log_parser,
        scalyr_syslog_log_parser=scalyr_syslog_log_parser,
        scalyr_custom_log_parser=scalyr_custom_log_parser,
        fluentd_syslog_destination=fluentd_syslog_destination,
        fluentd_applog_destination=fluentd_applog_destination,
        fluentd_authlog_destination=fluentd_authlog_destination,
        fluentd_customlog_destination=fluentd_customlog_destination,
        fluentd_loglevel=fluentd_loglevel,
        fluentd_s3_region=fluentd_s3_region,
        fluentd_s3_bucket=fluentd_s3_bucket,
        fluentd_s3_timekey=fluentd_s3_timekey,
        fluentd_rsyslog_host=fluentd_rsyslog_host,
        fluentd_rsyslog_port=fluentd_rsyslog_port,
        fluentd_rsyslog_protocol=fluentd_rsyslog_protocol,
        fluentd_rsyslog_severity=fluentd_rsyslog_severity,
        fluentd_rsyslog_program=fluentd_rsyslog_program,
        fluentd_rsyslog_hostname=fluentd_rsyslog_hostname,
        fluentd_destinations=fluentd_destinations)

    try:
        with open(TD_AGENT_OUTPUT_PATH, 'w') as f:
            f.write(template_data)
    except Exception:
        logger.exception('Failed to write file td-agent.conf')
        # Abort: without td-agent.conf fluentd would run unconfigured.
        raise SystemExit(1)
Beispiel #26
0
        fluentd_rsyslog_program=fluentd_rsyslog_program,
        fluentd_rsyslog_hostname=fluentd_rsyslog_hostname,
        fluentd_destinations=fluentd_destinations)

    try:
        with open(TD_AGENT_OUTPUT_PATH, 'w') as f:
            f.write(template_data)
    except Exception:
        logger.exception('Failed to write file td-agent.conf')
        raise SystemExit(1)


if __name__ == '__main__':
    hostname = boto.utils.get_instance_metadata()['local-hostname'].split(
        '.')[0]
    config = get_config()
    logging_config = config.get('logging')
    s3_default = False
    if logging_config:
        if not logging_config.get('fluentd_enabled'):
            logger.info('Fluentd disabled; skipping Fluentd initialization')
            raise SystemExit()
    if not logging_config:
        logger.info(
            'Found no logging section in senza.yaml; enable dafault logging to s3'
        )
        s3_default = True
        try:
            with open('/var/local/textfile_collector/fluentd_default_s3.prom',
                      'w') as file:
                file.write(
Beispiel #27
0
def update_configuration_from_template(s3_default):
    ''' Update Jinja Template to create configuration file for Scalyr '''
    # NOTE(review): s3_default is currently unused in this body; it is kept
    # for caller compatibility -- confirm whether it should influence
    # fluentd_log_destination below.
    # Which fluentd output destinations the rendered template must enable.
    fluentd_destinations = dict(scalyr=False,
                                s3=False,
                                rsyslog=False,
                                scalyr_s3=False)
    config = get_config()
    logging_config = config.get('logging', {})
    application_id = config.get('application_id')
    application_version = config.get('application_version')
    stack = config.get('notify_cfn', {}).get('stack')
    source = config.get('source')
    # Docker image coordinates without the version tag (reuse the already
    # fetched value instead of a second config lookup).
    image = source.split(':', 1)[0]
    instance_data = boto.utils.get_instance_identity()['document']
    aws_region = instance_data['region']
    aws_account = instance_data['accountId']
    # Short hostname: strip the EC2-internal DNS suffix.
    hostname = boto.utils.get_instance_metadata()['local-hostname'].split(
        '.')[0]
    customlog = config.get('mount_custom_log')
    if config.get('rsyslog_aws_metadata'):
        scalyr_syslog_log_parser = 'systemLogMetadata'
    else:
        scalyr_syslog_log_parser = 'systemLog'
    scalyr_application_log_parser = logging_config.get(
        'scalyr_application_log_parser', 'slf4j')
    scalyr_custom_log_parser = logging_config.get('scalyr_custom_log_parser',
                                                  'slf4j')
    # Per-stream destinations fall back to the global log_destination.
    fluentd_log_destination = logging_config.get('log_destination', 's3')
    fluentd_syslog_destination = logging_config.get('syslog_destination',
                                                    fluentd_log_destination)
    fluentd_applog_destination = logging_config.get('applog_destination',
                                                    fluentd_log_destination)
    fluentd_authlog_destination = logging_config.get('authlog_destination',
                                                     fluentd_log_destination)
    fluentd_customlog_destination = logging_config.get(
        'customlog_destination', fluentd_log_destination)
    fluentd_applog_filter_exclude = logging_config.get('applog_filter_exclude',
                                                       None)
    fluentd_customlog_filter_exclude = logging_config.get(
        'customlog_filter_exclude', None)
    fluentd_loglevel = logging_config.get('fluentd_loglevel', 'error')
    fluentd_s3_raw_log_format = logging_config.get('s3_raw_log_format', 'true')
    fluentd_s3_region = logging_config.get('s3_region', aws_region)
    # Default bucket follows the account/region naming convention.
    fluentd_s3_bucket = logging_config.get(
        's3_bucket', 'zalando-logging-' + aws_account + '-' + aws_region)
    fluentd_s3_timekey = logging_config.get('s3_timekey', '5m')
    fluentd_s3_acl = logging_config.get('s3_acl', 'bucket-owner-full-control')
    fluentd_rsyslog_host = logging_config.get('rsyslog_host')
    fluentd_rsyslog_port = logging_config.get('rsyslog_port', '514')
    fluentd_rsyslog_protocol = logging_config.get('rsyslog_protocol', 'tcp')
    fluentd_rsyslog_severity = logging_config.get('rsyslog_severity', 'notice')
    fluentd_rsyslog_program = logging_config.get('rsyslog_program', 'fluentd')
    fluentd_rsyslog_hostname = logging_config.get('rsyslog_hostname', hostname)

    for destination in (fluentd_applog_destination,
                        fluentd_authlog_destination,
                        fluentd_customlog_destination,
                        fluentd_syslog_destination):
        fluentd_destinations[destination] = True

    # Get Scalyr key only if configured; None otherwise so render() below
    # always has a value.
    if fluentd_destinations.get('scalyr') or fluentd_destinations.get(
            'scalyr_s3'):
        scalyr_api_key = get_scalyr_api_key()
    else:
        scalyr_api_key = None

    # When logs go to S3, install a cron job that periodically verifies the
    # instance still has IAM access to the target bucket.
    if fluentd_destinations.get('s3') or fluentd_destinations.get('scalyr_s3'):
        try:
            with open('/etc/cron.d/s3-iam-check', 'w') as file:
                file.write('#!/bin/bash\n')
                file.write(
                    'PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\n'
                )
                file.write(
                    '*/5 * * * * root /opt/taupage/bin/s3-iam-check.py test {!s}\n'
                    .format(fluentd_s3_bucket))
        except Exception:
            logger.exception('Failed to write file /etc/cron.d/s3-iam-check')
            raise SystemExit(1)

    env = Environment(loader=FileSystemLoader(TD_AGENT_TEMPLATE_PATH),
                      trim_blocks=True)
    template_data = env.get_template(TPL_NAME).render(
        scalyr_api_key=scalyr_api_key,
        application_id=application_id,
        application_version=application_version,
        stack=stack,
        source=source,
        image=image,
        aws_region=aws_region,
        aws_account=aws_account,
        customlog=customlog,
        scalyr_application_log_parser=scalyr_application_log_parser,
        scalyr_syslog_log_parser=scalyr_syslog_log_parser,
        scalyr_custom_log_parser=scalyr_custom_log_parser,
        fluentd_syslog_destination=fluentd_syslog_destination,
        fluentd_applog_destination=fluentd_applog_destination,
        fluentd_applog_filter_exclude=fluentd_applog_filter_exclude,
        fluentd_authlog_destination=fluentd_authlog_destination,
        fluentd_customlog_destination=fluentd_customlog_destination,
        fluentd_customlog_filter_exclude=fluentd_customlog_filter_exclude,
        fluentd_loglevel=fluentd_loglevel,
        fluentd_s3_raw_log_format=fluentd_s3_raw_log_format,
        fluentd_s3_region=fluentd_s3_region,
        fluentd_s3_bucket=fluentd_s3_bucket,
        fluentd_s3_timekey=fluentd_s3_timekey,
        fluentd_s3_acl=fluentd_s3_acl,
        fluentd_rsyslog_host=fluentd_rsyslog_host,
        fluentd_rsyslog_port=fluentd_rsyslog_port,
        fluentd_rsyslog_protocol=fluentd_rsyslog_protocol,
        fluentd_rsyslog_severity=fluentd_rsyslog_severity,
        fluentd_rsyslog_program=fluentd_rsyslog_program,
        fluentd_rsyslog_hostname=fluentd_rsyslog_hostname,
        fluentd_destinations=fluentd_destinations)

    try:
        with open(TD_AGENT_OUTPUT_PATH, 'w') as f:
            f.write(template_data)
    except Exception:
        logger.exception('Failed to write file td-agent.conf')
        raise SystemExit(1)
def main():
    """Configure custom routing for outgoing HTTPS traffic if necessary.

    If the Taupage config maps this instance's subnet to a NAT gateway,
    mark outgoing HTTPS (tcp/443) and NTP (udp/123) packets and route them
    through that gateway via a dedicated "https" routing table.  Traffic to
    the regional S3 prefixes is explicitly excluded from the marking.
    Exits 0 when nothing needs to be done, 1 on metadata or file errors.
    """
    configure_logging()
    config = get_config()

    nat_gateways = config.get('nat_gateways')

    if not nat_gateways or not isinstance(nat_gateways, dict):  # nat gateways must be non empty dictionary
        sys.exit(0)

    METADATA_URL = 'http://169.254.169.254/latest/meta-data/'
    try:
        r = requests.get(METADATA_URL + 'placement/availability-zone')
        # Region is the availability zone minus its trailing zone letter.
        region = r.text.strip()[:-1]
        logging.info('Region=%s', region)

        r = requests.get(METADATA_URL + 'mac')
        mac = r.text.strip()

        r = requests.get(METADATA_URL + 'network/interfaces/macs/' + mac + '/subnet-id')
        subnet = r.text
        if subnet not in nat_gateways:
            logging.warning('Can not find subnet %s in the nat_gateways mapping', subnet)
            sys.exit(0)

        logging.info('Will use %s nat gateway for outgoing https traffic', nat_gateways[subnet])
    except Exception:
        logging.exception('Failed to read metadata')
        sys.exit(1)

    RT_TABLES = '/etc/iproute2/rt_tables'

    try:
        # Register routing table number 150 under the name "https".
        with open(RT_TABLES, 'a') as f:
            f.write('\n150 https\n')
        logging.info('Created new routing table for https traffic')
    except Exception:
        logging.exception('Failed to write into %s', RT_TABLES)
        sys.exit(1)

    iptables = ['iptables', '-w', '-t', 'mangle']

    # Mark HTTPS traffic leaving the private 172.16.0.0/12 address space.
    subprocess_call(iptables + ['-A', 'OUTPUT', '-p', 'tcp', '!', '-d', '172.16.0.0/12',
                                '--dport', '443', '-j', 'MARK', '--set-mark', '443'])

    # Mark outgoing NTP traffic with the same fwmark.
    subprocess_call(iptables + ['-A', 'OUTPUT', '-p', 'udp', '--dport', '123', '-j', 'MARK', '--set-mark', '443'])

    subprocess_call(['ip', 'rule', 'add', 'fwmark', '443', 'lookup', 'https'])

    subprocess_call(['ip', 'route', 'add', 'default', 'via', nat_gateways[subnet], 'table', 'https'])

    # S3 is exceptional, it has its own endpoint in VPC.
    # Bug fix: initialize `ranges` before the try so a failed download
    # cannot leave it unbound (the loop below would raise NameError);
    # the download stays best-effort as before.
    ranges = []
    try:
        r = requests.get('https://ip-ranges.amazonaws.com/ip-ranges.json')
        ranges = [e['ip_prefix'] for e in r.json()['prefixes']
                  if e['service'] == 'S3' and e['region'] == region and 'ip_prefix' in e]
    except Exception:
        logging.exception('Failed to load ip-ranges.json')

    # Don't mark outgoing traffic to S3
    for r in ranges:
        subprocess_call(iptables + ['-I', 'OUTPUT', '-d', r, '-j', 'ACCEPT'])
import logging
import sys

import boto.utils
import boto3
import requests
import yaml

from taupage import get_config

# Presumably a placeholder Scalyr key used for CI accounts -- TODO confirm
# against get_scalyr_api_key() usage.
FAKE_CI_ACCOUNT_KEY = "foo1234"

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

# Scalyr agent configuration file this script manages.
scalyr_agent_config_file = '/etc/scalyr-agent-2/agent.json'

# NOTE(review): get_config() runs at import time (module-level side effect).
main_config = get_config()
logging_config = main_config.get('logging', {})

mount_custom_log = main_config.get('mount_custom_log')
rsyslog_aws_metadata = main_config.get('rsyslog_aws_metadata')

# Log file locations referenced by the generated configuration.
application_log_path = '/var/log/application.log'
custom_log_path = '/var/log-custom/*.log'
auth_log_path = '/var/log/auth.log'
sys_log_path = '/var/log/syslog'

# Redaction rule: anything shaped like a JWT (three dot-separated
# base64url-like segments starting with 'eyJ') is replaced before shipping.
jwt_redaction = [{
    'match_expression':
    'eyJ[a-zA-Z0-9/+_=-]{5,}\\.eyJ[a-zA-Z0-9/+_=-]{5,}\\.[a-zA-Z0-9/+_=-]{5,}',
    'replacement': '+++JWT_TOKEN_REDACTED+++'
}]