def upload_to_s3(filename, log_date):
    """Upload a zipped log archive to the configured S3 bucket.

    :param filename: local path of the archive to upload
    :param log_date: date string naming the S3 object (<log_date>.zip)
    """
    s3 = S3(setting.get('elb_log_s3', 'bucket'))
    # The original wrapped this in a no-op single-argument os.path.join();
    # the configured prefix is usable as-is.
    prefix = setting.get('elb_log_s3', 'archived_log_key_prefix')
    key_name = os.path.join(prefix, '%s.zip' % log_date)
    s3.upload(key_name, filename)
    logger.info('Upload %s', key_name)
def delete_archived_log(target_date):
    """Delete the archived log object for *target_date* from S3.

    :param target_date: date string identifying the archive (<target_date>.zip)
    """
    s3 = S3(setting.get('elb_log_s3', 'bucket'))
    # Single-argument os.path.join() was a no-op; use the prefix directly.
    prefix = setting.get('elb_log_s3', 'archived_log_key_prefix')
    key_name = os.path.join(prefix, '%s.zip' % target_date)
    s3.bucket.delete_key(key_name)
    logger.info('Delete object: %s', key_name)
Example #3
def upload_to_s3(filename, log_date):
    """Store *filename* in the archive bucket under <prefix>/<log_date>.zip.

    :param filename: local path of the archive to upload
    :param log_date: date string used for the object name
    """
    s3 = S3(setting.get('elb_log_s3', 'bucket'))
    # Dropped the redundant one-argument os.path.join() wrapper.
    prefix = setting.get('elb_log_s3', 'archived_log_key_prefix')
    key_name = os.path.join(prefix, '%s.zip' % log_date)
    s3.upload(key_name, filename)
    logger.info('Upload %s', key_name)
Example #4
def delete_archived_log(target_date):
    """Remove the <prefix>/<target_date>.zip archive object from S3.

    :param target_date: date string identifying the archive to delete
    """
    s3 = S3(setting.get('elb_log_s3', 'bucket'))
    # os.path.join() with one argument returns it unchanged — removed.
    prefix = setting.get('elb_log_s3', 'archived_log_key_prefix')
    key_name = os.path.join(prefix, '%s.zip' % target_date)
    s3.delete(key_name)
    logger.info('Delete object: %s', key_name)
Example #5
    def __init__(self, bucket_name):
        """Open a boto S3 connection in the configured region and bind the bucket."""
        region = setting.get('elb_log_s3', 'region')
        access_key = setting.get('aws', 'access_key')
        secret_key = setting.get('aws', 'secret_key')
        self.conn = boto.s3.connect_to_region(
            region,
            aws_access_key_id=access_key,
            aws_secret_access_key=secret_key
        )
        self.bucket = self.conn.get_bucket(bucket_name)
Example #6
def get_queue():
    """Return the SQS queue named in the elb_log_sqs configuration section."""
    region = setting.get('elb_log_sqs', 'region')
    queue_name = setting.get('elb_log_sqs', 'queue_name')
    conn = boto.sqs.connect_to_region(
        region,
        aws_access_key_id=setting.get('aws', 'access_key'),
        aws_secret_access_key=setting.get('aws', 'secret_key')
    )
    return conn.get_queue(queue_name)
def download_logs_of_a_date(log_date, output_folder):
    """Download every ELB log object stored under *log_date*'s key prefix.

    :param log_date: 'YYYY-MM-DD' date string
    :param output_folder: local directory receiving the downloaded files
    :return: list of downloaded S3 key names
    """
    log_date = datetime.strptime(log_date, '%Y-%m-%d')
    key_prefix = setting.get('elb_log_s3', 'log_key_prefix')
    key_prefix = ''.join([key_prefix, log_date.strftime('%Y/%m/%d')])
    s3 = S3(setting.get('elb_log_s3', 'bucket'))
    key_names = [k.name for k in s3.bucket.list(key_prefix)]
    pool = GreenPool(10)

    # Named function instead of a lambda bound to a name (PEP 8 E731).
    def download_fn(key_name):
        return download_log(s3, key_name, output_folder)

    # list() drains the lazy imap iterator so every download actually runs.
    list(pool.imap(download_fn, key_names))
    logger.info('Download all logs on %s', log_date.isoformat())
    return key_names
Example #8
    def test_call(self):
        # respond() should post exactly one Slack message assembled from
        # the fixture API record: the configured channel, an error title
        # derived from the rails controller#action, a danger-colored
        # attachment, and the configured icon URL.
        self.actor.respond(self.api)
        self.actor.slack.chat.post_message.assert_called_with(
            setting.get('slack', 'channel'),
            'Server error [users#create]',
            username='******',
            attachments=[{
                'fallback': '2015-01-01 00:00:00 500 500 POST /users',
                'color': 'danger',
                'text': 'this is message'
            }],
            icon_url=setting.get('slack', 'icon'))
Example #9
def download_logs_of_a_date(log_date, output_folder):
    """Download every ELB log object stored under *log_date*'s key prefix.

    :param log_date: 'YYYY-MM-DD' date string
    :param output_folder: local directory receiving the downloaded files
    :return: list of S3 key names scheduled for download
    """
    log_date = datetime.strptime(log_date, '%Y-%m-%d')
    key_prefix = setting.get('elb_log_s3', 'log_key_prefix')
    key_prefix = ''.join([key_prefix, log_date.strftime('%Y/%m/%d')])
    s3 = S3(setting.get('elb_log_s3', 'bucket'))
    key_names = []
    tasks = []

    with futures.ThreadPoolExecutor(max_workers=10) as executor:
        for key_name in s3.list(key_prefix):
            tasks.append(
                executor.submit(download_log, s3, key_name, output_folder)
            )
            key_names.append(key_name)

    # The original discarded the futures, so download exceptions vanished
    # silently; surface them here without aborting the whole batch.
    for task in tasks:
        if task.exception() is not None:
            logger.error('Download failed: %s', task.exception())

    logger.info('Download all logs on %s', log_date.isoformat())
    return key_names
def delete_elasticsearch_index(target_date):
    """Drop the daily logstash index corresponding to *target_date*."""
    parsed = datetime.strptime(target_date, '%Y-%m-%d')
    index_name = 'logstash-%s' % parsed.strftime('%Y.%m.%d')
    client = Elasticsearch(setting.get('elasticsearch', 'url'))
    client.indices.delete(index=index_name)
    logger.info('Delete elasticsearch index: %s', index_name)
Example #11
def delete_elasticsearch_index(target_date):
    """Remove the logstash-YYYY.MM.DD index matching *target_date*."""
    es = Elasticsearch(setting.get('elasticsearch', 'url'))
    day = datetime.strptime(target_date, '%Y-%m-%d')
    index_name = 'logstash-' + day.strftime('%Y.%m.%d')
    es.indices.delete(index=index_name)
    logger.info('Delete elasticsearch index: %s', index_name)
    def test_call(self):
        # Calling respond() with the fixture record must post one Slack
        # message: configured channel, title built from the rails
        # controller#action, a danger-colored attachment carrying the raw
        # request line, and the configured icon URL.
        self.actor.respond(self.api)
        self.actor.slack.chat.post_message.assert_called_with(
            setting.get('slack', 'channel'),
            'Server error [users#create]',
            username='******',
            attachments=[
                {
                    'fallback': '2015-01-01 00:00:00 500 500 POST /users',
                    'color': 'danger',
                    'text': 'this is message'
                }
            ],
            icon_url=setting.get('slack', 'icon')
        )
Example #13
    def respond(self, api_record):
        """Post a Slack notification for a server-error API record.

        Retries the Slack API up to 5 times; each failure is logged and
        retried immediately (no backoff), giving up silently after the
        last attempt.

        :param api_record: dict describing the failed request; the rails
            'controller#action' value is shown in the message title
        """
        ctrl_action = api_record.get('rails', {}).get('controller#action', '')

        for attempt in xrange(5):
            try:
                self.slack.chat.post_message(
                    setting.get('slack', 'channel'),
                    # Explicit mapping instead of the fragile `% locals()`
                    # idiom, which couples the format string to local names.
                    'Server error [%(ctrl_action)s]' % {'ctrl_action': ctrl_action},
                    username='******',
                    attachments=self._make_attachments(api_record),
                    icon_url=setting.get('slack', 'icon'))
                break

            except Exception as e:
                logger.exception(e)
                logger.error('Slack API failed, try again')
Example #14
def delete_logs(key_names):
    """Delete the given S3 objects concurrently.

    :param key_names: iterable of S3 key names to remove
    """
    s3 = S3(setting.get('elb_log_s3', 'bucket'))
    tasks = []

    with futures.ThreadPoolExecutor(max_workers=10) as executor:
        for key_name in key_names:
            tasks.append(executor.submit(s3.delete, key_name))

    # The original dropped the futures, so failed deletions disappeared
    # silently; report them here without aborting the batch.
    for task in tasks:
        if task.exception() is not None:
            logger.error('Delete failed: %s', task.exception())

    logger.info('Delete archived logs')
    def respond(self, api_record):
        """Notify Slack about a server-error API record.

        Makes up to 5 attempts against the Slack API; failures are logged
        and the next attempt proceeds immediately, giving up silently
        after the final one.

        :param api_record: dict describing the failed request
        """
        ctrl_action = api_record.get('rails', {}).get('controller#action', '')

        for attempt in xrange(5):
            try:
                self.slack.chat.post_message(
                    setting.get('slack', 'channel'),
                    # Explicit mapping replaces `% locals()`, which silently
                    # depends on the surrounding local variable names.
                    'Server error [%(ctrl_action)s]' % {'ctrl_action': ctrl_action},
                    username='******',
                    attachments=self._make_attachments(api_record),
                    icon_url=setting.get('slack', 'icon')
                )
                break

            except Exception as e:
                logger.exception(e)
                logger.error('Slack API failed, try again')
def query_server_error_records(begin_time, end_time):
    """Collect all 5xx log records between *begin_time* and *end_time*.

    Searches every daily logstash index the window touches (inclusive of
    both end dates) and pages through the hits 100 at a time.
    """
    es = Elasticsearch(setting.get('elasticsearch', 'url'))

    # One logstash index per calendar day in the window, inclusive.
    indices = []
    current = begin_time.date()
    while current <= end_time.date():
        indices.append('logstash-' + current.strftime('%Y.%m.%d'))
        current += timedelta(days=1)

    begin_at = timegm(begin_time.timetuple()) * 1000
    end_at = timegm(end_time.timetuple()) * 1000
    index = ','.join(indices)
    body = {
        'filter': {
            'bool': {
                'must': [
                    {'range': {'timestamp': {'gte': begin_at, 'lt': end_at}}}
                ],
                'should': [
                    {'range': {'backend_status_code': {'gte': 500}}},
                    {'range': {'elb_status_code': {'gte': 500}}}
                ]
            }
        }
    }
    results = []
    offset = 0

    # Page through the result set until a search returns no hits.
    while True:
        body['from'] = offset
        result = es.search(index=index, body=body, sort='timestamp:asc', size=100)
        logger.debug(result)
        hits = result.get('hits', {}).get('hits', [])

        if not hits:
            break

        results.extend([h['_source'] for h in hits])
        offset += len(hits)

    return results
Example #17
def query_server_error_records(begin_time, end_time):
    """Return all server-error records between the two datetimes.

    Builds the list of daily logstash indices covered by the window and
    pages through matching documents 100 at a time.

    :param begin_time: window start (datetime, inclusive)
    :param end_time: window end (datetime, exclusive)
    :return: list of matching record dicts (the ES '_source' payloads)
    """
    es_api = Elasticsearch(setting.get('elasticsearch', 'url'))
    # BUGFIX: use days + 1 so end_time's own date is included — the query
    # bound 'lt end_at' can still match documents stored in that day's
    # index, which xrange(days) wrongly excluded.
    indices = [
        index_name(begin_time.date() + timedelta(n))
        for n in xrange((end_time.date() - begin_time.date()).days + 1)
    ]
    begin_at = timegm(begin_time.timetuple()) * 1000
    end_at = timegm(end_time.timetuple()) * 1000
    # Fallback guards against an empty list (end_time before begin_time).
    index = ','.join(indices) if indices else index_name(begin_time.date())
    offset = 0
    results = []
    body = {
        'query': {
            'bool': {
                'filter': [
                    RangeClause('timestamp', begin_at, end_at).get_clause(),
                    RangeClause('elb_status_code', 500).get_clause(),
                    # Filter out /robots.txt requests
                    ExistClause('rails.controller#action').get_clause(),
                    # Filter out https://52.197.62.134 requests
                    TermClause('domain_name', 'api.thekono.com').get_clause()
                ]
            }
        }
    }

    # Page through the result set until a search returns no hits.
    while True:
        body['from'] = offset
        args = dict(index=index, body=body, sort='timestamp:asc', size=100)
        result = es_api.search(**args)
        logger.debug(result)
        hits = result.get('hits', {}).get('hits', [])

        if not hits:
            break

        results.extend([h['_source'] for h in hits])
        offset += len(hits)

    return results
def delete_logs(key_names):
    """Bulk-delete the given archived log objects from S3."""
    bucket_name = setting.get('elb_log_s3', 'bucket')
    S3(bucket_name).bucket.delete_keys(key_names, quiet=True)
    logger.info('Delete archived logs')
    def init_slack(self):
        """Create the Slacker client from the configured API token."""
        token = setting.get('slack', 'token')
        self.slack = Slacker(token)
Example #20
#!/usr/bin/env python

# standard library imports
import logging
from logging.handlers import TimedRotatingFileHandler

# third party related imports

# local library imports
from elb_log_analyzer.config import setting


# Public API of this module: a pre-configured application logger.
__all__ = ['logger']
# Resolve the configured level name (e.g. 'INFO') to a logging constant.
logging_level = getattr(logging, setting.get('logger', 'level'))

# Rotate the log file at midnight and keep 30 days of history.
handler = TimedRotatingFileHandler(
    filename=setting.get('logger', 'filename'),
    when='midnight',
    backupCount=30
)
handler.setLevel(logging_level)
handler.setFormatter(logging.Formatter(setting.get('logger', 'format')))

# Module-level logger shared by the whole package.
logger = logging.getLogger('elb_log_analyzer')
logger.setLevel(logging_level)
logger.addHandler(handler)
Example #21
    def init_slack(self):
        """Set up ``self.slack`` with a client for the configured token."""
        api_token = setting.get('slack', 'token')
        self.slack = Slacker(api_token)
    def __init__(self):
        """Read logstash paths from configuration and locate the template."""
        self.logstash_script = setting.get('logstash', 'script')
        self.route_spec_path = setting.get('logstash', 'route_spec')
        here = os.path.abspath(os.path.dirname(__file__))
        self.tmpl_path = os.path.join(here, 'logstash.conf.jinja2')
Example #23
    def get_channel(self):
        """Return the Slack channel configured for notifications."""
        channel = setting.get('slack', 'channel')
        return channel
Example #24
    def get_icon_url(self):
        """Return the icon URL configured for Slack messages."""
        icon = setting.get('slack', 'icon')
        return icon
Example #25
    def __init__(self):
        """Build the Slacker client from the configured API token."""
        slack_token = setting.get('slack', 'token')
        self.slack = Slacker(slack_token)
    def get_es(self):
        """Construct an Elasticsearch client for the configured endpoint."""
        es_url = setting.get('elasticsearch', 'url')
        return Elasticsearch(es_url)
Example #27
    def __init__(self, bucket_name):
        """Create a boto3 S3 client in the configured region for *bucket_name*."""
        self.client = boto3.client(
            's3',
            region_name=setting.get('elb_log_s3', 'region')
        )
        self.bucket = bucket_name
    def get_icon_url(self):
        """Look up the configured Slack message icon URL."""
        return setting.get('slack', 'icon')
    def get_channel(self):
        """Look up the configured Slack notification channel."""
        return setting.get('slack', 'channel')
    def __init__(self):
        """Initialize the Slack client using the configured token."""
        token = setting.get('slack', 'token')
        self.slack = Slacker(token)
Example #31
    def __init__(self):
        """Capture configured logstash paths and the bundled config template."""
        self.logstash_script = setting.get('logstash', 'script')
        self.route_spec_path = setting.get('logstash', 'route_spec')
        module_dir = os.path.abspath(os.path.dirname(__file__))
        self.tmpl_path = os.path.join(module_dir, 'logstash.conf.jinja2')