Example 1
def main():
    if options.output=='syslog':
        logger.addHandler(SysLogHandler(address=(options.sysloghostname,options.syslogport)))
    else:
        sh=logging.StreamHandler(sys.stderr)
        sh.setFormatter(formatter)
        logger.addHandler(sh)

    logger.debug('started')
    #logger.debug(options)
    try:
        es=pyes.ES((list('{0}'.format(s) for s in options.esservers)))
        boto.connect_cloudtrail(aws_access_key_id=options.aws_access_key_id,aws_secret_access_key=options.aws_secret_access_key)
        #capture the time we start running so next time we catch any files created while we run.
        lastrun=toUTC(datetime.now()).isoformat()
        #in case we don't archive files..only look at today and yesterday's files.
        yesterday=date.strftime(datetime.utcnow()-timedelta(days=1),'%Y/%m/%d')
        today = date.strftime(datetime.utcnow(),'%Y/%m/%d')
        for region in boto.cloudtrail.regions():
            logger.debug('connecting to AWS region {0}'.format(region.name))
            ct=boto.cloudtrail.connect_to_region(region.name,aws_access_key_id=options.aws_access_key_id,aws_secret_access_key=options.aws_secret_access_key)
            trails=ct.describe_trails()['trailList']

            for trail in trails:
                s3 = boto.connect_s3(aws_access_key_id=options.aws_access_key_id,aws_secret_access_key=options.aws_secret_access_key)
                ctbucket=s3.get_bucket(trail['S3BucketName'])
                #ctbucket.get_all_keys()
                filelist=list()
                for bfile in ctbucket.list():

                    if 'CloudTrail' in bfile.key and 'json' in bfile.key:
                        if today in bfile.key or yesterday in bfile.key:
                            filelist.append(bfile.key)
                        else:
                            if options.purge:   #delete old files so we don't try to keep reading them.
                                s3file=ctbucket.get_key(bfile.key)
                                s3file.delete()
                for afile in filelist:
                    s3file=ctbucket.get_key(afile)
                    logger.debug('{0} {1}'.format(afile,s3file.last_modified))

                    if toUTC(s3file.last_modified)>options.lastrun:
                        compressedData=s3file.read()
                        databuf=StringIO(compressedData)
                        f=gzip.GzipFile(fileobj=databuf)
                        jlog=json.loads(f.read())
                        try:
                            for r in jlog['Records']:
                                r['utctimestamp']=toUTC(r['eventTime']).isoformat()
                                jbody=json.dumps(r)
                                res=es.index(index='events',doc_type='cloudtrail',doc=jbody)
                                #logger.debug(res)
                        except Exception as e:
                            logger.error('Error handling log record {0} {1}'.format(r, e))
                            continue
            setConfig('lastrun',lastrun,options.configfile)
    except boto.exception.NoAuthHandlerFound:
        logger.error("No auth handler found, check your credentials")
    except Exception as e:
        logger.error("Unhandled exception, terminating: %r"%e)
Example 2
def CTRAILConn(reg, profile='default'):
    ctrail = None
    endpt = 'cloudtrail.' + reg + '.amazonaws.com'
    reg = boto.regioninfo.RegionInfo(name=reg, endpoint=endpt)
    try:
        ctrail = boto.connect_cloudtrail(profile_name=profile, region=reg)
    except Exception as e:
        boto.log.error("Cannot validate provided AWS credentials: %s" % e)
    return ctrail
Example 3
    def test_cloudtrail(self):
        cloudtrail = boto.connect_cloudtrail()

        # Don't delete existing customer data!
        res = cloudtrail.describe_trails()
        if len(res['trailList']):
            self.fail('A trail already exists on this account!')

        # Who am I?
        iam = boto.connect_iam()
        response = iam.get_user()
        account_id = response['get_user_response']['get_user_result'] \
                             ['user']['user_id']

        # Setup a new bucket
        s3 = boto.connect_s3()
        bucket_name = 'cloudtrail-integ-{0}'.format(time())
        policy = DEFAULT_S3_POLICY.replace('<BucketName>', bucket_name)\
                                  .replace('<CustomerAccountID>', account_id)\
                                  .replace('<Prefix>/', '')
        b = s3.create_bucket(bucket_name)
        b.set_policy(policy)

        # Setup CloudTrail
        cloudtrail.create_trail(trail={
            'Name': 'test',
            'S3BucketName': bucket_name
        })

        cloudtrail.update_trail(trail={
            'Name': 'test',
            'IncludeGlobalServiceEvents': False
        })

        trails = cloudtrail.describe_trails()

        self.assertEqual('test', trails['trailList'][0]['Name'])
        self.assertFalse(trails['trailList'][0]['IncludeGlobalServiceEvents'])

        cloudtrail.start_logging(name='test')

        status = cloudtrail.get_trail_status(name='test')
        self.assertTrue(status['IsLogging'])

        cloudtrail.stop_logging(name='test')

        status = cloudtrail.get_trail_status(name='test')
        self.assertFalse(status['IsLogging'])

        # Clean up
        cloudtrail.delete_trail(name='test')

        for key in b.list():
            key.delete()

        s3.delete_bucket(bucket_name)
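
DEFAULT_S3_POLICY is referenced but not defined in the snippet. A rough sketch of the kind of bucket-policy template it stands for, based only on the <BucketName>, <CustomerAccountID> and <Prefix> placeholders the test substitutes; this is an assumption about its shape, not the exact constant shipped with boto:

DEFAULT_S3_POLICY = """{
  "Version": "2012-10-17",
  "Statement": [
    {
      "Sid": "AWSCloudTrailAclCheck",
      "Effect": "Allow",
      "Principal": {"Service": "cloudtrail.amazonaws.com"},
      "Action": "s3:GetBucketAcl",
      "Resource": "arn:aws:s3:::<BucketName>"
    },
    {
      "Sid": "AWSCloudTrailWrite",
      "Effect": "Allow",
      "Principal": {"Service": "cloudtrail.amazonaws.com"},
      "Action": "s3:PutObject",
      "Resource": "arn:aws:s3:::<BucketName>/<Prefix>/AWSLogs/<CustomerAccountID>/*",
      "Condition": {"StringEquals": {"s3:x-amz-acl": "bucket-owner-full-control"}}
    }
  ]
}"""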
Example 4
    def test_cloudtrail(self):
        cloudtrail = boto.connect_cloudtrail()

        # Don't delete existing customer data!
        res = cloudtrail.describe_trails()
        if len(res['trailList']):
            self.fail('A trail already exists on this account!')

        # Who am I?
        iam = boto.connect_iam()
        response = iam.get_user()
        account_id = response['get_user_response']['get_user_result'] \
                             ['user']['user_id']

        # Setup a new bucket
        s3 = boto.connect_s3()
        bucket_name = 'cloudtrail-integ-{0}'.format(time())
        policy = DEFAULT_S3_POLICY.replace('<BucketName>', bucket_name)\
                                  .replace('<CustomerAccountID>', account_id)\
                                  .replace('<Prefix>/', '')
        b = s3.create_bucket(bucket_name)
        b.set_policy(policy)

        # Setup CloudTrail
        cloudtrail.create_trail(trail={'Name': 'test', 'S3BucketName': bucket_name})

        cloudtrail.update_trail(trail={'Name': 'test', 'IncludeGlobalServiceEvents': False})

        trails = cloudtrail.describe_trails()

        self.assertEqual('test', trails['trailList'][0]['Name'])
        self.assertFalse(trails['trailList'][0]['IncludeGlobalServiceEvents'])

        cloudtrail.start_logging(name='test')

        status = cloudtrail.get_trail_status(name='test')
        self.assertTrue(status['IsLogging'])

        cloudtrail.stop_logging(name='test')

        status = cloudtrail.get_trail_status(name='test')
        self.assertFalse(status['IsLogging'])

        # Clean up
        cloudtrail.delete_trail(name='test')

        for key in b.list():
            key.delete()

        s3.delete_bucket(bucket_name)
Example 5
    def scached_closure(func, *args, **kw):
        key = md5(':'.join([func.__name__, str(args), str(kw)])).hexdigest()
        try:
            d = shelve.open(cache_file)
            changes = False
        except Exception:
            print("Cache appears to be corrupted. Re-generating.")
            expire_cache(cache_file=cache_file)
            changes = True
            d = shelve.open(cache_file)

        # Expire old data if we have to
        try:
            if key in d and d[key].get(
                    'expires_on',
                    datetime.datetime.now()) < datetime.datetime.now():
                # Entry has expired: drop it so it is rebuilt below rather
                # than being read back immediately after deletion.
                del d[key]
            if key in d:
                eprint("Cache set to expire on %s" % d[key]['expires_on'])
                eprint("Checking for changes...")
                dt_earlier = d[key]['expires_on'] - expiry
                earlier = time.mktime(dt_earlier.timetuple())
                stime = datetime.datetime.now()
                now = time.mktime(stime.timetuple())
                ct = boto.connect_cloudtrail()
                # print "earlier: %s - %s" % (earlier, time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(earlier)))
                # print "now: %s - %s" % (now, time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(now)))
                events = ct.lookup_events(start_time=earlier,
                                          end_time=now,
                                          lookup_attributes=[{
                                              'AttributeKey':
                                              'EventName',
                                              'AttributeValue':
                                              'RunInstances'
                                          }])['Events']
                for ev in events:
                    ts = time.strftime('%Y-%m-%d %H:%M:%S',
                                       time.localtime(ev['EventTime']))
                    if 'ami' in ev['Resources'][0]['ResourceName']:
                        eprint(
                            " !! Change detected: Instance: %s updated at %s" %
                            (ev['Resources'][0]['ResourceName'], ts))
                        changes = True
                if not changes:
                    eprint("No changes detected. Using cache.")
            # Get new data if we have to
            if key not in d or changes:
                eprint("Please wait while I rebuild the cache... ")
                data = func(*args, **kw)
                d[key] = {
                    'expires_on': datetime.datetime.now() + expiry,
                    'data': data,
                }

            result = d[key].get('data', '')
            print("Number of items in cache: {}".format(len(result)))
            if not result:
                result = scached(cache_file, expiry)
            d.close()
            return result
        except Exception as e:
            eprint("Debug: Problem with cache: {}".format(str(e)))
            expire_cache(cache_file=cache_file)
            return scached_closure(func, *args, **kw)
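
For context, scached_closure appears to be the inner function returned by a scached(cache_file, expiry) factory built on shelve: the closure receives the function to memoize plus its arguments. A minimal usage sketch under that assumption (the decorated function, cache path, and expiry window are hypothetical):

import datetime
import boto

def running_instances():
    # Expensive call whose result is worth caching between runs.
    ec2 = boto.connect_ec2()
    return ec2.get_only_instances()

# Build a cached caller bound to a cache file and an expiry window,
# then invoke it with the function to memoize.
cached_call = scached('/tmp/instances.cache', datetime.timedelta(hours=1))
instances = cached_call(running_instances)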
Example 6
def main():
    if options.output == 'syslog':
        logger.addHandler(
            SysLogHandler(address=(options.sysloghostname,
                                   options.syslogport)))
    else:
        sh = logging.StreamHandler(sys.stderr)
        sh.setFormatter(formatter)
        logger.addHandler(sh)

    logger.debug('started')
    #logger.debug(options)
    try:
        es = pyes.ES((list('{0}'.format(s) for s in options.esservers)))
        boto.connect_cloudtrail(
            aws_access_key_id=options.aws_access_key_id,
            aws_secret_access_key=options.aws_secret_access_key)
        #capture the time we start running so next time we catch any files created while we run.
        lastrun = toUTC(datetime.now()).isoformat()
        #in case we don't archive files..only look at today and yesterday's files.
        yesterday = date.strftime(datetime.utcnow() - timedelta(days=1),
                                  '%Y/%m/%d')
        today = date.strftime(datetime.utcnow(), '%Y/%m/%d')
        for region in boto.cloudtrail.regions():
            logger.debug('connecting to AWS region {0}'.format(region.name))
            ct = boto.cloudtrail.connect_to_region(
                region.name,
                aws_access_key_id=options.aws_access_key_id,
                aws_secret_access_key=options.aws_secret_access_key)
            trails = ct.describe_trails()['trailList']

            for trail in trails:
                s3 = boto.connect_s3(
                    aws_access_key_id=options.aws_access_key_id,
                    aws_secret_access_key=options.aws_secret_access_key)
                ctbucket = s3.get_bucket(trail['S3BucketName'])
                #ctbucket.get_all_keys()
                filelist = list()
                for bfile in ctbucket.list():

                    if 'CloudTrail' in bfile.key and 'json' in bfile.key:
                        if today in bfile.key or yesterday in bfile.key:
                            filelist.append(bfile.key)
                        else:
                            if options.purge:  #delete old files so we don't try to keep reading them.
                                s3file = ctbucket.get_key(bfile.key)
                                s3file.delete()
                for afile in filelist:
                    s3file = ctbucket.get_key(afile)
                    logger.debug('{0} {1}'.format(afile, s3file.last_modified))

                    if toUTC(s3file.last_modified) > options.lastrun:
                        compressedData = s3file.read()
                        databuf = StringIO(compressedData)
                        f = gzip.GzipFile(fileobj=databuf)
                        jlog = json.loads(f.read())
                        try:
                            for r in jlog['Records']:
                                r['utctimestamp'] = toUTC(
                                    r['eventTime']).isoformat()
                                jbody = json.dumps(r)
                                res = es.index(index='events',
                                               doc_type='cloudtrail',
                                               doc=jbody)
                                #logger.debug(res)
                        except Exception as e:
                            logger.error(
                                'Error handling log record {0} {1}'.format(
                                    r, e))
                            continue
            setConfig('lastrun', lastrun, options.configfile)
    except boto.exception.NoAuthHandlerFound:
        logger.error("No auth handler found, check your credentials")
    except Exception as e:
        logger.error("Unhandled exception, terminating: %r" % e)