Example #1
def main():
    if options.output=='syslog':
        logger.addHandler(SysLogHandler(address=(options.sysloghostname,options.syslogport)))
    else:
        sh=logging.StreamHandler(sys.stderr)
        sh.setFormatter(formatter)
        logger.addHandler(sh)

    logger.debug('started')
    #logger.debug(options)
    try:
        es=pyes.ES((list('{0}'.format(s) for s in options.esservers)))
        boto.connect_cloudtrail(aws_access_key_id=options.aws_access_key_id,aws_secret_access_key=options.aws_secret_access_key)
        #capture the time we start running so next time we catch any files created while we run.
        lastrun=toUTC(datetime.now()).isoformat()
        #in case we don't archive files..only look at today and yesterday's files.
        yesterday=date.strftime(datetime.utcnow()-timedelta(days=1),'%Y/%m/%d')
        today = date.strftime(datetime.utcnow(),'%Y/%m/%d')
        for region in boto.cloudtrail.regions():
            logger.debug('connecting to AWS region {0}'.format(region.name))
            ct=boto.cloudtrail.connect_to_region(region.name,aws_access_key_id=options.aws_access_key_id,aws_secret_access_key=options.aws_secret_access_key)
            trails=ct.describe_trails()['trailList']

            for trail in trails:
                s3 = boto.connect_s3(aws_access_key_id=options.aws_access_key_id,aws_secret_access_key=options.aws_secret_access_key)
                ctbucket=s3.get_bucket(trail['S3BucketName'])
                #ctbucket.get_all_keys()
                filelist=list()
                for bfile in ctbucket.list():

                    if 'CloudTrail' in bfile.key and 'json' in bfile.key:
                        if today in bfile.key or yesterday in bfile.key:
                            filelist.append(bfile.key)
                        else:
                            if options.purge:   #delete old files so we don't try to keep reading them.
                                s3file=ctbucket.get_key(bfile.key)
                                s3file.delete()
                for afile in filelist:
                    s3file=ctbucket.get_key(afile)
                    logger.debug('{0} {1}'.format(afile,s3file.last_modified))

                    if toUTC(s3file.last_modified)>options.lastrun:
                        compressedData=s3file.read()
                        databuf=StringIO(compressedData)
                        f=gzip.GzipFile(fileobj=databuf)
                        jlog=json.loads(f.read())
                        try:
                            for r in jlog['Records']:
                                r['utctimestamp']=toUTC(r['eventTime']).isoformat()
                                jbody=json.dumps(r)
                                res=es.index(index='events',doc_type='cloudtrail',doc=jbody)
                                #logger.debug(res)
                        except Exception as e:
                            logger.error('Error handling log record {0} {1}'.format(r, e))
                            continue
            setConfig('lastrun',lastrun,options.configfile)
    except boto.exception.NoAuthHandlerFound:
        logger.error("No auth handler found, check your credentials")
    except Exception as e:
        logger.error("Unhandled exception, terminating: %r"%e)
Example #2
def main():
    if options.output=='syslog':
        logger.addHandler(SysLogHandler(address=(options.sysloghostname,options.syslogport)))
    else:
        sh=logging.StreamHandler(sys.stderr)
        sh.setFormatter(formatter)
        logger.addHandler(sh)

    logger.debug('started')
    logger.debug(options)
    try:
        es=pyes.ES((list('{0}'.format(s) for s in options.esservers)))
        #capture the time we start running so next time we catch any files created while we run.
        lastrun=str(options.lastrun.isoformat())
        today=datetime.utcnow().isoformat()+'+00:00'
        # set the max num of items to 50k. At ~600 bytes per item, that's already ~30MB of json body.
        url = options.mighost+'/api/v1/search?type=command&status=success&threatfamily=compliance&report=complianceitems&limit=50000&before='+today+'&after='+lastrun
        url = url.replace('+00:00', 'Z')

        # Prepare the request, make an authorization token using the local keyring
        token = makeToken(options.gpghome, options.keyid)
        r = requests.get(url,
            headers={'X-PGPAUTHORIZATION': token},
            timeout=240, # timeout at 4 minutes. those are big requests.
            verify=True)
        if r.status_code == 200:
            migjson=r.json()
            logger.debug(url)
            cicnt=0
            for items in migjson['collection']['items']:
                for dataitem in items['data']:
                    if dataitem['name'] == 'compliance item':
                        cicnt += 1
                        complianceitem = dataitem['value']
                        # historical data - index the new logs
                        res=es.index(index='complianceitems',doc_type='history',doc=complianceitem)
                        # last_known_state data - update the logs
                        # _id = md5('complianceitems'+check.ref+check.test.value+target)
                        docid=hashlib.md5('complianceitems'+complianceitem['check']['ref']+complianceitem['check']['test']['value']+complianceitem['target']).hexdigest()
                        res=es.index(index='complianceitems',id=docid,doc_type='last_known_state',doc=complianceitem)
            if cicnt == 0:
                logger.debug("No compliance item available, terminating")
            setConfig('lastrun',today,options.configfile)
        elif r.status_code == 500:
            # api returns a 500 with an error body on failures
            migjson=r.json()
            raise Exception("API returned HTTP code %s and error '%s:%s'" %
                                (r.status_code,
                                migjson['collection']['error']['code'],
                                migjson['collection']['error']['message'])
                            )
        else:
            # another type of failure that's unlikely to have an error body
            raise Exception("Failed with HTTP code %s" % r.status_code)
    except Exception as e:
        logger.error("Unhandled exception, terminating: %r"%e)
Example #3
def main():
    if options.output == "syslog":
        logger.addHandler(SysLogHandler(address=(options.sysloghostname, options.syslogport)))
    else:
        sh = logging.StreamHandler(sys.stderr)
        sh.setFormatter(formatter)
        logger.addHandler(sh)

    logger.debug("started")
    # logger.debug(options)
    try:
        es = pyes.ES((list("{0}".format(s) for s in options.esservers)))
        s = requests.Session()
        s.headers.update({"Accept": "application/json"})
        s.headers.update({"Content-type": "application/json"})
        s.headers.update({"Authorization": "SSWS {0}".format(options.apikey)})

        # capture the time we start running so next time we catch any events created while we run.
        lastrun = toUTC(datetime.now()).isoformat()
        # in case we don't archive files..only look at today and yesterday's files.
        yesterday = date.strftime(datetime.utcnow() - timedelta(days=1), "%Y/%m/%d")
        today = date.strftime(datetime.utcnow(), "%Y/%m/%d")

        r = s.get(
            "https://{0}/api/v1/events?startDate={1}&limit={2}".format(
                options.oktadomain, toUTC(options.lastrun).strftime("%Y-%m-%dT%H:%M:%S.000Z"), options.recordlimit
            )
        )

        if r.status_code == 200:
            oktaevents = json.loads(r.text)
            for event in oktaevents:
                if "published" in event.keys():
                    if toUTC(event["published"]) > options.lastrun:
                        try:
                            mozdefEvent = dict()
                            mozdefEvent["utctimestamp"] = toUTC(event["published"]).isoformat()
                            mozdefEvent["category"] = "okta"
                            mozdefEvent["tags"] = ["okta"]
                            if "action" in event.keys() and "message" in event["action"].keys():
                                mozdefEvent["summary"] = event["action"]["message"]
                            mozdefEvent["details"] = event
                            jbody = json.dumps(mozdefEvent)
                            res = es.index(index="events", doc_type="okta", doc=jbody)
                            logger.debug(res)
                        except Exception as e:
                            logger.error("Error handling log record {0} {1}".format(r, e))
                            continue
                else:
                    logger.error("Okta event does not contain published date: {0}".format(event))
            setConfig("lastrun", lastrun, options.configfile)
    except Exception as e:
        logger.error("Unhandled exception, terminating: %r" % e)
Example #4
def discover():
    es = pyes.ES(server=(list('{0}'.format(s) for s in options.esservers)))
    indicesManager = pyes.managers.Indices(es)
    indices = indicesManager.get_indices()
    config_indices = []
    config_dobackup = []
    config_rotation = []
    config_pruning = []
    for index in indices.keys():
        index_template = index
        freq = 'none'
        pruning = '0'
        if re.search(r'-[0-9]{8}', index):
            freq = 'daily'
            pruning = '20'
            index_template = index[:-9]
        elif re.search(r'-[0-9]{6}', index):
            freq = 'monthly'
            index_template = index[:-7]
        if index_template not in config_indices:
            config_indices.append(index_template)
            config_dobackup.append('1')
            config_rotation.append(freq)
            config_pruning.append(pruning)
    setConfig('backup_indices', ','.join(config_indices), options.configfile)
    setConfig('backup_dobackup', ','.join(config_dobackup), options.configfile)
    setConfig('backup_rotation', ','.join(config_rotation), options.configfile)
    setConfig('backup_pruning', ','.join(config_pruning), options.configfile)
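
The two date-suffix regexes do all of the classification: an eight-digit suffix marks a daily index (pruned after 20 days), a six-digit suffix marks a monthly one, and anything else is never rotated. For illustration, with made-up index names:

import re

for index in ('events-20150115', 'complianceitems-201501', '.kibana'):
    if re.search(r'-[0-9]{8}', index):
        print('{0} -> daily, template {1}'.format(index, index[:-9]))
    elif re.search(r'-[0-9]{6}', index):
        print('{0} -> monthly, template {1}'.format(index, index[:-7]))
    else:
        print('{0} -> no rotation'.format(index))
# events-20150115 -> daily, template events
# complianceitems-201501 -> monthly, template complianceitems
# .kibana -> no rotation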
Example #5
def main():
    if options.output=='syslog':
        logger.addHandler(SysLogHandler(address=(options.sysloghostname,options.syslogport)))
    else:
        sh=logging.StreamHandler(sys.stderr)
        sh.setFormatter(formatter)
        logger.addHandler(sh)

    logger.debug('started')
    logger.debug(options)
    try:
        es=pyes.ES((list('{0}'.format(s) for s in options.esservers)))
        #capture the time we start running so next time we catch any files created while we run.
        lastrun=str(options.lastrun.isoformat())
        today=datetime.utcnow().isoformat()+'+00:00'
        url = options.mighost+'/api/v1/search?type=command&status=done&threatfamily=compliance&report=complianceitems&limit=1000000&before='+today+'&after='+lastrun
        url = url.replace('+00:00', 'Z')
        r = requests.get(url,
            cert=(options.sslclientcert, options.sslclientkey),
            verify=options.sslcacert)
        if r.status_code == 200:
            migjson=r.json()
            logger.debug(url)
            cicnt=0
            for items in migjson['collection']['items']:
                for dataitem in items['data']:
                    if dataitem['name'] == 'compliance item':
                        cicnt += 1
                        complianceitem = dataitem['value']
                        # historical data - index the new logs
                        res=es.index(index='complianceitems',doc_type='history',doc=complianceitem)
                        # last_known_state data - update the logs
                        # _id = md5('complianceitems'+check.ref+check.test.value+target)
                        docid=hashlib.md5('complianceitems'+complianceitem['check']['ref']+complianceitem['check']['test']['value']+complianceitem['target']).hexdigest()
                        res=es.index(index='complianceitems',id=docid,doc_type='last_known_state',doc=complianceitem)
            if cicnt == 0:
                logger.debug("No compliance item available, terminating")
            setConfig('lastrun',today,options.configfile)
    except Exception as e:
        logger.error("Unhandled exception, terminating: %r"%e)
Example #6
def makeAttackers():
    '''
    send events that will be correlated into attackers using pre-defined IPs
    '''
    try:
        #time for us to run?
        timetoRun = toUTC(options.lastattacker) + timedelta(
            minutes=options.attackersminutesinterval)
        if timetoRun > toUTC(datetime.now()):
            #print(timetoRun)
            return

        #print(timetoRun, options.lastalert)
        eventfiles = glob.glob(options.alertsglob)
        #send options.alertscount events
        for i in range(0, options.alertscount):
            #pick a random type of event to send
            eventfile = random.choice(eventfiles)
            with open(eventfile) as eventsfile:
                events = json.load(eventsfile)
            # randint is inclusive on both ends, so cap the index at
            # len(events) - 1 to keep it in range
            target = random.randint(0, len(events) - 1)
            for event in events[target:target + 1]:
                event['timestamp'] = pytz.timezone('UTC').localize(
                    datetime.utcnow()).isoformat()
                #remove stored times
                if 'utctimestamp' in event.keys():
                    del event['utctimestamp']
                if 'receivedtimestamp' in event.keys():
                    del event['receivedtimestamp']

                #add demo to the tags so it's clear it's not real data.
                if 'tags' not in event.keys():
                    event['tags'] = list()

                event['tags'].append('demodata')
                event['tags'].append('demoalert')

                #replace potential <randomipaddress> with a random ip address
                if 'summary' in event and '<randomipaddress>' in event['summary']:
                    randomIP = genAttackerIPv4()
                    event['summary'] = event['summary'].replace(
                        "<randomipaddress>", randomIP)
                    if 'details' not in event.keys():
                        event['details'] = dict()
                    event['details']['sourceipaddress'] = randomIP
                    event['details']['sourceipv4address'] = randomIP

                if 'duplicate' in event.keys():
                    # send this event multiple times to trigger an alert
                    for x in range(0, int(event['duplicate'])):
                        logcache.put(json.dumps(event))
                else:
                    logcache.put(json.dumps(event))
            lastattacker = toUTC(datetime.now()).isoformat()
            setConfig('lastattacker', lastattacker, options.configfile)
            if not logcache.empty():
                time.sleep(.01)
                try:
                    postingProcess = Process(target=postLogs,
                                             args=(logcache, ),
                                             name="json2MozdefDemoData")
                    postingProcess.start()
                except OSError as e:
                    if e.errno == 35:  # resource temporarily unavailable.
                        print(e)
                        pass
                    else:
                        logger.error('%r' % e)

    except KeyboardInterrupt as e:
        sys.exit(1)
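
genAttackerIPv4() is defined elsewhere in the demo script; the docstring's "pre-defined IPs" suggests it draws from a fixed pool so repeated events pile up on the same simulated attacker. A hypothetical sketch (the pool values here are invented):

import random

# hypothetical pool; the real list lives elsewhere in the demo script
ATTACKER_IPS = ['144.0.0.2', '144.0.0.3', '144.0.0.4']


def genAttackerIPv4():
    """Return one of a small fixed set of source IPs so repeated demo
    events correlate into the same attacker."""
    return random.choice(ATTACKER_IPS)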
Example #7
def main():
    if options.output == 'syslog':
        logger.addHandler(
            SysLogHandler(address=(options.sysloghostname,
                                   options.syslogport)))
    else:
        sh = logging.StreamHandler(sys.stderr)
        sh.setFormatter(formatter)
        logger.addHandler(sh)

    logger.debug('started')
    #logger.debug(options)
    try:
        es = pyes.ES((list('{0}'.format(s) for s in options.esservers)))
        s = requests.Session()
        s.headers.update({'Accept': 'application/json'})
        s.headers.update({'Content-type': 'application/json'})
        s.headers.update({'Authorization': 'SSWS {0}'.format(options.apikey)})

        #capture the time we start running so next time we catch any events created while we run.
        lastrun = toUTC(datetime.now()).isoformat()
        #in case we don't archive files..only look at today and yesterday's files.
        yesterday = date.strftime(datetime.utcnow() - timedelta(days=1),
                                  '%Y/%m/%d')
        today = date.strftime(datetime.utcnow(), '%Y/%m/%d')

        r = s.get('https://{0}/api/v1/events?startDate={1}&limit={2}'.format(
            options.oktadomain,
            toUTC(options.lastrun).strftime('%Y-%m-%dT%H:%M:%S.000Z'),
            options.recordlimit))

        if r.status_code == 200:
            oktaevents = json.loads(r.text)
            for event in oktaevents:
                if 'published' in event.keys():
                    if toUTC(event['published']) > options.lastrun:
                        try:
                            mozdefEvent = dict()
                            mozdefEvent['utctimestamp'] = toUTC(
                                event['published']).isoformat()
                            mozdefEvent['category'] = 'okta'
                            mozdefEvent['tags'] = ['okta']
                            if 'action' in event and 'message' in event['action']:
                                mozdefEvent['summary'] = event['action']['message']
                            mozdefEvent['details'] = event
                            jbody = json.dumps(mozdefEvent)
                            res = es.index(index='events',
                                           doc_type='okta',
                                           doc=jbody)
                            logger.debug(res)
                        except Exception as e:
                            logger.error(
                                'Error handling log record {0} {1}'.format(
                                    event, e))
                            continue
                else:
                    logger.error(
                        'Okta event does not contain published date: {0}'.format(event))
            setConfig('lastrun', lastrun, options.configfile)
    except Exception as e:
        logger.error("Unhandled exception, terminating: %r" % e)
Example #8
def main():
    if options.output == 'syslog':
        logger.addHandler(
            SysLogHandler(address=(options.sysloghostname,
                                   options.syslogport)))
    else:
        sh = logging.StreamHandler(sys.stderr)
        sh.setFormatter(formatter)
        logger.addHandler(sh)

    logger.debug('started')

    try:
        # capture the time we start running so next time we catch any events
        # created while we run.
        lastrun = toUTC(datetime.now()).isoformat()

        # get our credentials
        with open(options.jsoncredentialfile) as credentialfile:
            mozdefClient = json.loads(credentialfile.read())
        client_email = mozdefClient['client_email']
        private_key = mozdefClient['private_key']

        # set the oauth scope we will request
        scope = [
            'https://www.googleapis.com/auth/admin.reports.audit.readonly',
            'https://www.googleapis.com/auth/admin.reports.usage.readonly'
        ]

        # authorize our http object
        # we do this as a 'service account' so it's important
        # to specify the correct 'sub' option
        # or you will get access denied even with correct delegations/scope

        credentials = SignedJwtAssertionCredentials(client_email,
                                                    private_key,
                                                    scope=scope,
                                                    sub=options.impersonate)
        http = Http()
        credentials.authorize(http)

        # build a request to the admin sdk
        api = build('admin', 'reports_v1', http=http)
        response = api.activities().list(
            userKey='all',
            applicationName='login',
            startTime=toUTC(
                options.lastrun).strftime('%Y-%m-%dT%H:%M:%S.000Z'),
            maxResults=options.recordlimit).execute()

        # fix up the event craziness to a flatter format
        events = []
        if 'items' in response.keys():
            for i in response['items']:
                # flatten the sub dict/lists to pull out the good parts
                event = dict(category='google')
                event['tags'] = ['google', 'authentication']
                event['severity'] = 'INFO'
                event['summary'] = 'google authentication: '

                details = dict()
                for keyValue in flattenDict(i):
                    # change key/values like:
                    # actor.email=value
                    # to actor_email=value

                    # split only on the first '=' so values containing '=' survive
                    key, value = keyValue.split('=', 1)
                    key = key.replace('.', '_').lower()
                    details[key] = value

                # find important keys
                # and adjust their location/name
                if 'ipaddress' in details.keys():
                    # it's the source ip
                    details['sourceipaddress'] = details['ipaddress']
                    del details['ipaddress']

                if 'id_time' in details.keys():
                    event['timestamp'] = details['id_time']
                    event['utctimestamp'] = details['id_time']
                if 'events_name' in details.keys():
                    event['summary'] += details['events_name'] + ' '
                if 'actor_email' in details.keys():
                    event['summary'] += details['actor_email'] + ' '

                event['details'] = details
                events.append(event)

        # post events to mozdef
        logger.debug('posting {0} google events to mozdef'.format(len(events)))
        for e in events:
            requests.post(options.url, data=json.dumps(e))

        # record the time we started as
        # the start time for next time.
        setConfig('lastrun', lastrun, options.configfile)
    except Exception as e:
        logger.error("Unhandled exception, terminating: %r" % e)
Example #9
def main():
    if options.output == 'syslog':
        logger.addHandler(
            SysLogHandler(address=(options.sysloghostname,
                                   options.syslogport)))
    else:
        sh = logging.StreamHandler(sys.stderr)
        sh.setFormatter(formatter)
        logger.addHandler(sh)

    logger.debug('started')
    #logger.debug(options)
    try:
        es = pyes.ES((list('{0}'.format(s) for s in options.esservers)))
        boto.connect_cloudtrail(
            aws_access_key_id=options.aws_access_key_id,
            aws_secret_access_key=options.aws_secret_access_key)
        #capture the time we start running so next time we catch any files created while we run.
        lastrun = toUTC(datetime.now()).isoformat()
        #in case we don't archive files..only look at today and yesterday's files.
        yesterday = date.strftime(datetime.utcnow() - timedelta(days=1),
                                  '%Y/%m/%d')
        today = date.strftime(datetime.utcnow(), '%Y/%m/%d')
        for region in boto.cloudtrail.regions():
            logger.debug('connecting to AWS region {0}'.format(region.name))
            ct = boto.cloudtrail.connect_to_region(
                region.name,
                aws_access_key_id=options.aws_access_key_id,
                aws_secret_access_key=options.aws_secret_access_key)
            trails = ct.describe_trails()['trailList']

            for trail in trails:
                s3 = boto.connect_s3(
                    aws_access_key_id=options.aws_access_key_id,
                    aws_secret_access_key=options.aws_secret_access_key)
                ctbucket = s3.get_bucket(trail['S3BucketName'])
                #ctbucket.get_all_keys()
                filelist = list()
                for bfile in ctbucket.list():

                    if 'CloudTrail' in bfile.key and 'json' in bfile.key:
                        if today in bfile.key or yesterday in bfile.key:
                            filelist.append(bfile.key)
                        else:
                            if options.purge:  #delete old files so we don't try to keep reading them.
                                s3file = ctbucket.get_key(bfile.key)
                                s3file.delete()
                for afile in filelist:
                    s3file = ctbucket.get_key(afile)
                    logger.debug('{0} {1}'.format(afile, s3file.last_modified))

                    if toUTC(s3file.last_modified) > options.lastrun:
                        compressedData = s3file.read()
                        databuf = StringIO(compressedData)
                        f = gzip.GzipFile(fileobj=databuf)
                        jlog = json.loads(f.read())
                        try:
                            for r in jlog['Records']:
                                r['utctimestamp'] = toUTC(
                                    r['eventTime']).isoformat()
                                jbody = json.dumps(r)
                                res = es.index(index='events',
                                               doc_type='cloudtrail',
                                               doc=jbody)
                                #logger.debug(res)
                        except Exception as e:
                            logger.error(
                                'Error handling log record {0} {1}'.format(
                                    r, e))
                            continue
            setConfig('lastrun', lastrun, options.configfile)
    except boto.exception.NoAuthHandlerFound:
        logger.error("No auth handler found, check your credentials")
    except Exception as e:
        logger.error("Unhandled exception, terminating: %r" % e)
Example #10
def makeAttackers():
    '''
    send events that will be correlated into attackers using pre-defined IPs
    '''
    try:
        #time for us to run?
        timetoRun=toUTC(options.lastattacker) + timedelta(minutes=options.attackersminutesinterval)
        if timetoRun > toUTC(datetime.now()):
            #print(timetoRun)
            return
        
        #print(timetoRun, options.lastalert)
        eventfiles = glob.glob(options.alertsglob)
        #send options.alertscount events
        for i in range(0, options.alertscount):
            #pick a random type of event to send
            eventfile = random.choice(eventfiles)
            with open(eventfile) as eventsfile:
                events = json.load(eventsfile)
            # randint is inclusive on both ends, so cap the index at
            # len(events) - 1 to keep it in range
            target = random.randint(0, len(events) - 1)
            for event in events[target:target+1]:
                event['timestamp'] = pytz.timezone('UTC').localize(datetime.utcnow()).isoformat()
                #remove stored times
                if 'utctimestamp' in event.keys():
                    del event['utctimestamp']
                if 'receivedtimestamp' in event.keys():
                    del event['receivedtimestamp']
                
                #add demo to the tags so it's clear it's not real data.
                if 'tags' not in event.keys():
                    event['tags'] = list()
                
                event['tags'].append('demodata')
                event['tags'].append('demoalert')
                
                #replace potential <randomipaddress> with a random ip address
                if 'summary' in event.keys() and '<randomipaddress>' in event['summary']:
                    randomIP = genAttackerIPv4()
                    event['summary'] = event['summary'].replace("<randomipaddress>", randomIP)
                    if 'details' not in event.keys():
                        event['details'] = dict()
                    event['details']['sourceipaddress'] = randomIP
                    event['details']['sourceipv4address'] = randomIP

                if 'duplicate' in event.keys():
                    # send this event multiple times to trigger an alert
                    for x in range(0, int(event['duplicate'])):
                        logcache.put(json.dumps(event))
                else:
                    logcache.put(json.dumps(event))
            lastattacker=toUTC(datetime.now()).isoformat()
            setConfig('lastattacker',lastattacker,options.configfile)
            if not logcache.empty():
                time.sleep(.01)
                try:
                    postingProcess=Process(target=postLogs,args=(logcache,),name="json2MozdefDemoData")
                    postingProcess.start()
                except OSError as e:
                    if e.errno==35: #resource temporarily unavailable.
                        print(e)
                        pass
                    else:
                        logger.error('%r'%e)


    except KeyboardInterrupt as e:
        sys.exit(1)   
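
logcache is a multiprocessing.Queue shared with the postLogs worker spawned above. A hypothetical sketch of the consumer side, draining queued JSON bodies and POSTing them to a MozDef HTTP endpoint (the options.url name is an assumption borrowed from the Google examples):

import requests


def postLogs(logcache):
    """Drain queued JSON event bodies and POST them to MozDef."""
    while not logcache.empty():
        postdata = logcache.get()
        if postdata:
            # module-level options, as in the scripts above; option name assumed
            requests.post(options.url, data=postdata)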
Example #11
def main():
    if options.output=='syslog':
        logger.addHandler(SysLogHandler(address=(options.sysloghostname,options.syslogport)))
    else:
        sh=logging.StreamHandler(sys.stderr)
        sh.setFormatter(formatter)
        logger.addHandler(sh)

    logger.debug('started')

    try:
        # capture the time we start running so next time we catch any events
        # created while we run.
        lastrun=toUTC(datetime.now()).isoformat()

        # get our credentials
        with open(options.jsoncredentialfile) as credentialfile:
            mozdefClient=json.loads(credentialfile.read())
        client_email = mozdefClient['client_email']
        private_key=mozdefClient['private_key']

        # set the oauth scope we will request
        scope=[
            'https://www.googleapis.com/auth/admin.reports.audit.readonly',
            'https://www.googleapis.com/auth/admin.reports.usage.readonly'
        ]

        # authorize our http object
        # we do this as a 'service account' so it's important
        # to specify the correct 'sub' option
        # or you will get access denied even with correct delegations/scope

        credentials = SignedJwtAssertionCredentials(client_email,
                                                    private_key,
                                                    scope=scope,
                                                    sub=options.impersonate)
        http = Http()
        credentials.authorize(http)

        # build a request to the admin sdk
        api = build('admin', 'reports_v1', http=http)
        response = api.activities().list(userKey='all',
                                         applicationName='login',
                                         startTime=toUTC(options.lastrun).strftime('%Y-%m-%dT%H:%M:%S.000Z'),
                                         maxResults=options.recordlimit).execute()

        # fix up the event craziness to a flatter format
        events=[]
        if 'items' in response.keys():
            for i in response['items']:
                # flatten the sub dict/lists to pull out the good parts
                event=dict(category='google')
                event['tags']=['google','authentication']
                event['severity']='INFO'
                event['summary']='google authentication: '

                details=dict()
                for keyValue in flattenDict(i):
                    # change key/values like:
                    # actor.email=value
                    # to actor_email=value

                    # split only on the first '=' so values containing '=' survive
                    key, value = keyValue.split('=', 1)
                    key=key.replace('.','_').lower()
                    details[key]=value

                # find important keys
                # and adjust their location/name
                if 'ipaddress' in details.keys():
                    # it's the source ip
                    details['sourceipaddress']=details['ipaddress']
                    del details['ipaddress']

                if 'id_time' in details.keys():
                    event['timestamp']=details['id_time']
                    event['utctimestamp']=details['id_time']
                if 'events_name' in details.keys():
                    event['summary']+= details['events_name'] + ' '
                if 'actor_email' in details.keys():
                    event['summary']+= details['actor_email'] + ' '

                event['details']=details
                events.append(event)

        # post events to mozdef
        logger.debug('posting {0} google events to mozdef'.format(len(events)))
        for e in events:
            requests.post(options.url,data=json.dumps(e))

        # record the time we started as
        # the start time for next time.
        setConfig('lastrun',lastrun,options.configfile)
    except Exception as e:
        logger.error("Unhandled exception, terminating: %r"%e)