Example #1
def ingest(table_name, options: dict):
    landing_table = f'data.{table_name}'
    connection_type = options['connection_type']

    aws_access_key = options.get('aws_access_key')
    aws_secret_key = options.get('aws_secret_key')

    source_role_arn = options.get('source_role_arn')
    destination_role_name = options.get('destination_role_name')
    external_id = options.get('external_id')
    accounts_connection_name = options.get('accounts_connection_name', '')

    if not accounts_connection_name.startswith('data.'):
        accounts_connection_name = 'data.' + accounts_connection_name

    ingest_of_type = {
        'EC2': ec2_dispatch,
        'SG': sg_dispatch,
        'ELB': elb_dispatch,
        'IAM': iam_dispatch,
    }[connection_type]

    if (source_role_arn and destination_role_name and external_id
            and accounts_connection_name):
        # get accounts list, pass list into ingest ec2
        query = (f"SELECT account_id, account_alias "
                 f"FROM {accounts_connection_name} "
                 f"WHERE created_at = ("
                 f"  SELECT MAX(created_at)"
                 f"  FROM {accounts_connection_name}"
                 f")")
        accounts = db.fetch(query)
        count = ingest_of_type(
            landing_table,
            accounts=accounts,
            source_role_arn=source_role_arn,
            destination_role_name=destination_role_name,
            external_id=external_id,
        )

    elif aws_access_key and aws_secret_key:
        count = ingest_of_type(landing_table,
                               aws_access_key=aws_access_key,
                               aws_secret_key=aws_secret_key)
    else:
        # neither an assumable role nor static AWS credentials were configured
        log.error('Connection options are missing AWS credentials or role; skipping ingest.')
        return

    log.info(f'Inserted {count} rows.')
    yield count
Example #2
async def process_task(task,
                       add_task) -> AsyncGenerator[Tuple[str, dict], None]:
    account_arn = f'arn:aws:iam::{task.account_id}:role/{AUDIT_READER_ROLE}'
    account_info = {'account_id': task.account_id}

    client_name, method_name = task.method.split('.', 1)

    try:
        # reuse one cached STS session per account role, assuming it only once
        if account_arn not in _SESSION_CACHE:
            _SESSION_CACHE[account_arn] = await aio_sts_assume_role(
                src_role_arn=AUDIT_ASSUMER_ARN,
                dest_role_arn=account_arn,
                dest_external_id=READER_EID,
            )
        session = _SESSION_CACHE[account_arn]
        async with session.client(client_name) as client:
            if hasattr(client, 'describe_regions'):
                response = await client.describe_regions()
                region_names = [
                    region['RegionName'] for region in response['Regions']
                ]
            else:
                region_names = API_METHOD_SPECS[task.method].get(
                    'regions', [None])

        for rn in region_names:
            async with session.client(client_name, region_name=rn) as client:
                async for response in load_task_response(client, task):
                    if type(response) is DBEntry:
                        if rn is not None:
                            response.entity['region'] = rn
                        yield (task.method, response.entity)
                    elif type(response) is CollectTask:
                        add_task(response)
                    else:
                        log.info('log response', response)

    except ClientError as e:
        # record missing auditor role as empty account summary
        log.error(e, 'failed processing task')
        yield (
            task.method,
            updated(
                account_info,
                recorded_at=parse_date(
                    e.response['ResponseMetadata']['HTTPHeaders']['date']),
            ),
        )
Example #3
def append_to_body(id, alert):
    if not user:
        return
    issue = jira.issue(id)
    description = get_ticket_description(issue)
    log.info(f"Appending data to ticket {id}")
    description = description + "\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n"
    alert['SOURCES'] = ', '.join(alert['SOURCES'])
    alert['EVENT_DATA'] = yaml.dump(alert['EVENT_DATA'], indent=4, default_flow_style=False)
    escaped_locals_strings = {k: escape_jira_strings(v) for k, v in alert.items()}
    sources = escaped_locals_strings['SOURCES']
    escaped_locals_strings['SOURCES'] = f'[{sources}|{link_search_todos(f"SOURCES: {sources}")}]'
    jira_body = {**JIRA_TICKET_BODY_DEFAULTS, **escaped_locals_strings}
    description = description + JIRA_TICKET_BODY_FMT.format(**jira_body)

    issue.update(description=description)
Example #4
def create_jira_ticket(alert,
                       assignee=None,
                       custom_field=None,
                       project=PROJECT,
                       issue_type=ISSUE_TYPE):
    if not user:
        return

    try:
        alert['EVENT_DATA'] = yaml.dump(alert['EVENT_DATA'],
                                        indent=4,
                                        default_flow_style=False)
    except Exception as e:
        log.error("Error while creating ticket", e)

    body = jira_ticket_body(alert, project)

    log.info(
        f'Creating new JIRA ticket for "{alert["TITLE"]}" in project {project}'
    )

    issue_params = {
        'project': project,
        'issuetype': {
            'name': issue_type
        },
        'summary': alert['TITLE'],
        'description': body,
    }

    env_fields = os.environ.get('SA_JIRA_CUSTOM_FIELDS')
    if env_fields:
        custom_fields = [f.split('=') for f in env_fields.split(';')]
        for field_id, field_value in custom_fields:
            issue_params[f'customfield_{field_id}'] = {'value': field_value}

    if custom_field:
        issue_params[f'customfield_{custom_field["id"]}'] = {
            'value': custom_field['value']
        }

    new_issue = jira.create_issue(**issue_params)

    if assignee:
        jira.assign_issue(new_issue, assignee)

    return new_issue
Example #5
def get_all_v2_elbs(aws_access_key=None,
                    aws_secret_key=None,
                    session=None,
                    account=None):
    """
    This function grabs each v2 elb from each region and returns
    a list of them.
    """
    regions = boto3.client(
        'ec2',
        aws_access_key_id=aws_access_key,
        aws_secret_access_key=aws_secret_key).describe_regions()['Regions']

    log.info(f"Searching {len(regions)} region(s) for modern load balancers.")

    # get list of all load balancers in each region
    elbs = []
    for region in regions:
        if session:
            elb_client = session.client('elbv2',
                                        region_name=region['RegionName'])
        else:
            elb_client = boto3.client(
                'elbv2',
                aws_access_key_id=aws_access_key,
                aws_secret_access_key=aws_secret_key,
                region_name=region['RegionName'],
            )
        for elb in elb_client.describe_load_balancers()['LoadBalancers']:
            # add region
            elb["Region"] = region
            if account:
                elb["Account"] = account

            # add listeners to see which SSL policies are attached to this elb
            elb_arn = elb['LoadBalancerArn']
            listeners = elb_client.describe_listeners(LoadBalancerArn=elb_arn)
            elb["Listeners"] = listeners  # add listeners as field in the ELB
            # JSON round trip turns datetime values in the boto3 response into strings
            elb = json.dumps(elb, default=datetime_serializer).encode("utf-8")
            elb = json.loads(elb)
            elbs.append(elb)

    # return list of load balancers
    log.info(
        f"Successfully serialized {len(elbs)} modern elastic load balancer(s)."
    )
    return elbs
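A note on the JSON round trip above: several of these snippets call json.dumps(..., default=datetime_serializer) to strip datetime objects out of boto3 responses. The helper itself is not shown on this page; a minimal sketch, assuming it only needs to handle datetimes:

from datetime import datetime

def datetime_serializer(obj):
    # hypothetical helper: render datetime values as ISO-8601 strings so that
    # json.dumps can serialize boto3 responses; anything else is an error
    if isinstance(obj, datetime):
        return obj.isoformat()
    raise TypeError(f"{type(obj)} is not JSON serializable")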
Example #6
def connection_run(connection_table):
    table_name = connection_table['name']
    table_comment = connection_table['comment']

    log.info(f"-- START DC {table_name} --")
    try:
        metadata = {'START_TIME': datetime.utcnow()}
        options = yaml.load(table_comment) or {}

        if 'module' in options:
            module = options['module']

            metadata.update({
                'RUN_ID': RUN_ID,
                'TYPE': module,
                'LANDING_TABLE': table_name
            })

            connector = importlib.import_module(f"connectors.{module}")

            for module_option in connector.CONNECTION_OPTIONS:
                name = module_option['name']
                if module_option.get('secret') and name in options:
                    options[name] = vault.decrypt_if_encrypted(options[name])
                if module_option.get('type') == 'json':
                    options[name] = json.loads(options[name])
                if module_option.get('type') == 'list':
                    if type(options[name]) is str:
                        options[name] = options[name].split(',')
                if module_option.get('type') == 'int':
                    options[name] = int(options[name])

            if callable(getattr(connector, 'ingest', None)):
                db.record_metadata(metadata, table=DC_METADATA_TABLE)
                result = do_ingest(connector, table_name, options)
                if result is not None:
                    metadata['INGEST_COUNT'] = result
                else:
                    metadata['INGESTED'] = result

            db.record_metadata(metadata, table=DC_METADATA_TABLE)

    except Exception as e:
        log.error(f"Error loading logs into {table_name}: ", e)
        db.record_metadata(metadata, table=DC_METADATA_TABLE, e=e)

    log.info(f"-- END DC --")
Example #7
def main(connection_table="%_CONNECTION"):
    for table in db.fetch(f"SHOW TABLES LIKE '{connection_table}' IN data"):
        table_name = table['name']
        table_comment = table['comment']

        log.info(f"-- START DC {table_name} --")
        try:
            metadata = {'START_TIME': datetime.utcnow()}
            options = yaml.load(table_comment) or {}

            if 'module' in options:
                module = options['module']

                metadata.update({
                    'RUN_ID': RUN_ID,
                    'TYPE': module,
                    'LANDING_TABLE': table_name,
                    'INGEST_COUNT': 0
                })

                connector = importlib.import_module(f"connectors.{module}")

                for module_option in connector.CONNECTION_OPTIONS:
                    name = module_option['name']
                    if module_option.get('secret') and name in options:
                        options[name] = vault.decrypt_if_encrypted(options[name])
                    if module_option.get('type') == 'json':
                        options[name] = json.loads(options[name])

                if callable(getattr(connector, 'ingest', None)):
                    ingested = connector.ingest(table_name, options)
                    if isinstance(ingested, int):
                        metadata['INGEST_COUNT'] += ingested
                    elif isinstance(ingested, GeneratorType):
                        for n in ingested:
                            metadata['INGEST_COUNT'] += n
                    else:
                        metadata['INGESTED'] = ingested

                db.record_metadata(metadata, table=DC_METADATA_TABLE)

        except Exception as e:
            log.error(f"Error loading logs into {table_name}: ", e)
            db.record_metadata(metadata, table=DC_METADATA_TABLE, e=e)

        log.info(f"-- END DC --")
Example #8
def get_data(accounts_list):
    start = datetime.datetime.now()
    instance_list_list = Pool(4).map(get_data_worker, accounts_list)
    instance_list = [x for l in instance_list_list if l for x in l]
    if instance_list:
        sf_client = get_snowflake_client()
        instance_groups = groups_of(15000, instance_list)
        snapshot_time = datetime.datetime.utcnow().isoformat()
        for group in instance_groups:
            query = LOAD_INSTANCE_LIST_QUERY.format(
                snapshotclock=snapshot_time,
                format_string=", ".join(["(%s)"] * len(group)))
            sf_client.cursor().execute(query, group)

    end = datetime.datetime.now()
    log.info(
        f"start: {start} end: {end} total: {(end - start).total_seconds()}")
Example #9
async def fetch(session, url, fetch_over=0) -> dict:
    if fetch_over:
        await asyncio.sleep(fetch_over * random())
    async with session.get(f'https://snowflake.jamfcloud.com/JSSResource{url}',
                           headers=HEADERS) as response:
        txt = await response.text()
        date_header = response.headers.get('Date')
        if date_header is None:
            log.info(f'GET {url} -> status({response.status}) text({txt})')
            return {}

        result = {'recorded_at': parse_date(date_header)}
        try:
            return updated(result, json.loads(txt))
        except JSONDecodeError:
            log.info(f'GET {url} -> status({response.status}) text({txt})')
            return result
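The updated helper seen here and in Examples #2 and #21 is also not defined on this page. A plausible sketch, assuming it merges mappings and keyword arguments into the first dict and returns it so it can be used inside expressions:

def updated(d=None, *others, **kwargs):
    # hypothetical helper: dict.update that returns the dict it modified
    d = {} if d is None else d
    for other in others:
        d.update(other)
    d.update(kwargs)
    return d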
Example #10
def create_jira_ticket(alert):
    if not user:
        return

    try:
        alert['EVENT_DATA'] = yaml.dump(alert['EVENT_DATA'], indent=4, default_flow_style=False)
    except Exception as e:
        log.error("Error while creating ticket", e)

    body = jira_ticket_body(alert)

    log.info(f'Creating new JIRA ticket for "{alert["TITLE"]}" in project {PROJECT}')
    new_issue = jira.create_issue(project=PROJECT,
                                  issuetype={'name': 'Story'},
                                  summary=alert['TITLE'],
                                  description=body)
    return new_issue
Example #11
def main(target="all", rule_name=None):
    if target == "connector" and rule_name:
        connectors_runner.main(rule_name.upper())

    elif target == "processor":
        alert_processor.main()

    elif target == "dispatcher":
        alert_dispatcher.main()

    elif rule_name:
        if rule_name.endswith("_ALERT_QUERY"):
            alert_queries_runner.main(rule_name.upper())

        if rule_name.endswith("_ALERT_SUPPRESSION"):
            alert_suppressions_runner.main(rule_name.upper())

        if rule_name.endswith("_VIOLATION_QUERY"):
            violation_queries_runner.main(rule_name.upper())

        if rule_name.endswith("_VIOLATION_SUPPRESSION"):
            violation_suppressions_runner.main(rule_name.upper())

    else:
        log.info(f"STARTING RUN WITH ID {RUN_ID}")
        log.info(f"got command {target}")
        if target in ['alert', 'alerts', 'all']:
            alert_queries_runner.main()
            alert_suppressions_runner.main()
            alert_processor.main()
            alert_dispatcher.main()

        if target in ['violation', 'violations', 'all']:
            violation_queries_runner.main()
            violation_suppressions_runner.main()

        if target in ['ingest']:
            ingest_runner.main()
            connectors_runner.main()

        if target in ['connectors']:
            connectors_runner.main()

        if target in ['baseline', 'baselines']:
            baseline_runner.main()
Example #12
def run_suppression_query(squelch_name):
    try:
        query = SUPPRESSION_QUERY.format(suppression_name=squelch_name)
        return next(db.fetch(query, fix_errors=False))['number of rows updated']

    except Exception as e:
        log.info(e, f"{squelch_name} query broken, attempting fallback")
        query = OLD_SUPPRESSION_QUERY.format(suppression_name=squelch_name)
        try:
            result = next(db.fetch(query))
        except StopIteration:
            result = None

        if not result:
            # if neither query worked, re-raise the original error
            raise

        return result['number of rows updated']
Example #13
def ingest_agents(table_name, options):
    last_export_time = next(
        db.fetch(
            f'SELECT MAX(export_at) as time FROM data.{table_name}'))['TIME']
    timestamp = datetime.now(timezone.utc)

    if (last_export_time is None
            or (timestamp - last_export_time).total_seconds() > 86400):
        all_agents = sorted(get_agent_data(),
                            key=lambda a: a.get('last_connect', 0))
        unique_agents = {a['uuid']: a for a in all_agents}.values()
        rows = [{'raw': ua, 'export_at': timestamp} for ua in unique_agents]
        log.debug(f'inserting {len(unique_agents)} unique (by uuid) agents')
        db.insert(f'data.{table_name}', rows)
        return len(rows)
    else:
        log.info('Not time to import Tenable Agents')
        return 0
Example #14
def ingest_agents(table_name, options):
    last_export_time = next(
        db.fetch(
            f'SELECT MAX(export_at) as time FROM data.{table_name}'))['TIME']
    timestamp = datetime.now(timezone.utc)

    if (last_export_time is None
            or (timestamp - last_export_time).total_seconds() > 86400):
        agents = {a['uuid']: a for a in get_agent_data()}.values()
        for page in groups_of(10000, agents):
            db.insert(
                table=f'data.{table_name}',
                values=[(agent, timestamp) for agent in page],
                select=db.derive_insert_select(AGENT_LANDING_TABLE),
                columns=db.derive_insert_columns(AGENT_LANDING_TABLE),
            )
    else:
        log.info('Not time to import Tenable Agents')
Example #15
def main():
    url = OKTA_URL
    log.info("starting loop")
    timestamp = get_timestamp()
    while 1:
        log.info(f"url is {url}")
        try:
            r = requests.get(url=url, headers=HEADERS, params=timestamp)
            if r.status_code != 200:
                log.fatal('OKTA REQUEST FAILED: ', r.text)
            process_logs(json.loads(r.text))
            if len(r.text) == 2:  # an empty JSON array '[]' means no more logs
                break
            # follow the rel="next" URL from Okta's Link header
            url = r.headers['Link'].split(', ')[1].split(';')[0][1:-1]
        except Exception as e:
            log.error("Error with Okta logs: ", e)

    alooma_pysdk.terminate()
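The pagination line url = r.headers['Link'].split(', ')[1].split(';')[0][1:-1] assumes Okta's usual two-part Link header and keeps the rel="next" URL. On a hypothetical header value it behaves like this:

# hypothetical Link header in the shape Okta returns
link = ('<https://example.okta.com/api/v1/logs?since=2020-01-01>; rel="self", '
        '<https://example.okta.com/api/v1/logs?after=abc123>; rel="next"')
next_url = link.split(', ')[1].split(';')[0][1:-1]
# next_url == 'https://example.okta.com/api/v1/logs?after=abc123'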
Example #16
def get_all_v1_elbs(aws_access_key=None,
                    aws_secret_key=None,
                    session=None,
                    account=None):
    """
    This function grabs each classic elb from each region and returns
    a list of them.
    """
    regions = boto3.client(
        'ec2',
        aws_access_key_id=aws_access_key,
        aws_secret_access_key=aws_secret_key).describe_regions()['Regions']

    log.info(f"Searching {len(regions)} region(s) for classic load balancers.")

    # get list of all load balancers in each region
    elbs = []
    for region in regions:
        if session:
            elb_client = session.client('elb',
                                        region_name=region['RegionName'])
        else:
            elb_client = boto3.client(
                'elb',
                aws_access_key_id=aws_access_key,
                aws_secret_access_key=aws_secret_key,
                region_name=region['RegionName'],
            )
        response = elb_client.describe_load_balancers()
        for elb in response['LoadBalancerDescriptions']:
            # add region before adding elb to list of elbs
            elb["Region"] = region
            if account:
                elb["Account"] = account
            elb_str = json.dumps(elb, default=datetime_serializer).encode(
                "utf-8")  # for the datetime ser fix
            elb = json.loads(elb_str)
            elbs.append(elb)

    # return list of load balancers
    log.info(
        f"Successfully serialized {len(elbs)} classic elastic load balancer(s)."
    )
    return elbs
Example #17
def get_data_worker(account):
    try:
        ec2_session = get_aws_client(account)
        instances = []
        try:
            ec2_regions = [
                region['RegionName'] for region in ec2_session.client(
                    'ec2').describe_regions()['Regions']
            ]
        except Exception as e:
            log.error(
                f"ec2_describe_reserved_instances: account: {account[1]} exception: {e}"
            )
            return None
        for region in ec2_regions:
            try:
                client = ec2_session.client('ec2', region_name=region)
                response = client.describe_reserved_instances()
                region_list = [
                    instance for instance in response['ReservedInstances']
                ]
                for i in region_list:
                    i['region'] = region
                instances.extend(region_list)
            except Exception as e:
                log.error(
                    f"ec2_describe_reserved_instances: account: {account[1]} exception: {e}"
                )
                return None
        instance_list = [
            json.dumps({
                **instance, "AccountId": account[0]
            }, default=str) for instance in instances
        ]
        log.info(
            f"ec2_describe_reserved_instances: account: {account[1]} instances: {len(instance_list)}"
        )
        return instance_list
    except Exception as e:
        log.error(
            f"ec2_describe_reserved_instances: account: {account[1]} exception: {e}"
        )
        return None
Example #18
def get_all_security_groups(aws_access_key=None,
                            aws_secret_key=None,
                            session=None,
                            account=None):
    """
    This function grabs each security group from each region and returns
    a list of the security groups.

    Each security group is manually given a 'Region' field for clarity
    """

    regions = boto3.client(
        'ec2',
        aws_access_key_id=aws_access_key,
        aws_secret_access_key=aws_secret_key).describe_regions()['Regions']

    log.info(f"Searching for Security Groups in {len(regions)} region(s).")

    # get list of all groups in each region
    security_groups = []
    for region in regions:
        if session:
            ec2 = session.client('ec2', region_name=region['RegionName'])
        else:
            ec2 = boto3.client(
                'ec2',
                aws_access_key_id=aws_access_key,
                aws_secret_access_key=aws_secret_key,
                region_name=region['RegionName'],
            )
        for group in ec2.describe_security_groups()['SecurityGroups']:
            group["Region"] = region
            if account:
                group["Account"] = account
            group_str = json.dumps(group, default=datetime_serializer).encode(
                "utf-8")  # for the boto3 datetime fix
            group = json.loads(group_str)
            security_groups.append(group)

    # return list of groups
    log.info(
        f"Successfully serialized {len(security_groups)} security group(s).")
    return security_groups
Example #19
def get_ec2_instances(aws_access_key=None,
                      aws_secret_key=None,
                      session=None,
                      account=None):
    client = boto3.client(
        'ec2',
        aws_access_key_id=aws_access_key,
        aws_secret_access_key=aws_secret_key,
        region_name=REGION,
    )
    regions = client.describe_regions()['Regions']

    log.info(f"Searching for EC2 instances in {len(regions)} region(s).")

    # get list of all instances in each region
    instances = []
    for region in regions:
        if session:
            client = session.client('ec2', region_name=region['RegionName'])
        else:
            client = boto3.client(
                'ec2',
                aws_access_key_id=aws_access_key,
                aws_secret_access_key=aws_secret_key,
                region_name=region['RegionName'],
            )
        paginator = client.get_paginator('describe_instances')
        page_iterator = paginator.paginate()
        results = [
            instance for page in page_iterator
            for instance_array in page['Reservations']
            for instance in instance_array['Instances']
        ]
        for instance in results:
            instance['Region'] = region
            instance['Name'] = get_ec2_instance_name(instance)
            if account:
                instance['Account'] = account
        instances.extend(results)

    # return list of instances
    log.info(f"Successfully serialized {len(instances)} EC2 instance(s).")
    return instances
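get_ec2_instance_name is not shown in these examples; a minimal sketch, assuming it just looks up the instance's Name tag:

def get_ec2_instance_name(instance):
    # hypothetical helper: return the value of the 'Name' tag, if present
    for tag in instance.get('Tags', []):
        if tag.get('Key') == 'Name':
            return tag.get('Value')
    return None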
Example #20
def ingest_vulns(table_name):
    last_export_time = next(
        db.fetch(
            f'SELECT MAX(export_at) as time FROM data.{table_name}'))['TIME']
    timestamp = datetime.now(timezone.utc)

    if (last_export_time is None
            or (timestamp - last_export_time).total_seconds() > 86400):
        log.info("Exporting vulnerabilities...")
        vulns = TIO.exports.vulns()

        for page in groups_of(10000, vulns):
            db.insert(
                table=f'data.{table_name}',
                values=[(vuln, timestamp) for vuln in page],
                select=db.derive_insert_select(VULN_LANDING_TABLE),
                columns=db.derive_insert_columns(VULN_LANDING_TABLE),
            )
    else:
        log.info('Not time to import Tenable vulnerabilities yet')
Example #21
async def main(table_name):
    async with aiohttp.ClientSession() as session:
        cids = [
            c['id']
            for c in (await fetch(session, '/computers')).get('computers', [])
        ]

        log.info(f'loading {len(cids)} computer details')
        computers = await asyncio.gather(
            *[fetch_computer(session, cid) for cid in cids])

        log.info(f'inserting {len(computers)} computers into {table_name}')
        rows = [
            updated(c.get('computer'),
                    computer_id=cid,
                    recorded_at=c.get('recorded_at'))
            for cid, c in zip(cids, computers)
        ]
        db.insert(table_name, rows)
        return len(rows)
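fetch_computer is not shown either; given the fetch coroutine in Example #9, it is plausibly a thin per-computer GET. The /computers/id/{cid} path is an assumption based on the Jamf classic API:

async def fetch_computer(session, cid, fetch_over=0):
    # hypothetical wrapper around fetch() from Example #9
    return await fetch(session, f'/computers/id/{cid}', fetch_over=fetch_over)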
Example #22
def get_all_elbs(aws_access_key=None, aws_secret_key=None, session=None, account=None):
    v1_elbs = get_all_v1_elbs(
        aws_access_key=aws_access_key,
        aws_secret_key=aws_secret_key,
        session=session,
        account=account,
    )
    v2_elbs = get_all_v2_elbs(
        aws_access_key=aws_access_key,
        aws_secret_key=aws_secret_key,
        session=session,
        account=account,
    )
    elbs = v1_elbs + v2_elbs

    if not elbs:
        log.info("no elastic load balancers found")
        return []

    return elbs
Example #23
def process_endpoint(endpoint):
    log.info(f"starting {endpoint}")
    json_body = {'links': {'next': {'href': endpoint}}}
    page = 1
    while json_body['links']['next'] is not None:
        log.info(f"Getting page {str(page)}")

        r = get(json_body['links']['next']['href'])
        if r.status_code != 200:
            log.error(f"Ingest request for {endpoint} failed", r.text)
            db.record_failed_ingestion(ZENGRC_TABLE, r, TIMESTAMP)
            break

        json_body = r.json()
        data = [[json.dumps(i), TIMESTAMP] for i in json_body['data']]
        try:
            db.insert(ZENGRC_TABLE, data, select='PARSE_JSON(column1), column2')
            page += 1
        except Exception as e:
            log.error(e)
Example #24
def handle(
    alert,
    summary=None,
    source=None,
    dedup_key=None,
    severity=None,
    custom_details=None,
    pd_api_token=None,
):
    if 'PD_API_TOKEN' not in os.environ and pd_api_token is None:
        log.error(f"No PD_API_TOKEN in env, skipping handler.")
        return None

    pd_token_ct = pd_api_token or os.environ['PD_API_TOKEN']
    pd_token = vault.decrypt_if_encrypted(pd_token_ct)

    pds = EventsAPISession(pd_token)

    summary = summary or alert['DESCRIPTION']

    source = source or alert['DETECTOR']

    severity = severity or alert['SEVERITY']
    if severity not in severityDictionary:
        log.warn(
            f"Set severity to {severityDictionary[-1]}, "
            f"supplied {severity} is not in allowed values: {severityDictionary}"
        )
        severity = severityDictionary[-1]

    custom_details = custom_details or alert

    try:
        response = pds.trigger(
            summary, source, dedup_key, severity, custom_details=custom_details
        )
        log.info(f"triggered PagerDuty alert \"{summary}\" at severity {severity}")
        return response
    except PDClientError as e:
        log.error(f"Cannot trigger PagerDuty alert: {e.msg}")
        return None
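The handler falls back to severityDictionary[-1] when the alert's severity is not recognized. The list itself is not shown; presumably it holds the severities the PagerDuty Events API v2 accepts, with the fallback value last:

# hypothetical definition; the handler only requires that the last entry be
# the default severity used when the supplied value is not allowed
severityDictionary = ['critical', 'error', 'warning', 'info']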
Example #25
def ingest(table_name, options):
    landing_table = f'data.{table_name}'
    aws_access_key = options['aws_access_key']
    aws_secret_key = options['aws_secret_key']
    connection_type = options['connection_type']

    regions = boto3.client(
        'ec2',
        aws_access_key_id=aws_access_key,
        aws_secret_access_key=aws_secret_key
    ).describe_regions()['Regions']

    ingest_of_type = {
        'EC2': ingest_ec2,
        'SG': ingest_sg,
        'ELB': ingest_elb,
    }[connection_type]

    count = ingest_of_type(aws_access_key, aws_secret_key, landing_table, regions)
    log.info(f'Inserted {count} rows.')
    yield count
Example #26
def get_timestamp():

    # Once pipelines are more strongly integrated with the installer, this table should be a variable
    timestamp_query = f"""
        SELECT EVENT_TIME from {OKTA_TABLE}
        WHERE EVENT_TIME IS NOT NULL
        order by EVENT_TIME desc
        limit 1
        """
    try:
        _, ts = db.connect_and_fetchall(timestamp_query)
        log.info(ts)
        ts = ts[0][0]
        ts = ts.strftime("%Y-%m-%dT%H:%M:%S.000Z")
        log.info(ts)
        if len(ts) < 1:
            log.error(
                "The okta timestamp is too short or doesn't exist; defaulting to one hour ago"
            )
            ts = datetime.datetime.now() - datetime.timedelta(hours=1)
            ts = ts.strftime("%Y-%m-%dT%H:%M:%S.000Z")

    except Exception as e:
        log.error(
            "Unable to find a timestamp of most recent okta log, defaulting to one hour ago",
            e,
        )
        ts = datetime.datetime.now() - datetime.timedelta(hours=1)
        ts = ts.strftime("%Y-%m-%dT%H:%M:%S.000Z")

    ret = {'since': ts}
    log.info(ret)

    return ret
Example #27
def ingest(table_name, options):
    landing_table = f'data.{table_name}'
    timestamp = datetime.utcnow()
    organization_id = options['organization_id']
    api_secret = options['api_secret']
    api_key = options['api_key']

    params: dict = {"limit": PAGE_SIZE, "page": 1}  # API starts at 1

    while 1:
        devices: dict = get_data(organization_id, api_key, api_secret, params)
        params["page"] += 1

        if len(devices) == 0:
            break

        db.insert(
            landing_table,
            values=[(
                timestamp,
                device,
                device.get('deviceId'),
                device.get('osVersionName', None),
                device.get('lastSyncStatus', None),
                device.get('type', None),
                device.get('version', None),
                device.get('lastSync', None),
                device.get('osVersion', None),
                device.get('name', None),
                device.get('status', None),
                device.get('originId', None),
                device.get('appliedBundle', None),
                device.get('hasIpBlocking', None),
            ) for device in devices],
            select=db.derive_insert_select(LANDING_TABLE_COLUMNS),
            columns=db.derive_insert_columns(LANDING_TABLE_COLUMNS),
        )
        log.info(f'Inserted {len(devices)} rows.')
        yield len(devices)
Example #28
def create_alerts(rule_name: str) -> Dict[str, Any]:
    metadata: Dict[str, Any] = {
        'QUERY_NAME': rule_name,
        'RUN_ID': RUN_ID,
        'ATTEMPTS': 1,
        'START_TIME': datetime.datetime.utcnow(),
        'ROW_COUNT': {
            'INSERTED': 0,
            'UPDATED': 0
        },
    }

    try:
        db.execute(
            RUN_ALERT_QUERY.format(
                query_name=rule_name,
                from_time_sql=
                f"DATEADD(minute, {ALERT_CUTOFF_MINUTES}, CURRENT_TIMESTAMP())",
                to_time_sql="CURRENT_TIMESTAMP()",
            ),
            fix_errors=False,
        )
        insert_count, update_count = merge_alerts(rule_name, GROUPING_CUTOFF)
        metadata['ROW_COUNT'] = {
            'INSERTED': insert_count,
            'UPDATED': update_count
        }
        db.execute(f"DROP TABLE results.RUN_{RUN_ID}_{rule_name}")

    except Exception as e:
        db.record_metadata(metadata, table=QUERY_METADATA_TABLE, e=e)
        return metadata

    db.record_metadata(metadata, table=QUERY_METADATA_TABLE)

    log.info(f"{rule_name} done.")

    return metadata
Example #29
def ingest(table_name, options):
    landing_table = f'data.{table_name}'
    api_key = options['api_key']
    subdomain = options['subdomain']

    url = f'https://{subdomain}.okta.com/api/v1/logs'
    headers = {
        'Accept': 'application/json',
        'Content-Type': 'application/json',
        'Authorization': f'SSWS {api_key}'
    }

    ts = db.fetch_latest(landing_table, 'event_time')
    if ts is None:
        log.error("Unable to find a timestamp of most recent Okta log, "
                  "defaulting to one hour ago")
        ts = datetime.datetime.now() - datetime.timedelta(hours=1)

    params = {'since': ts.strftime("%Y-%m-%dT%H:%M:%S.000Z")}

    while 1:
        response = requests.get(url=url, headers=headers, params=params)
        if response.status_code != 200:
            log.error('OKTA REQUEST FAILED: ', response.text)
            return

        result = response.json()
        if result == []:
            break

        db.insert(landing_table,
                  values=[(row, row['published']) for row in result],
                  select='PARSE_JSON(column1), column2')

        log.info(f'Inserted {len(result)} rows.')
        yield len(result)

        url = response.headers['Link'].split(', ')[1].split(';')[0][1:-1]
Example #30
def get_images(aws_access_key=None,
               aws_secret_key=None,
               session=None,
               account=None):
    client = boto3.client(
        'ec2',
        aws_access_key_id=aws_access_key,
        aws_secret_access_key=aws_secret_key,
        region_name=REGION,
    )
    regions = client.describe_regions()['Regions']

    log.info(f"Searching for images in {len(regions)} region(s).")

    # get list of all images in each region
    images = []
    for region in regions:
        if session:
            client = session.client('ec2', region_name=region['RegionName'])
        else:
            client = boto3.client(
                'ec2',
                aws_access_key_id=aws_access_key,
                aws_secret_access_key=aws_secret_key,
                region_name=region['RegionName'],
            )

        results = client.describe_images(Owners=['self'])['Images']
        for image in results:
            image['Region'] = region
            if account:
                image['Account'] = account

        images.extend(results)

    # return list of images
    log.info(f"Successfully serialized {len(images)} images(s).")
    return images