Example #1
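A helper used by several connectors below: it creates the metadata table if it does not exist, grants the SnowAlert role INSERT and SELECT, and adds the given column unless DESC TABLE shows it already present.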
def create_metadata_table(table, cols, addition):
    db.create_table(table, cols, ifnotexists=True)
    db.execute(f"GRANT INSERT, SELECT ON {table} TO ROLE {SA_ROLE}")
    table_names = (row['name'] for row in db.fetch(f'desc table {table}'))
    if any(name == addition[0].upper() for name in table_names):
        return
    db.execute(f'ALTER TABLE {table} ADD COLUMN {addition[0]} {addition[1]}')
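Example #2
An azure_subscription connector: the tenant, client, and secret are recorded in the landing table's comment, the table is created, and the SnowAlert role is granted access.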
def connect(connection_name, options):
    base_name = f"azure_subscription_{connection_name}"
    tenant_id = options['tenant_id']
    client_id = options['client_id']
    client_secret = options['client_secret']

    comment = f'''
---
module: azure_subscription
client_id: {client_id}
tenant_id: {tenant_id}
client_secret: {client_secret}
'''

    db.create_table(name=f'data.{base_name}_connection',
                    cols=LANDING_TABLE_COLUMNS,
                    comment=comment)

    db.execute(
        f'GRANT INSERT, SELECT ON data.{base_name}_connection TO ROLE {SA_ROLE}'
    )

    return {
        'newStage': 'finalized',
        'newMessage': 'Landing table created for collectors to populate.'
    }
Example #3
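Records a ticket ID on an alert row; note that both IDs are interpolated directly into the UPDATE statement rather than bound as parameters (compare Example #8).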
def record_ticket_id(ticket_id, alert_id):
    query = f"UPDATE results.alerts SET ticket='{ticket_id}' WHERE alert:ALERT_ID='{alert_id}'"
    print('Updating alert table:', query)
    try:
        db.execute(query)
    except Exception as e:
        log.error(e, f"Failed to update alert {alert_id} with ticket id {ticket_id}")
def create_alerts(rule_name: str) -> Dict[str, Any]:
    metadata: Dict[str, Any] = {
        'QUERY_NAME': rule_name,
        'RUN_ID': RUN_ID,
        'ATTEMPTS': 1,
        'START_TIME': datetime.datetime.utcnow(),
        'ROW_COUNT': {
            'INSERTED': 0,
            'UPDATED': 0,
        }
    }

    try:
        db.execute(RUN_ALERT_QUERY.format(
            query_name=rule_name,
            from_time_sql="DATEADD(minute, -90, CURRENT_TIMESTAMP())",
            to_time_sql="CURRENT_TIMESTAMP()",
        ), fix_errors=False)
        insert_count, update_count = merge_alerts(rule_name, GROUPING_CUTOFF)
        metadata['ROW_COUNT'] = {
            'INSERTED': insert_count,
            'UPDATED': update_count,
        }
        db.execute(f"DROP TABLE results.RUN_{RUN_ID}_{rule_name}")

    except Exception as e:
        db.record_metadata(metadata, table=QUERY_METADATA_TABLE, e=e)
        return metadata

    db.record_metadata(metadata, table=QUERY_METADATA_TABLE)

    log.info(f"{rule_name} done.")

    return metadata
Example #5
File: jira.py (project: nsrirama/SnowAlert)
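Marks an alert as having no handler by writing 'no handler' to its handled column.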
def bail_out(alert_id):
    query = f"UPDATE results.alerts SET handled='no handler' WHERE alert:ALERT_ID='{alert_id}'"
    print('Updating alert table:', query)
    try:
        db.execute(query)
    except Exception as e:
        log.error(e, f"Failed to update alert {alert_id} with handler status")
Example #6
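An Azure VM connector: creates the landing table, grants access, and registers a VM_INSTANCE_COUNT column in the shared Azure collection metadata table via create_metadata_table (Example #1).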
def connect(connection_name, options):
    base_name = f'AZURE_VM_{connection_name}'
    client_id = options['client_id']
    client_secret = options['client_secret']
    tenant_id = options['tenant_id']
    subscription_connection_name = options['subscription_connection_name']
    comment = f'''
---
module: azure_vm
client_id: {client_id}
client_secret: {client_secret}
tenant_id: {tenant_id}
subscription_connection_name: {subscription_connection_name}
'''
    db.create_table(name=f'data.{base_name}_CONNECTION',
                    cols=LANDING_TABLE_COLUMNS,
                    comment=comment)
    db.execute(
        f'GRANT INSERT, SELECT ON data.{base_name}_CONNECTION TO ROLE {SA_ROLE}'
    )

    cols = [
        ('SNAPSHOT_AT', 'TIMESTAMP_LTZ'),
        ('RUN_ID', 'STRING(100)'),
        ('SUBSCRIPTION_ID', 'STRING(500)'),
        ('VM_INSTANCE_COUNT', 'NUMBER'),
    ]
    create_metadata_table(AZURE_COLLECTION_METADATA, cols, cols[3])

    return {
        'newStage': 'finalized',
        'newMessage': 'Landing and metadata tables created for collectors to populate.',
    }
Example #7
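An AWS Collect connector: creates the organizations landing table and one supplementary table per collected API, granting the SnowAlert role access to each.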
def connect(connection_name, options):
    table_prefix = 'aws_collect' + ('' if connection_name in ('', 'default')
                                    else connection_name)
    table_name = f'{table_prefix}_organizations_list_accounts_connection'
    landing_table = f'data.{table_name}'

    audit_assumer_arn = options['audit_assumer_arn']
    org_account_ids = options['org_account_ids']
    audit_reader_role = options['audit_reader_role']
    reader_eid = options.get('reader_eid', '')

    comment = yaml_dump(
        module='aws_collect',
        audit_assumer_arn=audit_assumer_arn,
        org_account_ids=org_account_ids,
        audit_reader_role=audit_reader_role,
        reader_eid=reader_eid,
        collect_apis='all',
    )

    db.create_table(name=landing_table,
                    cols=LANDING_TABLE_COLUMNS,
                    comment=comment)
    db.execute(f'GRANT INSERT, SELECT ON {landing_table} TO ROLE {SA_ROLE}')

    for table_postfix, cols in SUPPLEMENTARY_TABLES.items():
        supp_table = f'data.{table_prefix}_{table_postfix}'
        db.create_table(name=supp_table, cols=cols)
        db.execute(f'GRANT INSERT, SELECT ON {supp_table} TO ROLE {SA_ROLE}')

    return {
        'newStage': 'finalized',
        'newMessage': "AWS Collect connector tables created.",
    }
Example #8
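Writes handler results back onto an alert; unlike Examples #3 and #5, the JSON payload is passed as a bound parameter through PARSE_JSON(%s).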
def record_status(results, alert_id):
    try:
        db.execute(
            f"UPDATE results.alerts "
            f"SET handled=PARSE_JSON(%s) "
            f"WHERE alert:ALERT_ID='{alert_id}'",
            params=[json_dumps(results)])
    except Exception as e:
        log.error(e,
                  f"Failed to update alert {alert_id} with status {results}")
Example #9
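Creates the Tenable Settings user landing table, recording the API token and secret in the table comment.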
def create_user_table(connection_name, options):
    table_name = f'data.TENABLE_SETTINGS_{connection_name}_USER_CONNECTION'
    token = options['token']
    secret = options['secret']
    comment = f"""
---
module: tenable_settings
token: {token}
secret: {secret}
"""

    db.create_table(table_name, cols=USER_LANDING_TABLE, comment=comment)
    db.execute(f'GRANT INSERT, SELECT ON {table_name} TO ROLE {SA_ROLE}')
Example #10
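A G Suite logs connector: a single landing table whose comment carries all connection options as YAML.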
def connect(connection_name, options):
    connection_type = options['connection_type']
    base_name = f'gsuite_logs_{connection_name}_{connection_type}'
    landing_table = f'data.{base_name}_connection'
    comment = yaml_dump(module='gsuite_logs', **options)
    db.create_table(
        name=landing_table, cols=LANDING_TABLES_COLUMNS['login'], comment=comment
    )
    db.execute(f'GRANT INSERT, SELECT ON data.{base_name}_connection TO ROLE {SA_ROLE}')

    return {
        'newStage': 'finalized',
        'newMessage': 'Landing table created for collectors to populate.',
    }
Example #11
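A CrowdStrike Devices connector: one landing table plus the usual grant.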
def connect(connection_name, options):
    table_name = f'crowdstrike_devices_{connection_name}_connection'
    landing_table = f'data.{table_name}'

    comment = yaml_dump(module='crowdstrike_devices', **options)

    db.create_table(name=landing_table,
                    cols=LANDING_TABLE_COLUMNS,
                    comment=comment)
    db.execute(f'GRANT INSERT, SELECT ON {landing_table} TO ROLE {SA_ROLE}')
    return {
        'newStage': 'finalized',
        'newMessage': "Crowdstrike Devices ingestion table created!",
    }
Example #12
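The same azure_subscription connector as Example #2, with the hand-built comment replaced by yaml_dump.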
def connect(connection_name, options):
    base_name = f"azure_subscription_{connection_name}"

    comment = yaml_dump(module='azure_subscription', **options)

    db.create_table(
        name=f'data.{base_name}_connection', cols=LANDING_TABLE_COLUMNS, comment=comment
    )

    db.execute(f'GRANT INSERT, SELECT ON data.{base_name}_connection TO ROLE {SA_ROLE}')

    return {
        'newStage': 'finalized',
        'newMessage': 'Landing table created for collectors to populate.',
    }
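Comparing this with Example #2 (the same connector building its comment by hand) suggests what yaml_dump produces: a '---'-prefixed YAML document rendering its keyword arguments. A minimal sketch of such a helper, assuming PyYAML is available (hypothetical; the project's real helper may normalize values differently):

import yaml  # assumption: PyYAML is available

def yaml_dump(**kwargs) -> str:
    # Hypothetical sketch: render keyword arguments as the same kind of
    # '---'-prefixed YAML document that Example #2 builds by hand.
    return '---\n' + yaml.dump(kwargs, default_flow_style=False)

With module='azure_subscription' and the connection options, this would yield a comment equivalent to Example #2's hand-written one.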
Example #13
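An AssetPanda connector: one landing table plus the usual grant.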
def connect(connection_name, options):
    landing_table = f'data.assetpanda_{connection_name}_connection'

    comment = yaml_dump(module='assetpanda', **options)

    db.create_table(name=landing_table,
                    cols=LANDING_TABLE_COLUMNS,
                    comment=comment)

    db.execute(f'GRANT INSERT, SELECT ON {landing_table} TO ROLE {SA_ROLE}')

    return {
        'newStage': 'finalized',
        'newMessage': "AssetPanda ingestion tables created!",
    }
Example #14
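A Tenable.io connector: the connection type selects the user, agent, or vuln column set for the landing table.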
def connect(connection_name, options):
    ctype = options['connection_type']
    ctable = f'data.tenable_io_{connection_name}_{ctype}_connection'
    cols = {
        'user': USER_LANDING_TABLE,
        'agent': AGENT_LANDING_TABLE,
        'vuln': VULN_LANDING_TABLE,
    }[ctype]
    comment = yaml_dump(module='tenable_io', **options)

    db.create_table(ctable, cols=cols, comment=comment)
    db.execute(f'GRANT INSERT, SELECT ON {ctable} TO ROLE {SA_ROLE}')

    return {
        'newStage': 'finalized',
        'newMessage': 'Landing table created for collectors to populate.',
    }
Example #15
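An AWS inventory helper that creates one landing table per asset type (e.g. SG, EC2, ELB), embedding the AWS keys in the comment.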
def create_asset_table(connection_name, asset_type, columns, options):
    # create the tables, based on the config type (i.e. SG, EC2, ELB)
    table_name = f'aws_asset_inv_{asset_type}_{connection_name}_connection'
    landing_table = f'data.{table_name}'
    aws_access_key = options['aws_access_key']
    aws_secret_key = options['aws_secret_key']

    comment = f'''
---
module: aws_inventory
aws_access_key: {aws_access_key}
aws_secret_key: {aws_secret_key}
'''

    db.create_table(name=landing_table, cols=columns, comment=comment)
    db.execute(f'GRANT INSERT, SELECT ON {landing_table} TO ROLE {SA_ROLE}')

    return f"AWS {asset_type} asset ingestion table created!"
Example #16
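A Salesforce Event Log connector: the landing table's stage is configured with a JSON file format (STRIP_OUTER_ARRAY) and a PURGE copy option.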
def connect(connection_name, options):
    table_name = f'salesforce_events_{connection_name}'
    landing_log_table = f'data.{table_name}_connection'

    comment = yaml_dump(module='salesforce_event_log', **options)

    db.create_table(
        name=landing_log_table,
        cols=LANDING_TABLE_COLUMNS,
        comment=comment,
        stage_file_format='TYPE = JSON STRIP_OUTER_ARRAY = TRUE',
        stage_copy_options='PURGE = TRUE',
    )
    db.execute(
        f'GRANT INSERT, SELECT ON {landing_log_table} TO ROLE {SA_ROLE}')

    return {
        'newStage': 'finalized',
        'newMessage': "Salesforce Event Log ingestion table created!",
    }
Example #17
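Azure blob ingestion: resumes the pipe, lists blobs newer than the last loaded_on timestamp, and submits them to Snowpipe in batches, yielding each batch size.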
def ingest(table_name, options):
    base_name = re.sub(r'_CONNECTION$', '', table_name)
    storage_account = options['storage_account']
    sas_token = vault.decrypt_if_encrypted(options['sas_token'])
    suffix = options['suffix']
    container_name = options['container_name']
    snowflake_account = options['snowflake_account']
    sa_user = options['sa_user']
    database = options['database']

    block_blob_service = BlockBlobService(account_name=storage_account,
                                          sas_token=sas_token,
                                          endpoint_suffix=suffix)

    db.execute(f"select SYSTEM$PIPE_FORCE_RESUME('DATA.{base_name}_PIPE');")

    last_loaded = db.fetch_latest(f'data.{table_name}', 'loaded_on')

    log.info(f"Last loaded time is {last_loaded}")

    blobs = block_blob_service.list_blobs(container_name)
    new_files = [
        StagedFile(b.name, None) for b in blobs
        if (last_loaded is None or b.properties.creation_time > last_loaded)
    ]

    log.info(f"Found {len(new_files)} files to ingest")

    # Proxy object that abstracts the Snowpipe REST API
    ingest_manager = SimpleIngestManager(
        account=snowflake_account,
        host=f'{snowflake_account}.snowflakecomputing.com',
        user=sa_user,
        pipe=f'{database}.data.{base_name}_PIPE',
        private_key=load_pkb_rsa(PRIVATE_KEY, PRIVATE_KEY_PASSWORD))

    if len(new_files) > 0:
        for file_group in groups_of(4999, new_files):
            response = ingest_manager.ingest_files(file_group)
            log.info(response)
            yield len(file_group)
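The ingest above hands files to Snowpipe in groups of 4,999, presumably to stay within a per-request file limit on the REST API. A chunking helper of that shape might look like the following sketch (hypothetical; SnowAlert defines its own groups_of elsewhere):

from itertools import islice

def groups_of(n, iterable):
    # Yield successive lists of at most n items until the iterable is exhausted.
    it = iter(iterable)
    while True:
        chunk = list(islice(it, n))
        if not chunk:
            return
        yield chunk

Example #18
Merges a rule's newly generated alerts into the alerts table and returns the created/updated counts consumed by create_alerts in Example #4.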
def merge_alerts(query_name, from_time_sql):
    log.info(f"{query_name} processing...")

    sql = MERGE_ALERTS.format(
        query_name=query_name,
        from_time_sql=from_time_sql,
        new_alerts_table=f"RUN_{RUN_ID}_{query_name}",
    )
    result = db.execute(sql, fix_errors=False).fetchall()
    created_count, updated_count = result[0]
    log.info(f"{query_name} created {created_count}, updated {updated_count} rows.")
    return created_count, updated_count
Example #19
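A Meraki connector: separate client and device landing tables sharing one YAML comment.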
def connect(connection_name, options):
    landing_table_client = f'data.meraki_devices_{connection_name}_client_connection'
    landing_table_device = f'data.meraki_devices_{connection_name}_device_connection'

    comment = yaml_dump(module='meraki_devices', **options)

    db.create_table(name=landing_table_client,
                    cols=LANDING_TABLE_COLUMNS_CLIENT,
                    comment=comment)
    db.execute(
        f'GRANT INSERT, SELECT ON {landing_table_client} TO ROLE {SA_ROLE}')

    db.create_table(name=landing_table_device,
                    cols=LANDING_TABLE_COLUMNS_DEVICE,
                    comment=comment)
    db.execute(
        f'GRANT INSERT, SELECT ON {landing_table_device} TO ROLE {SA_ROLE}')
    return {
        'newStage': 'finalized',
        'newMessage': "Meraki ingestion tables created!"
    }
Example #20
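An Okta connector with a single landing table; the API key and subdomain are recorded in the comment.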
def connect(connection_name, options):
    table_name = f'okta_{connection_name}_connection'
    landing_table = f'data.{table_name}'
    api_key = options['api_key']
    subdomain = options['subdomain']

    comment = f'''
---
module: okta
api_key: {api_key}
subdomain: {subdomain}
'''

    db.create_table(name=landing_table,
                    cols=LANDING_TABLE_COLUMNS,
                    comment=comment)
    db.execute(f'GRANT INSERT, SELECT ON {landing_table} TO ROLE {SA_ROLE}')
    return {
        'newStage': 'finalized',
        'newMessage': "Okta ingestion table created!",
    }
Example #21
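An Okta connector variant that provisions separate log, user, and group landing tables.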
def connect(connection_name, options):
    table_name = f'okta_{connection_name}'
    landing_log_table = f'data.{table_name}_connection'
    landing_user_table = f'data.{table_name}_users_connection'
    landing_group_table = f'data.{table_name}_groups_connection'

    comment = yaml_dump(module='okta', **options)

    db.create_table(name=landing_log_table,
                    cols=LANDING_LOG_TABLE_COLUMNS,
                    comment=comment)
    db.execute(
        f'GRANT INSERT, SELECT ON {landing_log_table} TO ROLE {SA_ROLE}')

    db.create_table(name=landing_user_table,
                    cols=LANDING_USER_TABLE_COLUMNS,
                    comment=comment)
    db.execute(
        f'GRANT INSERT, SELECT ON {landing_user_table} TO ROLE {SA_ROLE}')

    db.create_table(name=landing_group_table,
                    cols=LANDING_GROUP_TABLE_COLUMNS,
                    comment=comment)
    db.execute(
        f'GRANT INSERT, SELECT ON {landing_group_table} TO ROLE {SA_ROLE}')

    return {
        'newStage': 'finalized',
        'newMessage': "Okta ingestion table, user table, group table created!",
    }
Example #22
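The yaml_dump variant of Example #15; it also registers a per-asset-type count column in the AWS accounts metadata table via create_metadata_table (Example #1).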
def create_asset_table(connection_name, asset_type, columns, options):
    # create the tables, based on the config type (i.e. SG, EC2, ELB)
    table_name = f'aws_asset_inv_{asset_type}_{connection_name}_connection'
    landing_table = f'data.{table_name}'

    comment = yaml_dump(module='aws_inventory', **options)

    db.create_table(name=landing_table, cols=columns, comment=comment)
    metadata_cols = [
        ('snapshot_at', 'TIMESTAMP_LTZ'),
        ('run_id', 'VARCHAR(100)'),
        ('account_id', 'VARCHAR(100)'),
        ('account_alias', 'VARCHAR(100)'),
        (f'{asset_type}_count', 'NUMBER'),
        ('error', 'VARCHAR'),
    ]
    create_metadata_table(table=AWS_ACCOUNTS_METADATA,
                          cols=metadata_cols,
                          addition=metadata_cols[4])
    db.execute(f'GRANT INSERT, SELECT ON {landing_table} TO ROLE {SA_ROLE}')

    return f"AWS {asset_type} asset ingestion table created!"
Example #23
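An AWS accounts connector: the source and destination role ARNs and external ID go into the landing table's comment.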
def connect(connection_name, options):
    table_name = f'aws_accounts_{connection_name}_connection'
    landing_table = f'data.{table_name}'
    source_role_arn = options['source_role_arn']
    destination_role_arn = options['destination_role_arn']
    destination_role_external_id = options['destination_role_external_id']

    comment = yaml_dump(
        module='aws_accounts',
        source_role_arn=source_role_arn,
        destination_role_arn=destination_role_arn,
        destination_role_external_id=destination_role_external_id,
    )

    db.create_table(name=landing_table,
                    cols=LANDING_TABLE_COLUMNS,
                    comment=comment)
    db.execute(f'GRANT INSERT, SELECT ON {landing_table} TO ROLE {SA_ROLE}')
    return {
        'newStage': 'finalized',
        'newMessage': "AWS Account ingestion table created!",
    }
Example #24
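Bulk-updates alert counters: stages (ALERT_ID, COUNTER) pairs in a temporary counter table, then merges them into the alerts table.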
def update_recent_alerts(ctx, alert_map):
    update_array = []
    update_array_length = 0
    for key in alert_map:
        if alert_map[key][2]:
            update_array.extend(
                [alert_map[key][0]['ALERT_ID'], alert_map[key][1]])
            update_array_length = update_array_length + 1
    if update_array_length:
        format_string = ", ".join(["(%s, %s)"] * update_array_length)
        db.execute(ctx,
                   f"DROP TABLE IF EXISTS {RESULTS_SCHEMA}.counter_table;")
        db.execute(
            ctx,
            f"CREATE TEMPORARY TABLE {RESULTS_SCHEMA}.counter_table(ALERT_ID string, COUNTER number);"
        )
        ctx.cursor().execute(
            f"INSERT INTO {RESULTS_SCHEMA}.counter_table (ALERT_ID, COUNTER) VALUES {format_string};",
            update_array)
        ctx.cursor().execute(
            f"MERGE INTO {ALERTS_TABLE} s"
            f" USING {RESULTS_SCHEMA}.counter_table t"
            f" ON s.alert:ALERT_ID = t.ALERT_ID WHEN MATCHED THEN UPDATE"
            f" SET s.COUNTER = t.COUNTER;")
Example #25
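A test teardown fixture: yields to the test, then clears the results tables.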
def delete_results():
    yield
    db.execute(f"DELETE FROM results.alerts")
    db.execute(f"DELETE FROM results.violations")
    db.execute(f"DELETE FROM results.run_metadata")
    db.execute(f"DELETE FROM results.query_metadata")
Example #26
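An Azure log connector built on an external table: stage, landing table, partitioned external table, a stored procedure that refreshes it, and two chained tasks that refresh and then merge new rows into the landing table. The MERGE's insert clause depends on the connection type and its SELECT on the cloud type.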
def connect(connection_name, options):
    connection_type = options['connection_type']

    base_name = f"azure_log_{connection_name}_{connection_type}"
    account_name = options['account_name']
    container_name = options['container_name']
    suffix = options['suffix']
    cloud_type = options['cloud_type']
    sas_token = options['sas_token']

    comment = yaml_dump(module='azure_log')

    db.create_stage(
        name=f'data.{base_name}_stage',
        url=f"azure://{account_name}.blob.{suffix}/{container_name}",
        cloud='azure',
        prefix='',
        credentials=sas_token,
        file_format=FILE_FORMAT,
    )

    db.execute(
        f'GRANT USAGE ON STAGE data.{base_name}_stage TO ROLE {SA_ROLE}')

    db.create_table(
        name=f'data.{base_name}_connection',
        cols=LANDING_TABLES_COLUMNS[connection_type],
        comment=comment,
        ifnotexists=True,
    )

    db.execute(
        f'GRANT INSERT, SELECT ON data.{base_name}_connection TO ROLE {SA_ROLE}'
    )

    external_table_columns = [(
        'timestamp_part',
        'TIMESTAMP_LTZ',
        GET_TIMESTAMP_FROM_FILENAME_SQL[connection_type],
    )]

    db.create_external_table(
        name=f'data.{base_name}_external',
        location=f'@data.{base_name}_stage',
        cols=external_table_columns,
        partition='timestamp_part',
        file_format=db.TypeOptions(type='JSON'),
    )

    db.execute(f'GRANT SELECT ON data.{base_name}_external TO ROLE {SA_ROLE}')

    stored_proc_def = f"""
var sql_command = "ALTER EXTERNAL TABLE data.{base_name}_external REFRESH";
try {{
    snowflake.execute ({{sqlText: sql_command}});
    return "Succeeded.";
}} catch (err)  {{
    return "Failed: " + err;
}}
"""

    db.create_stored_procedure(
        name=f'data.{base_name}_procedure',
        args=[],
        return_type='string',
        executor='OWNER',
        definition=stored_proc_def,
    )

    refresh_task_sql = f'CALL data.{base_name}_procedure()'
    db.create_task(
        name=f'data.{base_name}_refresh_task',
        warehouse=WAREHOUSE,
        schedule='5 minutes',
        sql=refresh_task_sql,
    )

    select_statement_sql = {
        'reg':
        (f"SELECT value "
         f"FROM data.{base_name}_external "
         f"WHERE timestamp_part >= DATEADD(HOUR, -2, CURRENT_TIMESTAMP())"),
        'gov':
        (f"SELECT value FROM ("
         f"  SELECT value AS a "
         f"  FROM data.{base_name}_external"
         f"  WHERE timestamp_part >= DATEADD(HOUR, -2, CURRENT_TIMESTAMP())"
         f"), LATERAL FLATTEN (INPUT => a:records)"),
    }

    insert_task_sql = {
        'operation':
        f"""
INSERT (
    RAW, HASH_RAW, CALLER_IP_ADDRESS, CATEGORY, CORRELATION_ID, DURATION_MS,
    IDENTITY, IDENTITY_AUTHORIZATION, IDENTITY_CLAIMS, LEVEL, LOCATION,
    OPERATION_NAME, PROPERTIES, PROPERTIES_ANCESTORS, PROPERTIES_IS_COMPLIANCE_CHECK,
    PROPERTIES_POLICIES, PROPERTIES_RESOURCE_LOCATION, RESOURCE_ID, RESULT_SIGNATURE,
    RESULT_TYPE, EVENT_TIME, LOADED_ON
) VALUES (
    VALUE, HASH(VALUE), VALUE:callerIpAddress::STRING, VALUE:category::STRING, VALUE:correlationId::STRING,
    VALUE:durationMs::NUMBER, VALUE:identity::VARIANT, VALUE:identity.authorization::VARIANT,
    VALUE:identity.claims::VARIANT, VALUE:level::STRING, VALUE:location::STRING, VALUE:operationName::STRING,
    VALUE:properties::VARIANT, VALUE:properties.ancestors::STRING, VALUE:properties.isComplianceCheck::STRING,
    PARSE_JSON(VALUE:properties.policies),VALUE:properties.resourceLocation::STRING, VALUE:resourceId::STRING,
    VALUE:resultSignature::STRING,VALUE:resultType::STRING, value:time::TIMESTAMP_LTZ, CURRENT_TIMESTAMP()
)
""",
        'audit':
        f"""
INSERT (
    RAW, HASH_RAW, CALLER_IP_ADDRESS, CATEGORY, CORRELATION_ID,
    DURATION_MS, LEVEL, OPERATION_NAME, OPERATION_VERSION, PROPERTIES,
    PROPERTIES_ACTIVITY_DATE_TIME, PROPERTIES_ACTIVITY_DISPLAY_NAME,
    PROPERTIES_ADDITIONAL_DETAILS, PROPERTIES_CATEGORY, PROPERTIES_ID,
    PROPERTIES_INITIATED_BY, PROPERTIES_LOGGED_BY_SERVICE, PROPERTIES_OPERATION_TYPE,
    PROPERTIES_RESULT, PROPERTIES_RESULT_REASON, PROPERTIES_TARGET_RESOURCES,
    RESOURCE_ID, RESULT_SIGNATURE, TENANT_ID, EVENT_TIME, LOADED_ON
) VALUES (
    VALUE, HASH(VALUE), VALUE:callerIpAddress::STRING, VALUE:category::STRING, VALUE:correlationId::STRING,
    VALUE:durationMs::NUMBER, VALUE:level::STRING, VALUE:operationName::STRING, VALUE:operationVersion::STRING,
    VALUE:properties::VARIANT, VALUE:properties.activityDateTime::TIMESTAMP_LTZ,
    VALUE:properties.activityDisplayName::STRING, VALUE:properties.additionalDetails::VARIANT,
    VALUE:properties.category::STRING, VALUE:properties.id::STRING, VALUE:properties.initiatedBy::VARIANT,
    VALUE:properties.loggedByService::STRING, VALUE:properties.operationType::STRING, VALUE:properties.result::STRING,
    VALUE:resultReason::STRING, VALUE:properties.targetResources::VARIANT, VALUE:resourceId::STRING,
    VALUE:resultSignature::STRING, VALUE:tenantId::STRING, VALUE:time::TIMESTAMP_LTZ, CURRENT_TIMESTAMP()
)
""",
        'signin':
        f"""
INSERT (
    RAW, HASH_RAW, LEVEL, CALLER_IP_ADDRESS, CATEGORY, CORRELATION_ID, DURATION_MS,
    IDENTITY, LOCATION, OPERATION_NAME, OPERATION_VERSION, PROPERTIES,
    PROPERTIES_APP_DISPLAY_NAME, PROPERTIES_APP_ID,
    PROPERTIES_APPLIED_CONDITIONAL_ACESS_POLICIES, PROPERTIES_AUTHENTICATION_METHODS_USED,
    PROPERTIES_AUTHENTICATION_PROCESSING_DETAILS, PROPERTIES_CLIENT_APP_USED,
    PROPERTIES_CONDITIONAL_ACCESS_STATUS, PROPERTIES_CREATED_DATE_TIME,
    PROPERTIES_DEVICE_DETAIL, PROPERTIES_ID, PROPERTIES_IP_ADDRESS, PROPERTIES_IS_INTERACTIVE, PROPERTIES_LOCATION,
    PROPERTIES_MFA_DETAIL, PROPERTIES_NETWORK_LOCATION, PROPERTIES_PROCESSING_TIME_IN_MILLISECONDS,
    PROPERTIES_RESOURCE_DISPLAY_NAME, PROPERTIES_RESOURCE_ID, PROPERTIES_RISK_DETAIL,
    PROPERTIES_RISK_EVENT_TYPES, PROPERTIES_RISK_LEVEL_AGGREGATED, PROPERTIES_RISK_LEVEL_DURING_SIGNIN,
    PROPERTIES_RISK_STATE, PROPERTIES_STATUS, PROPERTIES_TOKEN_ISSUER_TYPE, PROPERTIES_USER_DISPLAY_NAME,
    PROPERTIES_USER_ID, PROPERTIES_USER_PRINCIPAL_NAME, RESOURCE_ID, RESULT_DESCRIPTION, RESULT_SIGNATURE,
    RESULT_TYPE, TENANT_ID, EVENT_TIME, LOADED_ON
) VALUES (
    VALUE, HASH(VALUE), VALUE:Level::NUMBER, VALUE:callerIpAddress::STRING, VALUE:category::STRING,
    VALUE:correlationId::STRING, VALUE:durationMs, VALUE:identity::STRING, VALUE:location::STRING,
    VALUE:operationName::STRING, VALUE:operationVersion::STRING, VALUE:properties::VARIANT,
    VALUE:properties.appDisplayName::STRING, VALUE:properties.appId::STRING,
    VALUE:properties.appliedConditionalAccessPolicies::VARIANT, VALUE:properties.authenticationMethodsUsed::VARIANT,
    VALUE:properties.authenticationProcessingDetails::VARIANT, VALUE:properties.clientAppUsed::STRING,
    VALUE:properties.conditionalAccessStatus::STRING, VALUE:properties.createdDateTime::TIMESTAMP_LTZ,
    VALUE:properties.deviceDetail::VARIANT, VALUE:properties.id::STRING, VALUE:properties.ipAddress::STRING,
    VALUE:properties.isInteractive::BOOLEAN, VALUE:properties.location::VARIANT,
    VALUE:properties.mfaDetail::VARIANT, VALUE:properties.networkLocationDetails::VARIANT,
    VALUE:properties.processingTimeInMilliseconds::NUMBER, VALUE:properties.resourceDisplayName::STRING,
    VALUE:properties.resourceId::STRING, VALUE:properties.riskDetail::STRING,
    VALUE:properties.riskEventTypes::VARIANT, VALUE:properties.riskLevelAggregated::STRING,
    VALUE:properties.riskLevelDuringSignIn::STRING, VALUE:properties.riskState::VARIANT,
    VALUE:properties.status::VARIANT, VALUE:properties.tokenIssuerType::STRING,
    VALUE:properties.userDisplayName::STRING, VALUE:properties.userId::STRING,
    VALUE:properties.userPrincipalName::STRING, VALUE:resourceId::STRING, VALUE:resultDescription::STRING,
    VALUE:resultSignature::STRING, VALUE:resultType::STRING, VALUE:tenantId::STRING, VALUE:time::TIMESTAMP_LTZ,
    CURRENT_TIMESTAMP()
)
""",
    }

    ingest_task_sql = f"""
MERGE INTO data.{base_name}_connection a
USING (
  {select_statement_sql[cloud_type]}
) b
ON a.raw = b.value
WHEN NOT MATCHED THEN
{insert_task_sql[connection_type]}
"""

    db.create_task(
        name=f'data.{base_name}_ingest_task',
        warehouse=WAREHOUSE,
        schedule=f'AFTER data.{base_name}_refresh_task',
        sql=ingest_task_sql,
    )

    return {
        'newStage': 'finalized',
        'newMessage': 'Created Stage, Tables, Stored Procedure, and Tasks.',
    }
Example #27
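A fixture that installs a sample alert rule view and drops it after the test.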
def sample_alert_rule(db_schemas):
    db.execute(TEST_ALERT)
    yield
    db.execute("DROP VIEW rules.simple_alert_query")
Example #28
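The CloudTrail finalize step: creates a stream over the staging table, an auto-ingest pipe (retried while the IAM change propagates), and a task that flattens Records into the landing table, then returns the pipe's SQS ARN for the bucket's event notifications.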
def finalize(connection_name):
    base_name = f'AWS_CLOUDTRAIL_{connection_name}_EVENTS'.upper()
    pipe = f'data.{base_name}_PIPE'
    landing_table = f'data.{base_name}_CONNECTION'

    # Step two: Configure the remainder once the role is properly configured.
    cloudtrail_ingest_task = f'''
INSERT INTO {landing_table} (
  insert_time, raw, hash_raw, event_time, aws_region, event_id, event_name, event_source, event_type,
  event_version, recipient_account_id, request_id, request_parameters, response_elements, source_ip_address,
  user_agent, user_identity, user_identity_type, user_identity_principal_id, user_identity_arn,
  user_identity_accountid, user_identity_invokedby, user_identity_access_key_id, user_identity_username,
  user_identity_session_context_attributes_mfa_authenticated, user_identity_session_context_attributes_creation_date,
  user_identity_session_context_session_issuer_type, user_identity_session_context_session_issuer_principal_id,
  user_identity_session_context_session_issuer_arn, user_identity_session_context_session_issuer_account_id,
  user_identity_session_context_session_issuer_user_name, error_code, error_message, additional_event_data,
  api_version, read_only, resources, service_event_details, shared_event_id, vpc_endpoint_id
)
SELECT CURRENT_TIMESTAMP() insert_time
    , value raw
    , HASH(value) hash_raw
    --- In the rare event of an unparsable timestamp, the following COALESCE keeps the pipeline from failing.
    --- Compare event_time to TRY_TO_TIMESTAMP(raw:eventTime::STRING) to establish if the timestamp was parsed.
    , COALESCE(
        TRY_TO_TIMESTAMP(value:eventTime::STRING)::TIMESTAMP_LTZ(9),
        CURRENT_TIMESTAMP()
      ) event_time
    , value:awsRegion::STRING aws_region
    , value:eventID::STRING event_id
    , value:eventName::STRING event_name
    , value:eventSource::STRING event_source
    , value:eventType::STRING event_type
    , value:eventVersion::STRING event_version
    , value:recipientAccountId::STRING recipient_account_id
    , value:requestID::STRING request_id
    , value:requestParameters::VARIANT request_parameters
    , value:responseElements::VARIANT response_elements
    , value:sourceIPAddress::STRING source_ip_address
    , value:userAgent::STRING user_agent
    , value:userIdentity::VARIANT user_identity
    , value:userIdentity.type::STRING user_identity_type
    , value:userIdentity.principalId::STRING user_identity_principal_id
    , value:userIdentity.arn::STRING user_identity_arn
    , value:userIdentity.accountId::STRING user_identity_accountid
    , value:userIdentity.invokedBy::STRING user_identity_invokedby
    , value:userIdentity.accessKeyId::STRING user_identity_access_key_id
    , value:userIdentity.userName::STRING user_identity_username
    , value:userIdentity.sessionContext.attributes.mfaAuthenticated::STRING user_identity_session_context_attributes_mfa_authenticated
    , value:userIdentity.sessionContext.attributes.creationDate::STRING user_identity_session_context_attributes_creation_date
    , value:userIdentity.sessionContext.sessionIssuer.type::STRING user_identity_session_context_session_issuer_type
    , value:userIdentity.sessionContext.sessionIssuer.principalId::STRING user_identity_session_context_session_issuer_principal_id
    , value:userIdentity.sessionContext.sessionIssuer.arn::STRING user_identity_session_context_session_issuer_arn
    , value:userIdentity.sessionContext.sessionIssuer.accountId::STRING user_identity_session_context_session_issuer_account_id
    , value:userIdentity.sessionContext.sessionIssuer.userName::STRING user_identity_session_context_session_issuer_user_name
    , value:errorCode::STRING error_code
    , value:errorMessage::STRING error_message
    , value:additionalEventData::VARIANT additional_event_data
    , value:apiVersion::STRING api_version
    , value:readOnly::BOOLEAN read_only
    , value:resources::VARIANT resources
    , value:serviceEventDetails::STRING service_event_details
    , value:sharedEventId::STRING shared_event_id
    , value:vpcEndpointId::STRING vpc_endpoint_id
FROM data.{base_name}_STREAM, table(flatten(input => v:Records))
WHERE ARRAY_SIZE(v:Records) > 0
'''

    db.create_stream(
        name=f'data.{base_name}_STREAM',
        target=f'data.{base_name}_STAGING'
    )

    # IAM change takes 5-15 seconds to take effect
    sleep(5)
    db.retry(
        lambda: db.create_pipe(
            name=pipe,
            sql=f"COPY INTO data.{base_name}_STAGING(v) FROM @data.{base_name}_STAGE/",
            replace=True,
            autoingest=True
        ),
        n=10,
        sleep_seconds_btw_retry=1
    )

    db.create_task(name=f'data.{base_name}_TASK', schedule='1 minute',
                   warehouse=WAREHOUSE, sql=cloudtrail_ingest_task)

    db.execute(f"ALTER PIPE {pipe} REFRESH")

    pipe_description = list(db.fetch(f'DESC PIPE {pipe}'))
    if len(pipe_description) < 1:
        return {
            'newStage': 'error',
            'newMessage': f"{pipe} doesn't exist; please reach out to Snowflake Security for assistance."
        }
    else:
        sqs_arn = pipe_description[0]['notification_channel']

    return {
        'newStage': 'finalized',
        'newMessage': (
            f"Please add this SQS Queue ARN to the bucket event notification"
            f"channel for all object create events: {sqs_arn}"
        )
    }
Example #29
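A fixture that installs several test alert rules plus a suppression view covering everything except test alerts, then drops them all.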
def sample_alert_rules(db_schemas):
    db.execute(TEST_ALERT)
    db.execute(TEST_SUPPRESSED_ALERT)
    db.execute(TEST_SUPPRESSION)
    db.execute(TEST_CORRELATED_ALERT)
    db.execute(TEST_ALERT_WITH_SLACK_HANDLER)
    db.execute(
        f"""
        CREATE OR REPLACE VIEW rules.__suppress_sample_alerts_alert_suppression COPY GRANTS
          COMMENT='this should suppress anything not a test alert'
        AS
        SELECT id
        FROM data.alerts
        WHERE suppressed IS NULL
          AND query_name NOT ILIKE '_TEST%'
    """
    )

    yield

    db.execute(f"DROP VIEW rules._test1_alert_query")
    db.execute(f"DROP VIEW rules._test2_alert_query")
    db.execute(f"DROP VIEW rules._test2_alert_suppression")
    db.execute(f"DROP VIEW rules._test3_alert_query")
    db.execute(f"DROP VIEW rules._test4_alert_query")
    db.execute(f"DROP VIEW rules.__suppress_sample_alerts_alert_suppression")
Example #30
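An Azure log connector built on Snowpipe instead of an external table (compare Example #26): stage, landing table, and a connection-type-specific COPY pipe, created paused and handed over to the SnowAlert role.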
def connect(connection_name, options):
    connection_type = options['connection_type']

    base_name = f"azure_{connection_name}_{connection_type}"
    account_name = options['account_name']
    container_name = options['container_name']
    suffix = options['suffix']
    sas_token = options['sas_token']
    sas_token_ct = vault.encrypt(sas_token)

    comment = f'''
---
module: azure
storage_account: {account_name}
container_name: {container_name}
suffix: {suffix}
sas_token: {sas_token_ct}
sa_user: {USER}
snowflake_account: {ACCOUNT}
database: {DATABASE}
'''

    db.create_stage(
        name=f'data.{base_name}_STAGE',
        url=f"azure://{account_name}.blob.{suffix}/{container_name}",
        cloud='azure',
        prefix='',
        credentials=sas_token,
        file_format=FILE_FORMAT)

    db.execute(
        f'GRANT USAGE ON STAGE data.{base_name}_STAGE TO ROLE {SA_ROLE}')

    db.create_table(name=f'data.{base_name}_CONNECTION',
                    cols=LANDING_TABLES_COLUMNS[connection_type],
                    comment=comment)

    db.execute(
        f'GRANT INSERT, SELECT ON data.{base_name}_CONNECTION TO ROLE {SA_ROLE}'
    )

    pipe_sql = {
        'operation':
        f'''
COPY INTO DATA.{base_name}_CONNECTION(RAW, HASH_RAW, CALLER_IP_ADDRESS, CATEGORY, CORRELATION_ID, DURATION_MS,
                                 IDENTITY, IDENTITY_AUTHORIZATION, IDENTITY_CLAIMS, LEVEL, LOCATION,
                                 OPERATION_NAME, PROPERTIES, PROPERTIES_ANCESTORS, PROPERTIES_IS_COMPLIANCE_CHECK,
                                 PROPERTIES_POLICIES, PROPERTIES_RESOURCE_LOCATION, RESOURCE_ID, RESULT_SIGNATURE,
                                 RESULT_TYPE, EVENT_TIME, LOADED_ON)
FROM (
    SELECT $1, HASH($1), $1:callerIpAddress::STRING, $1:category::STRING, $1:correlationId::STRING,
        $1:durationMs::NUMBER, $1:identity::VARIANT, $1:identity.authorization::VARIANT, $1:identity.claims::VARIANT,
        $1:level::STRING, $1:location::STRING, $1:operationName::STRING, $1:properties::VARIANT,
        $1:properties.ancestors::STRING, $1:properties.isComplianceCheck::STRING, PARSE_JSON($1:properties.policies),
        $1:properties.resourceLocation::STRING, $1:resourceId::STRING, $1:resultSignature::STRING,
        $1:resultType::STRING, $1:time::TIMESTAMP_LTZ, CURRENT_TIMESTAMP()
    FROM @DATA.{base_name}_STAGE)
''',
        'audit':
        f'''
COPY INTO data.{base_name}_CONNECTION (RAW, HASH_RAW, CALLER_IP_ADDRESS, CATEGORY, CORRELATION_ID,
                                  DURATION_MS, LEVEL, OPERATION_NAME, OPERATION_VERSION, PROPERTIES,
                                  PROPERTIES_ACTIVITY_DATE_TIME, PROPERTIES_ACTIVITY_DISPLAY_NAME,
                                  PROPERTIES_ADDITIONAL_DETAILS, PROPERTIES_CATEGORY, PROPERTIES_ID,
                                  PROPERTIES_INITIATED_BY, PROPERTIES_LOGGED_BY_SERVICE, PROPERTIES_OPERATION_TYPE,
                                  PROPERTIES_RESULT, PROPERTIES_RESULT_REASON, PROPERTIES_TARGET_RESOURCES,
                                  RESOURCE_ID, RESULT_SIGNATURE, TENANT_ID, EVENT_TIME, LOADED_ON)
FROM (
    SELECT $1, HASH($1), $1:callerIpAddress::STRING, $1:category::STRING, $1:correlationId::STRING,
        $1:durationMs::NUMBER, $1:level::STRING, $1:operationName::STRING, $1:operationVersion::STRING,
        $1:properties::VARIANT, $1:properties.activityDateTime::TIMESTAMP_LTZ,
        $1:properties.activityDisplayName::STRING, $1:properties.additionalDetails::VARIANT,
        $1:properties.category::STRING, $1:properties.id::STRING, $1:properties.initiatedBy::VARIANT,
        $1:properties.loggedByService::STRING, $1:properties.operationType::STRING, $1:properties.result::STRING,
        $1:resultReason::STRING, $1:properties.targetResources::VARIANT, $1:resourceId::STRING,
        $1:resultSignature::STRING, $1:tenantId::STRING, $1:time::TIMESTAMP_LTZ, CURRENT_TIMESTAMP()
  FROM @data.{base_name}_STAGE
)
''',
        'signin':
        f'''
COPY INTO DATA.{base_name}_CONNECTION (
    RAW, HASH_RAW, LEVEL, CALLER_IP_ADDRESS, CATEGORY, CORRELATION_ID, DURATION_MS,
    IDENTITY, LOCATION, OPERATION_NAME, OPERATION_VERSION, PROPERTIES,
    PROPERTIES_APP_DISPLAY_NAME, PROPERTIES_APP_ID,
    PROPERTIES_APPLIED_CONDITIONAL_ACESS_POLICIES, PROPERTIES_AUTHENTICATION_METHODS_USED,
    PROPERTIES_AUTHENTICATION_PROCESSING_DETAILS, PROPERTIES_CLIENT_APP_USED,
    PROPERTIES_CONDITIONAL_ACCESS_STATUS, PROPERTIES_CREATED_DATE_TIME,
    PROPERTIES_DEVICE_DETAIL, PROPERTIES_ID, PROPERTIES_IP_ADDRESS, PROPERTIES_IS_INTERACTIVE, PROPERTIES_LOCATION,
    PROPERTIES_MFA_DETAIL, PROPERTIES_NETWORK_LOCATION, PROPERTIES_PROCESSING_TIME_IN_MILLISECONDS,
    PROPERTIES_RESOURCE_DISPLAY_NAME, PROPERTIES_RESOURCE_ID, PROPERTIES_RISK_DETAIL,
    PROPERTIES_RISK_EVENT_TYPES, PROPERTIES_RISK_LEVEL_AGGREGATED, PROPERTIES_RISK_LEVEL_DURING_SIGNIN,
    PROPERTIES_RISK_STATE, PROPERTIES_STATUS, PROPERTIES_TOKEN_ISSUER_TYPE, PROPERTIES_USER_DISPLAY_NAME,
    PROPERTIES_USER_ID, PROPERTIES_USER_PRINCIPAL_NAME, RESOURCE_ID, RESULT_DESCRIPTION, RESULT_SIGNATURE,
    RESULT_TYPE, TENANT_ID, EVENT_TIME, LOADED_ON
)
FROM (
    SELECT $1, HASH($1), $1:Level::NUMBER, $1:callerIpAddress::STRING, $1:category::STRING, $1:correlationId::STRING,
        $1:durationMs, $1:identity::STRING, $1:location::STRING, $1:operationName::STRING,
        $1:operationVersion::STRING, $1:properties::VARIANT, $1:properties.appDisplayName::STRING,
        $1:properties.appId::STRING, $1:properties.appliedConditionalAccessPolicies::VARIANT,
        $1:properties.authenticationMethodsUsed::VARIANT, $1:properties.authenticationProcessingDetails::VARIANT,
        $1:properties.clientAppUsed::STRING, $1:properties.conditionalAccessStatus::STRING,
        $1:properties.createdDateTime::TIMESTAMP_LTZ, $1:properties.deviceDetail::VARIANT, $1:properties.id::STRING,
        $1:properties.ipAddress::STRING, $1:properties.isInteractive::BOOLEAN, $1:properties.location::VARIANT,
        $1:properties.mfaDetail::VARIANT, $1:properties.networkLocationDetails::VARIANT,
        $1:properties.processingTimeInMilliseconds::NUMBER, $1:properties.resourceDisplayName::STRING,
        $1:properties.resourceId::STRING, $1:properties.riskDetail::STRING, $1:properties.riskEventTypes::VARIANT,
        $1:properties.riskLevelAggregated::STRING, $1:properties.riskLevelDuringSignIn::STRING,
        $1:properties.riskState::VARIANT, $1:properties.status::VARIANT, $1:properties.tokenIssuerType::STRING,
        $1:properties.userDisplayName::STRING, $1:properties.userId::STRING, $1:properties.userPrincipalName::STRING,
        $1:resourceId::STRING, $1:resultDescription::STRING, $1:resultSignature::STRING, $1:resultType::STRING,
        $1:tenantId::STRING, $1:time::TIMESTAMP_LTZ,
        CURRENT_TIMESTAMP()
    FROM @DATA.{base_name}_STAGE
)
'''
    }

    db.create_pipe(name=f"data.{base_name}_PIPE",
                   sql=pipe_sql[options['connection_type']],
                   replace=True)

    db.execute(
        f'ALTER PIPE data.{base_name}_PIPE SET PIPE_EXECUTION_PAUSED=true')
    db.execute(
        f'GRANT OWNERSHIP ON PIPE data.{base_name}_PIPE TO ROLE {SA_ROLE}')

    return {
        'newStage': 'finalized',
        'newMessage': 'Table, Stage, and Pipe created'
    }