def main(rule_name=None):
    """Run alert queries and record run metadata.

    If ``rule_name`` is given, only that rule is executed; otherwise every
    rule matching ``ALERT_QUERY_POSTFIX`` is run. Row counts are aggregated
    from the module-level ``QUERY_HISTORY`` accumulated by ``create_alerts``.
    """
    RUN_METADATA = {
        'RUN_ID': RUN_ID,
        'RUN_TYPE': 'ALERT QUERY',
        'START_TIME': datetime.datetime.utcnow(),
    }
    # Disable result caching so every rule query reflects current data.
    ctx = db.connect_and_execute("ALTER SESSION SET USE_CACHED_RESULT=FALSE;")

    if rule_name:
        create_alerts(ctx, rule_name)
    else:
        # Fix: use a distinct loop variable instead of shadowing the
        # `rule_name` parameter, which made its value ambiguous after the loop.
        for loaded_rule_name in db.load_rules(ctx, ALERT_QUERY_POSTFIX):
            create_alerts(ctx, loaded_rule_name)

    # Aggregate per-rule insert/update counts recorded by create_alerts.
    RUN_METADATA['ROW_COUNT'] = {
        'INSERTED': sum(q['ROW_COUNT']['INSERTED'] for q in QUERY_HISTORY),
        'UPDATED': sum(q['ROW_COUNT']['UPDATED'] for q in QUERY_HISTORY),
    }
    log.metadata_record(ctx, RUN_METADATA, table=RUN_METADATA_TABLE)

    # Metric emission is best-effort: a CloudWatch failure must not fail the run.
    try:
        if CLOUDWATCH_METRICS:
            log.metric('Run', 'SnowAlert', [{
                'Name': 'Component',
                'Value': 'Alert Query Runner'
            }], 1)
    except Exception as e:
        log.error("Cloudwatch metric logging failed: ", e)
def main(rule_name=None):
    """Run one alert query (if ``rule_name`` given) or all of them in a
    worker pool, then record aggregate run metadata."""
    RUN_METADATA = {
        'RUN_ID': RUN_ID,
        'RUN_TYPE': 'ALERT QUERY',
        'START_TIME': datetime.datetime.utcnow(),
    }

    # A single named rule runs inline; the full rule set fans out to a pool.
    if rule_name is None:
        all_rules = list(db.load_rules(ALERT_QUERY_POSTFIX))
        rule_results = Pool(POOLSIZE).map(create_alerts, all_rules)
    else:
        rule_results = [create_alerts(rule_name)]

    inserted = sum(result['ROW_COUNT']['INSERTED'] for result in rule_results)
    updated = sum(result['ROW_COUNT']['UPDATED'] for result in rule_results)
    RUN_METADATA['ROW_COUNT'] = {'INSERTED': inserted, 'UPDATED': updated}
    db.record_metadata(RUN_METADATA, table=RUN_METADATA_TABLE)

    # Best-effort metric emission: never let CloudWatch failures fail the run.
    try:
        if CLOUDWATCH_METRICS:
            log.metric(
                'Run',
                'SnowAlert',
                [{
                    'Name': 'Component',
                    'Value': 'Alert Query Runner'
                }],
                1,
            )
    except Exception as e:
        log.error("Cloudwatch metric logging failed: ", e)
def main():
    """Run all violation suppression rules, un-suppress remaining rows,
    and record aggregate run metadata."""
    RUN_METADATA = {
        'RUN_TYPE': 'VIOLATION SUPPRESSION',
        'START_TIME': datetime.datetime.utcnow(),
        'RUN_ID': RUN_ID,
    }
    for squelch_name in db.load_rules(VIOLATION_SQUELCH_POSTFIX):
        run_suppression(squelch_name)

    # Rows untouched by any suppression rule are explicitly marked passed.
    num_violations_passed = next(
        db.fetch(SET_SUPPRESSED_FALSE))['number of rows updated']

    # Per-rule suppressed counts come from the module-level RULE_METADATA_RECORDS
    # accumulated by run_suppression.
    RUN_METADATA['ROW_COUNT'] = {
        'SUPPRESSED': sum(rmr['ROW_COUNT']['SUPPRESSED'] for rmr in RULE_METADATA_RECORDS),
        'PASSED': num_violations_passed,
    }
    db.record_metadata(RUN_METADATA, table=RUN_METADATA_TABLE)

    # Fix: guard metric emission like the other runners do, so a CloudWatch
    # failure cannot fail a run whose real work has already completed.
    try:
        if CLOUDWATCH_METRICS:
            log.metric('Run', 'SnowAlert', [{
                'Name': 'Component',
                'Value': 'Violation Suppression Runner'
            }], 1)
    except Exception as e:
        log.error("Cloudwatch metric logging failed: ", e)
def main(squelch_name=None):
    """Run one suppression rule (if ``squelch_name`` given) or all of them,
    flag the remaining alerts as not suppressed, and record run metadata."""
    RUN_METADATA = {
        'RUN_TYPE': 'ALERT SUPPRESSION',
        'START_TIME': datetime.datetime.utcnow(),
        'RUN_ID': RUN_ID,
    }

    if squelch_name is None:
        rules_to_run = db.load_rules(ALERT_SQUELCH_POSTFIX)
    else:
        rules_to_run = [squelch_name]
    for rule in rules_to_run:
        run_suppressions(rule)

    # Alerts untouched by any suppression rule get suppressed=FALSE.
    num_rows_updated = next(db.fetch(SET_SUPPRESSED_FALSE))['number of rows updated']
    log.info(
        f'All suppressions done, {num_rows_updated} remaining alerts marked suppressed=FALSE.'
    )

    suppressed_total = sum(m['ROW_COUNT']['SUPPRESSED'] for m in METADATA_HISTORY)
    RUN_METADATA['ROW_COUNT'] = {
        'PASSED': num_rows_updated,
        'SUPPRESSED': suppressed_total,
    }
    db.record_metadata(RUN_METADATA, table=RUN_METADATA_TABLE)

    # Metric emission is best-effort; CloudWatch errors are logged, not raised.
    try:
        if CLOUDWATCH_METRICS:
            log.metric(
                'Run',
                'SnowAlert',
                [{'Name': 'Component', 'Value': 'Alert Suppression Runner'}],
                1,
            )
    except Exception as e:
        log.error("Cloudwatch metric logging failed: ", e)
def main():
    """Run every alert suppression rule, flag the remaining alerts, and
    record run metadata."""
    RUN_METADATA = {
        'RUN_TYPE': 'ALERT SUPPRESSION',
        'START_TIME': datetime.datetime.utcnow(),
        'RUN_ID': RUN_ID,
    }

    ctx = db.connect()
    for squelch_name in db.load_rules(ctx, ALERT_SQUELCH_POSTFIX):
        run_suppressions(squelch_name)
    flag_remaining_alerts(ctx)

    log.metadata_record(ctx, RUN_METADATA, table=RUN_METADATA_TABLE)

    if CLOUDWATCH_METRICS:
        log.metric(
            'Run',
            'SnowAlert',
            [{'Name': 'Component', 'Value': 'Alert Suppression Runner'}],
            1,
        )
def main():
    """Run all violation queries and record run metadata."""
    # Force warehouse resume so query runner doesn't have a bunch of queries
    # waiting for warehouse resume
    RUN_METADATA = {
        'RUN_TYPE': 'VIOLATION QUERY',
        'START_TIME': datetime.datetime.utcnow(),
        'RUN_ID': RUN_ID,
    }
    # Disable result caching so every rule query reflects current data.
    ctx = db.connect_and_execute("ALTER SESSION SET use_cached_result=FALSE;")
    for query_name in db.load_rules(ctx, VIOLATION_QUERY_POSTFIX):
        run_query(query_name)

    log.metadata_record(ctx, RUN_METADATA, table=RUN_METADATA_TABLE)

    # Fix: guard metric emission like the other runners do, so a CloudWatch
    # failure cannot fail a run whose real work has already completed.
    try:
        if CLOUDWATCH_METRICS:
            log.metric('Run', 'SnowAlert', [{
                'Name': 'Component',
                'Value': 'Violation Query Runner'
            }], 1)
    except Exception as e:
        log.error("Cloudwatch metric logging failed: ", e)
def main(rule_name=None):
    """Run one alert query (if ``rule_name`` given) or all of them, and
    record run metadata."""
    # Force warehouse resume so query runner doesn't have a bunch of queries
    # waiting for warehouse resume
    RUN_METADATA = {
        'RUN_TYPE': 'ALERT QUERY',
        'START_TIME': datetime.datetime.utcnow(),
        'RUN_ID': RUN_ID,
    }

    # Disable result caching so every rule query reflects current data.
    ctx = db.connect_and_execute("ALTER SESSION SET USE_CACHED_RESULT=FALSE;")

    names_to_run = (
        [rule_name] if rule_name else db.load_rules(ctx, ALERT_QUERY_POSTFIX)
    )
    for query_name in names_to_run:
        query_for_alerts(ctx, query_name)

    log.metadata_record(ctx, RUN_METADATA, table=RUN_METADATA_TABLE)

    if CLOUDWATCH_METRICS:
        log.metric('Run', 'SnowAlert', [{
            'Name': 'Component',
            'Value': 'Alert Query Runner'
        }], 1)