Example #1
0
def determine_threshold_trigger(alert_params, events):
    """Given a threshold alert's params and a set of events,
    determine if it should fire and if so, resolve
    its summary, event snippets, etc.

    Yields one alert dict per aggregation-key value whose event
    count meets or exceeds alert_params["threshold"].
    """
    # local import so the fix does not depend on module-level imports
    import copy

    # count the events grouped by the dotted aggregation key
    counts = mostCommon(events, alert_params["aggregation_key"])
    # determine if these events trigger an alert
    # according to the parameters
    logger.debug(counts)
    for value, count in counts:
        if count >= alert_params["threshold"]:
            # work on a deep copy: the previous code aliased alert_params,
            # so the rendered summary clobbered the summary *template* for
            # any subsequent triggering value (and for later calls)
            alert = copy.deepcopy(alert_params)
            alert["triggered"] = True
            # set the summary via chevron/mustache template
            # with the alert plus metadata
            metadata = {"metadata": {"value": value, "count": count}}
            alert = merge(alert, metadata)
            # limit events to those matching the aggregation_key value
            # so the alert only gets events that match the mostCommon results
            alert["events"] = [
                dotted_event
                for dotted_event in (DotDict(event) for event in events)
                if value == dotted_event.get(alert_params["aggregation_key"])
            ]
            alert["summary"] = chevron.render(alert["summary"], alert)
            # walk the alert events for any requested event snippets
            for event in alert["events"][:alert_params["event_sample_count"]]:
                alert["summary"] += " " + chevron.render(
                    alert_params["event_snippet"], event)
            yield alert
Example #2
0
def generate_metadata(context):
    """Describe the invoking lambda's runtime details as a DotDict.

    Pulls version, ARN, (lowercased) name, and memory size off the
    lambda context object.
    """
    lambda_details = {
        "function_version": context.function_version,
        "function_arn": context.invoked_function_arn,
        "function_name": context.function_name.lower(),
        "memory_size": context.memory_limit_in_mb,
    }
    return DotDict({"lambda_details": lambda_details})
Example #3
0
def determine_deadman_trigger(alert_params, events):
    """Given a deadman alert's params and a set of events (or lack thereof)
    determine if it should fire and resolve summary/snippets, etc.

    Largely the same as a threshold alert, except this accounts
    for a lack of events (altogether missing, or below a count) as the
    trigger: yields one alert per aggregation-key value whose count is
    at or below alert_params["threshold"].
    """
    # local import so the fix does not depend on module-level imports
    import copy

    counts = mostCommon(events, alert_params["aggregation_key"])
    if not events:
        # deadman alerts are built to notice
        # when expected events are missing
        # but it means we have no events to pass on
        # make a meta event for the fact that events are missing
        meta_event = {
            "utctimestamp": utcnow().isoformat(),
            "severity": "INFO",
            "summary": "Expected event not found",
            "category": "deadman",
            "source": "deadman",
            "tags": ["deadman"],
            "plugins": [],
            "details": {},
        }
        events = [meta_event]

    if not counts:
        # make up a metadata count so the trigger loop below still runs
        counts = [(alert_params["aggregation_key"], 0)]

    for value, count in counts:
        # lack of events, or event count below the threshold is a trigger
        if count <= alert_params["threshold"]:
            # work on a deep copy: the previous code aliased alert_params,
            # so the rendered summary clobbered the summary *template* for
            # any subsequent triggering value (and for later calls)
            alert = copy.deepcopy(alert_params)
            alert["triggered"] = True
            # set the summary via chevron/mustache template
            # with the alert plus metadata
            metadata = {"metadata": {"value": value, "count": count}}
            alert = merge(alert, metadata)
            # limit events to those matching the aggregation_key value
            # so the alert only gets events that match the mostCommon results
            alert["events"] = [
                dotted_event
                for dotted_event in (DotDict(event) for event in events)
                if value == dotted_event.get(alert_params["aggregation_key"])
            ]
            alert["summary"] = chevron.render(alert["summary"], alert)
            # walk the alert events for any requested event snippets
            for event in alert["events"][:alert_params["event_sample_count"]]:
                alert["summary"] += " " + chevron.render(
                    alert_params["event_snippet"], event)
            yield alert
Example #4
0
 def test_dict_match(self):
     """dict_match matches flat keys on plain dicts, dotted keys on
     DotDicts, and rejects mismatched values."""
     complex_dict1 = {
         "some_key": "some value",
         "sub_key": {
             "some_key": "some other value"
         },
     }
     assert dict_match({"some_key": "some value"}, complex_dict1)
     complex_dot_dict = DotDict(complex_dict1)
     assert dict_match({"sub_key.some_key": "some other value"},
                       complex_dot_dict)
     # idiomatic negative assertion (was `== False`, flagged by E712)
     assert not dict_match({"sub_key.some_key": "not some other value"},
                           complex_dot_dict)
Example #5
0
    def onMessage(self, message, metadata):
        """Enrich gsuite admin-report activity messages.

        Tags the message as gsuite, normalizes the source IP field,
        resolves the reported event time and acting user, renders a
        summary, and flags success/failure and suspicious activity.

        Returns the (message, metadata) tuple; messages that are not
        gsuite activity reports pass through untouched.
        """
        # for convenience, make a dot dict version of the message
        dot_message = DotDict(message)

        # double check that this is our target message
        # NOTE(review): message.get('details', '') defaults to a *string*,
        # so for messages without details these become substring tests on
        # '' (always failing the membership) and we return early; when
        # details is a dict they are key-membership tests
        if 'admin#reports#activity' not in dot_message.get('details.kind','')\
            or 'id' not in message.get('details','') \
            or 'etag' not in message.get('details',''):
            return (message, metadata)

        message["source"] = "gsuite"
        message["tags"].append("gsuite")

        # clean up ipaddress field: rename to the standard sourceipaddress
        if 'ipaddress' in message['details']:
            message['details']['sourceipaddress'] = message['details'][
                'ipaddress']
            del message['details']['ipaddress']

        # set the actual time from the gsuite-reported details.id.time
        if dot_message.get("details.id.time", None):
            message['utctimestamp'] = toUTC(
                message['details']['id']['time']).isoformat()

        # set the user_name from the actor's email, when present
        if dot_message.get("details.actor.email", None):
            message["details"]["user"] = dot_message.get(
                "details.actor.email", "")

        # set summary via a mustache template over the enriched message
        message["summary"] = chevron.render(
            "{{details.user}} {{details.events.0.name}} from IP {{details.sourceipaddress}}",
            message)

        # set category
        message['category'] = "authentication"

        # success/failure inferred from keywords in the rendered summary;
        # 'success' wins if both substrings are present
        if 'fail' in message["summary"]:
            message["details"]["success"] = False
        if 'success' in message["summary"]:
            message["details"]["success"] = True

        # suspicious? flag when any event parameter matches
        # {"name": "is_suspicious", "boolvalue": True}
        suspicious = {"boolvalue": True, "name": "is_suspicious"}
        for e in dot_message.get("details.events", []):
            for p in e.get("parameters", []):
                if dict_match(suspicious, p):
                    message["details"]["suspicious"] = True

        return (message, metadata)
Example #6
0
 def test_lambda_metadata_generation(self):
     """generate_metadata should wrap lambda context details in a DotDict
     with the expected renamed keys."""
     context_fields = {
         "function_version": "$LATEST",
         "invoked_function_arn":
         "arn:aws:lambda:us-west-2:722455710680:function:processor-prod",
         "function_name": "processor-prod",
         "memory_limit_in_mb": "1024",
     }
     context = DotDict(context_fields)
     result = generate_metadata(context)
     # lambda_details should come back as the same DotDict type
     assert type(result.lambda_details) == type(context)
     for expected_key in ("function_version", "function_arn",
                         "function_name", "memory_size"):
         assert expected_key in result.lambda_details
Example #7
0
def lambda_handler(event, context):
    """Build and execute the data-lake Athena query for this account.

    Resolves configuration, fires the query through a cursor, and logs
    the cursor's final state; returns nothing.
    """
    # resolve runtime configuration: account id from STS, the rest from
    # environment variables with data-lake defaults
    config = DotDict({})
    config.account = boto3.client("sts").get_caller_identity().get("Account")
    config.athena_workgroup = os.environ.get("ATHENA_WORKGROUP",
                                             "defenda_data_lake")
    config.athena_database = os.environ.get("ATHENA_DATABASE",
                                            "defenda_data_lake")
    config.athena_table = os.environ.get("ATHENA_TABLE", "events")

    # query status/wait for response

    athena_query = get_athena_query(config)
    logger.debug(athena_query)
    # NOTE(review): connect() is presumably pyathena's — cursor.execute
    # appears to block until the query reaches a terminal state; confirm
    cursor = connect(work_group=config.athena_workgroup).cursor()
    cursor.execute(athena_query)
    logger.debug("Query finished: {}".format(cursor.state))
    return
Example #8
0
 def test_sub_dict(self):
     """sub_dict should extract the requested keys, resolving dotted keys
     only on DotDicts and falling back to the supplied default."""
     source_dict = {
         "some_key": "some value",
         "sub_key": {
             "some_key": "some other value"
         },
     }
     assert sub_dict(source_dict, ["some_key"], "nothing") == {
         "some_key": "some value"
     }
     # a plain dict can't resolve a dotted key, so the default is used
     assert sub_dict(source_dict, ["sub_key.some_key"], "nothing") == {
         "sub_key.some_key": "nothing"
     }
     dotted_source = DotDict(source_dict)
     assert sub_dict(dotted_source, ["sub_key.some_key"], "nothing") == {
         "sub_key.some_key": "some other value"
     }
     assert sub_dict(dotted_source, ["some_key", "sub_key.some_key"]) == {
         "some_key": "some value",
         "sub_key.some_key": "some other value",
     }
 def observation(self, observation):
     """Convert every entry of *observation* to a torch tensor and return
     the result wrapped in a DotDict.

     Keys containing '_index' become long tensors; everything else float.
     """
     for key, value in observation.items():
         wants_long = '_index' in key
         observation[key] = torch.tensor(
             value, dtype=torch.long if wants_long else torch.float)
     return DotDict(observation)
 def convert_style(self):
     """Apply set_styles to self.styles, then wrap self.styles in a
     DotDict for attribute-style access."""
     # NOTE(review): assumes set_styles processes self.styles in place
     # before the wrap — confirm against the class definition
     self.set_styles(self.styles)
     self.styles = DotDict(self.styles)
Example #11
0
                        for k, v in test_result.items()
                    ]))
                    if i == 15:
                        break

        current_scale_indx += 1


if __name__ == '__main__':

    # load experiment parameters from the YAML config file
    with open(configFile) as file:
        try:
            params = yaml.load(file, Loader=yaml.FullLoader)
        except yaml.YAMLError as exc:
            print(exc)
            # exit non-zero so callers can detect the config parse failure
            # (previously exited 0, signalling success on a bad config)
            sys.exit(1)

    params = DotDict(params)

    # ensure the output directory exists (race-free, replaces isdir check)
    os.makedirs(params.xtra.out_path, exist_ok=True)

    #SAVING PARAMETERS FOR RESTART....NEEDS WORK
    #np.save(osp.join(params.ip.out_path, 'params'), params)

    # the experiment is identified by its output directory name
    experiment_id = osp.basename(params.xtra.out_path)

    print('experiment ID: {}'.format(experiment_id))

    #pprint(params)
    testing(params)
Example #12
0
    # for all sequence alerts in the DB
    # if slots are all met, create alert and remove inflight record
    create_sequence_alerts(db)
    # for all sequence alerts in the DB
    # expire any un-met inflight alerts that have exceeded their window
    expire_sequence_alerts(db)

    sys.exit()


if __name__ == "__main__":
    # config options from alerts.yaml or -c <filename>
    parser = argparse.ArgumentParser()
    parser.add_argument("-c", "--config", help="Specify a configuration file")
    args = parser.parse_args()

    # default to <scriptname>.yml next to the script when -c is absent
    with open(args.config
              or sys.argv[0].replace(".py", ".yml")) as fd:
        config = DotDict(yaml.safe_load(fd))

    # logging config is resolved relative to this file's directory
    logging_config_file_path = Path(__file__).parent.joinpath(
        config.logging_config)
    with open(logging_config_file_path, "r") as fd:
        logging_config = yaml.safe_load(fd)
        logging.config.dictConfig(logging_config)

    logger = logging.getLogger()

    logger.debug("Logging configured")
    # fixed log-message typo ("Configurated")
    logger.debug(f"Configured as {config}")
    main(config)
from pygame.display import *

from pygame_util import get_clock, random_color
from pygameapps.trackers.classes import Position
from utils.dotdict import DotDict

# initialize the display subsystem
init()

# window / world constants
SIZE = (600, 600)
S = set_mode(SIZE)
CENTER = SIZE[0] // 2, SIZE[1] // 2
CLOCK = get_clock()
DISTANCE = 100
SPEED = 10

# NOTE(review): THECOLORS and joystick/event/QUIT/etc. are not provided
# by the visible imports — presumably supplied elsewhere; verify
colors = DotDict(THECOLORS)
obj = Position(0, 0)

# set up the first attached joystick
joystick.init()
joy = joystick.Joystick(0)
joy.init()

# main event loop: quit on window close or 'q'; echo joystick axis moves
while True:
    for e in event.get():
        if e.type == QUIT:
            exit()
        elif e.type == KEYDOWN:
            if e.key == K_q:
                exit()
        elif e.type == JOYAXISMOTION:
            print(f'JOY {e.joy} AXIS {e.axis} VALUE {e.value}')
Example #14
0
from pygame.draw import *
from pygame.display import set_mode, update, set_caption, flip
from pygame import event, colordict
from pygame import *
import pygame as pg
from utils.dotdict import DotDict

# initialize pygame and open the drawing window
init()
SIZE = 600, 600
S = set_mode(SIZE)
# named colors, attribute-accessible (e.g. COLORS.red)
COLORS = DotDict(colordict.THECOLORS)

# freehand drawing state: each entry of all_lines is one stroke's points
dragging = False
all_lines = []

while True:
    for e in event.get():
        if e.type == QUIT or e.type == KEYDOWN and e.type == K_q:
            pg.quit()
            exit()
        elif e.type == MOUSEMOTION:
            if all_lines and dragging:
                all_lines[-1].append(e.pos)
        elif e.type == MOUSEBUTTONDOWN:
            all_lines.append([])
            dragging = True
        elif e.type == MOUSEBUTTONUP:
            dragging = False

    for line in all_lines:
        if len(line) > 1:
Example #15
0
# -*- coding: utf-8 -*-

from utils.dotdict import DotDict

# HTTP method names, attribute-accessible via DotDict (METHOD.GET == "GET")
METHOD = DotDict(DELETE="DELETE", POST="POST", PUT="PUT", GET="GET")

# request timeouts — presumably seconds; confirm against the HTTP client
ASYNC_REQUEST_TIMEOUT = 45
CONNECT_TIMEOUT = 30
Example #16
0
def lambda_handler(event, context):
    """
        Called on a PUT to s3
        Make every attempt to read in json records
        from the s3 source

        For each S3 record: fetch the object (with retries), gunzip if
        needed, parse JSON (whole-document or block-by-block salvage),
        tag each record with its source, and forward to firehose.
        Also answers two test-harness events.
    """
    metadata = generate_metadata(context)
    logger.debug("Event is: {}".format(event))

    # make the event easier to traverse
    event = DotDict(event)

    # test harnesses
    if event == {"test": "true"}:
        return {"Hello": "from s3_to_firehose"}
    elif event == {"metadata": "name"}:
        return metadata
    elif "Records" in event:
        # should be triggered by s3 Put/Object created events
        s3 = boto3.client("s3")
        for record in event.Records:
            record = DotDict(record)
            s3_bucket = record.s3.bucket.name
            s3_key = record.s3.object.key
            # a new bucket will fire for folders *and* files, early exit if it's a folder
            if s3_key.endswith("/"):
                continue
            # assume the file is just good ol json
            source = "s3json"
            # if the file name is cloudtrail-ish
            if is_cloudtrail(s3_key):
                source = "cloudtrail"
            # up to 5 attempts to get the object ( in case s3 file commit on write is lagging)
            s3_response = None
            for x in range(1, 6):
                try:
                    s3_response = s3.get_object(Bucket=s3_bucket, Key=s3_key)
                    break
                except Exception as e:
                    # deliberate best-effort: log each failed attempt and retry
                    logger.error(
                        f"Attempt {x}: {e} while attempting to get_object {s3_bucket} {s3_key}"
                    )
                    sleep(1)
                    continue
            if not s3_response:
                logger.error(
                    f"5 attempts to retrieve {s3_bucket} {s3_key} failed, moving on"
                )
                continue
            s3_data = ""
            # gunzip if zipped (by file extension)
            if s3_key[-3:] == ".gz":
                s3_raw_data = s3_response["Body"].read()
                with gzip.GzipFile(
                        fileobj=BytesIO(s3_raw_data)) as gzip_stream:
                    s3_data += "".join(
                        TextIOWrapper(gzip_stream, encoding="utf-8"))
            else:
                s3_data = s3_response["Body"].read().decode("utf-8")

            # create our list of records to append our findings to
            s3_records = []
            s3_dict = None
            try:
                # load the json we have from either a .json file or a gunziped file
                s3_dict = json.loads(s3_data)
            except JSONDecodeError:
                # file isn't well formed json, see if we can interpret json from it
                # block by block (emit_json_block is a project helper)
                for block in emit_json_block(StringIO(s3_data)):
                    if block:
                        record = json.loads(block)
                        record["source"] = source
                        s3_records.append(record)
            # if this is a dict of a single 'Records' list, unroll the list into
            # its sub records
            if s3_dict and "Records" in s3_dict:
                if type(s3_dict["Records"]) is list:
                    for record in s3_dict["Records"]:
                        record["source"] = source
                        s3_records.append(record)
            # maybe it's just a list already?
            elif s3_dict and type(s3_dict) is list:
                # a list of dicts
                for record in s3_dict:
                    record["source"] = source
                    s3_records.append(record)
            elif s3_dict and type(s3_dict) is dict:
                # a single dict, but lets add it to a list
                # for consistent handling
                s3_dict["source"] = source
                s3_records.append(s3_dict)

            logger.debug("pre-plugins s3_records is: {}".format(s3_records))
            # send off to firehose for further processing
            if s3_records:
                send_to_firehose(s3_records)

        return