Example 1
 def insert_one(self, bucket: str, event: Event) -> Event:
     # .copy() is needed, since pymongo would otherwise insert an _id field into the original event
     dict_event = event.copy()
     dict_event = self._transform_event(dict_event)
     returned = self.db[bucket]["events"].insert_one(dict_event)
     event.id = returned.inserted_id
     return event
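The gotcha the comment guards against is that pymongo mutates the document it is given by adding an `_id` key; inserting a copy keeps the original event clean. A minimal demonstration (requires a running MongoDB; database and collection names are placeholders):

from pymongo import MongoClient

coll = MongoClient()["demo_db"]["demo_coll"]
doc = {"label": "example"}
coll.insert_one(doc)
assert "_id" in doc  # pymongo added _id to the original dict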
Example 2
def merge_events_by_keys(events: List[Event], keys: List[str]) -> List[Event]:
    # The result is a list of merged events; timestamps are no longer meaningful,
    # since all events sharing a composite key get collapsed into one
    if len(keys) < 1:
        return events
    merged_events = {}  # type: Dict[Tuple, Event]
    for event in events:
        composite_key = ()  # type: Tuple
        for key in keys:
            if key in event.data:
                composite_key = composite_key + (event.data[key],)
        if composite_key not in merged_events:
            merged_events[composite_key] = Event(
                timestamp=event.timestamp,
                duration=event.duration,
                data={}
            )
            for key in keys:
                if key in event.data:
                    merged_events[composite_key].data[key] = event.data[key]
        else:
            merged_events[composite_key].duration += event.duration
    result = []
    for key in merged_events:
        result.append(Event(**merged_events[key]))
    return result
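A quick usage sketch for the function above, assuming aw-core's Event model (field values are made up):

from datetime import datetime, timedelta, timezone
from aw_core.models import Event

now = datetime.now(timezone.utc)
events = [
    Event(timestamp=now, duration=timedelta(seconds=5), data={"app": "editor"}),
    Event(timestamp=now, duration=timedelta(seconds=3), data={"app": "editor"}),
    Event(timestamp=now, duration=timedelta(seconds=2), data={"app": "browser"}),
]
merged = merge_events_by_keys(events, ["app"])
# Two events remain: "editor" with 8s total duration, "browser" with 2s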
Example 3
 def insert_one(self, bucket_id: str, event: Event) -> Event:
     c = self.conn.cursor()
     # Timestamps and durations are stored as microseconds since the Unix epoch
     starttime = event.timestamp.timestamp() * 1000000
     endtime = starttime + (event.duration.total_seconds() * 1000000)
     datastr = json.dumps(event.data)
     c.execute("INSERT INTO events(bucketrow, starttime, endtime, datastr) " +
               "VALUES ((SELECT rowid FROM buckets WHERE id = ?), ?, ?, ?)",
               [bucket_id, starttime, endtime, datastr])
     event.id = c.lastrowid
     self.conditional_commit(1)
     return event
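The microsecond arithmetic above can be inverted when reading rows back; a sketch (variable names assumed, not taken from the storage class):

from datetime import datetime, timedelta, timezone

timestamp = datetime.fromtimestamp(starttime / 1000000, tz=timezone.utc)
duration = timedelta(microseconds=endtime - starttime)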
Example 4
def test_period_union():
    now = datetime.now(timezone.utc)

    # Events overlapping
    events1 = [Event(timestamp=now, duration=timedelta(seconds=10))]
    events2 = [
        Event(timestamp=now + timedelta(seconds=9), duration=timedelta(seconds=10))
    ]
    unioned_events = period_union(events1, events2)
    assert len(unioned_events) == 1

    # Events adjacent but not overlapping
    events1 = [Event(timestamp=now, duration=timedelta(seconds=10))]
    events2 = [
        Event(timestamp=now + timedelta(seconds=10), duration=timedelta(seconds=10))
    ]
    unioned_events = period_union(events1, events2)
    assert len(unioned_events) == 1

    # Events not overlapping or adjacent
    events1 = [Event(timestamp=now, duration=timedelta(seconds=10))]
    events2 = [
        Event(timestamp=now + timedelta(seconds=11), duration=timedelta(seconds=10))
    ]
    unioned_events = period_union(events1, events2)
    assert len(unioned_events) == 2
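The real period_union lives in aw-transform; as a rough illustration of the semantics the test expects (overlapping or adjacent periods merge, gaps split), a minimal sketch could look like:

def period_union_sketch(events1, events2):
    # Merge two event lists into non-overlapping periods (illustration only)
    events = sorted(events1 + events2, key=lambda e: e.timestamp)
    unioned = []
    for e in events:
        if unioned and e.timestamp <= unioned[-1].timestamp + unioned[-1].duration:
            # Overlapping or adjacent: extend the previous period
            end = max(unioned[-1].timestamp + unioned[-1].duration, e.timestamp + e.duration)
            unioned[-1].duration = end - unioned[-1].timestamp
        else:
            unioned.append(Event(timestamp=e.timestamp, duration=e.duration))
    return unioned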
Example 5
    def heartbeat(
        self,
        bucket_id: str,
        event: Event,
        pulsetime: float,
        queued: bool = False,
        commit_interval: Optional[float] = None,
    ) -> None:
        """
        Args:
            bucket_id: The bucket_id of the bucket to send the heartbeat to
            event: The actual heartbeat event
            pulsetime: The maximum amount of time in seconds since the last heartbeat for the two to be merged by aw-server
            queued: Use the aw-client queue feature to queue events if the client loses its connection to the server
            commit_interval: Override the default pre-merge commit interval

        NOTE: This endpoint can use the failed-requests retry queue.
              That makes the request itself non-blocking, so in that
              case the function always returns None.
        """

        from aw_transform.heartbeats import heartbeat_merge

        endpoint = f"buckets/{bucket_id}/heartbeat?pulsetime={pulsetime}"
        _commit_interval = commit_interval or self.commit_interval

        if queued:
            # Pre-merge heartbeats
            if bucket_id not in self.last_heartbeat:
                self.last_heartbeat[bucket_id] = event
                return None

            last_heartbeat = self.last_heartbeat[bucket_id]

            merge = heartbeat_merge(last_heartbeat, event, pulsetime)

            if merge:
                # If last_heartbeat becomes longer than commit_interval
                # then commit, else cache merged.
                diff = (last_heartbeat.duration).total_seconds()
                if diff >= _commit_interval:
                    data = merge.to_json_dict()
                    self.request_queue.add_request(endpoint, data)
                    self.last_heartbeat[bucket_id] = event
                else:
                    self.last_heartbeat[bucket_id] = merge
            else:
                data = last_heartbeat.to_json_dict()
                self.request_queue.add_request(endpoint, data)
                self.last_heartbeat[bucket_id] = event
        else:
            self._post(endpoint, event.to_json_dict())
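A hypothetical call, assuming `client` is a connected ActivityWatchClient and the bucket exists (names are placeholders):

client.heartbeat("aw-watcher-afk_myhost", event, pulsetime=10.0, queued=True)

With queued=True, identical consecutive heartbeats are merged client-side and only flushed to the request queue once the merged event grows past commit_interval.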
Example 6
def test_replace_last_complex(bucket_cm):
    """
    Tests the replace last event in bucket functionality (complex)
    """
    with bucket_cm as bucket:
        # Create first event
        event1 = Event(data={"label": "test1"},
                       timestamp=now,
                       duration=timedelta(1))
        bucket.insert(event1)
        eventcount = len(bucket.get(-1))
        # Create a second event to replace the first one
        event2 = Event(
            data={"label": "test2"},
            duration=timedelta(0),
            timestamp=now + timedelta(seconds=1),
        )
        bucket.replace_last(event2)
        # Assert length and content
        result = bucket.get(-1)
        assert eventcount == len(result)
        assert event2 == result[0]
Example 7
    def get_events(self, bucket_id: str, limit: int = -1, start: Optional[datetime] = None, end: Optional[datetime] = None) -> List[Event]:
        endpoint = "buckets/{}/events".format(bucket_id)

        params = dict()  # type: Dict[str, str]
        if limit is not None:
            params["limit"] = str(limit)
        if start is not None:
            params["start"] = start.isoformat()
        if end is not None:
            params["end"] = end.isoformat()

        events = self._get(endpoint, params=params).json()
        return [Event(**event) for event in events]
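A hypothetical call with placeholder bucket name and time range:

from datetime import datetime, timedelta, timezone

events = client.get_events("aw-watcher-window_myhost", limit=100,
                           start=datetime.now(timezone.utc) - timedelta(hours=1),
                           end=datetime.now(timezone.utc))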
Example 9
def test_limit(bucket_cm):
    """
    Tests setting the result limit when fetching events
    """
    with bucket_cm as bucket:
        for i in range(5):
            bucket.insert(Event(timestamp=now))

        assert 0 == len(bucket.get(limit=0))
        assert 1 == len(bucket.get(limit=1))
        assert 3 == len(bucket.get(limit=3))
        assert 5 == len(bucket.get(limit=5))
        assert 5 == len(bucket.get(limit=-1))
Example 10
def test_query2_query_categorize(datastore):
    bid = "test_bucket"
    qname = "test"
    starttime = iso8601.parse_date("1970")
    endtime = starttime + timedelta(hours=1)

    example_query = """
    events = query_bucket("{bid}");
    events = sort_by_timestamp(events);
    events = categorize(events, [[["test"], {{"regex": "test"}}], [["test", "subtest"], {{"regex": "test2"}}]]);
    events_by_cat = merge_events_by_keys(events, ["$category"]);
    RETURN = {{"events": events, "events_by_cat": events_by_cat}};
    """.format(bid=bid, bid_escaped=bid.replace("'", "\\'"))
    try:
        bucket = datastore.create_bucket(bucket_id=bid, type="test", client="test", hostname="test", name="asd")
        events = [
            Event(data={"label": "test1"},
                  timestamp=starttime,
                  duration=timedelta(seconds=1)),
            Event(data={"label": "test2"},
                  timestamp=starttime + timedelta(seconds=1),
                  duration=timedelta(seconds=1)),
            Event(data={"label": "test2"},
                  timestamp=starttime + timedelta(seconds=2),
                  duration=timedelta(seconds=1)),
        ]
        bucket.insert(events)
        result = query(qname, example_query, starttime, endtime, datastore)
        print(result)
        assert len(result["events"]) == 3
        assert result["events"][0].data["label"] == "test1"
        assert result["events"][0].data["$category"] == ["test"]
        assert result["events"][1].data["$category"] == ["test", "subtest"]
        assert result["events_by_cat"][0].data["$category"] == ["test"]
        assert result["events_by_cat"][1].data["$category"] == ["test", "subtest"]
        assert result["events_by_cat"][1].duration == timedelta(seconds=2)
    finally:
        datastore.delete_bucket(bid)
Example 11
def createEvents(out, timestamp, wifiname):
    spacesep = out.split(" ")
    maxping = 0
    meanping = 0
    minping = 0
    events = []
    total = int(spacesep[0])
    received = int(spacesep[3])
    failed = total - received

    # ping prints an "rtt min/avg/max/mdev = a/b/c/d ms" summary line when replies were received
    if spacesep[-1] == "ms":
        extract = out.split("/")
        maxping = float(extract[-2])
        meanping = float(extract[-3])
        minping = float(extract[-4].split("= ")[-1])
        events.append(
            Event(timestamp=timestamp,
                  label=["received", "ssid:" + wifiname],
                  count=received,
                  duration=[{
                      "value": meanping,
                      "unit": "ms",
                      "label": meanping
                  }, {
                      "value": maxping,
                      "unit": "ms",
                      "label": maxping
                  }, {
                      "value": minping,
                      "unit": "ms",
                      "label": minping
                  }]))
    events.append(
        Event(timestamp=timestamp,
              label=["failed", "ssid:" + wifiname],
              count=failed))

    return events
Example 12
def test_merge_events_by_keys_2():
    now = datetime.now(timezone.utc)
    events = []
    e1_data = {"k1": "a", "k2": "a"}
    e2_data = {"k1": "a", "k2": "c"}
    e3_data = {"k1": "b", "k2": "a"}
    e1 = Event(data=e1_data, timestamp=now, duration=timedelta(seconds=1))
    e2 = Event(data=e2_data, timestamp=now, duration=timedelta(seconds=1))
    e3 = Event(data=e3_data, timestamp=now, duration=timedelta(seconds=1))
    events = events + [e1] * 10
    events = events + [e2] * 9
    events = events + [e3] * 8
    result = merge_events_by_keys(events, ["k1", "k2"])
    result = sort_by_duration(result)
    print(result)
    print(len(result))
    assert len(result) == 3
    assert result[0].data == e1_data
    assert result[0].duration == timedelta(seconds=10)
    assert result[1].data == e2_data
    assert result[1].duration == timedelta(seconds=9)
    assert result[2].data == e3_data
    assert result[2].duration == timedelta(seconds=8)
Example 13
def test_query2_query_functions(datastore):
    """
    Just test calling all functions just to see something isn't completely broken
    In many cases the functions doesn't change the result at all, so it's not a test
    for testing the validity of the data the functions transform
    """
    bid = "test_'bucket"
    qname = "test"
    starttime = iso8601.parse_date("1970")
    endtime = starttime + timedelta(hours=1)

    example_query = """
    bid = "{bid}";
    events = query_bucket("{bid}");
    events2 = query_bucket('{bid_escaped}');
    events2 = filter_keyvals(events2, "label", ["test1"]);
    events2 = exclude_keyvals(events2, "label", ["test2"]);
    events = filter_period_intersect(events, events2);
    events = filter_keyvals_regex(events, "label", ".*");
    events = limit_events(events, 1);
    events = merge_events_by_keys(events, ["label"]);
    events = chunk_events_by_key(events, "label");
    events = split_url_events(events);
    events = sort_by_timestamp(events);
    events = sort_by_duration(events);
    events = categorize(events, [[["test", "subtest"], {{"regex": "test1"}}]]);
    duration = sum_durations(events);
    eventcount = query_bucket_eventcount(bid);
    asd = nop();
    RETURN = {{"events": events, "eventcount": eventcount}};
    """.format(bid=bid, bid_escaped=bid.replace("'", "\\'"))
    try:
        bucket = datastore.create_bucket(bucket_id=bid,
                                         type="test",
                                         client="test",
                                         hostname="test",
                                         name="asd")
        bucket.insert(
            Event(
                data={"label": "test1"},
                timestamp=starttime,
                duration=timedelta(seconds=1),
            ))
        result = query(qname, example_query, starttime, endtime, datastore)
        assert result["eventcount"] == 1
        assert len(result["events"]) == 1
        assert result["events"][0].data["label"] == "test1"
        assert result["events"][0].data["$category"] == ["test", "subtest"]
    finally:
        datastore.delete_bucket(bid)
Example 14
 def get_event(
     self,
     bucket_id: str,
     event_id: int,
 ) -> Optional[Event]:
     endpoint = f"buckets/{bucket_id}/events/{event_id}"
     try:
         event = self._get(endpoint).json()
         return Event(**event)
     except req.exceptions.HTTPError as e:
         if e.response.status_code == 404:
             return None
         else:
             raise
Example 15
def heartbeat_reduce(events: List[Event], pulsetime: float) -> List[Event]:
    """Merges consecutive events together according to the rules of `heartbeat_merge`."""
    reduced = []
    if len(events) > 0:
        reduced.append(events.pop(0))
    for heartbeat in events:
        merged = heartbeat_merge(reduced[-1], heartbeat, pulsetime)
        if merged is not None:
            # Heartbeat was merged
            reduced[-1] = merged
        else:
            # Heartbeat was not merged
            reduced.append(heartbeat)
    return reduced
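A quick sketch of the collapsing behaviour, assuming aw-core's Event and a timezone-aware `now`: three identical zero-duration heartbeats one second apart reduce to a single two-second event.

hbs = [Event(timestamp=now + timedelta(seconds=i), duration=timedelta(0), data={"status": "ok"})
       for i in range(3)]
merged = heartbeat_reduce(hbs, pulsetime=5.0)
assert len(merged) == 1
assert merged[0].duration == timedelta(seconds=2)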
Example 16
    def post(self, bucket_id):
        """
        Create events for a bucket
        """
        logger.debug("Received post request for event in bucket '{}' and data: {}".format(bucket_id, request.get_json()))

        if bucket_id not in app.db.buckets():
            msg = "There's no bucket named {}".format(bucket_id)
            raise BadRequest("NoSuchBucket", msg)

        data = request.get_json()
        events = Event.from_json_obj(data)
        app.db[bucket_id].insert(events)
        return {}, 200
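For reference, a client-side request matching this handler might look like the following; the URL reflects aw-server's usual localhost defaults, but treat host, port, and bucket name as assumptions:

import requests

requests.post(
    "http://localhost:5600/api/0/buckets/test-bucket/events",
    json=[{"timestamp": "2020-01-01T12:00:00+00:00",
           "duration": 1.0,
           "data": {"label": "example"}}],
)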
Example 17
 def ping(self,
          afk: bool,
          timestamp: datetime,
          duration: float = 0
          ) -> None:
     data = {"status": "afk" if afk else "not-afk"}
     e = Event(timestamp=timestamp, duration=duration, data=data)
     pulsetime = self.timeout + self.poll_time
     self.client.heartbeat(
         self.bucketname,
         e,
         pulsetime=pulsetime,
         queued=True
     )
Example 18
def test_replace_last(bucket_cm):
    """
    Tests the replace last event in bucket functionality (simple)
    """
    with bucket_cm as bucket:
        # Create two events
        bucket.insert(Event(data={"label": "test1"}, timestamp=now))
        bucket.insert(
            Event(data={"label": "test2"}, timestamp=now + timedelta(seconds=1))
        )
        # Replace the second event with an updated one
        bucket.replace_last(
            Event(
                data={"label": "test2-replaced"}, timestamp=now + timedelta(seconds=1)
            )
        )
        bucket.insert(
            Event(data={"label": "test3"}, timestamp=now + timedelta(seconds=2))
        )
        # Assert data
        result = bucket.get(-1)
        assert 3 == len(result)
        assert result[1]["data"]["label"] == "test2-replaced"
Example 19
def test_insert_many(bucket_cm):
    """
    Tests that you can insert many events at the same time to a bucket
    """
    num_events = 5000
    with bucket_cm as bucket:
        events = num_events * [
            Event(timestamp=now, duration=timedelta(seconds=1), data={"key": "val"})
        ]
        bucket.insert(events)
        fetched_events = bucket.get(limit=-1)
        assert num_events == len(fetched_events)
        for e, fe in zip(events, fetched_events):
            assert e == fe
Example 20
def test_insert_one(bucket_cm):
    """
    Tests inserting one event into a bucket
    """
    with bucket_cm as bucket:
        count = len(bucket.get())
        event = Event(timestamp=now, duration=timedelta(seconds=1), data={"key": "val"})
        bucket.insert(event)
        fetched_events = bucket.get()
        assert count + 1 == len(fetched_events)
        assert isinstance(fetched_events[0], Event)
        assert event == fetched_events[0]
        logging.info(event)
        logging.info(fetched_events[0].to_json_str())
Example 21
def heartbeat_merge(last_event: Event, heartbeat: Event, pulsetime: float) -> Optional[Event]:
    """
    Merges two events if they have identical data and are
    separated by a gap smaller than :code:`pulsetime` seconds.
    """
    if last_event.data == heartbeat.data:
        gap = heartbeat.timestamp - (last_event.timestamp + last_event.duration)

        if gap <= timedelta(seconds=pulsetime):
            # Heartbeat was within pulsetime window, set duration of last event appropriately
            last_event.duration = (heartbeat.timestamp - last_event.timestamp) + heartbeat.duration
            return last_event

    return None
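A worked example of the rule, assuming aw-core's Event and a timezone-aware `now`: the 5-second gap between the events is within pulsetime, so they merge into one 10-second event.

a = Event(timestamp=now, duration=timedelta(seconds=3), data={"x": 1})
b = Event(timestamp=now + timedelta(seconds=8), duration=timedelta(seconds=2), data={"x": 1})
merged = heartbeat_merge(a, b, pulsetime=5.0)
assert merged is not None
assert merged.duration == timedelta(seconds=10)  # (8s start-to-start) + 2s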
Example 22
 def import_bucket(self, bucket_data: Any):
     bucket_id = bucket_data["id"]
     logger.info("Importing bucket {}".format(bucket_id))
     # TODO: Check that bucket doesn't already exist
     self.db.create_bucket(
         bucket_id,
         type=bucket_data["type"],
         client=bucket_data["client"],
         hostname=bucket_data["hostname"],
         created=(bucket_data["created"]
                  if isinstance(bucket_data["created"], datetime)
                  else iso8601.parse_date(bucket_data["created"])),
     )
     self.create_events(bucket_id, [Event(**e) if isinstance(e, dict) else e for e in bucket_data["events"]])
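For illustration, the dict shape import_bucket expects, inferred from the field accesses above (values are placeholders):

bucket_data = {
    "id": "aw-watcher-window_myhost",
    "type": "currentwindow",
    "client": "aw-watcher-window",
    "hostname": "myhost",
    "created": "2020-01-01T00:00:00+00:00",  # datetime or ISO 8601 string
    "events": [
        {"timestamp": "2020-01-01T12:00:00+00:00", "duration": 1.5, "data": {"app": "editor"}},
    ],
}
importer.import_bucket(bucket_data)  # `importer` stands in for the instance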
Example 23
def test_merge_events_by_keys_1():
    now = datetime.now(timezone.utc)
    events = []
    e1_data = {"label": "a"}
    e2_data = {"label": "b"}
    e1 = Event(data=e1_data, timestamp=now, duration=timedelta(seconds=1))
    e2 = Event(data=e2_data, timestamp=now, duration=timedelta(seconds=1))
    events = events + [e1] * 10
    events = events + [e2] * 5

    # Check that an empty key list has no effect
    assert merge_events_by_keys(events, []) == events

    # Merging on a key missing from all events collapses everything
    # into a single event under the empty composite key
    assert len(merge_events_by_keys(events, ["unknown"])) == 1

    result = merge_events_by_keys(events, ["label"])
    result = sort_by_duration(result)
    print(result)
    print(len(result))
    assert len(result) == 2
    assert result[0].duration == timedelta(seconds=10)
    assert result[1].duration == timedelta(seconds=5)
Example 24
def test_union_no_overlap():
    from pprint import pprint

    now = datetime(2018, 1, 1, 0, 0)
    td1h = timedelta(hours=1)
    events1 = [
        Event(timestamp=now + 2 * i * td1h, duration=td1h, data={"test": 1})
        for i in range(3)
    ]
    events2 = [
        Event(timestamp=now + (2 * i + 0.5) * td1h,
              duration=td1h,
              data={"test": 2}) for i in range(3)
    ]

    events_union = union_no_overlap(events1, events2)
    # pprint(events_union)
    dur = sum((e.duration for e in events_union), timedelta(0))
    assert dur == timedelta(hours=4, minutes=30)
    assert events_union == sorted(events_union, key=lambda e: e.timestamp)

    events_union = union_no_overlap(events2, events1)
    # pprint(events_union)
    dur = sum((e.duration for e in events_union), timedelta(0))
    assert dur == timedelta(hours=4, minutes=30)
    assert events_union == sorted(events_union, key=lambda e: e.timestamp)

    events1 = [
        Event(timestamp=now + (2 * i) * td1h, duration=td1h, data={"test": 1})
        for i in range(3)
    ]
    events2 = [Event(timestamp=now, duration=5 * td1h, data={"test": 2})]
    events_union = union_no_overlap(events1, events2)
    pprint(events_union)
    dur = sum((e.duration for e in events_union), timedelta(0))
    assert dur == timedelta(hours=5, minutes=0)
    assert events_union == sorted(events_union, key=lambda e: e.timestamp)
Example 25
def get_events(
    hostname: str,
    since: datetime,
    end: datetime,
    include_smartertime="auto",
    include_toggl=None,
    testing: bool = False,
) -> List[Event]:
    awc = ActivityWatchClient("test", testing=testing)

    query = build_query(hostname)
    logger.debug(f"Query:\n{query}")

    result = awc.query(query, timeperiods=[(since, end)])
    events = [Event(**e) for e in result[0]]

    if include_smartertime:
        events = union_no_overlap(
            events, _get_events_smartertime(since,
                                            filepath=include_smartertime))
        events = sorted(events, key=lambda e: e.timestamp)

    if include_toggl:
        events = union_no_overlap(
            events, _get_events_toggl(since, filepath=include_toggl))
        events = sorted(events, key=lambda e: e.timestamp)

    # Filter by time
    events = [
        e for e in events
        if since.astimezone(timezone.utc) < e.timestamp and e.timestamp +
        e.duration < end.astimezone(timezone.utc)
    ]
    assert all(since.astimezone(timezone.utc) < e.timestamp for e in events)
    assert all(e.timestamp + e.duration < end.astimezone(timezone.utc)
               for e in events)

    # Filter out events without data (which sometimes happens for whatever reason)
    events = [e for e in events if e.data]

    for event in events:
        if "app" not in event.data:
            if "url" in event.data:
                event.data["app"] = urlparse(event.data["url"]).netloc
            else:
                print("Unexpected event: ", event)

    events = [e for e in events if e.data]
    return events
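A hypothetical invocation; hostname and time range are placeholders:

from datetime import datetime, timezone

events = get_events("myhost",
                    since=datetime(2020, 1, 1, tzinfo=timezone.utc),
                    end=datetime(2020, 1, 2, tzinfo=timezone.utc),
                    testing=True)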
Example 27
def test_get_last(bucket_cm):
    """
    Tests setting the result limit when fetching events
    """
    now = datetime.now()
    second = timedelta(seconds=1)
    with bucket_cm as bucket:
        events = [
            Event(data={"label": "test"}, timestamp=ts, duration=timedelta(0))
            for ts in [now + second, now + second * 2, now + second * 3]
        ]

        for event in events:
            bucket.insert(event)

        assert bucket.get(limit=1)[0] == events[-1]
        for event in bucket.get(limit=5):
            print(event.timestamp, event.data["label"])
Example 28
 def get_events(self, bucket_id: str, limit: int,
                starttime: Optional[datetime] = None, endtime: Optional[datetime] = None) -> List[Event]:
     if limit == 0:
         return []
     q = EventModel.select() \
                   .where(EventModel.bucket == self.bucket_keys[bucket_id]) \
                   .order_by(EventModel.timestamp.desc()) \
                   .limit(limit)
     if starttime:
         # Important to normalize datetimes to UTC, otherwise any UTC offset will be ignored
         starttime = starttime.astimezone(timezone.utc)
         q = q.where(starttime <= EventModel.timestamp)
     if endtime:
         endtime = endtime.astimezone(timezone.utc)
         q = q.where(EventModel.timestamp <= endtime)
     return [Event(**e) for e in list(map(EventModel.json, q.execute()))]
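The astimezone normalization above matters because, as the comment notes, the stored timestamps are in UTC; a quick illustration of what it does (values made up):

from datetime import datetime, timedelta, timezone

local = datetime(2020, 1, 1, 12, 0, tzinfo=timezone(timedelta(hours=2)))
print(local.astimezone(timezone.utc))  # 2020-01-01 10:00:00+00:00, the instant actually compared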
Example 29
def test_get_event_with_timezone(bucket_cm):
    """Tries to retrieve an event using a timezone aware datetime."""
    hour = timedelta(hours=1)
    td_offset = 2 * hour
    tz = timezone(td_offset)

    dt_utc = datetime(2017, 10, 27, hour=0, minute=5, tzinfo=timezone.utc)
    # Same wall-clock time but in UTC+2, i.e. the instant dt_utc - td_offset
    dt_with_tz = dt_utc.replace(tzinfo=tz)

    with bucket_cm as bucket:
        bucket.insert(Event(timestamp=dt_with_tz))
        fetched_events = bucket.get(starttime=dt_with_tz - hour, endtime=dt_with_tz + hour)
        assert len(fetched_events) == 1

        fetched_events = bucket.get(starttime=dt_utc - td_offset - hour, endtime=dt_utc - td_offset + hour)
        assert len(fetched_events) == 1
Example 30
def afk_events(client, start_date, end_date):
    print("Generating fake afk events")
    interval = 3000
    base_event = Event(data={"status": "not-afk"},
                       timestamp=start_date,
                       duration=timedelta(seconds=interval))
    afk_events = []
    ts = start_date
    while ts < end_date:
        e = copy(base_event)
        e.timestamp = ts - timedelta(seconds=1)
        ts += timedelta(seconds=interval)
        afk_events.append(e)
    print("Sending {} afk events".format(len(afk_events)))
    client.send_events(afk_bucket_name, afk_events)
Example 31
def test_get_event_by_id(bucket_cm):
    """Test that we can retrieve single events by their IDs"""
    with bucket_cm as bucket:
        eventcount = 2
        # Create 1-day long events
        events = [
            Event(timestamp=now + i * td1d, duration=td1d)
            for i in range(eventcount)
        ]
        bucket.insert(events)

        # Retrieve stored events
        events = bucket.get()
        for e in events:
            # Query them one-by-one
            event = bucket.get_by_id(e.id)
            assert e == event
Example 32
def test_get_ordered(bucket_cm):
    """
    Makes sure that received events are ordered
    """
    with bucket_cm as bucket:
        eventcount = 10
        events = []
        for i in range(eventcount):
            events.append(Event(timestamp=now + timedelta(seconds=i)))
        random.shuffle(events)
        print(events)
        bucket.insert(events)
        fetched_events = bucket.get(-1)
        # Events are returned newest-first
        for i in range(eventcount - 1):
            print("1:" + fetched_events[i].to_json_str())
            print("2:" + fetched_events[i + 1].to_json_str())
            assert fetched_events[i].timestamp > fetched_events[i + 1].timestamp
Example 33
    def heartbeat(self, bucket_id: str, event: Event, pulsetime: float, queued: bool = False, commit_interval: Optional[float] = None) -> Optional[Event]:
        """
        Args:
            bucket_id: The bucket_id of the bucket to send the heartbeat to
            event: The actual heartbeat event
            pulsetime: The maximum amount of time in seconds since the last heartbeat for the two to be merged by aw-server
            queued: Use the aw-client queue feature to queue events if the client loses its connection to the server
            commit_interval: Override the default pre-merge commit interval

        NOTE: This endpoint can use the failed-requests retry queue.
              That makes the request itself non-blocking, so in that
              case the function always returns None.
        """

        from aw_transform.heartbeats import heartbeat_merge
        endpoint = "buckets/{}/heartbeat?pulsetime={}".format(bucket_id, pulsetime)
        commit_interval = commit_interval if commit_interval else self.commit_interval

        if queued:
            # Pre-merge heartbeats
            if bucket_id not in self.last_heartbeat:
                self.last_heartbeat[bucket_id] = event
                return None

            last_heartbeat = self.last_heartbeat[bucket_id]

            merge = heartbeat_merge(last_heartbeat, event, pulsetime)

            if merge:
                # If last_heartbeat becomes longer than commit_interval
                # then commit, else cache merged.
                diff = (last_heartbeat.duration).total_seconds()
                if diff > commit_interval:
                    data = merge.to_json_dict()
                    self.request_queue.add_request(endpoint, data)
                    self.last_heartbeat[bucket_id] = event
                else:
                    self.last_heartbeat[bucket_id] = merge
            else:
                data = last_heartbeat.to_json_dict()
                self.request_queue.add_request(endpoint, data)
                self.last_heartbeat[bucket_id] = event

            return None
        else:
            return Event(**self._post(endpoint, event.to_json_dict()).json())
Example 34
def main():
    import argparse

    parser = argparse.ArgumentParser(
        "A watcher for applications with activationPolicy=regular")
    parser.add_argument("--testing", action="store_true")

    args = parser.parse_args()

    logging.basicConfig(level=logging.DEBUG if args.testing else logging.INFO)
    client = ActivityWatchClient("macoswatcher", testing=args.testing)

    bucketname = "{}_{}".format(client.client_name, client.client_hostname)
    eventtype = "currentwindow"
    client.create_bucket(bucketname, eventtype)

    last_app = ""
    last_title = ""
    info = getInfo()
    print(info)
    active_app = getApp(info)
    active_title = getTitle(info)
    if (active_title == ""):
        logger.error(
            "Title of active window not found. Does the terminal have access to accessibility API? See README for how to give access!"
        )

    while True:
        try:
            info = getInfo()
            active_app = getApp(info)
            active_title = getTitle(info)

            if last_app != active_app or last_title != active_title:
                last_app = active_app
                last_title = active_title
                client.send_event(
                    bucketname,
                    Event(label=[active_app, active_title],
                          timestamp=datetime.now(pytz.utc)))
                print(active_app + ", " + active_title)
        except Exception as e:
            logger.error(
                "Exception thrown while trying to get active applications {}".
                format(e))
        sleep(0.5)
def heartbeat_merge(last_event: Event, heartbeat: Event, pulsetime: float) -> Optional[Event]:
    """
    Merges two events if they have identical data
    and the heartbeat timestamp is within the pulsetime window.
    """
    if last_event.data == heartbeat.data:
        # Seconds between end of last_event and start of heartbeat
        pulseperiod_end = last_event.timestamp + last_event.duration + timedelta(seconds=pulsetime)
        within_pulsetime_window = last_event.timestamp <= heartbeat.timestamp <= pulseperiod_end

        if within_pulsetime_window:
            # New duration: from the start of last_event to the end of heartbeat
            new_duration = (heartbeat.timestamp - last_event.timestamp) + heartbeat.duration
            if last_event.duration < timedelta(0):
                logger.warning("Merging heartbeats would result in a negative duration, refusing to merge.")
            else:
                last_event.duration = new_duration
                return last_event

    return None
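And the complementary case for this variant: identical data but a gap beyond the pulsetime window yields no merge (again assuming aw-core's Event and a timezone-aware `now`):

a = Event(timestamp=now, duration=timedelta(seconds=3), data={"x": 1})
b = Event(timestamp=now + timedelta(seconds=20), duration=timedelta(seconds=2), data={"x": 1})
assert heartbeat_merge(a, b, pulsetime=5.0) is None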
 def replace_last(self, bucket_id: str, event: Event):
     last_event = list(self.db[bucket_id]["events"].find().sort([("timestamp", -1)]).limit(1))[0]
     self.db[bucket_id]["events"].replace_one({"_id": last_event["_id"]}, self._transform_event(event.copy()))
 def insert_one(self, bucket_id: str, event: Event) -> Event:
     e = EventModel.from_event(self.bucket_keys[bucket_id], event)
     e.save()
     event.id = e.id
     return event
 def insert_event(self, bucket_id: str, event: Event) -> Event:
     endpoint = "buckets/{}/events".format(bucket_id)
     data = event.to_json_dict()
     return Event(**self._post(endpoint, data).json())
def test_set_invalid_duration() -> None:
    e = Event()
    with pytest.raises(TypeError):
        e.duration = "12"  # type: ignore
def test_json_serialization() -> None:
    e = Event(timestamp=now, duration=timedelta(hours=13, minutes=37), data={"key": "val"})
    assert e == Event(**json.loads(e.to_json_str()))
# First we need a bucket to send events/heartbeats to.
# If the bucket already exists aw-server will simply return 304 NOT MODIFIED,
# so run this every time the clients starts up to verify that the bucket exists.
# If the client was unable to connect to aw-server or something failed
# during the creation of the bucket, an exception will be raised.
client.create_bucket(bucket_id, event_type="test")

# Asynchronous loop example
with client:
    # This context manager starts the queue dispatcher thread and stops it when done;
    # always use it when setting queued=True.
    # Alternatively you can use client.connect() and client.disconnect() instead if you prefer that

    # Create a sample event to send as heartbeat
    heartbeat_data = {"label": "heartbeat"}
    now = datetime.now(timezone.utc)
    heartbeat_event = Event(timestamp=now, data=heartbeat_data)

    # Now we can send some events via heartbeats
    # This will send one heartbeat every second 5 times
    sleeptime = 1
    for i in range(5):
        # The duration between the heartbeats will be less than pulsetime, so they will get merged.
        # TODO: Make a section with an illustration on how heartbeats work and insert a link here
        print("Sending heartbeat {}".format(i))
        client.heartbeat(bucket_id, heartbeat_event, pulsetime=sleeptime+1, queued=True)

        # Sleep a second until next heartbeat
        sleep(sleeptime)

        # Update timestamp for next heartbeat
        heartbeat_event.timestamp = datetime.now(timezone.utc)
 def insert_one(self, bucket: str, event: Event) -> Event:
     # Assign the id before copying, so the stored copy carries it too
     event.id = len(self.db[bucket])
     self.db[bucket].append(Event(**event))
     return event
 def replace(self, bucket_id: str, event_id, event: Event) -> bool:
     self.db[bucket_id]["events"].replace_one({"_id": event_id}, self._transform_event(event.copy()))
     event.id = event_id
     return True