Example n. 1
0
    def _poll_status_changes(self):
        """Poll the provider's ``status_changes`` endpoint and persist results.

        Follows pagination links, processing each page of status changes in
        its own database transaction, and records the last ``start_time``
        seen so the next poll resumes where this one stopped.
        """
        endpoint = urllib.parse.urljoin(self.provider.base_api_url, "status_changes")
        if self.provider.api_configuration.get("trailing_slash"):
            endpoint = endpoint + "/"

        query = {}

        # Resume from the last checkpoint; this relies on providers sorting
        # by start_time (telemetries recorded late would be missed).
        last_polled = self.provider.last_start_time_polled
        if last_polled:
            query["start_time"] = utils.to_mds_timestamp(last_polled)
        elif PROVIDER_POLLER_LIMIT_DAYS:
            # No checkpoint yet: bound the initial poll to a recent window
            horizon = timezone.now() - datetime.timedelta(PROVIDER_POLLER_LIMIT_DAYS)
            query["start_time"] = utils.to_mds_timestamp(horizon)

        # Merge provider-specific params that optimise polling, when present
        try:
            query.update(self.provider.api_configuration["status_changes_params"])
        except KeyError:
            pass

        next_url = endpoint
        if query:
            next_url = "%s?%s" % (endpoint, urllib.parse.urlencode(query))

        # Follow pagination links until exhausted or an empty page is returned
        while next_url:
            body = self._get_body(next_url)
            # Translate older versions of data to the current schema
            translated = translate_data(body["data"], body["version"])
            status_changes = translated["status_changes"]
            if not status_changes:
                break

            # One transaction per "page" of data
            with transaction.atomic():
                self.provider.last_start_time_polled = self._process_status_changes(
                    status_changes
                )
                self.provider.save(update_fields=["last_start_time_polled"])

            next_url = body.get("links", {}).get("next")
Example n. 2
0
def test_compliance_list_basic(client, django_assert_num_queries):
    """List compliances through the Agency API and check filtering.

    Covers: anonymous access (with a query-count budget), filtering by
    ``provider_id``, filtering by ``end_date`` (too high, in range, too
    low), an ongoing compliance (``end_date=None``) matched by a future
    ``end_date``, and an unknown ``provider_id`` returning nothing.
    """
    device = factories.Device()
    policy = factories.Policy(published=True)

    # One provider-specific policy (kept for reference, currently disabled):
    # provider_policy = factories.ComplianceFactory()
    # And one general-purpose policy, bounded in time (2007-12-06 .. 2007-12-07)
    compliance = factories.ComplianceFactory(
        rule=uuid.UUID("81b1bc92-65b7-4434-8ada-2feeb0b7b223"),
        geography=uuid.UUID("e0e4a085-7a50-43e0-afa4-6792ca897c5a"),
        policy_id=policy.id,
        vehicle_id=device.id,
        start_date=datetime.datetime(2007,
                                     12,
                                     6,
                                     16,
                                     29,
                                     43,
                                     79043,
                                     tzinfo=pytz.UTC),
        end_date=datetime.datetime(2007,
                                   12,
                                   7,
                                   16,
                                   29,
                                   43,
                                   79043,
                                   tzinfo=pytz.UTC),
    )

    # A second compliance with no end date: still ongoing as of "now"
    compliance_ongoing = factories.ComplianceFactory(
        rule=uuid.UUID("89b5bbb5-ba98-4498-9649-787eb8ddbb8e"),
        geography=uuid.UUID("2cfbdd7f-8ba2-4b48-9826-951fe3249981"),
        policy_id=factories.Policy().id,
        vehicle_id=factories.Device().id,
        start_date=datetime.datetime(2009,
                                     12,
                                     6,
                                     16,
                                     29,
                                     43,
                                     79043,
                                     tzinfo=pytz.UTC),
        end_date=None,
    )

    # Test without auth
    n = 2  # Savepoint and release
    n += 1  # query on policy
    n += 1  # query on related compliances
    n += 1  # query on device
    n += 1  # query on other device
    # NOTE(review): fetching the last compliance appears not to add a query
    # to the budget above — confirm against the view's queryset.
    with django_assert_num_queries(n):
        response = client.get(reverse("agency-0.3:compliance-list"))
    assert response.status_code == 200

    # NOTE(review): the original comment asked "why is there one policy
    # more (???)" — the first result observed here is the general-purpose
    # policy; the ordering/extra-row question was never resolved. Confirm.
    assert response.data[0]["id"] == str(compliance.policy.id)

    # Now test with a provider ID

    response = client.get(reverse("agency-0.3:compliance-list"),
                          {"provider_id": str(device.provider.id)})

    # The provider can fetch a policy that applies to them
    # (to all providers in this case)
    assert response.status_code == 200
    assert response.data[0]["id"] == str(compliance.policy_id)

    response = client.get(
        reverse("agency-0.3:compliance-list"),
        {
            "provider_id":
            str(device.provider.id),
            "end_date":
            utils.to_mds_timestamp(
                datetime.datetime(
                    2009, 12, 7, 16, 29, 43, 79043, tzinfo=pytz.UTC)),
        },
    )

    # provider is OK but timestamp is too high
    assert response.status_code == 200
    assert response.data == []

    response = client.get(
        reverse("agency-0.3:compliance-list"),
        {
            "provider_id":
            str(device.provider.id),
            "end_date":
            utils.to_mds_timestamp(
                datetime.datetime(
                    2007, 12, 7, 16, 29, 43, 79043, tzinfo=pytz.UTC)) -
            60000,  # 60000 ms = 1 minute before compliance.end_date, so the
            # timestamp falls inside the compliance interval
            # NOTE(review): the original "XXX ?!" suggests this offset was
            # not understood — confirm the boundary semantics of end_date.
        },
    )

    # provider is OK and timestamp is OK
    assert response.status_code == 200
    assert response.data[0]["id"] == str(compliance.policy_id)

    response = client.get(
        reverse("agency-0.3:compliance-list"),
        {
            "end_date":
            utils.to_mds_timestamp(
                datetime.datetime(1970,
                                  1,
                                  14,
                                  0,
                                  47,
                                  3,
                                  900000,
                                  tzinfo=datetime.timezone.utc))
        },
    )

    # too low: predates both compliances
    assert response.status_code == 200 and response.data == []

    response = client.get(
        reverse("agency-0.3:compliance-list"),
        {
            "end_date":
            utils.to_mds_timestamp(
                datetime.datetime(2070,
                                  1,
                                  21,
                                  3,
                                  45,
                                  56,
                                  700000,
                                  tzinfo=datetime.timezone.utc))
        },
    )

    # too high for the bounded compliance, but compliance_ongoing is not finished
    assert response.status_code == 200
    assert response.data[0]["id"] == str(compliance_ongoing.policy_id)

    response = client.get(
        reverse("agency-0.3:compliance-list"),
        {"provider_id": "89b5bbb5-ba98-4498-9649-787eb8ddbb8e"},
    )  # this provider does not exist

    assert response.status_code == 200 and response.data == []
Example n. 3
0
def test_policy_list_range(client):
    """The policy list endpoint filters on the start_time/end_time window."""
    # Policy from last year
    factories.Policy(
        name="past",
        start_date=timezone.now() - datetime.timedelta(days=365),
        end_date=timezone.now() - datetime.timedelta(days=350),
        published=True,
    )
    # Policy with both lower and upper bounds (mid-lifespan right now)
    factories.Policy(
        name="bound",
        start_date=timezone.now() - datetime.timedelta(days=15),
        end_date=timezone.now() + datetime.timedelta(days=15),
        published=True,
    )
    # Same start, but no upper bound
    factories.Policy(
        name="ongoing",
        start_date=timezone.now() - datetime.timedelta(days=15),
        published=True,
    )
    # Policy for next year
    factories.Policy(
        name="future",
        start_date=timezone.now() + datetime.timedelta(days=365),
        published=True,
    )

    url = reverse("agency-0.3:policy-list")

    def listed_names(query=None):
        """GET the policy list with *query* and return the names, in order."""
        response = client.get(url, query or {})
        return [policy["name"] for policy in response.data]

    # Current and future policies by default
    assert listed_names() == ["bound", "ongoing", "future"]

    # Current only
    assert listed_names({
        "start_time": utils.to_mds_timestamp(timezone.now()),
        "end_time": utils.to_mds_timestamp(timezone.now()),
    }) == ["bound", "ongoing"]

    # Ongoing is still... ongoing, and future
    assert listed_names({
        "start_time": utils.to_mds_timestamp(
            timezone.now() + datetime.timedelta(days=30)
        ),
    }) == ["ongoing", "future"]

    # Past only
    assert listed_names({
        "start_time": utils.to_mds_timestamp(
            timezone.now() - datetime.timedelta(days=365)
        ),
        "end_time": utils.to_mds_timestamp(
            timezone.now() - datetime.timedelta(days=30)
        ),
    }) == ["past"]

    # All four policies
    assert listed_names({
        "start_time": utils.to_mds_timestamp(
            timezone.now() - datetime.timedelta(days=365)
        ),
        "end_time": utils.to_mds_timestamp(
            timezone.now() + datetime.timedelta(days=365)
        ),
    }) == ["past", "bound", "ongoing", "future"]
Example n. 4
0
                "timestamp": 1_325_376_000_000,
                "gps": {"lat": 33.996_339, "lng": -118.48153},
            },
        },
    )

    expected_event_device1 = {
        "provider_id": str(provider.id),
        "provider_name": "Test provider",
        "device_id": uuid1,
        "vehicle_id": "1AAAAA",
        "propulsion_type": ["combustion"],
        "event_type_reason": "maintenance",
        "event_type": "unavailable",
        "vehicle_type": "car",
        "event_time": utils.to_mds_timestamp(now),
        "event_location": {
            "type": "Feature",
            "properties": {"timestamp": 1_325_376_000_000},
            "geometry": {"type": "Point", "coordinates": [-118.48153, 33.996_339]},
        },
        "associated_trip": "b3da2d46-065f-4036-903c-49d796f09357",
    }
    expected_event_device2 = {
        "provider_id": str(provider2.id),
        "provider_name": "Test another provider",
        "device_id": uuid2,
        "vehicle_id": "3CCCCC",
        "propulsion_type": ["electric"],
        "event_type_reason": "trip_start",
        "event_type": "trip",
Example n. 5
0
    def _poll_status_changes_v0_2(self, api_version):
        """Poll an MDS 0.2/0.3 provider's status changes, page by page.

        Resumes from a polling cursor: either the one given explicitly
        (``self.cursor`` with ``self.from_cursor``/``self.to_cursor``) or
        the provider's persisted polling state. Each page is processed in
        its own transaction; the provider state is only persisted when no
        explicit cursor was supplied.
        """
        # Start where we left, it's all based on providers sorting by start_time
        # (but we would miss telemetries older than start_time saved after we polled).
        # For those that support it, use the recorded time field or equivalent.
        polling_cursor = self.cursor or self.provider.api_configuration.get(
            "polling_cursor", POLLING_CURSORS.start_time.name
        )
        logger.info(
            f"Starting polling {self.provider} using the field: {polling_cursor}.\n"
            "Current state:\n"
            + (
                (
                    f"\tLast event_time: {str(self.provider.last_event_time_polled)},\n"
                    + f"\tLast recorded: {str(self.provider.last_recorded_polled)}\n"
                    + f"\tLast skip: {self.provider.last_skip_polled}."
                )
                if not self.cursor
                else f"\tCursor value: {self.from_cursor}."
            )
        )

        params = {}

        # Cursor param (raise if typo)
        param_name = POLLING_CURSORS[polling_cursor].value

        if polling_cursor == POLLING_CURSORS.total_events.name:
            # Resume from the last line fetched
            # NOTE(review): a from_cursor of 0 is falsy and falls back to
            # the provider's last_skip_polled — confirm this is intended.
            params[param_name] = self.from_cursor or self.provider.last_skip_polled
        elif polling_cursor == POLLING_CURSORS.start_recorded.name:
            # Resume from the last "recorded" value
            params[param_name] = utils.to_mds_timestamp(
                self.from_cursor or self.provider.last_recorded_polled
            )
        elif polling_cursor == POLLING_CURSORS.start_time.name:
            # Resume from the last "event_time" value
            last_event_time_polled = self.provider.last_event_time_polled
            if not last_event_time_polled:
                # No checkpoint yet: bound the initial poll (days — see
                # datetime.timedelta's first positional argument)
                last_event_time_polled = timezone.now() - datetime.timedelta(
                    getattr(settings, "PROVIDER_POLLER_LIMIT", 90)
                )

            # But we now apply a "lag" before actually polling,
            # leaving time for the provider to collect data from its devices
            polling_lag = self.provider.api_configuration.get("provider_polling_lag")
            if polling_lag:
                polling_lag = parse_duration(polling_lag)
                if (timezone.now() - last_event_time_polled) < polling_lag:
                    logger.debug("Still under the polling lag, back to sleep.")
                    return

            params[param_name] = utils.to_mds_timestamp(
                self.from_cursor or last_event_time_polled
            )

        # Provider-specific params to optimise polling
        try:
            params.update(self.provider.api_configuration["status_changes_params"])
        except KeyError:
            pass

        next_url = urllib.parse.urljoin(self.provider.base_api_url, "status_changes")
        if self.provider.api_configuration.get("trailing_slash"):
            next_url += "/"

        if params:
            next_url = "%s?%s" % (next_url, urllib.parse.urlencode(params))

        # Running count of fetched rows; only a number when paging with an
        # explicit (truthy) "total_events" cursor, otherwise None until the
        # provider-state branch below recomputes it.
        skip_polled = (
            self.from_cursor
            if self.from_cursor and self.cursor == POLLING_CURSORS.total_events.name
            else None
        )

        # Pagination
        while next_url:
            body = self._get_body(next_url, api_version)
            # Translate older versions of data
            translated_data = translate_v0_2_to_v0_4(body["data"])
            status_changes = translated_data["status_changes"]
            if not status_changes:
                break

            next_url = body.get("links", {}).get("next")

            # A transaction for each "page" of data
            with transaction.atomic():
                # We get the maximum of the recorded and event_types
                # from the status changes
                event_time_polled, recorded_polled = self._process_status_changes(
                    status_changes
                )

                if self.cursor:
                    # Explicit cursor: stop at the requested upper bound and
                    # leave the provider's persisted state untouched.
                    if self.cursor == POLLING_CURSORS.total_events.name:
                        skip_polled += len(status_changes)
                        if skip_polled >= self.to_cursor:
                            break
                    elif self.cursor == POLLING_CURSORS.start_recorded.name:
                        if recorded_polled >= self.to_cursor:
                            break
                    elif self.cursor == POLLING_CURSORS.start_time.name:
                        if event_time_polled >= self.to_cursor:
                            break
                else:
                    # We get the new skip from the number of status changes
                    skip_polled = (
                        self.provider.last_skip_polled + len(status_changes)
                        if self.provider.last_skip_polled
                        else len(status_changes)
                    )
                    # Persist all three checkpoints after every page so a
                    # crash only loses the page in flight.
                    self.provider.last_event_time_polled = event_time_polled
                    self.provider.last_recorded_polled = recorded_polled
                    self.provider.last_skip_polled = skip_polled
                    self.provider.save(
                        update_fields=[
                            "last_event_time_polled",
                            "last_recorded_polled",
                            "last_skip_polled",
                        ]
                    )

                logger.info(
                    f"Polled page using cursor: {polling_cursor}. New state:\n"
                    + f"\tLast event_time: {str(event_time_polled)},\n"
                    + f"\tLast recorded: {str(recorded_polled)}\n"
                    + f"\tLast skip: {skip_polled}."
                )
Example n. 6
0
    def _poll_status_changes_v0_4(self, api_version):
        """Poll an MDS 0.4 provider, choosing between two endpoints.

        Uses ``/events`` for recent (near real-time) data and falls back to
        the hour-sliced ``/status_changes`` archives when the checkpoint is
        older than the provider's real-time window. Only the "start_time"
        cursor is supported; each page is processed in its own transaction.

        Raises:
            ValueError: if a cursor other than "start_time" is configured.
        """
        # Start where we left, it's all based on providers sorting by start_time
        # (but we would miss telemetries older than start_time saved after we polled).
        # For those that support it, use the recorded time field or equivalent.
        polling_cursor = self.cursor or self.provider.api_configuration.get(
            "polling_cursor", POLLING_CURSORS.start_time.name
        )
        if polling_cursor != POLLING_CURSORS.start_time.name:
            raise ValueError('Only "start_time" cursor is supported in MDS 0.4+')
        logger.info(
            f"Starting polling {self.provider} using the field: {polling_cursor}.\n"
            "Current state:\n"
            + (
                (f"\tLast event_time: {str(self.provider.last_event_time_polled)}.")
                if not self.cursor
                else f"\tCursor value: {self.from_cursor}."
            )
        )

        params = {}

        # Resume from the last "event_time" value
        last_event_time_polled = (
            self.from_cursor or self.provider.last_event_time_polled
        )
        if not last_event_time_polled:
            # No checkpoint yet: bound the initial poll (days — see
            # datetime.timedelta's first positional argument)
            last_event_time_polled = timezone.now() - datetime.timedelta(
                getattr(settings, "PROVIDER_POLLER_LIMIT", 90)
            )

        # But we now apply a "lag" before actually polling,
        # leaving time for the provider to collect data from its devices
        polling_lag = self.provider.api_configuration.get("provider_polling_lag")
        if polling_lag:
            polling_lag = parse_duration(polling_lag)
            if (timezone.now() - last_event_time_polled) < polling_lag:
                logger.info("Still under the polling lag, back to sleep.")
                return

        # The MDS 0.4 Provider API got two endpoints:
        # - /events for the real-time or so data (formerly /status_changes)
        #   but limited to two weeks of history
        # - /status_changes for all the data except the current hour
        # If we're catching up far in time, begin by polling /status_changes
        realtime_threshold = parse_duration(  # Default is 9 days
            self.provider.api_configuration.get("realtime_threshold", "P9D")
        )
        next_event_time = last_event_time_polled + datetime.timedelta(hours=1)
        if (timezone.now() - next_event_time) > realtime_threshold:
            # We have to query the archived status changes with another format
            logger.info("last_event_time_polled is too old, asking archives")
            # We're done with the events of the last hour, ask the next hour.
            # Truncate the ISO timestamp to the hour: "YYYY-MM-DDTHH"
            # (the format the archives endpoint expects).
            params["event_time"] = next_event_time.isoformat()[: len("YYYY-MM-DDTHH")]
        else:
            # Query the real-time endpoint as usual
            params["start_time"] = utils.to_mds_timestamp(last_event_time_polled)
            # Both bounds are mandatory now, use the lag as the event horizon
            # The provider will slice big results using pagination
            end_time = timezone.now()
            if polling_lag:
                # We tested the lag above, so end_time can't be older than start_time
                end_time -= polling_lag
            params["end_time"] = utils.to_mds_timestamp(end_time)

        # Provider-specific params to optimise polling
        try:
            params.update(self.provider.api_configuration["status_changes_params"])
        except KeyError:
            pass

        endpoint = "events"  # The new name for the real-time events endpoint
        if "event_time" in params:
            # We asked the archived status changes instead
            endpoint = "status_changes"
        next_url = urllib.parse.urljoin(self.provider.base_api_url, endpoint)
        if self.provider.api_configuration.get("trailing_slash"):
            next_url += "/"

        if params:
            next_url = "%s?%s" % (next_url, urllib.parse.urlencode(params))

        # Pagination
        while next_url:
            body = self._get_body(next_url, api_version)
            # No translation needed as long as 0.4 is the latest version
            status_changes = body["data"]["status_changes"]
            next_url = body.get("links", {}).get("next")

            # A transaction for each "page" of data
            with transaction.atomic():
                if status_changes:
                    # We get the maximum values from the status changes
                    event_time_polled, _ = self._process_status_changes(status_changes)
                elif endpoint == "status_changes":
                    # This hour frame of archives didn't contain results:
                    # still advance the checkpoint past the empty hour.
                    event_time_polled = next_event_time
                else:
                    # Try again from this point later
                    break

                if self.cursor:
                    # Explicit cursor: stop at the requested upper bound and
                    # leave the provider's persisted state untouched.
                    if self.cursor == POLLING_CURSORS.start_time.name:
                        if event_time_polled >= self.to_cursor:
                            break
                else:
                    # Persist the checkpoint after every page so a crash
                    # only loses the page in flight.
                    self.provider.last_event_time_polled = event_time_polled
                    self.provider.save(update_fields=["last_event_time_polled"])

                logger.info(
                    f"Polled page using cursor: {polling_cursor}. New state:\n"
                    + f"\tLast event_time: {str(event_time_polled)}."
                )