async def send_operation(self, op_id):
    telemetry = make_message_payload()
    telemetry["op_id"] = op_id
    with self.op_id_list_lock:
        logger("setting op_id {} to not_received".format(op_id))
        self.op_id_list[op_id] = not_received
    await self.client.send_event(telemetry)
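The snippet relies on a sentinel value and a lock-protected dict that are defined elsewhere in the suite; a minimal sketch of that bookkeeping, where the OperationTracker class, the received sentinel, and mark_received() are assumptions made for illustration:

import threading

# hypothetical sentinels for per-operation state (names assumed)
not_received = "not_received"
received = "received"


class OperationTracker(object):
    def __init__(self):
        # op_id -> not_received / received, guarded by op_id_list_lock
        self.op_id_list = {}
        self.op_id_list_lock = threading.Lock()

    def mark_received(self, op_id):
        with self.op_id_list_lock:
            self.op_id_list[op_id] = received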
Example 2
    async def test_dropped_c2d_2nd_call(self, client, service, before_api_call,
                                        after_api_call):

        # 1st call
        payload = sample_content.make_message_payload()

        await client.enable_c2d()

        test_input_future = asyncio.ensure_future(
            client.wait_for_c2d_message())
        await service.send_c2d(client.device_id, payload)
        received_message = await test_input_future
        assert received_message.body == payload

        # 2nd call
        payload = sample_content.make_message_payload()

        await before_api_call()
        test_input_future = asyncio.ensure_future(
            client.wait_for_c2d_message())
        await after_api_call()

        await service.send_c2d(client.device_id, payload)

        logger("Awaiting input")
        received_message = await test_input_future
        assert received_message.body == payload
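before_api_call and after_api_call are fixtures injected around the second wait_for_c2d_message() call; a minimal sketch, assuming they wrap a network drop and restore via the system_control adapter (the disconnect_network() call and the inner function names are assumptions):

import pytest


@pytest.fixture
def before_api_call(system_control):
    async def drop_network():
        # assumption: the adapter exposes a disconnect_network() counterpart
        await system_control.disconnect_network()

    return drop_network


@pytest.fixture
def after_api_call(system_control):
    async def restore_network():
        await system_control.reconnect_network()

    return restore_network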
Example 3
 def fin():
     adapter_config.default_api_timeout = previous_timeout
     logger(
         "Finishing test class: Replacing old REST timeout of {} seconds".format(
             previous_timeout
         )
     )
async def set_platform_properties(*, client, longhaul_control_device,
                                  system_control):
    wrapper_stats = await client.settings.wrapper_api.get_wrapper_stats()
    system_stats = await system_control.get_system_stats(0)

    properties = PlatformProperties()
    properties.language = wrapper_stats.get("language", "")
    properties.language_version = wrapper_stats.get("languageVersion", "")

    properties.os = system_stats.get("osType", "")
    properties.os_release = system_stats.get("osRelease", "")
    properties.system_architecture = system_stats.get("systemArchitecture", "")

    properties.sdk_repo = system_stats.get("sdkRepo", "")
    properties.sdk_commit = system_stats.get("sdkCommit", "")
    properties.sdk_sha = system_stats.get("sdkSha", "")

    properties.test_hub_name = client.settings.iothub_host_name
    properties.test_device_id = client.device_id
    properties.test_module_id = getattr(client, "module_id", "")

    properties.system_memory_size_in_kb = int(
        system_stats.get("system_MemTotal", 0))

    patch = {"reported": properties.to_dict()}
    logger("reporting: {}".format(patch))
    await longhaul_control_device.patch_twin(patch)

    return wrapper_stats["wrapperPid"]
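PlatformProperties and its to_dict() helper live elsewhere in the suite; a minimal sketch of the shape this function assumes, with the field list inferred from the assignments above:

class PlatformProperties(object):
    """Plain container for the platform fields reported to the twin (sketch)."""

    def __init__(self):
        self.language = ""
        self.language_version = ""
        self.os = ""
        self.os_release = ""
        self.system_architecture = ""
        self.sdk_repo = ""
        self.sdk_commit = ""
        self.sdk_sha = ""
        self.test_hub_name = ""
        self.test_device_id = ""
        self.test_module_id = ""
        self.system_memory_size_in_kb = 0

    def to_dict(self):
        # attribute names become keys in the reported-property patch
        return dict(vars(self))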
Example 5
 def print_progress(self):
     now = datetime.datetime.now()
     logger("Start time: {}".format(pretty_time(self.test_start_time)))
     logger("Duration:   {}".format(pretty_time(self.test_run_time)))
     logger("End time:   {}".format(pretty_time(self.test_end_time)))
     logger("now:        {}".format(pretty_time(now)))
     logger("Time left:  {}".format(pretty_time(self.test_end_time - now)))
Example 6
async def wait_for_desired_properties_patch(*,
                                            client,
                                            expected_twin,
                                            mistakes=1):
    mistakes_left = mistakes
    while True:
        patch_received = await client.wait_for_desired_property_patch()
        logger("desired properties sent:     " +
               str(expected_twin["desired"]["foo"]))

        logger("desired properties received: " +
               str(patch_received["desired"]["foo"]))

        if expected_twin["desired"]["foo"] == patch_received["desired"]["foo"]:
            logger("success")
            return
        else:
            if mistakes_left:
                # We sometimes get the old value before we get the new value, and that's
                # perfectly valid (especially with QOS 1 on MQTT).  If we got the wrong
                # value, we just try again.
                mistakes_left = mistakes_left - 1
                logger("trying again.  We still have {} mistakes left".format(
                    mistakes_left))
            else:
                logger("too many mistakes.  Failing")
                assert False
    async def run_one_op(self):
        try:
            measure_send_latency = MeasureLatency(
                tracker=self.average_send_latency)
            measure_verify_latency = MeasureLatency(
                tracker=self.average_verify_latency)

            op_id = self.next_op_id.increment()

            with self.uncompleted_ops_lock:
                self.uncompleted_ops.add(op_id)

            with self.count_sending, measure_send_latency:
                await self.send_operation(op_id)

            with self.count_verifying, measure_verify_latency:
                await self.verify_operation(op_id)

            with self.uncompleted_ops_lock:
                self.uncompleted_ops.remove(op_id)

            self.total_count_completed.increment()

        except Exception as e:
            logger("OP FAILED: Exception running op: {}".format(type(e)))
            logger(traceback.format_exc())
            self.total_count_failed.increment()
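MeasureLatency and the count_sending/count_verifying gauges are defined elsewhere; a minimal sketch of a latency-measuring context manager, assuming the tracker exposes an add_sample() method (that method name is a guess):

import time


class MeasureLatency(object):
    """Context manager that reports elapsed wall-clock time to a tracker (sketch)."""

    def __init__(self, tracker):
        self.tracker = tracker
        self.start = None

    def __enter__(self):
        self.start = time.time()
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        # add_sample() is an assumption about the tracker's interface
        self.tracker.add_sample(time.time() - self.start)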
    async def test_perf_simple_throughput(self, client):
        """
        Send a large number of messages all at once and measure how many messages
        can be sent per second.  This can be used to establish a theoretical maximum for
        our library, though the number of messages per second reported by this function
        is higher than typical because of burst effects.
        """
        count = 2500

        payloads = [sample_content.make_message_payload() for x in range(0, count)]

        start_time = datetime.datetime.now()
        await asyncio.gather(*[client.send_event(payload) for payload in payloads])
        duration = (datetime.datetime.now() - start_time).total_seconds()

        mps = math.floor(count / duration)

        logger(
            "{} messages were sent in {} seconds for {} messages/second".format(
                count, duration, mps
            )
        )

        # Arbitrary goal based on experimental evidence.  The goal of this assert is to
        # flag gigantic performance drops.  Experimentally, 46 is typical.  For this assert,
        # even 25 would be acceptable
        assert mps > 25
Example 9
    async def wait_for_disconnection_event(self, *, client):
        status = await client.get_connection_status()
        assert status == "connected"

        logger("waiting for client disconnection event")
        await client.wait_for_connection_status_change("disconnected")
        logger("client disconnection event received")
    async def test_perf_send_event_longhaul(self, client):
        """
        Run a longhaul test to validate that send_event can keep up with a consistent
        and regular cadence of events.  This tests an arbitrary time period, with an arbitrary
        number of messages sent every second and it verifies two things:

        1. The send_event latency doesn't go too high
        2. The maximum number of running threads doesn't go too high.

        Both of these factors can indicate a slowdown in sending.

        The choice for events_per_second is arbitrary, based on the results from
        test_perf_measure_send_event_capacity.  That test indicates that we can maintain
        21 messages per second for 30 seconds, so this test chooses 15 messages per second
        but runs it for 2 hours.
        """

        duration = 7200
        events_per = 15
        max_threads = 5
        max_latency = 11

        threads = await self.do_test_perf_send_event(
            client, events_per, duration, max_threads, max_latency
        )

        logger("FINAL RESULT:")
        logger(
            "Sent {} events per second for {} seconds with max {} threads".format(
                events_per, duration, threads
            )
        )
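do_test_perf_send_event is the shared helper behind both perf tests; a minimal sketch of the cadence-and-limit check it implies, where the thread-counting approach and the return value are assumptions (sample_content is the same helper module used elsewhere in these examples):

import asyncio
import datetime
import threading


async def do_test_perf_send_event(client, events_per_second, duration,
                                  max_threads, max_latency):
    """Send events at a fixed cadence and flag latency or thread growth (sketch)."""
    end_time = datetime.datetime.now() + datetime.timedelta(seconds=duration)
    max_threads_seen = 0

    while datetime.datetime.now() < end_time:
        start = datetime.datetime.now()
        payloads = [
            sample_content.make_message_payload()
            for _ in range(events_per_second)
        ]
        await asyncio.gather(*[client.send_event(p) for p in payloads])

        latency = (datetime.datetime.now() - start).total_seconds()
        max_threads_seen = max(max_threads_seen, threading.active_count())

        assert latency < max_latency
        assert max_threads_seen <= max_threads

        # sleep off whatever is left of the one-second cadence window
        await asyncio.sleep(max(0, 1 - latency))

    return max_threads_seen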
    async def test_send_5_telemetry_events_to_iothub(self, client, eventhub):
        if not limitations.can_always_overlap_telemetry_messages(client):
            pytest.skip("client's can't reliably overlap telemetry messages")

        payloads = []
        send_futures = []

        # start listening before we send
        await eventhub.connect()
        received_message_future = asyncio.ensure_future(
            eventhub.wait_for_next_event(client.device_id))

        for _ in range(0, 5):
            ensure_send_telemetry_message(client=client,
                                          payloads=payloads,
                                          send_futures=send_futures)

        # wait for the sends to complete, and verify that they arrive
        await asyncio.gather(*send_futures)

        logger("All messages sent.  Awaiting reception")
        await wait_for_all_telemetry_messages_to_arrive(
            received_message_future=received_message_future,
            payloads=payloads,
            eventhub=eventhub,
            client=client,
        )
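ensure_send_telemetry_message and wait_for_all_telemetry_messages_to_arrive are helpers shared by the telemetry tests; a minimal sketch of the send-side helper, assuming it only needs to record the payload and start the send as a future:

import asyncio


def ensure_send_telemetry_message(*, client, payloads, send_futures):
    """Create one telemetry payload and start sending it without awaiting (sketch)."""
    payload = sample_content.make_message_payload()
    payloads.append(payload)
    send_futures.append(asyncio.ensure_future(client.send_event(payload)))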
Example 12
def restart_edgehub(hard=False):
    logger("restarting edgehub")
    sleep(5)
    client = docker.from_env()
    edgeHub = client.containers.get(EDGEHUB_NAME)
    try:
        if hard:
            client = docker.from_env()
            containerList = list(
                map(lambda x: x.name, client.containers.list()))

            for containerName in containerList:
                if "Mod" or "edgeHub" in containerName:
                    currentContainer = client.containers.get(containerName)
                    currentContainer.restart()
            while EDGEHUB_NAME not in list(
                    map(lambda x: x.name, client.containers.list())):
                print("waiting for edge daemon to revive edgehub...")
                sleep(1)
            print("updating pointer to edgehub container")
            edgeHub.reload()
        else:
            edgeHub.restart()
            sleep(5)
    except Exception as e:
        logger("Error: {}".format(sys.exc_info()[0]))
        raise e
Example 13
async def net_control():
    api = getattr(settings.net_control, "api", None)
    try:
        yield api
    finally:
        if api:
            logger(separator("net_control finalizer"))
            await settings.net_control.api.reconnect()
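The generator fixtures in these examples (net_control, system_control, service, registry, eventhub, test_module) all follow the same yield-then-finalize pattern; the decorators were lost when the snippets were extracted, but they are presumably registered roughly like this, assuming pytest-asyncio-style async fixtures:

import pytest


@pytest.fixture
async def net_control():
    api = getattr(settings.net_control, "api", None)
    try:
        # everything before the yield is setup, everything after is teardown
        yield api
    finally:
        if api:
            logger(separator("net_control finalizer"))
            await settings.net_control.api.reconnect()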
Example 14
 async def reconnect_after_each_test(self, system_control):
     # if this test is going to drop packets, add a finalizer to make sure we always
     # stop dropping packets when we're done.  Calling reconnect twice in a row is allowed.
     try:
         yield
     finally:
         logger("in finalizer: no longer dropping packets")
         await system_control.reconnect_network()
Example 15
async def system_control():
    adapter = getattr(settings.system_control, "adapter", None)
    try:
        yield adapter
    finally:
        if adapter:
            logger("system_control finalizer".center(132, "-"))
            await adapter.reconnect_network()
Example 16
async def service():
    service = await connections.connect_service_client()
    try:
        yield service
    finally:
        logger(separator("service finalizer"))
        try:
            await service.disconnect()
        except Exception as e:
            logger("exception disconnecting service: {}".format(e))
async def move_blob_status_into_eventhub(service, client):
    """
    get_blob_upload_status() returns the status once, so we might receive status from
    a different instance of this test that's running in parallel.  We copy the upload
    status into eventhub where it's available to any instance of this test.
    """
    while True:
        status = await service.get_blob_upload_status()
        logger("got upload status = {}".format(status))
        await client.send_event(json.loads(str(status)))
Example 18
async def registry():
    registry = await connections.connect_registry_client()
    try:
        yield registry
    finally:
        logger(separator("registry finalizer"))
        try:
            await registry.disconnect()
        except Exception as e:
            logger("exception disconnecting registry: {}".format(e))
Example 19
async def _log_exception(aw):
    """
    Log any exceptions that happen while running this awaitable
    """
    try:
        await aw
    except Exception:
        logger("Exception raised")
        logger(traceback.format_exc())
        raise
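_log_exception is typically used as a thin wrapper so that failures inside gathered awaitables still show up in the log before gather re-raises them; a minimal usage sketch (run_all is an illustrative name):

import asyncio


async def run_all(awaitables):
    # wrap each awaitable so its exception is logged, then re-raised by gather
    await asyncio.gather(*[_log_exception(aw) for aw in awaitables])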
Example 20
    def is_test_done(self):
        if self.error:
            raise self.error

        logger("{}: Change in {},  Remaining Time: {}".format(
            self.prefix,
            pretty_time(self.next_change - datetime.datetime.now()),
            pretty_time(self.test_end_time - datetime.datetime.now()),
        ))
        return datetime.datetime.now() >= self.test_end_time
Example 21
    async def do_test_handle_method_to_friend(self, *, client, friend, count,
                                              time_limit):

        for i in range(0, count):
            if time_limit.is_test_done():
                return

            logger("method_to_friend {}/{}".format(i + 1, count))

            await run_method_call_test(source=client, destination=friend)
Example 22
    async def do_test_handle_method_from_service(self, *, client, service,
                                                 count, time_limit):

        for i in range(0, count):
            if time_limit.is_test_done():
                return

            logger("method_from_service {}/{}".format(i + 1, count))

            # BKTODO: pull enable_methods out of run_method_call_test

            await run_method_call_test(source=service, destination=client)
Example 23
async def eventhub(event_loop):
    eventhub = adapters.create_adapter(settings.eventhub.adapter_address,
                                       "eventhub")
    await eventhub.create_from_connection_string(
        settings.eventhub.connection_string)
    try:
        yield eventhub
    finally:
        logger(separator("eventhub finalizer"))
        try:
            await eventhub.disconnect()
        except Exception as e:
            logger("exception disconnecting eventhub: {}".format(e))
Example 24
def pytest_pyfunc_call(pyfuncitem):

    # this hook wraps test runs.  this yield runs the actual test
    outcome = yield

    try:
        # this will raise if the outcome was an exception
        outcome.get_result()

        logger(separator("TEST PASSED (before cleanup)"))

    except Exception as e:
        logger(separator("TEST FAILED BACAUSE OF {}".format(e)))
Example 25
async def test_module():
    test_module = await connections.get_module_client(settings.test_module)
    try:
        yield test_module
    finally:
        logger(separator("module finalizer"))
        try:
            if test_module.capabilities.v2_connect_group:
                await test_module.destroy()
            else:
                await test_module.disconnect()
        except Exception as e:
            logger("exception disconnecting test module: {}".format(e))
Example 26
    async def patch_desired(self, *, client, registry, mistakes=1):
        twin_sent = sample_content.make_desired_props()
        logger("Patching desired properties to {}".format(twin_sent))

        patch_future = asyncio.ensure_future(
            wait_for_desired_properties_patch(client=client,
                                              expected_twin=twin_sent,
                                              mistakes=mistakes))
        await asyncio.sleep(1)  # wait for async call to take effect

        await patch_desired_props(registry, client, twin_sent)

        await patch_future  # raises if patch not received
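patch_desired_props is the service-side half of this test; a minimal sketch, assuming the registry adapter exposes a patch_twin(device_id, patch)-style call (that method name and signature are assumptions):

async def patch_desired_props(registry, client, twin):
    """Push the desired-property patch through the registry adapter (sketch)."""
    logger("patching desired props for {} to {}".format(client.device_id, twin))
    await registry.patch_twin(client.device_id, twin)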
Example 27
        async def report_loop():
            stop_after_sending = False
            while True:
                if stop_reporter:
                    stop_after_sending = True

                patch = {"reported": test_report.to_dict()}
                logger("reporting: {}".format(patch))
                await client.patch_twin(patch)

                if stop_after_sending:
                    return

                await asyncio.sleep(5)  # todo: make configurable.
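report_loop() runs as a background task and watches a stop_reporter flag in its enclosing scope; a simplified sketch of how the enclosing test might start and stop it (the surrounding function and the simplified loop body are assumptions):

import asyncio


async def run_test_with_reporting(client, test_report):
    stop_reporter = False

    async def report_loop():
        # simplified: patch the reported twin every 5 seconds until asked to stop
        while not stop_reporter:
            await client.patch_twin({"reported": test_report.to_dict()})
            await asyncio.sleep(5)

    reporter = asyncio.ensure_future(report_loop())
    try:
        pass  # the actual test steps would run here
    finally:
        # flip the flag so the loop exits after its current sleep, then wait for it
        stop_reporter = True
        await reporter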
Example 28
    def extend_rest_timeout(self, request):
        previous_timeout = adapter_config.default_api_timeout
        adapter_config.default_api_timeout = max(300, previous_timeout)
        logger(
            "Starting test class: Adjusting REST timeout to {} seconds".format(
                adapter_config.default_api_timeout))

        def fin():
            adapter_config.default_api_timeout = previous_timeout
            logger(
                "Finishing test class: Replacing old REST timeout of {} seconds"
                .format(previous_timeout))

        request.addfinalizer(fin)
Example 29
    async def do_test_get_twin(self, *, client, registry, count, time_limit):
        await client.enable_twin()

        for i in range(0, count):
            if time_limit.is_test_done():
                return

            logger("get_twin {}/{}".format(i + 1, count))
            twin_sent = sample_content.make_desired_props()

            await patch_desired_props(registry, client, twin_sent)

            while True:
                twin_received = await client.get_twin()

                logger("twin sent:    " + str(twin_sent))
                logger("twin received:" + str(twin_received))
                if twin_sent["desired"]["foo"] == twin_received["desired"][
                        "foo"]:
                    break
                else:
                    logger(
                        "Twin does not match.  Sleeping for 2 seconds and retrying."
                    )
                    await asyncio.sleep(2)
    async def test_send_telemetry_to_iothub(self, client, eventhub,
                                            telemetry_payload):
        if len(str(telemetry_payload)
               ) > limitations.get_maximum_telemetry_message_size(client):
            pytest.skip("message is too big")

        await eventhub.connect()

        logger('sending "{}"'.format(telemetry_payload))

        await client.send_event(telemetry_payload)

        received_message = await eventhub.wait_for_next_event(
            client.device_id, expected=telemetry_payload)
        assert received_message is not None, "Message not received"