def test_run(auto_create_group, samples_per_group, num_cores,
             num_flows_per_core, error, error_code, record, reinject_packets):
    """Make sure that the run command carries out an experiment as would be
    expected."""
    system_info = SystemInfo(3, 1, {
        (x, y): ChipInfo(num_cores=18,
                         core_states=[AppState.run] + [AppState.idle] * 17,
                         working_links=set(Links),
                         largest_free_sdram_block=110*1024*1024,
                         largest_free_sram_block=1024*1024)
        for x in range(3)
        for y in range(1)
    })

    mock_mc = Mock()
    mock_mc.get_system_info.return_value = system_info

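    # The application() call is used as a context manager, so give the mock
    # context __enter__ and __exit__ methods.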
    mock_application_ctx = Mock()
    mock_application_ctx.__enter__ = Mock()
    mock_application_ctx.__exit__ = Mock()
    mock_mc.application.return_value = mock_application_ctx

    if reinject_packets:
        # If reinjecting, a core is added to every chip
        mock_mc.wait_for_cores_to_reach_state.return_value = len(system_info)
    else:
        # If not reinjecting, only the user-defined cores exist
        mock_mc.wait_for_cores_to_reach_state.return_value = num_cores

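    # Reads from the mock SDRAM file return the parametrised error_code
    # followed by zeroes up to the requested read size.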
    def mock_sdram_file_read(size):
        return error_code + b"\0"*(size - 4)
    mock_sdram_file = Mock()
    mock_sdram_file.read.side_effect = mock_sdram_file_read

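    # Every SDRAM allocation made during the run returns the same mock file.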
    mock_mc.sdram_alloc_as_filelike.return_value = mock_sdram_file

    e = Experiment(mock_mc)
    e.timestep = 1e-6
    e.warmup = 0.01
    e.duration = 0.01
    e.cooldown = 0.01
    e.flush_time = 0.01

    # Record the result of _construct_core_commands to allow checking of
    # memory allocation sizes
    construct_core_commands = e._construct_core_commands
    cores_commands = {}

    def wrapped_construct_core_commands(core, *args, **kwargs):
        commands = construct_core_commands(core=core, *args, **kwargs)
        cores_commands[core] = commands
        return commands
    e._construct_core_commands = Mock(
        side_effect=wrapped_construct_core_commands)

    if record:
        e.record_sent = True

    e.reinject_packets = reinject_packets

    # Create example cores. Cores are placed on sequential chips along the
    # x-axis.
    cores = [e.new_core(x, 0) for x in range(num_cores)]
    for c in cores:
        for _ in range(num_flows_per_core):
            e.new_flow(c, c)

    # Create example groups
    for num_samples in samples_per_group:
        with e.new_group():
            e.record_interval = e.duration / float(num_samples)

    # The run should fail with an exception when expected.
    if error and (num_cores > 0 or reinject_packets):
        with pytest.raises(NetworkTesterError) as exc_info:
            e.run(0x33, create_group_if_none_exist=auto_create_group)
        results = exc_info.value.results
    else:
        results = e.run(0x33, create_group_if_none_exist=auto_create_group)

    # The results should be of the correct type...
    assert isinstance(results, Results)

    # The results returned should be all zeros (since that is what was written
    # back)
    if record and num_cores > 0 and num_flows_per_core > 0:
        assert sum(results.totals()["sent"]) == 0

    # The supplied app ID should be used
    mock_mc.application.assert_called_once_with(0x33)

    # If reinjection is enabled, the binary should have been loaded
    print([call[1][0] for call in mock_mc.load_application.mock_calls])
    reinjector_loaded = any((len(call[1][0]) == 1 and
                             "reinjector.aplx" in list(call[1][0])[0])
                            for call in mock_mc.load_application.mock_calls)
    if reinject_packets:
        assert reinjector_loaded
    else:
        assert not reinjector_loaded

    # Each chip should have been issued with a suitable malloc for any cores
    # on it.
    for x, core in enumerate(cores):
        cmds_size = cores_commands[core].size
        if record:
            # The space required to record deadlines_missed and the sent
            # counters.
            result_size = (1 + ((1 + num_flows_per_core) *
                                sum(samples_per_group))) * 4
        else:
            # Just the status value and deadlines_missed
            result_size = (1 + sum(samples_per_group)) * 4
        size = max(cmds_size, result_size)
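        # The allocation should be tagged with the core number assigned to
        # this core.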
        core_num = e._allocations[core][Cores].start
        mock_mc.sdram_alloc_as_filelike.assert_any_call(size, x=x, y=0,
                                                        tag=core_num)

    # The correct number of barriers should have been reached
    num_groups = len(samples_per_group)
    if auto_create_group:
        num_groups = max(1, num_groups)
    assert len(mock_mc.send_signal.mock_calls) == num_groups


def test_construct_core_commands(router_access_core):
    # XXX: This test is *very* far from being complete. In particular, though
    # the problem supplied is realistic, the output is not checked thoroughly
    # enough.
    e = Experiment(Mock())

    # A simple example network as follows:
    #       f0
    #  c0 ---+--> c1
    #   |    '--> c2
    #   +-------> c3
    #      f1
    # We'll pretend all of them are on the same chip and core0 is the one
    # designated as the router-value recording core.

    core0 = e.new_core()
    core1 = e.new_core()
    core2 = e.new_core()
    core3 = e.new_core()

    cores = [core0, core1, core2, core3]

    flow0 = e.new_flow(core0, [core1, core2])
    flow1 = e.new_flow(core0, core3)

    # Seed randomly
    e.seed = None

    e.timestep = 1e-6
    e.warmup = 0.001
    e.duration = 1.0
    e.cooldown = 0.001
    e.flush_time = 0.01

    e.record_sent = True

    e.reinject_packets = True

    # By default, nothing should send and everything should consume
    e.probability = 0.0
    e.consume = True
    e.router_timeout = 240

    # In group0, everything consumes; f0 sends packets with 100% probability
    # and f1 with 50%.
    with e.new_group():
        flow0.probability = 1.0
        flow1.probability = 0.5

    # In group1, nothing consumes and f0 and f1 both send with 100% probability
    with e.new_group():
        flow0.probability = 1.0
        flow1.probability = 1.0
        e.consume = False
        e.router_timeout = 0

    # In group2, we have a timeout with emergency routing enabled
    with e.new_group():
        e.router_timeout = (16, 16)

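    # Arbitrary routing keys for the two flows.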
    flow_keys = {flow0: 0xAA00, flow1: 0xBB00}

    cores_source_flows = {
        core0: [flow0, flow1],
        core1: [],
        core2: [],
        core3: [],
    }
    cores_sink_flows = {
        core0: [],
        core1: [flow0],
        core2: [flow0],
        core3: [flow1],
    }

    core_commands = {
        core: e._construct_core_commands(
            core=core,
            source_flows=cores_source_flows[core],
            sink_flows=cores_sink_flows[core],
            flow_keys=flow_keys,
            records=[Counters.deadlines_missed, Counters.sent],
            router_access_core=router_access_core).pack()
        for core in cores
    }

    # Make sure all cores have the right number of sources/sinks set
    for core in cores:
        commands = core_commands[core]
        num_sources = len(cores_source_flows[core])
        num_sinks = len(cores_sink_flows[core])
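        # The NUM command packs the source count into the low half-word and
        # the sink count into the high half-word.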
        ref_cmd = struct.pack("<II", NT_CMD.NUM,
                              (num_sources | num_sinks << 16))
        assert ref_cmd in commands

    # Make sure all cores have the right set of sources and sinks
    for core in cores:
        commands = core_commands[core]
        sources = cores_source_flows[core]
        sinks = cores_sink_flows[core]

        for source_num, source_flow in enumerate(sources):
            ref_cmd = struct.pack("<II", NT_CMD.SOURCE_KEY | (source_num << 8),
                                  flow_keys[source_flow])
            assert ref_cmd in commands

        for sink_num, sink_flow in enumerate(sinks):
            ref_cmd = struct.pack("<II", NT_CMD.SINK_KEY | (sink_num << 8),
                                  flow_keys[sink_flow])
            assert ref_cmd in commands

    # Make sure all cores have the right set of timing values
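    # (the 1e-6 s timestep is expected to be encoded as 1000, presumably in
    # nanoseconds)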
    for core in cores:
        commands = core_commands[core]

        ref_cmd = struct.pack("<II", NT_CMD.TIMESTEP, 1000)
        assert ref_cmd in commands

    # Make sure all cores have the right timeout set
    for core in cores:
        commands = core_commands[core]

        ref_cmd0 = struct.pack("<I", NT_CMD.ROUTER_TIMEOUT)
        ref_cmd1 = struct.pack("<I", NT_CMD.ROUTER_TIMEOUT_RESTORE)
        if router_access_core:
            assert ref_cmd0 in commands
            assert ref_cmd1 in commands
        else:
            assert ref_cmd0 not in commands
            assert ref_cmd1 not in commands

    # Make sure packet reinjection is enabled when requested
    for core in cores:
        commands = core_commands[core]

        ref_cmd0 = struct.pack("<I", NT_CMD.REINJECTION_ENABLE)
        ref_cmd1 = struct.pack("<I", NT_CMD.REINJECTION_DISABLE)
        if router_access_core:
            assert ref_cmd0 in commands
            assert ref_cmd1 in commands
        else:
            assert ref_cmd0 not in commands
            assert ref_cmd1 not in commands