    def reset_simulation_scenario_state(self):
        from miniworld import log
        from miniworld.Config import config
        log.info("resetting simulation_scenario_state")

        # objects may require singletons, hence first garbage collect objects
        for obj in self[KEY_OBJECT]:
            log.debug("resetting '%s'", obj)
            try:
                obj.reset()
            except NotImplementedError:
                log.critical(
                    "Object '%s@%s' did not implement the reset() method!",
                    obj, obj.__class__)
            except Exception as e:
                if config.is_log_cleanup():
                    log.exception(e)

        for singleton in self[KEY_SINGLETON]:
            log.debug("resetting '%s'", singleton)
            try:
                singleton.reset()
            except Exception as e:
                if config.is_log_cleanup():
                    log.exception(e)

        log.info("clearing simulate state objects ...")
        self[KEY_OBJECT] = []
def init_singletons():
    log.info("initializing singletons ...")
    from miniworld.management import ShellHelper
    from miniworld.management.network.manager import NetworkManager
    from miniworld.model.singletons.Singletons import singletons
    from miniworld.errors import SimulationErrors
    from miniworld.management import SimulationStateGarbageCollector
    from miniworld.model.emulation.Qemu import QemuProcessSingletons
    from miniworld.model.spatial.Roads import Roads

    # create singletons here
    singletons.network_manager = NetworkManager.NetworkManager()
    singletons.shell_helper = ShellHelper.ShellHelper()
    singletons.spatial_singleton = Singleton.Singleton()
    singletons.spatial_singleton.roads = Roads()
    singletons.event_system = miniworld.model.events.MyEventSystem.MyEventSystem()
    singletons.simulation_errors = SimulationErrors.SimulationErrors()
    singletons.simulation_state_gc = SimulationStateGarbageCollector.SimulationStateGarbageCollector()
    singletons.protocol = Protocol.factory()()
    singletons.node_distribution_strategy = NodeDistributionStrategy.factory()()

    singletons.simulation_manager = SimulationManager.factory()()
    singletons.qemu_process_singletons = QemuProcessSingletons()

    # they share state which needs to be cleared for a new simulation
    for singleton_with_simulation_scenario_state in [
            singletons.network_manager, singletons.shell_helper,
            singletons.spatial_singleton, singletons.simulation_errors
    ]:
        singletons.simulation_state_gc.add_singleton_with_simulation_scenario_state_(
            singleton_with_simulation_scenario_state)
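# Minimal lifecycle sketch (illustration only; the helper name is made up and it
# is assumed that the reset_simulation_scenario_state() method shown at the top
# belongs to the SimulationStateGarbageCollector singleton): singletons are
# created once per process, while the reset clears per-scenario state between runs.
def _example_singleton_lifecycle():
    from miniworld.model.singletons.Singletons import singletons
    init_singletons()
    # ... run a simulation scenario ...
    singletons.simulation_state_gc.reset_simulation_scenario_state()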
Example #3
    def _run(self):
        def store_last_check_timestamp():
            # store the last timestamp of this check
            self.__last_check = time()

        store_last_check_timestamp()
        while not self.shall_terminate():

            # measure method execution time
            exec_time = timeit(self.execute_this, number=1)
            if exec_time > self.time_step:
                self.logger.critical(
                    "the execution time of the '%s' method is longer than a time step (%s). Took %s",
                    self.execute_this.__name__, self.time_step, exec_time)

            # wait the remaining time until a full time step has passed,
            # but never a negative amount if execution took longer than a time step
            wait_time = max(0.0, self.time_step - exec_time)
            self.logger.debug("sleeping %f", wait_time)
            self.shall_terminate_event.wait(timeout=wait_time)

            self.logger.info("took: %s", time() - self.__last_check)

            store_last_check_timestamp()

        log.info("terminating run loop ...")
Example #4
    def _start(self, switch=True, bridge_dev_name=None):
        """
        1. Start hub/switch
        2. Color interfaces (if switch wants so)
        3. Move interface to VLAN

        Parameters
        ----------
        switch : bool, optional (default is True)

        Raises
        ------
        NetworkManagementSwitchBridgeNotExisting
        """

        if bridge_dev_name is None:
            bridge_dev_name = config.get_bridge_tap_name()

        log.info("starting management node/switch ...")
        super(ManagementNodeVDE, self)._start(switch=switch, bridge_dev_name=bridge_dev_name)

        # start and wait for switches
        # self.switch.start(bridge_dev_name = self.bridge_name, switch = switch)

        # color tap/management device
        self.switch.color_interface(PORT_MANAGEMENT, self.interface.node_class)

        log.info("associating management tap device with management vlan ...")
        # associate tap/management device with management vlan
        self.switch.move_interface_to_vlan(self.interface, PORT_MANAGEMENT)

        self.after_pre_shell_commands()
Example #5
def log_kvm_usable():
    kvm_usable = is_kvm_usable()
    if kvm_usable:
        log.info("Using kvm for virtualization ...")
    else:
        log.info(
            "Kvm already in use or not supported! Falling back to emulation ..."
        )
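# A plausible implementation sketch of is_kvm_usable() (an assumption, not the
# actual MiniWorld code): KVM acceleration requires read/write access to /dev/kvm.
import os

def _example_is_kvm_usable():
    return os.access("/dev/kvm", os.R_OK | os.W_OK)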
Example #6
    def create_n_connect_central_nodes(self, interfaces):
        """

        Parameters
        ----------
        interfaces

        Returns
        -------
        dict<int, CentralNode>
        """
        # create CentralNodes, but only if there is a HubWiFi interface
        # TODO: REMOVE
        cnt = 0
        central_nodes_dict = {}
        for _if in filter(is_central_node_interface, interfaces):
            if cnt == 1:
                raise ValueError(
                    "Only one '%s' interface is supported at the moment!" % HubWiFi)

            # TODO: REFACTOR!
            # TODO: #54: make amount of nodes configurable
            count_central_nodes = 1  # multiprocessing.cpu_count()
            network_backend_bootstrapper = NetworkBackends.get_current_network_backend_bootstrapper()
            for _ in range(0, count_central_nodes):
                # create an own network backend for each node
                # new_emulation_node_network_backend = network_backend_bootstrapper.emulation_node_network_backend_type(network_backend_bootstrapper)

                central_node = network_backend_bootstrapper.central_node_type(
                    network_backend_bootstrapper)
                # TODO: #54 make configurable!
                log.debug("creating CentralNode with id: %s", central_node.id)
                central_node.start(switch=False)
                central_nodes_dict[central_node.id] = central_node

                # create a reference so that the :py:class:`.AbstractConnection` can access it
                # to make a connection
                self.switches[_if] = central_node.switch
            # connect CentralHubs pairwise to each other
            central_nodes = list(central_nodes_dict.values())
            log.info("connecting CentralHubs pairwise to each other ...")
            # for i1 in range(0, count_central_nodes):
            #     for i2 in range(0, count_central_nodes):
            #         if i1 != i2:
            for i1, i2 in zip(range(0, count_central_nodes),
                              range(1, count_central_nodes)):
                node_x, node_y = central_nodes[i1], central_nodes[i2]
                node_x.connect_to_emu_node(self, node_y)

            cnt += 1

        return central_nodes_dict
    def precalculate(self):

        log.info("precalculating link qualities ...")
        for distance in range(0, sys.maxsize):
            connected, link_quality_dict = self.distance_2_link_quality(distance)
            if not connected:
                self.max_connected_distance = distance
                log.info("max_connected_distance: '%s'", self.max_connected_distance)
                break

        if self.max_connected_distance is None:
            raise RuntimeError("Maximum connected distance could not be calculated!")
Example #8
def set_scenario_config(*args, **kwargs):
    """ Set the scenario config.

    Returns
    -------
    dict
        The config as JSON.
    """

    _config = JSONConfig.read_json_config(*args, **kwargs)
    log.info("setting scenario config file '%s'", *args)
    scenario_config.data = deepcopy(_config)
    return _config
Example #9
    def show_progress():
        while True:
            progress_dict = con_progress.get_progress(False)
            if progress_dict is not None:
                progress_dict = OrderedDict(progress_dict)

                cli_display.print_progress(progress_dict)
                if cli_display.is_finished(progress_dict):

                    t2 = time.time()
                    log.info("took '%s'", t2 - t1)
                    log.info("simulation started ...")
                    return

            time.sleep(CLI_REFRESH_RATE)
Example #10
    def start(self, switch=False):
        """
        1. Start hub/switch
        2. Color interfaces (if switch wants so)
        3. Move interface to VLAN

        Parameters
        ----------
        switch : bool, optional (default is False)

        Raises
        ------
        NetworkManagementSwitchBridgeNotExisting
        """
        log.info("starting management node/switch ...")
        super(CentralVDENode, self)._start(switch=switch)

        # we want all nodes connected -> disable color patch
        log.info("disabling color patch for %s", self.__class__.__name__)
        self.switch.colorful = False

        # TODO: #54,#55: only for network backend "vde"
        # color tap/management device
        if self.switch.colorful:
            log.info("coloring management tap device ...")
            self.switch.color_interface(port=miniworld.model.network.backends.vde.VDEConstants.PORT_MANAGEMENT, color=self.interface.node_class)

        log.info("associating management tap device with management vlan ...")
        # associate tap/management device with management vlan
        self.switch.move_interface_to_vlan(self.interface, port=miniworld.model.network.backends.vde.VDEConstants.PORT_MANAGEMENT)

        self.after_pre_shell_commands()
Example #11
def factory(cnt_nodes):
    from miniworld.management.spatial.MovementDirectorNoMobility import MovementDirectorNoMobility

    walk_model_name = scenario_config.get_walk_model_name()
    if walk_model_name is None:
        topology_mode = TOPOLOGY_MODE_NO_MOBILITY

    else:
        if walk_model_name == ScenarioConfig.WALK_MODEL_NAME_ARMA:
            topology_mode = TOPOLOGY_MODE_ARMA
        elif walk_model_name == ScenarioConfig.WALK_MODEL_NAME_CORE:
            topology_mode = TOPOLOGY_MODE_CORE
        else:
            topology_mode = TOPOLOGY_MODE_DEFAULT

    if topology_mode == TOPOLOGY_MODE_CORE:
        # late import: imports modules that need the MiniWorld working directory being created
        from miniworld.management.spatial import MovementDirectorCoreConfig

        core_scenarios = scenario_config.get_core_scenarios()
        log.info("using topology provided by core scenario configs '%s'",
                 pformat(core_scenarios))
        movement_director = MovementDirectorCoreConfig.MovementDirectorCoreConfig(
            core_scenarios)

    elif topology_mode == TOPOLOGY_MODE_ARMA:
        from miniworld.management.spatial.MovementDirectorArma import MovementDirectorArma
        arma_filepath = scenario_config.get_walk_model_arma_filepath()
        movement_director = MovementDirectorArma(cnt_nodes, arma_filepath)
        # TODO: change to a non-hardcoded string
        movement_director.set_path_to_replay_file(arma_filepath)
        raise NotImplementedError

    elif topology_mode == TOPOLOGY_MODE_DEFAULT:
        from miniworld.management.spatial.MovementDirector import MovementDirector
        movement_director = MovementDirector(
            {scenario_config.get_walk_model_name(): cnt_nodes}
        )  # , ("MoveOnBigStreets", 20)])#MovementDirector({"RandomWalk" : cnt_nodes})
    elif topology_mode == TOPOLOGY_MODE_NO_MOBILITY:
        movement_director = MovementDirectorNoMobility()
    else:
        raise ValueError("Topology mode is unknown!")

    log.info("created MovementDirector '%s' ...", movement_director)
    return movement_director
Example #12
    def connect_to_emu_node(self, network_backend, emulation_node):
        """ Helper function to connect the virtual node to an `EmulationNode`.

        Parameters
        ----------
        network_backend
        emulation_node

        Returns
        -------
        AbstractSwitch, AbstractConnection, Interface, Interface
            The connection between the nodes and the two interfaces
        """
        interface = self.interface
        log.info("connecting '%s' to '%s' ...", emulation_node, self)

        # get the interface with the same type
        emu_node_if = emulation_node.network_mixin.interfaces.filter_type(
            type(interface))[0]

        connection_info = self.init_connection_info()
        # NetworkBackendNotifications
        connected, switch, connection = singletons.network_manager.before_link_initial_start(
            network_backend,
            self,
            emulation_node,
            interface,
            emu_node_if,
            connection_info,
            start_activated=True)
        singletons.network_manager.after_link_initial_start(
            connected,
            switch,
            connection,
            network_backend,
            self,
            emulation_node,
            interface,
            emu_node_if,
            connection_info,
            start_activated=True)

        return switch, connection, interface, emu_node_if
Example #13
class LinkQualityModelWiFiLinear(LinkQualityModelNetEm):
    MAX_BANDWIDTH = 54000
    log.info("max_bandwidth: %s" % MAX_BANDWIDTH)

    #####################################################
    # Implement these methods in a subclass
    #####################################################

    def _distance_2_link_quality(self, distance):
        distance = distance * 1.0

        default_link_quality = \
            {self.NETEM_KEY_LOSS: None,
             self.NETEM_KEY_LIMIT: None,
             self.NETEM_KEY_DELAY: None,
             self.NETEM_KEY_CORRUPT: None,
             self.NETEM_KEY_DUPLICATE: None,
             self.NETEM_KEY_REORDER: None,
             self.NETEM_KEY_RATE: None
             }

        # distribute the bandwidth linearly with distance
        # TODO: other way than defining maximum bandwidth?
        max_bandwidth = scenario_config.get_link_bandwidth() or self.MAX_BANDWIDTH
        distance += 1

        if distance >= 0:

            distance = distance / 2
            if distance >= 0:

                bandwidth = 1.0 * max_bandwidth / distance if distance > 1 else max_bandwidth
                default_link_quality[LinkQualityConstants.LINK_QUALITY_KEY_BANDWIDTH] = bandwidth

                delay_const = (distance - 1) * 2 if distance > 1 else 0
                delay_const_str = '%.2f' % delay_const
                delay_variation = delay_const / 10.0
                delay_variation_str = '%.2f' % delay_variation
                delay_cmd = "{delay_const}ms {delay_var}ms 25%".format(
                    delay_const=delay_const_str, delay_var=delay_variation_str)
                # delay_cmd = "{delay_const} {delay_var} distribution normal".format(delay_const=delay_const, delay_var=delay_variation)
                default_link_quality[self.NETEM_KEY_DELAY] = delay_cmd
                # return bandwidth, delay_const, delay_variation

                if bandwidth >= 1000:
                    return True, default_link_quality

        return False, default_link_quality
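    # Worked example (assuming the default MAX_BANDWIDTH of 54000 kbit and no
    # scenario override of get_link_bandwidth()): the effective divisor is
    # (distance + 1) / 2, so
    #   distance   0 -> divisor  0.5 -> bandwidth 54000 kbit, delay   0 ms -> connected
    #   distance  10 -> divisor  5.5 -> bandwidth ~9818 kbit, delay   9 ms -> connected
    #   distance 107 -> divisor 54.0 -> bandwidth  1000 kbit, delay 106 ms -> connected
    #   distance 108 -> divisor 54.5 -> bandwidth  ~991 kbit -> below the 1000 kbit
    #                   threshold, so the link counts as disconnected and
    #                   precalculate() above records max_connected_distance = 108.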
Example #14
    def tap_link_up_central(self, tap_x, tap_y, up=True):
        log.info("accept all packets in FORWARD chain ...")
        self.run_shell("{ebtables} -P FORWARD ACCEPT".format(
            ebtables=ConnectionEbtables.ebtables_cmd))
Example #15
    def set_ebtables_forward_policy(policy):
        log.info("setting FORWARD chain policy to {policy} ...".format(
            policy=policy))
        ConnectionEbtables.run_shell(
            "{ebtables} -P FORWARD {policy}".format(
                ebtables=ConnectionEbtables.ebtables_cmd, policy=policy))
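    # For illustration: set_ebtables_forward_policy("DROP") issues
    #   ebtables -P FORWARD DROP
    # so no frames are forwarded between the bridge ports any more, while
    # tap_link_up_central() above sets the policy back to ACCEPT.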
Example #16
        def shape_device(self, dev_name, connection_id, link_quality_dict):
            """
            Parameters
            ----------
            dev_name : str
            connection_id : str
            link_quality_dict : dict

            tc qdisc add dev $DEV root handle 1:0 htb default 12
            tc class add dev $DEV parent 1:0 classid 1:1 htb rate 190kbit ceil 190kbit
            tc class add dev $DEV parent 1:1 classid 1:12 htb rate 100kbit ceil 190kbit prio 2
            """

            rate = link_quality_dict.get(
                LinkQualityConstants.LINK_QUALITY_KEY_BANDWIDTH)

            if rate is not None:

                # add root
                default_class = self._get_default_class()
                if default_class:
                    default_class = "default %s" % default_class

                if not self.shaped_ifaces[dev_name]:
                    postfix = ' htb {}'.format(default_class)
                    self.add_shell_command(
                        self.EVENT_LINK_SHAPE_ADD_QDISC,
                        # TODO: ADD/REMOVE default 1
                        "tc qdisc replace dev {} root handle 1:0{}".format(
                            dev_name, postfix))
                self.shaped_ifaces[dev_name] = True

                # add first and only class, use htb shaping algorithm
                self.add_shell_command(
                    self.EVENT_LINK_SHAPE_ADD_CLASS,
                    "tc class replace dev {} parent 1:0 classid 1:{id} htb rate {rate}kbit"
                    .format(dev_name, rate=rate, id=connection_id))

                # TODO: DOC
                netem_command = "tc qdisc replace dev {dev_name} parent 1:{id} handle {id}0: netem".format(
                    dev_name=dev_name, id=connection_id)

                def build_netem_options():
                    netem_options = []

                    def build_opt(key):
                        opt = link_quality_dict.get(key)
                        if opt:
                            netem_options.append("%s %s" % (key, opt))

                    for key in LinkQualityModelNetEm.NETEM_KEYS:
                        build_opt(key)

                    return ' '.join(netem_options)

                netem_command += ' {}'.format(build_netem_options())
                self.add_shell_command(self.EVENT_LINK_SHAPE_ADD_CLASS,
                                       netem_command)
                self._add_filter_cmd(dev_name, connection_id)
                self.add_cleanup(dev_name)

            else:
                log.info("not shaping device %s", dev_name)
Example #17
    def _start(self, path_qemu_base_image):
        """
        Start the QEMU instance:

        1. Set qemu process ownership
        2. Try to load a snapshot, kill the old process and snapshot if it fails
        3. Build the command line for Qemu
            3.1 Check if KVM is available
            3.2 Build NIC command
            3.3 Create disk overlay
            3.4 Include user command line additions
        4. Start the Qemu process, take process ownership for snapshot mode
            (to keep the snapshots alive in the process)
        5. Enter the Qemu Monitor first, then the serial console
        6. Boot VM
        7. Set event progress
        8. Create snapshot
        9. Store Qemu process in singleton map
        10. Call after_start

        Parameters
        ----------
        path_qemu_base_image : str
            Path to the base image used as read layer.

        Raises
        ------
        QemuBootWaitTimeout
            Timeout while booting the vm.
        REPLTimeout
            Timeout while executing commands on the shell.
        InvalidImage
        """

        if os.path.getsize(path_qemu_base_image) == 0:
            raise self.InvalidImage()

        es = singletons.event_system
        self.process = None
        snapshot_load_failed = False

        if config.is_qemu_snapshot_boot():
            self.process = singletons.qemu_process_singletons.get(self.id)
            take_process_ownership = False
        else:
            take_process_ownership = True

        def kill_qemu_snapshot_process():

            # kill old qemu process and snapshot
            # terminate old qemu process
            self.process.kill()
            self.process.wait()
            self.process = None

        # check if a snapshot exists
        if self.process is not None:

            # only snapshot boot if scenario did not change
            # TODO: unit test
            if singletons.simulation_manager.scenario_changed:
                snapshot_load_failed = True
                log.info(
                    'scenario config changed -> no snapshot boot possible')
            else:
                id_snapshot = self.get_snapshot_id()
                self.nlog.info("loading vm snapshot %s", id_snapshot)
                t_start = time.time()
                try:
                    self.monitor_repl.loadvm(id_snapshot)
                    t_end = time.time()
                    log.debug("loaded snapshot in %0.2f seconds",
                              t_end - t_start)
                    self.booted_from_snapshot = True

                except QemuMonitorSnapshotLoadError:
                    snapshot_load_failed = True

        if snapshot_load_failed:
            kill_qemu_snapshot_process()

        if self.process is None or snapshot_load_failed:
            self.monitor_repl = QemuMonitorRepl(self)

            # build qemu shell command from template
            qemu_cmd = self._build_qemu_command(path_qemu_base_image)
            # run the qemu command
            self.process = singletons.shell_helper.run_shell_async(
                self.id,
                qemu_cmd,
                prefixes=[self.shell_prefix],
                # we are responsible ourselves for killing the process
                take_process_ownership=take_process_ownership)

            # we need to connect to both sockets once, first to the qemu monitor socket (this creates the serial shell socket)
            self.monitor_repl.run_commands_eager(StringIO("\n"))
            # NetUtil.wait_until_uds_reachable(self.path_uds_socket)

            booted_signal = scenario_config.get_signal_boot_completed(
                node_id=self.id)
            shell_prompt = scenario_config.get_shell_prompt(node_id=self.id)

            # boot signal and shell prompt supplied
            # use boot signal for boot and shell prompt for entering the shell
            if booted_signal is not None and shell_prompt is not None:
                func = NetUtil.wait_for_socket_result
                booted_signal = scenario_config.get_signal_boot_completed(
                    node_id=self.id)
            else:
                booted_signal = scenario_config.get_shell_prompt(
                    node_id=self.id)
                func = NetUtil.wait_for_boot

            if scenario_config.is_provisioning_boot_mode_selectors():
                # connected via unix domain socket
                self.wait_until_qemu_booted(
                    func,
                    self.log_path_qemu_boot,
                    booted_signal=booted_signal,
                    # TODO:
                    timeout=config.get_repl_timeout())

            else:
                raise ValueError("Unknown boot mode!")

        # notify EventSystem that the VM booted successfully
        with es.event_no_init_finish(es.EVENT_VM_BOOT) as ev:
            ev.update([self.id], 1.0)

        if not self.booted_from_snapshot:
            # connect to the serial shell
            self.run_commands_eager(StringIO("\n"))

        # notify EventSystem that the VMs shell is ready
        with es.event_no_init_finish(es.EVENT_VM_SHELL_READY) as ev:
            ev.update([self.id], 1.0)

        self.nlog.info("qemu instance running ...")

        if config.is_qemu_snapshot_boot():
            # store process singleton
            singletons.qemu_process_singletons[self.id] = self.process

        self.after_start()
# we cannot simply call find_empty_group with the same parameters because the method only works if a new group has been created!
# therefore we pass the start parameter!
GROUP_BRIDGES = find_empty_group(start=GROUP_TUNNELS + 1)
GROUP_TAP_DEVS = find_empty_group(start=GROUP_BRIDGES + 1)

GROUPS = {
    "tunnels": GROUP_TUNNELS,
    "bridges": GROUP_BRIDGES,
    "tap_devices": GROUP_TAP_DEVS
}

GROUPS_LOG_FILE = PathUtil.get_temp_file_path("iproute2_groups")

# log iproute2 groups
with open(GROUPS_LOG_FILE, "w") as f:
    log.info("writing the iproute2 groups to '%s'", GROUPS_LOG_FILE)
    f.write('\n'.join(["%s:%s" % (key, val) for key, val in GROUPS.items()]))

for iproute2_group, val in GROUPS.items():
    log.info("%s group is: %d" % (iproute2_group, val))


def get_bridge_add_cmd(bridge_dev_name):
    return "ip link add name {} type bridge".format(bridge_dev_name)


def get_bridge_set_hub_mode_cmd(bridge_dev_name):
    return "ip link set dev {} type bridge ageing_time 0".format(
        bridge_dev_name)
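# Example of the generated iproute2 commands (the bridge name is made up):
#   get_bridge_add_cmd("br_wifi1")          -> "ip link add name br_wifi1 type bridge"
#   get_bridge_set_hub_mode_cmd("br_wifi1") -> "ip link set dev br_wifi1 type bridge ageing_time 0"
# An ageing time of 0 makes the bridge behave like a hub: learned MAC entries
# expire immediately, so every frame is flooded to all ports.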

Example #19
    def build_cache(self):
        log.info("building interface cache ...")
        self.cache = {x.get_attr('IFLA_IFNAME'): x['index']
                      for x in self.ipr.get_links()}
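    # The resulting cache maps interface names to their kernel ifindex,
    # e.g. (illustrative) {"lo": 1, "eth0": 2}, so later netlink calls can
    # address devices by index without another get_links() round trip.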
Example #20
        def do_batch(self):
            log.info("IPBatch sendto()")
            self.ipr.sendto(self.ipb.batch, (0, 0))

            log.info("resetting batch object ...")
            self.ipb.reset()
        def before_link_initial_start(self,
                                      network_backend,
                                      emulation_node_x,
                                      emulation_node_y,
                                      interface_x,
                                      interface_y,
                                      connection_info,
                                      start_activated=False,
                                      **kwargs):

            super(NetworkBackendBridgedSingleDevice,
                  self).before_link_initial_start(network_backend,
                                                  emulation_node_x,
                                                  emulation_node_y,
                                                  interface_x,
                                                  interface_y,
                                                  connection_info,
                                                  start_activated=start_activated,
                                                  **kwargs)
            # start a single bridge here and add all tap devices to it
            # afterwards use ebtables for connection filtering on layer 2
            connection = None

            connection_type = self.network_backend_bootstrapper.connection_type

            # TODO: DOC
            br_name = self.get_br_name(interface_x.nr_host_interface)
            bridge = None
            if not self.bridges.get(br_name, None):
                log.info("creating bridge %s", br_name)
                bridge = self.bridges[
                    br_name] = self.network_backend_bootstrapper.switch_type(
                        br_name, interface_x)
                # create extra chain for bridge
                self.add_shell_ebtables_command(
                    self.EVENT_EBTABLES_CREATE_CHAINS,
                    connection_type.get_ebtables_chain_cmd(
                        br_name, connection_type.policy_drop))
                # redirect to new chain
                self.add_shell_ebtables_command(
                    self.EVENT_EBTABLES_REDIRECT,
                    connection_type.get_ebtables_redirect_cmd(br_name))

                bridge.start(switch=False, bridge_dev_name=br_name)
            else:
                bridge = self.get_bridge(interface_x)

            connection = connection_type(emulation_node_x, emulation_node_y,
                                         interface_x, interface_y,
                                         connection_info)
            connection.start(self)

            # TODO: #84: improve
            connections = Connections([(emulation_node_x, interface_x),
                                       (emulation_node_y, interface_y)])

            is_one_tap_mode = connection_info.is_central or connection_info.is_mgmt or connection_info.is_remote_conn
            if not is_one_tap_mode:
                tap_x = self.get_tap_name(emulation_node_x.id, interface_x)
                tap_y = self.get_tap_name(emulation_node_y.id, interface_y)

                # TODO: #84: check which devices are already added to the bridge ...
                # add devices to bridge
                bridge.add_if(tap_x, if_up=True)
                bridge.add_if(tap_y, if_up=True)

            # TODO: nearly same code as in NetworkBackendBridgedMultiDevice!
            else:
                virtual_node, _if = None, None
                if connection_info.is_central:
                    virtual_node, _if = connections.filter_central_nodes()[0]
                elif connection_info.is_mgmt:
                    virtual_node, _if = connections.filter_mgmt_nodes()[0]

                tap_dev_name = None
                if connection_info.is_remote_conn:

                    tunnel_dev_name = self.get_tunnel_name(
                        emulation_node_x.id, emulation_node_y.id)
                    # add the tunnel to the bridge
                    bridge.add_if(tunnel_dev_name, if_up=True)
                    remote_node, if_remote_node, local_emu_node, if_local_emu_node = singletons.simulation_manager.get_remote_node(
                        emulation_node_x, emulation_node_y, interface_x,
                        interface_y)
                    # the tap device we want to add to the bridge is the local one, not the remote one!
                    tap_dev_name = self.get_tap_name(local_emu_node.id,
                                                     if_local_emu_node)
                else:
                    emu_node, emu_if = connections.filter_real_emulation_nodes()[0]
                    tap_dev_name = self.get_tap_name(emu_node.id, emu_if)
                    bridge = virtual_node.switch

                # add the tap device to the bridge
                bridge.add_if(tap_dev_name, if_up=True)

            return True, bridge, connection