    def get_next_node(self, flow, sf):
        """
        Get next node using weighted probabilites from the scheduler
        """
        schedule = self.params.schedule
        # Check if a scheduling rule exists
        if flow.current_node_id in schedule and flow.sfc in schedule[flow.current_node_id]:
            schedule_node = schedule[flow.current_node_id]
            schedule_sf = schedule_node[flow.sfc][sf]
            sf_nodes = list(schedule_sf.keys())
            sf_probability = list(schedule_sf.values())
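            # np.random.choice raises a ValueError if the probabilities do not sum
            # to 1, which points to a malformed scheduling rule.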
            try:
                next_node = np.random.choice(sf_nodes, p=sf_probability)
                return next_node
            except Exception as ex:
                # Scheduling rule is malformed: drop flow
                log.warning(f'Flow {flow.flow_id}: Scheduling rule at node {flow.current_node_id} is not correct. '
                            f'Dropping flow!')
                log.warning(ex)
                metrics.dropped_flow()
                self.env.exit()
        else:
            # Scheduling rule does not exist: drop flow
            log.warning(f'Flow {flow.flow_id}: Scheduling rule not found at node {flow.current_node_id}. Dropping flow!')
            metrics.dropped_flow()
            self.env.exit()
    def init_flow(self, flow):
        """
        Initialize flows within the network. This function takes the generated flow object at the ingress node
        and handles it according to the requested SFC. We check whether the requested SFC actually exists
        in the schedule; otherwise we log a warning and drop the flow.
        The algorithm checks the flow's requested SFC and forwards the flow through the network along the
        SFC's list of SFs, based on the LB rules provided through the scheduler's 'flow_schedule()'
        function.
        """
        log.info(
            "Flow {} generated, arrived at node {}, requesting {} - flow duration: {}ms, "
            "flow dr: {}. Time: {}".format(flow.flow_id, flow.current_node_id, flow.sfc, flow.duration, flow.dr,
                                           self.env.now))
        # Use .get() so that a missing SFC yields None instead of raising a KeyError
        # (assuming sfc_list is a dict keyed by SFC name)
        sfc = self.params.sfc_list.get(flow.sfc)
        # Check whether the requested SFC exists
        if sfc is not None:
            # Iterate over the SFs and process the flow at each SF.
            yield self.env.process(self.pass_flow(flow, sfc))
        else:
            log.info(f"Requested SFC was not found. Dropping flow {flow.flow_id}")
            # Update metrics for the dropped flow
            metrics.dropped_flow()
            self.env.exit()
    def process_flow(self, flow, sfc):
        """
        Process the flow at the requested SF of the current node.
        """
        # Determine the SF that should process the flow at the current node
        current_node_id = flow.current_node_id
        sf = sfc[flow.current_position]
        flow.current_sf = sf

        log.info("Flow {} STARTED PROCESSING at node {} for processing. Time: {}"
                 .format(flow.flow_id, flow.current_node_id, self.env.now))

        if sf in self.params.sf_placement[current_node_id]:
            current_sf = flow.current_sf
            vnf_delay_mean = self.params.sf_list[flow.current_sf]["processing_delay_mean"]
            vnf_delay_stdev = self.params.sf_list[flow.current_sf]["processing_delay_stdev"]
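            # Sample the processing delay from a normal distribution; taking the
            # absolute value guards against negative samples from the tail.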
            processing_delay = np.absolute(np.random.normal(vnf_delay_mean, vnf_delay_stdev))
            # Update metrics for the processing delay
            # Add the delay to the flow's end2end delay
            metrics.add_processing_delay(processing_delay)
            flow.end2end_delay += processing_delay

            # Calculate the demanded capacity when the flow is processed at this node
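            # 'resource_function' is assumed to map an SF's total load (sum of flow
            # data rates) to the node capacity that the SF demands.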
            demanded_total_capacity = 0.0
            for sf_i, sf_data in self.params.network.nodes[current_node_id]['available_sf'].items():
                if sf == sf_i:
                    # Include the flow's data rate in the requested SF's capacity calculation
                    demanded_total_capacity += self.params.sf_list[sf]['resource_function'](sf_data['load'] + flow.dr)
                else:
                    demanded_total_capacity += self.params.sf_list[sf_i]['resource_function'](sf_data['load'])

            # Get node capacities
            node_cap = self.params.network.nodes[current_node_id]["cap"]
            node_remaining_cap = self.params.network.nodes[current_node_id]["remaining_cap"]
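            # 'cap' is the node's total capacity; 'remaining_cap' is assumed to track
            # what is left after the demands of all SFs currently placed on the node.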
            assert node_remaining_cap >= 0, "Remaining node capacity cannot be negative!"
            if demanded_total_capacity <= node_cap:
                log.info("Flow {} started processing at sf {} at node {}. Time: {}, Processing delay: {}"
                         .format(flow.flow_id, current_sf, current_node_id, self.env.now, processing_delay))

                # Metrics: Add active flow to the SF once the flow has begun processing.
                metrics.add_active_flow(flow, current_node_id, current_sf)

                # Add load to sf
                self.params.network.nodes[current_node_id]['available_sf'][sf]['load'] += flow.dr
                # Set remaining node capacity
                self.params.network.nodes[current_node_id]['remaining_cap'] = node_cap - demanded_total_capacity
                # Set max node usage
                metrics.calc_max_node_usage(current_node_id, demanded_total_capacity)

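                # Occupy the SF for the sampled processing delay; the SimPy timeout
                # suspends this process while other flows continue to run.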
                yield self.env.timeout(processing_delay)
                log.info("Flow {} started departing sf {} at node {}. Time {}"
                         .format(flow.flow_id, current_sf, current_node_id, self.env.now))

                # Check if the flow is currently at the last SF; if so, depart the flow.
                if flow.current_position == len(sfc) - 1:
                    yield self.env.timeout(flow.duration)
                    self.depart_flow(flow)
                else:
                    # Increment the position of the flow within the SFC
                    flow.current_position += 1
                    # Forward the flow to the next SF as a concurrent process
                    self.env.process(self.pass_flow(flow, sfc))
                    # Wait the flow duration, i.e., until the flow has fully arrived,
                    # before departing the SF.
                    yield self.env.timeout(flow.duration)
                    log.info("Flow {} FINISHED ARRIVING at SF {} at node {}. Time: {}"
                             .format(flow.flow_id, current_sf, current_node_id, self.env.now))
                    # Remove the active flow from the SF after it has departed the SF
                    metrics.remove_active_flow(flow, current_node_id, current_sf)

                # Remove load from sf
                self.params.network.nodes[current_node_id]['available_sf'][sf]['load'] -= flow.dr
                assert self.params.network.nodes[current_node_id]['available_sf'][sf]['load'] >= 0, \
                    'SF load cannot be less than 0!'
                # Check if the SF is no longer processing any flows AND has been removed from the placement.
                # If so, remove the SF from the load recording. This allows SFs to be handled gracefully.
                if (self.params.network.nodes[current_node_id]['available_sf'][sf]['load'] == 0) and (
                        sf not in self.params.sf_placement[current_node_id]):
                    del self.params.network.nodes[current_node_id]['available_sf'][sf]

                # Recalculation is necessary because other flows could have already arrived or departed at the node
                used_total_capacity = 0.0
                for sf_i, sf_data in self.params.network.nodes[current_node_id]['available_sf'].items():
                    used_total_capacity += self.params.sf_list[sf_i]['resource_function'](sf_data['load'])
                # Set remaining node capacity
                self.params.network.nodes[current_node_id]['remaining_cap'] = node_cap - used_total_capacity
                # Refresh the local remaining-capacity value for the assertion below
                node_remaining_cap = self.params.network.nodes[current_node_id]["remaining_cap"]

                # The remaining capacity must never exceed the node capacity, i.e., nodes must not
                # put back more capacity than they originally had.
                assert node_remaining_cap <= node_cap, "Node remaining capacity cannot be more than node capacity!"
            else:
                log.info(f"Not enough capacity for flow {flow.flow_id} at node {flow.current_node_id}. Dropping flow.")
                # Update metrics for the dropped flow
                metrics.dropped_flow()
                self.env.exit()
        else:
            log.info(f"SF {sf} was not found at {current_node_id}. Dropping flow {flow.flow_id}")
            metrics.dropped_flow()
            self.env.exit()