Example #1
  def _do_probe (self):
    """
    Send an ARP to a server to see if it's still up
    """
    self._do_expire()

    server = self.servers.pop(0)
    self.servers.append(server)

    r = arp()
    r.hwtype = r.HW_TYPE_ETHERNET
    r.prototype = r.PROTO_TYPE_IP
    r.opcode = r.REQUEST
    r.hwdst = ETHER_BROADCAST
    r.protodst = server
    r.hwsrc = self.mac
    r.protosrc = self.service_ip
    e = ethernet(type=ethernet.ARP_TYPE, src=self.mac,
                 dst=ETHER_BROADCAST)
    e.set_payload(r)
    #self.log.debug("ARPing for %s", server)
    msg = of.ofp_packet_out()
    msg.data = e.pack()
    msg.actions.append(of.ofp_action_output(port = of.OFPP_FLOOD))
    msg.in_port = of.OFPP_NONE
    self.con.send(msg)

    self.outstanding_probes[server] = time.time() + self.arp_timeout

    core.callDelayed(self._probe_wait_time, self._do_probe)
Example #2
    def _handle_ConnectionDown(self, event):
        log.info("Killing switch %s" % (event.connection, ))

        pb_id = random.randint(1, 1000000000)
        switches = core.list.get_list()
        for s in switches:
            core.callDelayed(0.001, s.update_down_switches, event.dpid, pb_id)
Example #3
    def _do_probe(self):
        """
    Send an ARP to a server to see if it's still up
    """
        self._do_expire()

        server = self.servers.pop(0)
        self.servers.append(server)

        r = arp()
        r.hwtype = r.HW_TYPE_ETHERNET
        r.prototype = r.PROTO_TYPE_IP
        r.opcode = r.REQUEST
        r.hwdst = ETHER_BROADCAST
        r.protodst = server
        r.hwsrc = self.mac
        r.protosrc = self.service_ip
        e = ethernet(type=ethernet.ARP_TYPE, src=self.mac, dst=ETHER_BROADCAST)
        e.set_payload(r)
        #self.log.debug("ARPing for %s", server)
        msg = of.ofp_packet_out()
        msg.data = e.pack()
        msg.actions.append(of.ofp_action_output(port=of.OFPP_FLOOD))
        msg.in_port = of.OFPP_NONE
        self.con.send(msg)

        self.outstanding_probes[server] = time.time() + self.arp_timeout

        core.callDelayed(self._probe_wait_time, self._do_probe)
Example #4
 def _handle_ConnectionUp(self, event):
     switch = event.dpid
     self.switchCount -= 1
     if self.switchCount == 0 and not self.installed:
         self.install_policy(event.connection)
         self.installed = True
         core.callDelayed(7, self.migrate)
Example #5
  def _do_probe (self):
    # Send an ARP to the server to see if it's still up
    self._do_expire()
    server = IPAddr('10.0.0.1')
    r = arp()
    r.hwtype = r.HW_TYPE_ETHERNET
    r.prototype = r.PROTO_TYPE_IP
    r.opcode = r.REQUEST
    r.hwdst = ETHER_BROADCAST
    r.protodst = server
    r.hwsrc = self.mac
    r.protosrc = IPAddr('10.0.0.6')
    e = ethernet(type=ethernet.ARP_TYPE, src=self.mac,
                 dst=ETHER_BROADCAST)
    e.set_payload(r)
    print("ARPing for %s", server)
    msg = of.ofp_packet_out()
    msg.data = e.pack()
    msg.actions.append(of.ofp_action_output(port = of.OFPP_FLOOD))
    msg.in_port = of.OFPP_NONE
    self.connection.send(msg)

    self.outstanding_probes[server] = time.time() + self.arp_timeout

    core.callDelayed(self._probe_wait_time, self._do_probe)
Example #6
 def _handle_ConnectionUp(self, event):
     switch = event.dpid
     self.switchCount -= 1
     if self.switchCount == 0 and not self.installed:
         self.install_policy(event.connection)
         self.installed = True
         core.callDelayed(7, self.migrate)
Example #7
    def update_down_switches(self, dpid, pb_id):
        log.info("telling switch " + self.graph_name +
                 " that switch with dpid " + str(dpid) + " is down")
        dead_switch_name = 's' + str(int(dpid) - 1)
        self.dead_switches.append(dead_switch_name)
        # log.info("these are the dead switches now " + str(self.dead_switches))

        ip_val = ip_to_val(self.ip)
        lvl, idx, loc = level(ip_val), index(ip_val), location(ip_val)

        failed_val = ip_to_val(self.TOPO['switch_to_ip'][dead_switch_name])
        failed_lvl, failed_idx, failed_loc = level(failed_val), index(
            failed_val), location(failed_val)

        b = int(math.ceil(math.log((self.TOPO['n_ports'] / 2), 2)))
        failed_prefix = (failed_loc >> (b * failed_lvl))

        if failed_lvl != 2:  # specify subtree type
            if failed_prefix % 2 == 0:  # A subtree
                stack = "A"
            else:  # B subtree
                stack = "B"
        else:
            stack = ""

        for k, _ in self.get_lower_neighbors():
            if k == dead_switch_name:
                core.callDelayed(0.025, handle_StartPushback, self, pb_id,
                                 dead_switch_name, failed_prefix, failed_lvl,
                                 stack)
Example #8
 def clean_leases(self):
     for k in self.ip_mapping:
         if self.ip_mapping[k].lease_end == 0:
             continue
         if self.ip_mapping[k].lease_end <= time.time() + 0.2 * MAX_ROUTABLE_LEASE:
             self.ip_mapping[k].lease_end = 0
             self.insert_couchdb("del", self.ip_mapping[k].ip, self.ip_mapping[k].mac, None)
     core.callDelayed(60, self.clean_leases)
Example #9
def launch ():

  _trace_thread = threading.Thread(target=_trace_thread_proc)
  _trace_thread.daemon = True

  # Start it up a bit in the future so that it doesn't print all over
  # init messages.
  core.callDelayed(3, _trace_thread.start)
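The launch() above defers _trace_thread.start with core.callDelayed so the daemon thread only starts once POX's init messages have settled. The body of _trace_thread_proc is not shown; the sketch below is a hypothetical stand-in (the loop body and interval are assumptions):

import time

def _trace_thread_proc():
  # Hypothetical body: loop in the daemon thread and emit a periodic
  # trace line; the real procedure is not part of the example.
  while True:
    print("trace thread alive")
    time.sleep(5)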
Example #11
 def _handle_arp_table_ArpEntryAddedEvent(self, event):
   # Send any queued packets destined for the added address.
   ip = str(event.ip)
   packet_queue = self._pending_packets.get(ip)
   if packet_queue and len(packet_queue) > 0:
     NatRouter.logger.info('Scheduling queued packet processing for IP {}'.format(ip))
     core.callDelayed(0, self._send_queued_packets, ip)
     NatRouter.logger.info('Queued packet processing for IP {} scheduled.'.format(ip))
Example #12
 def open_later(self):
     self.reconnect_delay *= 2
     self.reconnect_delay = int(self.reconnect_delay)
     if self.reconnect_delay > self.max_retry_delay:
         self.reconnect_delay = self.max_retry_delay
     self.kw['reconnect_delay'] = self.reconnect_delay
     self._debug("Try again in %s seconds", self.reconnect_delay)
     from pox.core import core
     core.callDelayed(self.reconnect_delay, self.begin, **self.kw)
Example #13
 def open_later (self):
   self.reconnect_delay *= 2
   self.reconnect_delay = int(self.reconnect_delay)
   if self.reconnect_delay > self.max_retry_delay:
     self.reconnect_delay = self.max_retry_delay
   self.kw['reconnect_delay'] = self.reconnect_delay
   self._debug("Try again in %s seconds", self.reconnect_delay)
   from pox.core import core
   core.callDelayed(self.reconnect_delay, self.begin, **self.kw)
Example #14
  def _handle_ConnectionUp (self, event):
    try:
      str_node = core.Outband.t.dpid(event.dpid).name
    except AttributeError:
      log.warn('Unknown dpid: %s' % dpid_to_str(event.dpid))
      return
    log.debug("ConnectionUp: %s %s" % (event.connection, str_node))

    core.callDelayed(4, self._reset_flowtable, event.dpid)
    return
Example #15
    def _handle_ConnectionUp(self, event):
        try:
            str_node = core.Outband.t.dpid(event.dpid).name
        except AttributeError:
            log.warn('Unknown dpid: %s' % dpid_to_str(event.dpid))
            return
        log.debug("ConnectionUp: %s %s" % (event.connection, str_node))

        core.callDelayed(4, self._reset_flowtable, event.dpid)
        return
Example #16
 def check_device_status(self):
     remote_stream = ChangesStream(self.selected_db, heartbeat=True, since=self.last_seq, filter='homework-remote/devices_pox')
     for change in remote_stream:
         self.last_seq =change['seq']
         the_id = change['id']
         the_rev = change['changes'][0]['rev']
         current_doc = self.selected_db.open_doc(the_id, rev=the_rev)
         device = {'mac': EthAddr(current_doc['mac_address']), 'action': current_doc['action']}
         devices = [device]
         self.raiseEvent(DeviceStateChange(devices))
     core.callDelayed(1, self.check_device_status)
Example #17
    def _handle_timer(self, dpid):
        sw = self.switches.get(dpid)
        if sw is None:
            return

        # send stat request
        body = of.ofp_port_stats_request()
        msg = of.ofp_stats_request(body=body)
        core.openflow.sendToDPID(dpid, msg.pack())

        core.callDelayed(self.poll_period, self._handle_timer, dpid)
Example #18
  def _handle_timer (self, dpid):
    sw = self.switches.get(dpid)
    if sw is None:
      return

    # send stat request
    body = of.ofp_port_stats_request()
    body.port_no = of.OFPP_NONE  # request all port statics
    msg = of.ofp_stats_request(body=body)
    core.openflow.sendToDPID(dpid, msg.pack())

    core.callDelayed(self.poll_period, self._handle_timer, dpid)
Example #19
    def _handle_timer(self, dpid):
        sw = self.switches.get(dpid)
        if sw is None:
            return

        # send stat request
        body = of.ofp_flow_stats_request()
        ## DAMN! stats can't be requested based on priority.
        #body.priority = of.OFP_DEFAULT_PRIORITY + 1
        msg = of.ofp_stats_request(body=body)
        core.openflow.sendToDPID(dpid, msg.pack())

        core.callDelayed(self.poll_period, self._handle_timer, dpid)
Example #20
  def _handle_timer (self, dpid):
    sw = self.switches.get(dpid)
    if sw is None:
      return

    # send stat request
    body = of.ofp_flow_stats_request()
    ## DAMN! stats can't be requested based on priority.
    #body.priority = of.OFP_DEFAULT_PRIORITY + 1
    msg = of.ofp_stats_request(body=body)
    core.openflow.sendToDPID(dpid, msg.pack())

    core.callDelayed(self.poll_period, self._handle_timer, dpid)
Example #21
    def begin(cls, **kw):
        #if len(args) >= 4:
        #  reconnect_delay = args[3]
        #else:
        reconnect_delay = kw.get('reconnect_delay', cls._default_retry_delay)

        try:
            w = cls(**kw)
            return w
        except:
            raise
            core.callDelayed(reconnect_delay, cls.begin, **kw)
            return None
Example #22
def launch():
    """
    core is able to register all the pox components
    core.openflow is the pox openflow component
    _handle_ConnectionUp subscribe to core.openflow's ConnectionUp event

    each Timer instance add a non-default parameter to show Timer
    capabilities.

    """

    TIME_TO_WAKE = 2
    args = ["ciao", "mare"]
    core.callDelayed(TIME_TO_WAKE, timeout_handler, args)

    t = Timer(
            TIME_TO_WAKE,
            timeout_handler,
            args='t1')

    t2 = Timer(
            TIME_TO_WAKE,
            timeout_handler,
            absoluteTime=True,  # use False
            args='t2')

    tr = Timer(
            TIME_TO_WAKE,
            timeout_handler,
            absoluteTime=False,
            recurring=True,  # recur indefinitely over time
            args='tr')

    tw = Timer(
            TIME_TO_WAKE,
            timeout_handler,
            absoluteTime=False,
            recurring=False,
            started=False,  # not start automatically
            args='tw')
    time.sleep(TIME_TO_WAKE)
    tw.start()

    tk = Timer(
            TIME_TO_WAKE,
            timeout_handler_kill,  # handler able to cancel the timer
            absoluteTime=False,
            recurring=True,
            started=False,
            selfStoppable=False,  # timer can be cancelled by the handler
            args='tw')
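The timeout_handler and timeout_handler_kill callbacks used above are not shown. A minimal sketch, assuming pox.lib.recoco.Timer's behavior where a recurring timer created with selfStoppable=True is cancelled when its callback returns False (both bodies below are illustrative, not the original handlers):

def timeout_handler(args):
    # Called by core.callDelayed / Timer with the 'args' value given above.
    print("timer fired with args: %s" % (args,))

def timeout_handler_kill(args):
    # With a self-stoppable recurring Timer, returning False cancels it.
    print("timer fired, cancelling: %s" % (args,))
    return False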
Example #23
  def begin (cls, **kw):
    #if len(args) >= 4:
    #  reconnect_delay = args[3]
    #else:
    reconnect_delay = kw.get('reconnect_delay',
        cls._default_retry_delay)

    try:
      w = cls(**kw)
      return w
    except:
      raise
      core.callDelayed(reconnect_delay, cls.begin, **kw)
      return None
Example #24
def launch(help=False, graph=False, wifi=False):
    if help:
        print """usage: pox.py openflow.discovery samples.pretty_log dtsa_smart [--help] [--graph] [--wifi]
               \nPOX DTSA-SMART controller with QoS support
               \noptional arguments:\
               \n  --help            show this help message and exit\
               \n  --graph           show network's topology graph\
               \n  --wifi            run controller in Wifi compatible mode"""
        sys.exit(1)
    core.registerNew(DTSA, wifi)
    core.callDelayed(13, addQueues)
    if graph:
        hilo = threading.Thread(target=drawGraph)
        hilo.start()
Example #25
    def __init__(self):
        cp = ConfigParser.ConfigParser()
        path = "%s/couchdb.conf" % (expanduser('~'))
        cp.read(path)
        couchdb_server = cp.get('DEFAULT', 'SERVER_NAME')
        couchdb_port = cp.get('DEFAULT', 'PORT')
        couchdb_db = cp.get('DEFAULT', 'DB')
        couchdb_admin = cp.get('DEFAULT', 'ADMIN')
        couchdb_admin_password = cp.get('DEFAULT', 'ADMIN_PASSWORD')
        couchdb_url = "http://%s:%s@%s:%s" % (couchdb_admin, couchdb_admin_password, couchdb_server, couchdb_port)

        self.server = Server(couchdb_url)
        self.selected_db = self.server[couchdb_db]
        self.last_seq = self.selected_db.info()['update_seq']
        core.callDelayed(1, self.check_device_status)
Example #26
def launch():
    """
    core is able to register all the pox components
    core.openflow is the pox openflow component
    _handle_ConnectionUp subscribe to core.openflow's ConnectionUp event

    each Timer instance add a non-default parameter to show Timer
    capabilities.

    """

    TIME_TO_WAKE = 2
    args = ["ciao", "mare"]
    core.callDelayed(TIME_TO_WAKE, timeout_handler, args)

    t = Timer(TIME_TO_WAKE, timeout_handler, args='t1')

    t2 = Timer(
        TIME_TO_WAKE,
        timeout_handler,
        absoluteTime=True,  # use False
        args='t2')

    tr = Timer(
        TIME_TO_WAKE,
        timeout_handler,
        absoluteTime=False,
        recurring=True,  # recur indefinitely over time
        args='tr')

    tw = Timer(
        TIME_TO_WAKE,
        timeout_handler,
        absoluteTime=False,
        recurring=False,
        started=False,  # not start automatically
        args='tw')
    time.sleep(TIME_TO_WAKE)
    tw.start()

    tk = Timer(
        TIME_TO_WAKE,
        timeout_handler_kill,  # handler able to cancel the timer
        absoluteTime=False,
        recurring=True,
        started=False,
        selfStoppable=False,  # timer can be cancelled by the handler
        args='tw')
Example #27
    def cleanupConnections(self):
        '''
        periodically cleanup connections that have timed out
        '''
        log.debug("running cleanup connections!!")
        remove = []
        for port, con in self.natmap:
            if con.state != ESTABLISHED:
                if con.last_time + TCP_TRANSITORY_TIMEOUT < time.time():
                    log.debug("removing connection, transitory timeout")
                    remove.append(con)

        for con in remove:
            log.debug("really removing the connection!!")
            self.natmap.remove(con)
        core.callDelayed(10, self.cleanupConnections)
Example #28
    def _handle_ConnectionUp(self, event):
        msg = of.ofp_flow_mod()

        # Default rule: drop all packets destined for private (protected) IPv4 addresses, as identified by prefix
        msg.priority = of.OFP_DEFAULT_PRIORITY + 1
        msg.match.dl_type = 0x0800 # EtherType = IP
        msg.match.nw_dst = "10.250.250.0/24"
        msg.actions.append(of.ofp_action_output(port=of.OFPP_NONE))

        event.connection.send(msg)
        log.info("Default private IP drop rule installed on {}".format(event.dpid))
        log.debug(msg.match.show())

        # Wait until both switches are connected before adding rules
        if len(core.openflow.connections) == 2:
            log.info("Adding virtual:private IP rules in 5 seconds...")
            core.callDelayed(5, self.add_test_rules)
Example #29
def call_delayed_as_coop_task(func, delay=0, *args, **kwargs):
    """
  Schedule a coop microtask with a given time.

  Use POX core logic directly.

  :param delay: delay of time
  :type delay: int
  :param func: function need to run
  :type func: func
  :param args: nameless arguments
  :type args: tuple
  :param kwargs: named arguments
  :type kwargs: dict
  :return: None
  """
    from pox.core import core
    core.callDelayed(delay, func, *args, **kwargs)
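A brief usage sketch for the helper above (the _dump_state callback and its arguments are hypothetical, for illustration only):

def _dump_state(label, verbose=False):
    # Runs later on POX's timer machinery.
    print("dumping state: %s (verbose=%s)" % (label, verbose))

# Schedule the call roughly 5 seconds from now.
call_delayed_as_coop_task(_dump_state, 5, "periodic", verbose=True)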
Example #30
    def _handle_ConnectionUp(self, event):
        msg = of.ofp_flow_mod()

        # Default rule: drop all packets destined for private (protected) IPv4 addresses, as identified by prefix
        msg.priority = of.OFP_DEFAULT_PRIORITY + 1
        msg.match.dl_type = 0x0800  # EtherType = IP
        msg.match.nw_dst = "10.250.250.0/24"
        msg.actions.append(of.ofp_action_output(port=of.OFPP_NONE))

        event.connection.send(msg)
        log.info("Default private IP drop rule installed on {}".format(
            event.dpid))
        log.debug(msg.match.show())

        # Wait until both switches are connected before adding rules
        if len(core.openflow.connections) == 2:
            log.info("Adding virtual:private IP rules in 5 seconds...")
            core.callDelayed(5, self.add_test_rules)
Example #31
    def __init__(self, connection, firewall):
        # add the nat to this switch
        self.connection = connection
        self.listenTo(connection)
        self.eth2_ip = IPAddr('172.64.3.1')
        self.eth1_ip = IPAddr('10.0.1.1')

        self.outmac = connection.ports[4].hw_addr

        self.arp_table = {} # ip to mac,port
        self.natmap = natmap()

        self.firewall = firewall
        
        #todo...
        self.send_arp(IPAddr('172.64.3.21'))
        self.send_arp(IPAddr('172.64.3.22'))

        core.callDelayed(10, self.cleanupConnections)
Example #32
def launch(help=False, queues=False, aggr=False, graph=False, wifi=False):
    if help:
        print """usage: pox.py openflow.discovery samples.pretty_log dtsa_smart [--help] [--graph] [--wifi]
               \nPOX DTSA-SMART controller with QoS support
               \noptional arguments:\
               \n  --help            show this help message and exit\
               \n  --queues          automatically manage queues configuration\
               \n  --aggr            workspace's aggregation'\
               \n  --graph           show network's topology graph\
               \n  --wifi            run controller in Wifi compatible mode"""
        sys.exit(1)
    core.registerNew(DTSA, wifi, aggr)
    if queues:
        core.callDelayed(20, addQueues)
    if aggr:
        core.callDelayed(14, startWorkspaceAggregation)
    if graph:
        hilo = threading.Thread(target=drawGraph)
        hilo.start()
Example #33
def launch(help=False, queues=False, aggr=False, graph=False, wifi=False):
    if help:
        print """usage: pox.py openflow.discovery samples.pretty_log dtsa_smart [--help] [--graph] [--wifi]
               \nPOX DTSA-SMART controller with QoS support
               \noptional arguments:\
               \n  --help            show this help message and exit\
               \n  --queues          automatically manage queues configuration\
               \n  --aggr            workspace's aggregation'\
               \n  --graph           show network's topology graph\
               \n  --wifi            run controller in Wifi compatible mode"""
        sys.exit(1)
    core.registerNew(DTSA, wifi, aggr)
    if queues:
        core.callDelayed(20, addQueues)
    if aggr:
        core.callDelayed(14, startWorkspaceAggregation)
    if graph:
        hilo = threading.Thread(target=drawGraph)
        hilo.start()
Example #34
  def __init__ (self,connection):
    # Switch we'll be adding L2 learning switch capabilities to
    self.connection= connection
    self.listenTo(connection)
    self.mac = {}
    
    self.data_ip = SERVICE_IP_ADDR
    self.hosts = HOST_IPS

    self.ip_port = {}
    self._all = False
    self.mymac = dpid_to_mac(self.connection.dpid)
    
    # seed the random number generator
    seed()

    print self.mymac

    # send out ARP requests for all the servers
    core.callDelayed(1, self._send_arps)
Example #35
    def __init__(self, path):
        self.path = path
        self.filelist = None
        self.counter = 0
        self.filenum = 0
        self.cmdlist = [
            "disconnect", "wait", "reconnect", "pass", "monitor", "reset",
            "redirect", "unredirect", "passit"
        ]
        self.handlers = handlers()
        self.funclist = None
        self.sig_table = {
            "BAD-TRAFFIC same SRC/DST": "1",
            "ICMP Time-To-Live Exceeded in Transit": "2",
            "ICMP Echo Reply": "3",
            "ICMP PING BSDtype": "4",
            "ICMP PING *NIX": "5",
            "ICMP PING": "6",
            "SNMP AgentX/tcp request": "7",
            "SNMP request tcp": "8"
        }
        self.func_table = {}
        self.alys_cmd()
        self.action_triggered = False

        self.name_process()

        self.mactable = {}
        self.iptable = {}
        self.droplist = {}
        self.monitorlist = {}
        self.redirectlist = {}

        self.ignorelist = []

        self.socket_map = {}
        self.server = secure_server(self.socket_map)
        core.Reminder.addListeners(self)
        core.addListener(pox.core.GoingUpEvent, self.start_server)
        core.call_when_ready(self.start, ["openflow_discovery", "NX"])
        core.callDelayed(1, self.start_watch)
Example #36
 def check_device_status(self):
     remote_stream = ChangesStream(self.selected_db, heartbeat=True, since=self.last_seq, filter='homework-remote/devices_pox')
     for change in remote_stream:
         self.last_seq = change['seq']
         the_id = change['id']
         the_rev = change['changes'][0]['rev']
         doc = self.selected_db.open_doc(the_id, rev=the_rev)
         prompt = True if (doc['state'] == 'pending') else False
         doc_arr = [{'doc_id': the_id, 'doc_rev': the_rev, 'doc_collection': 'devices', 'action': 'edit'}]
         strings = self.get_history_strings(doc['device_name'],
                                            doc['action'])
         timestamp = doc['event_timestamp'] if 'event_timestamp' in doc else None
         add_history_item(strings['title'], strings['desc'],
                          docs=doc_arr,
                          undoable=True,
                          prompt=prompt,
                          ts=timestamp)
         device = {'mac': EthAddr(doc['mac_address']),
                   'action': doc['action']}
         devices = [device]
         self.raiseEvent(DeviceStateChange(devices))
     core.callDelayed(1, self.check_device_status)
Example #37
    def disconnect(self, msg='disconnected', defer_event=False):
        """
    disconnect this Connection (usually not invoked manually).
    """
        if self.disconnected:
            self.msg("already disconnected")
        if self.dpid is None:
            # If we never got a DPID, log later (coalesce the messages)
            Connection._aborted_connections += 1
            if Connection._aborted_connections == 1:
                core.callDelayed(20, self._do_abort_message)
        else:
            self.info(msg)
        self.disconnected = True
        try:
            self.ofnexus._disconnect(self.dpid)
        except:
            pass
        if self.dpid is not None:
            if not self.disconnection_raised and not defer_event:
                self.disconnection_raised = True
                self.ofnexus.raiseEventNoErrors(ConnectionDown, self)
                self.raiseEventNoErrors(ConnectionDown, self)

        try:
            #deferredSender.kill(self)
            pass
        except:
            pass
        try:
            self.sock.shutdown(socket.SHUT_RDWR)
        except:
            pass
        try:
            pass
            #TODO disconnect notification
        except:
            pass
Example #38
  def disconnect (self, msg = 'disconnected', defer_event = False):
    """
    disconnect this Connection (usually not invoked manually).
    """
    if self.disconnected:
      self.msg("already disconnected")
    if self.dpid is None:
      # If we never got a DPID, log later (coalesce the messages)
      Connection._aborted_connections += 1
      if Connection._aborted_connections == 1:
        core.callDelayed(20, self._do_abort_message)
    else:
      self.info(msg)
    self.disconnected = True
    try:
      self.ofnexus._disconnect(self.dpid)
    except:
      pass
    if self.dpid is not None:
      if not self.disconnection_raised and not defer_event:
        self.disconnection_raised = True
        self.ofnexus.raiseEventNoErrors(ConnectionDown, self)
        self.raiseEventNoErrors(ConnectionDown, self)

    try:
      #deferredSender.kill(self)
      pass
    except:
      pass
    try:
      self.sock.shutdown(socket.SHUT_RDWR)
    except:
      pass
    try:
      pass
      #TODO disconnect notification
    except:
      pass
Example #39
    def connectionClosed(self, *args, **kwargs):
        """Called when it is ready to be removed.  Removes the connection."""
        if len(args) == 0:
            return

        conn = args[0]
        if not conn:
            return

        socPair = conn.get_socket_pair()
        socPair = socPair[::-1]
        key = self.agent.socPairInt(socPair)

        try:
            conn = self.connections[key]
            core.callDelayed(1, self.delConnection, key)

            if not conn.closed:
                conn.close()

        except KeyError:
            log.warn("Tried to remove connection which is not in our dictionary: %s" % str(key))
            pass
Example #40
    def __init__(self, path):
        self.path = path
        self.filelist = None
        self.counter = 0
        self.filenum = 0
        self.cmdlist = ["disconnect", "wait", "reconnect", "pass", "monitor", "reset", "redirect", "unredirect", "passit"]
        self.handlers = handlers()
        self.funclist = None
        self.sig_table= {"BAD-TRAFFIC same SRC/DST":"1",
                "ICMP Time-To-Live Exceeded in Transit":"2",
                "ICMP Echo Reply":"3",
                "ICMP PING BSDtype":"4",
                "ICMP PING *NIX":"5",
                "ICMP PING":"6",
                "SNMP AgentX/tcp request":"7",
                "SNMP request tcp":"8"}
        self.func_table={}
        self.alys_cmd()
        self.action_triggered = False 
        
        self.name_process()

        self.mactable = {}
        self.iptable = {}
        self.droplist = {}
        self.monitorlist = {}
        self.redirectlist = {}
        
        self.ignorelist = []
        
        self.socket_map = {}
        self.server = secure_server(self.socket_map)
        core.Reminder.addListeners(self)
        core.addListener(pox.core.GoingUpEvent, self.start_server)
        core.call_when_ready(self.start, ["openflow_discovery", "NX"])
        core.callDelayed(1, self.start_watch)
Example #41
 def __init__(self, key, action, expiration=0):
     self.key = key
     self.action = action
     if not expiration and not key is None:
         self.expiration = self.key.expiration()
     else:
         self.expiration = expiration
     removed = []
     with Connection.lock:
         now = time.time()
         if now > Connection.lastPurge + Connection.PURGE_PERIOD:
             while len(Connection.s) > 0:
                 k, c = Connection.s.peekitem()
                 if not c.expiration < now:
                     break
                 k, c = Connection.s.popitem()
                 removed.append(k)
             Connection.lastPurge = now
         nextPurge = Connection.lastPurge + Connection.PURGE_PERIOD - now
         core.callDelayed(nextPurge, Connection.purge)
         if not key is None and self.expiration > now:
             Connection.s[key] = self
     for k in removed:
         clearFlows(k)
Example #42
    def processTcpPkt(self, packet, tcpPkt):
        ethPkt = packet
        ipPkt = ethPkt.find("ipv4")

        if tcpPkt.payload_len > 0 and not (ipPkt.iplen == tcpPkt.hdr_len + len(ipPkt.hdr(""))):
            self.add_segment(TcpSegment(tcpPkt.seq, tcpPkt.next))

        if self.agent.isFin(tcpPkt):
            if not self.closed:
                self.fin_received(tcpPkt.seq)

        # remember window and latest ACK
        self.window = max(1460, tcpPkt.win)  # ignore requests to shrink the window below an MTU

        if not self.agent.isAck(tcpPkt):
            return

        if not self.my_first_syn_acked:
            self.my_first_syn_acked = True
            self.my_syn_acked = True
            self.need_to_send_ack = True
            self.first_unacked_seq = tcpPkt.ack
            self.next_seq_needed = tcpPkt.seq + 1

        if self.agent.isFin(tcpPkt) and self.closed:
            # it means we already sent a fin, ack and we just received a fin, ack
            self.need_to_send_ack = True
            self.last_seq_sent += 1
            self.next_seq_needed += 1
            self.set_ack(tcpPkt.ack)

        else:
            if self.my_first_syn_acked and not self.connected:
                self.connected = True
                core.callDelayed(0.01, self.connectionEstablished)
            self.set_ack(tcpPkt.ack)
Example #43
  def _handle_PacketIn (self, event):
    dpid = event.connection.dpid
    packet = event.parsed

    if not packet.parsed:
      log.warn("%s %i ignoring unparsed packet", dpid_to_str(dpid), inport.num)
      return

    if packet.find('lldp'):
      return

    ip = packet.find('ipv4')
    if ip and packet.find('tcp'):
      node_str = core.Outband.t.dpid(dpid).name
      src_str  = core.Outband.t.ip(ip.srcip).name
      dst_str  = core.Outband.t.ip(ip.dstip).name

      #log.info('%s.%s.%s', node_str, src_str, dst_str)
      route = None
      for i,r in enumerate(core.Outband.routes):
        #log.info('r-%s', r)
        if r[0] == src_str and r[1] == node_str and r[-1] == dst_str:
          route = r
          break
      if route:
        if not ('permanent' in core.Outband.route_properties[i]):
          del(core.Outband.routes[i])
        log.info('r-%s', route)

        if self.first:
          start = 10
          duration = 1
          core.callDelayed(start, self._install_port_down, 1, 1, 1, duration=0)
          core.callDelayed(start, self._install_port_down, 2, 3, 1, duration=0)
          core.callDelayed(start+duration, self._install_port_down, 1, 1, 0, duration=0)
          core.callDelayed(start+duration, self._install_port_down, 2, 3, 0, duration=0)
          self.first = False

        hops = zip(route[0:],route[1:],route[2:])
        hops.reverse()
        for p,c,n in hops:
          self._install_flow(event, p, c, n)
        return

    super( PredefinedRouting, self )._handle_PacketIn(event)
Example #44
 def open_later(self):
     core.callDelayed(self.reconnect_delay, self.begin, **self.kw)
Example #45
def _do_sleep(self):
r = self._do_remove()
if r == False:
core.callDelayed(12, self._do_sleep)
else:
s = min(attackswitch, key=attackswitch.get)
sleep_time = 12 - int(time.time() - attackswitch[s])
core.callDelayed(sleep_time, self._do_sleep)
class Switch (EventMixin):
entDic = {} #Table for the IP address and its occurrence
all_ip = {}
dstEnt = [] #List of entropies
count1 = 0
start_time = 0
end_time = 0
ftimer = 0
count3 = 0
max_path = []
Entth = 1
Entc = 0
@staticmethod
def cleaning_sent_sw ():
del sent_sw[:]
print "deleting sent_sw"
def statcolect(self, path_stat, element, element_src):
global my_counter
# my-counter : counts the number of packets. We collect 50 packets
global ipList
#my_start is used as the starting point for the timer.
global my_start
global entropy_counter
global frate_th
global frate2
global Entc2
print "Packet Counter:", my_counter
#This function collects IP statistics
ipList.append(element)
#Increment until we reach 50
if my_counter == 0:
#we need to calculate the time it takes o collect 50 packets so we could use in calculating the rate
self.start_time = time.time()
my_start = self.start_time
print "start time" ,my_start
my_counter +=1
#keep the path statistics so that we could find the switches in the attack path when an attack is suspected
if element in self.all_ip:
self.all_ip[element].append(path_stat)
else:
self.all_ip[element]= (path_stat)
if my_counter == 50:
self.end_time = time.time()
self.ftimer = self.end_time - my_start
print "we reach 50 and our start_time %s end_time %s and timer is %s" % (str(my_start), str(self.end_time), str(self.ftimer))
self.start_time = 0
self.entDic = {}
for i in ipList:
if i not in self.entDic:
self.entDic[i] =0
self.entDic[i] +=1
#print the hash table and clear all
print self.entDic
max_ip = max(self.entDic, key=self.entDic.get)
print "max seen ip=", max_ip
self.max_path = self.all_ip[max_ip]
#call the entropy function
self.Entc = self.entropy(self.entDic)
print "Entc", self.Entc
print "Entth", self.Entth
#using math.floor to compare the integer part of the entropies
if math.floor(self.Entc) >= math.floor(self.Entth):
frate = 50 / self.ftimer
#frate2 is used to pass the receiving rate. frate is reset before being passed so a new variable is defined to pass the value
frate2 = frate
Entc2 = self.Entc
print "frate2 is updated:",frate2
if frate <= frate_th:
print "Be happy frate<=frate_th frate= ",frate
print "frate_th=",frate_th
self.Entth = self.Entc
print "Entth is updated to Entth=",self.Entth
if frate >= 20:
self.frate_th = frate
print "frate_th is updated to",frate_th
frate = 0
entropy_counter = 0
print "entropy_counter is reset",entropy_counter
self.count1 = 0
self.ftimer = 0
else:
self.count1 +=1
print "frate=", frate
print "frate_th=",frate_th
print "count1=",self.count1
#count1 is used to detect attacks using the receiving rate of new flows. when count1 is 5 we suspect an attack.
if self.count1 == 5:
self.max_path = self.all_ip[max_ip]
#eliminating duplicate paths
self.max_path = sorted(self.max_path)
dedup = [self.max_path[i] for i in
range(len(self.max_path)) if i == 0 or self.max_path[i] !=
self.max_path[i-1]]
print "we suspect an attack because count1=5 so we will go to test_flow_stat3"
print ""
dtm = datetime.datetime.now()
msg = "we suspect an attack because counter1=5, we will query switches" + " Time:" + str(dtm) with
open('/home/mininet/pox/pox/forwarding/logfile.txt','a+') as
flog:
flog.write('\n')
flog.write(msg)
flog.write('\n')
#although the duplicate paths are eliminated, individual switches still appear in the list. Since these switches will also be in the switch path list, we do not consider them and only look at the list-type members of our switch path list.
for raha in dedup:
if type(raha) == type(list()):
dtm = datetime.datetime.now()
msg= "The switches suspected of being in the
Attack path are:" +str(raha) +" Time:" + str(dtm)
with
open('/home/mininet/pox/pox/forwarding/logfile.txt','a+') as
flog:
flog.write(msg)
flog.write('\n')
print "The switches suspected of being in the Attack path are:", raha print ""
for raha in dedup:
if type(raha) == type(list()):
self.flow_stat(raha) 
#calling the flow_stat function that will send request to all the switches in the Raha list to send their flow tables to the controller
self.count1 = 0
self.ftimer = 0
frate = 0
else:
self.ftimer = 0
self.frate = 0
else:
self.count1 = 0
self.ftimer = 0
frate = 0
entropy_counter +=1
print "count3=",entropy_counter
#The entropy changes continue for 5 times so we suspect an attack.
if entropy_counter == 5:
self.max_path = self.all_ip[max_ip]
self.max_path = sorted(self.max_path)
dedup = [self.max_path[i] for i in
range(len(self.max_path)) if i == 0 or self.max_path[i] !=
self.max_path[i-1]] #deleting the duplicate paths
dtm = datetime.datetime.now()
msg = "we suspect an attack because entropy_counter=5, we will query switches" + " Time:" + str(dtm)
with open('/home/mininet/pox/pox/forwarding/logfile.txt','a+') as flog:
flog.write('\n')
flog.write(msg)
flog.write('\n')
print "we suspect an attack because entropy_counter=5 so we will go to test_flow_stat3"
print ""
for raha in dedup:
if type(raha) == type(list()):
dtm = datetime.datetime.now()
msg= "The switches suspected of being in the Attack path are:" +str(raha) +" Time:" + str(dtm)
with open('/home/mininet/pox/pox/forwarding/logfile.txt','a+') as flog:
flog.write(msg)
flog.write('\n')
print "The switches suspected of being in the Attack path are:",raha
print""
for raha in dedup:
if type(raha) == type(list()):
self.flow_stat(raha)
self.count1 = 0
entropy_counter = 0
self.ftimer = 0
frate = 0
self.entDic = {}
ipList = []
#l =0
my_counter = 0
def entropy (self, lists):
#this function computes entropy
#l = 50
elist = []
print lists.values()
print sum(lists.values())
for p in lists.values():
print p
c = float(p)/50
print "c=",c
elist.append(-c * math.log(c, 2))
Ec = sum(elist)
print 'Entropy = ',sum(elist)
self.dstEnt.append(sum(elist))
print len(self.dstEnt)
return Ec
# handler for the timer function that sends flow requests to switches in the attack path that have not been queried in the last 10 seconds.
def _timer_func (self, attack_p):
sent_connection = 0
for connection in core.openflow._connections.values():
for item in attack_p:
if dpidToStr(connection.dpid) == str(item[0]):
if dpidToStr(connection.dpid) not in sent_sw:
print"sending flow request to switch", dpidToStr(connection.dpid) dtm = datetime.datetime.now() msg= "Sending flow request to:" +dpidToStr(connection.dpid) + " Time:" + str(dtm)
with
open('/home/mininet/pox/pox/forwarding/logfile.txt','a+') as flog:
flog.write(msg)
flog.write('\n')
connection.send(of.ofp_stats_request(body=of.ofp_flow_stats_request()))
sent_connection +=1
sent_sw.append(dpidToStr(connection.dpid))#the sent_sw is the list used to prevent sending duplicate request for statistics to same switch. This list is cleared every 10 sec.
log.info("Sent %i flow stats request(s)", sent_connection)
dtm = datetime.datetime.now()
msg= "Sent Switches list:" +str(sent_sw) +" Time:" + str(dtm)
with open('/home/mininet/pox/pox/forwarding/logfile.txt','a+') as flog:
flog.write(msg)
flog.write('\n')
print "sent switches",sent_sw
#function used to analyze the flow tables received from switches. Too many short flows, or flows with a small number of bytes or packets, are considered signs of an attack.
def _handle_flowstats_received (self, event):
global frate2
global Entc2
global frate_th
stats = flow_stats_to_list(event.stats)
log.info("FlowStatsReceived from %s",dpidToStr(event.connection.dpid))
flowlist = []
for flow in event.stats:
flowlist.append({
"table_id": flow.table_id,
"duration_sec": flow.duration_sec,
"duration_nsec": flow.duration_nsec,
"idle_timeout": flow.idle_timeout,
"hard_timeout": flow.hard_timeout,
"packet_count": flow.packet_count,
"byte_count": flow.byte_count,
})
# print flowlist
count_flow = 1
count_3 = 0
for f in event.stats:
count_2 = 0
count_flow +=1
if f.byte_count <20:
count_2 +=1
if f.packet_count <4:
count_2 +=1
if ((f.duration_sec * pow(10, 9)) + f.duration_nsec) < 9999999999:
count_2 +=1
if count_2 >=2:
count_3 +=1
rate = (float(count_3)/count_flow) * 100
log.info("on switch %s: we have count_3 %s count_flow %s with a rate of %s percent",
dpidToStr(event.connection.dpid), count_3, count_flow, rate)
if rate>87:
dtm = datetime.datetime.now()
print "WE HAVE AN ATTACK!!!"
msg = "There is an attack at switch :" + dpidToStr(event.connection.dpid) + "with rate of:" + str(rate) + " Time: " + str(dtm) attackswitch[dpidToStr(event.connection.dpid)] = time.time()
#sub = "Attack!!!"
#m = Mail(msg, sub)
#m.send_email()
with open('/home/mininet/pox/pox/forwarding/logfile.txt','a+') as flog:
flog.write(msg)
flog.write('\n')
frate_th = 20 
Entth = 1# Since we have an attack the system is in alert status and so the threshold values are reset.
print "frate_th is updated to:",frate_th
else:
self.Entth = Entc2
print "we didnt have an attack on switch %s so the Entth is updated=",self.Entth
print "frate2",frate2
frate_th = frate2
print "frate_th is updated to",frate_th 
dtm = datetime.datetime.now()
msg= "We didn't have an attack on switch" + dpidToStr(event.connection.dpid) + "rate=" +str(rate) + "and the new entth=" +str(self.Entth) + "New frate_th=" +str(frate_th) + "Time:" + str(dtm)
with open('/home/mininet/pox/pox/forwarding/logfile.txt','a+') as flog:
flog.write(msg)
flog.write('\n')
# main functiont to launch the module
def flow_stat (self, attack):
from pox.lib.recoco import Timer
self._timer_func(attack)
def __init__ (self):
self.connection = None
self.ports = None
self.dpid = None
self._listeners = None
self._connected_at = None
def __repr__ (self):
return dpid_to_str(self.dpid)
def _install (self, switch, in_port, out_port, match, buf = None):
if len(attackswitch) == 0:
FLOW_IDLE_TIMEOUT = 15
else:
FLOW_IDLE_TIMEOUT = 11
msg = of.ofp_flow_mod()
msg.match = match
msg.match.in_port = in_port
msg.idle_timeout = FLOW_IDLE_TIMEOUT
msg.hard_timeout = FLOW_HARD_TIMEOUT
msg.actions.append(of.ofp_action_output(port = out_port))
msg.buffer_id = buf
switch.connection.send(msg)
def _install_path (self, p, match, packet_in=None):
wp = WaitingPath(p, packet_in)
for sw,in_port,out_port in p:
self._install(sw, in_port, out_port, match)
msg = of.ofp_barrier_request()
sw.connection.send(msg)
wp.add_xid(sw.dpid,msg.xid)
def install_path (self, dst_sw, last_port, match, event):
"""
Attempts to install a path between this switch and some
destination
"""
p = _get_path(self, dst_sw, event.port, last_port)
if p is None:
log.warning("Can't get from %s to %s", match.dl_src, match.dl_dst)
import pox.lib.packet as pkt
if (match.dl_type == pkt.ethernet.IP_TYPE and event.parsed.find('ipv4')):
# It's IP -- let's send a destination unreachable
log.debug("Dest unreachable (%s -> %s)", match.dl_src, match.dl_dst)
from pox.lib.addresses import EthAddr
e = pkt.ethernet()
e.src = EthAddr(dpid_to_str(self.dpid)) #FIXME: Hmm...
e.dst = match.dl_src
e.type = e.IP_TYPE
ipp = pkt.ipv4()
ipp.protocol = ipp.ICMP_PROTOCOL
ipp.srcip = match.nw_dst #FIXME: Ridiculous
ipp.dstip = match.nw_src
icmp = pkt.icmp()
icmp.type = pkt.ICMP.TYPE_DEST_UNREACH
icmp.code = pkt.ICMP.CODE_UNREACH_HOST
orig_ip = event.parsed.find('ipv4')
d = orig_ip.pack()
d = d[:orig_ip.hl * 4 + 8]
import struct
d = struct.pack("!HH", 0,0) + d #FIXME: MTU
icmp.payload = d
ipp.payload = icmp
e.payload = ipp
msg = of.ofp_packet_out()
msg.actions.append(of.ofp_action_output(port =
event.port))
msg.data = e.pack()
self.connection.send(msg)
return
log.debug("Installing path for %s -> %s %04x (%i hops)",
match.dl_src, match.dl_dst, match.dl_type, len(p))
print "maryam dest ip is" , match.nw_dst
#calling the statcolect function when a new flow is to be installed. This function collects statisctics to monitor the network behavior to detect DDOS attacks.
send_path = p
tuple(send_path)
self.statcolect(send_path, match.nw_dst, match.nw_src)
# We have a path -- install it
self._install_path(p, match, event.ofp)
# Now reverse it and install it backwards
# (we'll just assume that will work)
p = [(sw,out_port,in_port) for sw,in_port,out_port in p]
self._install_path(p, match.flip())
def _handle_PacketIn (self, event):
def flood ():
""" Floods the packet """
if self.is_holding_down:
log.warning("Not flooding -- holddown active")
msg = of.ofp_packet_out()
# OFPP_FLOOD is optional; some switches may need OFPP_ALL
msg.actions.append(of.ofp_action_output(port = of.OFPP_FLOOD))
msg.buffer_id = event.ofp.buffer_id
msg.in_port = event.port
self.connection.send(msg)
def drop ():
# Kill the buffer
if event.ofp.buffer_id is not None:
msg = of.ofp_packet_out()
msg.buffer_id = event.ofp.buffer_id
event.ofp.buffer_id = None # Mark is dead
msg.in_port = event.port
self.connection.send(msg)
packet = event.parsed
loc = (self, event.port) # Place we saw this ethaddr
oldloc = mac_map.get(packet.src) # Place we last saw this ethaddr
if packet.effective_ethertype == packet.LLDP_TYPE:
drop()
return
if oldloc is None:
if packet.src.is_multicast == False:
mac_map[packet.src] = loc # Learn position for ethaddr
log.debug("Learned %s at %s.%i", packet.src, loc[0], loc[1])
elif oldloc != loc:
# ethaddr seen at different place!
if loc[1] not in adjacency[loc[0]].values():
# New place is another "plain" port (probably)
log.debug("%s moved from %s.%i to %s.%i?", packet.src, dpid_to_str(oldloc[0].connection.dpid), oldloc[1], dpid_to_str( loc[0].connection.dpid), loc[1])
if packet.src.is_multicast == False:
mac_map[packet.src] = loc # Learn position for ethaddr
log.debug("Learned %s at %s.%i", packet.src, loc[0], loc[1])
elif packet.dst.is_multicast == False:
# New place is a switch-to-switch port!
#TODO: This should be a flood. It'd be nice if we knew. We could
# check if the port is in the spanning tree if it's available.
# Or maybe we should flood more carefully?
log.warning("Packet from %s arrived at %s.%i without flow", packet.src, dpid_to_str(self.dpid), event.port)
#drop()
#return
if packet.dst.is_multicast:
log.debug("Flood multicast from %s", packet.src)
flood()
else:
if packet.dst not in mac_map:
log.debug("%s unknown -- flooding" % (packet.dst,))
flood()
else:
dest = mac_map[packet.dst]
match = of.ofp_match.from_packet(packet)
self.install_path(dest[0], dest[1], match, event)
def disconnect (self):
if self.connection is not None:
log.debug("Disconnect %s" % (self.connection,))
self.connection.removeListeners(self._listeners)
self.connection = None
self._listeners = None
def connect (self, connection):
if self.dpid is None:
self.dpid = connection.dpid
assert self.dpid == connection.dpid
if self.ports is None:
self.ports = connection.features.ports
self.disconnect()
log.debug("Connect %s" % (connection,))
self.connection = connection
self._listeners = self.listenTo(connection)
self._connected_at = time.time()
@property
def is_holding_down (self):
if self._connected_at is None: return True
if time.time() - self._connected_at > FLOOD_HOLDDOWN:
return False
return True
def _handle_ConnectionDown (self, event):
self.disconnect()
class l2_multi (EventMixin):
_eventMixin_events = set([
PathInstalled,
])
def __init__ (self):
# Listen to dependencies
def startup ():
core.openflow.addListeners(self, priority=0)
core.openflow_discovery.addListeners(self)
core.call_when_ready(startup,
('openflow','openflow_discovery'))
def _handle_LinkEvent (self, event):
def flip (link):
return Discovery.Link(link[2],link[3], link[0],link[1])
l = event.link
sw1 = switches[l.dpid1]
sw2 = switches[l.dpid2]
# Invalidate all flows and path info.
# For link adds, this makes sure that if a new link leads to an
# improved path, we use it.
# For link removals, this makes sure that we don't use a
# path that may have been broken.
#NOTE: This could be radically improved! (e.g., not *ALL* paths break)
clear = of.ofp_flow_mod(command=of.OFPFC_DELETE)
for sw in switches.itervalues():
if sw.connection is None: continue
sw.connection.send(clear)
path_map.clear()
if event.removed:
# This link no longer okay
if sw2 in adjacency[sw1]: del adjacency[sw1][sw2]
if sw1 in adjacency[sw2]: del adjacency[sw2][sw1]
# But maybe there's another way to connect these...
for ll in core.openflow_discovery.adjacency:
if ll.dpid1 == l.dpid1 and ll.dpid2 == l.dpid2:
if flip(ll) in core.openflow_discovery.adjacency:
# Yup, link goes both ways
adjacency[sw1][sw2] = ll.port1
adjacency[sw2][sw1] = ll.port2
# Fixed -- new link chosen to connect these
break
else:
# If we already consider these nodes connected, we can
# ignore this link up.
# Otherwise, we might be interested...
if adjacency[sw1][sw2] is None:
# These previously weren't connected. If the link
# exists in both directions, we consider them connected now.
if flip(l) in core.openflow_discovery.adjacency:
# Yup, link goes both ways -- connected!
adjacency[sw1][sw2] = l.port1
adjacency[sw2][sw1] = l.port2
# If we have learned a MAC on this port which we now know to
# be connected to a switch, unlearn it.
bad_macs = set()
for mac,(sw,port) in mac_map.iteritems():
#print sw,sw1,port,l.port1
if sw is sw1 and port == l.port1:
if mac not in bad_macs:
log.debug("Unlearned %s", mac)
bad_macs.add(mac)
if sw is sw2 and port == l.port2:
if mac not in bad_macs:
log.debug("Unlearned %s", mac)
bad_macs.add(mac)
for mac in bad_macs:
del mac_map[mac]
def _handle_ConnectionUp (self, event):
sw = switches.get(event.dpid)
if sw is None:
# New switch
sw = Switch()
switches[event.dpid] = sw
sw.connect(event.connection)
else:
sw.connect(event.connection)
def _handle_BarrierIn (self, event):
wp = waiting_paths.pop((event.dpid,event.xid), None)
if not wp:
#log.info("No waiting packet %s,%s", event.dpid, event.xid)
return
#log.debug("Notify waiting packet %s,%s", event.dpid,event.xid)
wp.notify(event)
def launch ():
core.registerNew(l2_multi)
core.registerNew(Cleanswitch)
core.Cleanswitch._do_sleep()
timeout = min(max(PATH_SETUP_TIME, 5) * 2, 15)
Timer(timeout, WaitingPath.expire_waiting_paths, recurring=True)
print "will go to execute the timer for sent_sw"
#we will call the cleaning_sent_sw function in the switch class to erase the list of the switches that have been already polled for statistics. As long as the switches are in the sent_sw list no statistics request will be sent to them.
Timer(10, Switch.cleaning_sent_sw, recurring=True)
core.openflow.addListenerByName("FlowStatsReceived",
Switch()._handle_flowstats_received)
Example #46
def _handle_PacketIn (event):

  global forward_rule_set
  global backward_rule_set
  global mac_port_dict
  global watermark_samples
  global protected_resources
  global tainted_hosts
  global watermark_count
  skip_add_to_dict_dest = 0
  skip_add_to_dict_src = 0
  mu_sigma_vals = [0,0]
  is_correlated = 0

  packet = event.parsed

  log.debug("packet in buffer_id check : " +str(event.ofp.buffer_id))

  dest_eth_addr = str(packet.dst)
  src_eth_addr = str(packet.src)

  ipv4_pack = packet.find("ipv4")
  if ipv4_pack:
    log.debug("IP packet in transit from  "+str(ipv4_pack.srcip)+"<->"+str(ipv4_pack.dstip))

  log.debug("packet forwarding  " + src_eth_addr + "  " + dest_eth_addr)
  if (dest_eth_addr in protected_resources):
    log.debug("***traffic going to protected resource***")
    log.debug("***FLow rule not added to switches. Send to controller***")
    #send_packet(event, packet)
    skip_add_to_dict_dest = 1

  elif (tainted_hosts.has_key(dest_eth_addr)):
    log.debug("***traffic going to Tainted host ***")
    log.debug("***FLow rule not added to switches. Send to controller***")
    #send_packet(event, packet)
    skip_add_to_dict_dest = 1

  if (src_eth_addr in protected_resources):
    if(dest_eth_addr in protected_resources):
      log.debug("protected to protected communication")
      skip_add_to_dict_dest = 0
    else:
      log.debug("*** traffic from protected resource***")
      log.debug("***FLow rule not added to switches. Send to controller***")
      add_to_tainted_hosts(dest_eth_addr)
      add_to_watermarks_received_on_hosts(dest_eth_addr, 0)
      index = random.randint(0,1000)
      log.debug("index %i", index)
      log.debug("****inserting  "+str(watermark_samples[0][index])+" seconds delay here - src Protected***")
      #Timer(watermark_samples[0][index], delay_and_flood, event)
      core.callDelayed(watermark_samples[0][index], delay_and_flood, event)
      skip_add_to_dict_src = 1
      #flood_packet(event, of.OFPP_ALL)
      delete_flow_entries(event, packet, packet.dst)
       #send_packet(event, of.OFPP_ALL)

  elif(tainted_hosts.has_key(src_eth_addr)):
    update_ipd_arrays(src_eth_addr, dest_eth_addr)
    flow_ipd_array = flow_ipds.get(src_eth_addr+dest_eth_addr)

    if (len(flow_ipd_array) >= 60):
      print flow_ipd_array
      if (check_distribution(flow_ipd_array) == 1):
        mu_sigma_vals = find_mu_sigma(flow_ipd_array)
        is_correlated = find_correlation(src_eth_addr, dest_eth_addr, mu_sigma_vals)
        if is_correlated == 1:
          log.debug(" #######@@@@@@@@ correlated flows - Take appropriate actions @@@@@@@@########")

    if (dest_eth_addr in protected_resources):
      log.debug("tainted to protected communication")
      skip_add_to_dict_dest = 0
    else:
      log.debug("***** traffic from  a tainted host *********")
      log.debug("***FLow rule not added to switches. Send to controller***")

      add_to_tainted_hosts(dest_eth_addr)
      watermark = create_watermark(src_eth_addr)
      add_to_watermarks_received_on_hosts(dest_eth_addr, watermark)
      index = random.randint(0,1000)
      log.debug("index %i", index)
      log.debug("****inserting  "+str(watermark_samples[watermark][index])+" seconds delay here - src Tainted***")
      #Timer(watermark_samples[watermark][index], delay_and_flood , event)
      core.callDelayed(watermark_samples[watermark][index], delay_and_flood , event)
      skip_add_to_dict_src = 1
      #flood_packet(event, of.OFPP_ALL)
      delete_flow_entries(event, packet, packet.dst)

  if (skip_add_to_dict_dest == 0) and (skip_add_to_dict_src == 0):
    log.debug("  adding to dictionary skip_add_to_dict_src is %i and skip_add_to_dict_dest is %i", skip_add_to_dict_src, skip_add_to_dict_dest)
    mac_port_dict[packet.src] = event.port
    if packet.dst not in mac_port_dict:
      log.debug("flooding to all ports as no entry in dictionary")
      flood_packet(event, of.OFPP_ALL)
    else:
      port = mac_port_dict[packet.dst]
      log.debug("setting a flow table entry as matching entry found in dict - " + src_eth_addr + "    " + dest_eth_addr)
      msg = of.ofp_flow_mod()
      msg.match = of.ofp_match.from_packet(packet, event.port)
      msg.priority = 1009
      msg.actions.append(of.ofp_action_output(port = port))
      msg.data = event.ofp
      event.connection.send(msg)
  elif (skip_add_to_dict_dest == 1) and (skip_add_to_dict_src == 0):
    log.debug("  ready to flood. skip_add_to_dict_src is %i and skip_add_to_dict_dest is %i", skip_add_to_dict_src, skip_add_to_dict_dest)
    flood_packet(event, of.OFPP_ALL)
Example #47
 def connectionClosed(self, *args, **kwargs):
     core.callDelayed(0.01, self.finished)
Example #48
 def delayed_wrapper(*args, **kwargs):
     # Use POX internal thread-safe wrapper for scheduling
     return core.callDelayed(delay, func, *args, **kwargs)
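Example #48 shows only the inner wrapper; a plausible enclosing factory is sketched below, assuming delay and func are captured from an outer function (the call_later name is hypothetical):

from pox.core import core

def call_later(delay, func):
    # Return a callable that defers 'func' onto POX's timer scheduler.
    def delayed_wrapper(*args, **kwargs):
        # Use POX internal thread-safe wrapper for scheduling
        return core.callDelayed(delay, func, *args, **kwargs)
    return delayed_wrapper

# e.g. call_later(2, some_callback)("arg") runs some_callback("arg") about 2 seconds later.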
 def open_later (self):
   core.callDelayed(self.reconnect_delay, self.begin, **self.kw)
Example #50
  def emulate_link_failure (self, start, duration = 0, link='nl-hr',
                            reroute = 0, restore_reroute = True):
      self.prev_change_port = None
      self.log_file_last = open("/tmp/pox_log_last", "w")
      self.barrier_log_file_last = open("/tmp/barrier_log_last", "w")

      n1, n2 = link.split('-')
      node_n1 = core.Outband.t.name(n1)
      node_n2 = core.Outband.t.name(n2)
      n1_dpid = node_n1.dpid
      n2_dpid = node_n2.dpid
      n1_port_to_n2 = node_n1.port_num(node_n2)
      n2_port_to_n1 = node_n2.port_num(node_n1)
      log.warn("Going down in %is, link:%ss, dur:%ss, reroute:%ss, restore after dur:%s" %
               (start, link, duration, reroute, restore_reroute))
      end = start + duration
      self._prev_barrier_arrived = {}
      f = self._install_port_down
      core.callDelayed(start, f, n1_dpid, n1_port_to_n2, 1, duration)
      core.callDelayed(start, f, n2_dpid, n2_port_to_n1, 1, duration)
      core.callDelayed(end,   f, n1_dpid, n1_port_to_n2, 0, duration, True)
      core.callDelayed(end,   f, n2_dpid, n2_port_to_n1, 0, duration)
      if reroute:
        core.callDelayed(start+reroute, self._install_failover_entries, n1)
        core.callDelayed(start+reroute, self._install_failover_entries, n2)
      if reroute and restore_reroute:
        core.callDelayed(end+reroute, self._delete_failover_entries, n1)
        core.callDelayed(end+reroute, self._delete_failover_entries, n2)
      core.callDelayed(end+reroute+1, self._flush_logs)
Example #51
def _handle_PacketIn (event):

  global forward_rule_set
  global backward_rule_set
  global mac_port_dict
  global watermark_samples
  global protected_resources
  global tainted_hosts
  global watermark_count
  skip_add_to_dict_dest = 0
  skip_add_to_dict_src = 0
  mu_sigma_vals = [0,0]
  is_correlated = 0
  is_tcp_ack = 0

  packet = event.parsed

  log.debug("packet in buffer_id check : " +str(event.ofp.buffer_id))

  dest_eth_addr = str(packet.dst)
  src_eth_addr = str(packet.src)
  key = src_eth_addr + dest_eth_addr

  ipv4_pack = packet.find("ipv4")
  if ipv4_pack:
    log.debug("IP packet in transit from  "+str(ipv4_pack.srcip)+"<->"+str(ipv4_pack.dstip))

  tcp = packet.find("tcp")
  if tcp:
    #log.debug("TCP pakcet! - SYN : %d   FIN: %d  ACK: %d ", tcp.SYN, tcp.FIN, tcp.ACK)
    if tcp.ACK:
      log.debug("!!!!!!   TCP ack packet  %s   !!!!!!", key)
      flood_packet(event, of.OFPP_ALL)
      is_tcp_ack = 1


  #log.debug("packet forwarding  " + src_eth_addr + "  " + dest_eth_addr)
  if is_tcp_ack == 0:
    if (dest_eth_addr in protected_resources):
      log.debug("***traffic going to protected resource***")
      #log.debug("***FLow rule not added to switches. Send to controller***")
      #send_packet(event, packet)
      #skip_add_to_dict_dest = 1

    elif (tainted_hosts.has_key(dest_eth_addr)):
      log.debug("***traffic going to Tainted host ***")
      #log.debug("***FLow rule not added to switches. Send to controller***")
      #send_packet(event, packet)
      #skip_add_to_dict_dest = 1

    if (src_eth_addr in protected_resources):
      if(dest_eth_addr in protected_resources):
        log.debug("protected to protected communication")
        skip_add_to_dict_dest = 0
      else:
        #log.debug("*** traffic from protected resource***")
        #log.debug("***FLow rule not added to switches. Send to controller***")
        add_to_tainted_hosts(dest_eth_addr)
        #add_to_watermarks_received_on_hosts(dest_eth_addr, 0)
        if flow_packets_queues.has_key(key):
          (flow_packets_queues.get(key)).insert(0,event)
        else:
          flow_packets_queues[key] = [event]
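        # Queue the packet and release it later with a delay drawn from the
        # watermark's timing distribution (watermark_samples[watermark]).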
        watermark = create_watermark(src_eth_addr)
        log.debug("*** traffic from protected resource and watermark creation result : %i", watermark)
        add_to_watermarks_received_on_hosts(dest_eth_addr, watermark)
        index = random.randint(0,1000)
        log.debug("index %i", index)
        induced_delay = watermark_samples[watermark][index]
        absolute_delay = 0
        if flow_last_packet_sent_time.has_key(src_eth_addr+dest_eth_addr):
          absolute_delay = flow_last_packet_sent_time[src_eth_addr+dest_eth_addr]
        else:
          flow_last_packet_sent_time[src_eth_addr+dest_eth_addr] = induced_delay
        absolute_delay = absolute_delay + induced_delay
        log.debug("****inserting  "+str(watermark_samples[watermark][index])+" seconds delay here - src Protected***")
        #log.debug("***** absolute packet release time after delay addition since t0 : " + str(absolute_delay))
        #Timer(watermark_samples[0][index], delay_and_flood, event)
        #core.callDelayed(absolute_delay, delay_and_flood, event)
        core.callDelayed(induced_delay, release_packets, key)
        flow_last_packet_sent_time[src_eth_addr + dest_eth_addr] = absolute_delay
        skip_add_to_dict_src = 1
        #flood_packet(event, of.OFPP_ALL)
        delete_flow_entries(event, packet, packet.dst)
        #send_packet(event, of.OFPP_ALL)

    elif(tainted_hosts.has_key(src_eth_addr) and (dest_eth_addr not in protected_resources)):
      update_ipd_arrays(src_eth_addr, dest_eth_addr)
      flow_ipd_array = flow_ipds.get(src_eth_addr+dest_eth_addr)
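      # Every 50 recorded inter-packet delays, test the flow's IPDs for the
      # watermark distribution and flag correlated flows.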
      if (len(flow_ipd_array) > 0 and (len(flow_ipd_array)) % 50 == 0):
        print flow_ipd_array
        if (check_distribution(flow_ipd_array, src_eth_addr, dest_eth_addr) == 1):
          mu_sigma_vals = find_mu_sigma(flow_ipd_array)
          is_correlated = find_correlation(src_eth_addr, dest_eth_addr, mu_sigma_vals)
          if is_correlated == 1:
            log.debug(" #######@@@@@@@@ correlated flows - Take appropriate actions @@@@@@@@########")
          else:
            log.debug(" -------- No correlation. Adding flow entry to the flow tables")
            skip_add_to_dict_src = 0
            skip_add_to_dict_dest = 0
        else:
          log.debug(" -------- No normal distribution. Adding flow entry to the flow tables")
          skip_add_to_dict_src = 0
          skip_add_to_dict_dest = 0
      else:
        if (dest_eth_addr in protected_resources):
          log.debug("tainted to protected communication")
          skip_add_to_dict_dest = 0
        else:
          #log.debug("***** traffic from  a tainted host *********")
          #log.debug("***FLow rule not added to switches. Send to controller***")

          #add_to_tainted_hosts(dest_eth_addr)
          if flow_packets_queues.has_key(key):
            (flow_packets_queues.get(key)).insert(0,event)
          else:
            flow_packets_queues[key] = [event]
          watermark = create_watermark(src_eth_addr)
          log.debug("*** traffic from tainted host and watermark creation result : %i", watermark)
          add_to_watermarks_received_on_hosts(dest_eth_addr, watermark)
          index = random.randint(0,1000)
          log.debug("index %i", index)
          induced_delay = watermark_samples[watermark][index]
          absolute_delay = 0
          if flow_last_packet_sent_time.has_key(src_eth_addr+dest_eth_addr):
            absolute_delay = flow_last_packet_sent_time[src_eth_addr+dest_eth_addr]
          else:
            flow_last_packet_sent_time[src_eth_addr+dest_eth_addr] = induced_delay
          absolute_delay = absolute_delay + induced_delay
          #log.debug("****inserting  "+str(absolute_delay)+" seconds delay here - src Protected***")
          log.debug("****inserting  "+str(watermark_samples[watermark][index])+" seconds delay here - src Protected***")
          #log.debug("***** absolute packet release time after delay addition since t0 : " + str(absolute_delay))
          #Timer(watermark_samples[0][index], delay_and_flood, event)
          #core.callDelayed(absolute_delay, delay_and_flood, event)
          core.callDelayed(induced_delay, release_packets, key)
          flow_last_packet_sent_time[src_eth_addr+dest_eth_addr] = absolute_delay
          skip_add_to_dict_src = 1
          #flood_packet(event, of.OFPP_ALL)
          #delete_flow_entries(event, packet, packet.dst)

    if (skip_add_to_dict_dest == 0) and (skip_add_to_dict_src == 0):
      log.debug("  adding to dictionary skip_add_to_dict_src is %i and skip_add_to_dict_dest is %i", skip_add_to_dict_src, skip_add_to_dict_dest)
      mac_port_dict[packet.src] = event.port
      if packet.dst not in mac_port_dict:
        log.debug("flooding to all ports as no entry in dictionary")
        flood_packet(event, of.OFPP_ALL)
      else:
        port = mac_port_dict[packet.dst]
        log.debug("setting a flow table entry as matching entry found in dict - " + src_eth_addr + "    " + dest_eth_addr)
        msg = of.ofp_flow_mod()
        msg.match = of.ofp_match.from_packet(packet, event.port)
        msg.priority = 1009
        msg.actions.append(of.ofp_action_output(port = port))
        msg.data = event.ofp
        event.connection.send(msg)
    elif (skip_add_to_dict_dest == 1) and (skip_add_to_dict_src == 0):
      log.debug("  ready to flood. skip_add_to_dict_src is %i and skip_add_to_dict_dest is %i", skip_add_to_dict_src, skip_add_to_dict_dest)
      flood_packet(event, of.OFPP_ALL)
Exemplo n.º 52
0
    def _handle_PacketIn(self, event):
        dpid = event.connection.dpid
        packet = event.parsed

        if not packet.parsed:
            log.warn("%s %i ignoring unparsed packet", dpid_to_str(dpid),
                     inport.num)
            return

        if packet.find('lldp'):
            return

        ip = packet.find('ipv4')
        if ip and packet.find('tcp'):
            node_str = core.Outband.t.dpid(dpid).name
            src_str = core.Outband.t.ip(ip.srcip).name
            dst_str = core.Outband.t.ip(ip.dstip).name

            #log.info('%s.%s.%s', node_str, src_str, dst_str)
            route = None
            for i, r in enumerate(core.Outband.routes):
                #log.info('r-%s', r)
                if r[0] == src_str and r[1] == node_str and r[-1] == dst_str:
                    route = r
                    break
            if route:
                if not ('permanent' in core.Outband.route_properties[i]):
                    del (core.Outband.routes[i])
                log.info('r-%s', route)

                if self.first:
                    start = 10
                    duration = 1
                    core.callDelayed(start,
                                     self._install_port_down,
                                     1,
                                     1,
                                     1,
                                     duration=0)
                    core.callDelayed(start,
                                     self._install_port_down,
                                     2,
                                     3,
                                     1,
                                     duration=0)
                    core.callDelayed(start + duration,
                                     self._install_port_down,
                                     1,
                                     1,
                                     0,
                                     duration=0)
                    core.callDelayed(start + duration,
                                     self._install_port_down,
                                     2,
                                     3,
                                     0,
                                     duration=0)
                    self.first = False

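                # Install the per-hop flow entries from the destination back
                # toward the source.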
                hops = zip(route[0:], route[1:], route[2:])
                hops.reverse()
                for p, c, n in hops:
                    self._install_flow(event, p, c, n)
                return

        super(PredefinedRouting, self)._handle_PacketIn(event)
Exemplo n.º 53
0
    def emulate_link_failure(self,
                             start,
                             duration=0,
                             link='nl-hr',
                             reroute=0,
                             restore_reroute=True):
        self.prev_change_port = None
        self.log_file_last = open("/tmp/pox_log_last", "w")
        self.barrier_log_file_last = open("/tmp/barrier_log_last", "w")

        n1, n2 = link.split('-')
        node_n1 = core.Outband.t.name(n1)
        node_n2 = core.Outband.t.name(n2)
        n1_dpid = node_n1.dpid
        n2_dpid = node_n2.dpid
        n1_port_to_n2 = node_n1.port_num(node_n2)
        n2_port_to_n1 = node_n2.port_num(node_n1)
        log.warn(
            "Going down in %is, link:%ss, dur:%ss, reroute:%ss, restore after dur:%s"
            % (start, link, duration, reroute, restore_reroute))
        end = start + duration
        self._prev_barrier_arrived = {}
        f = self._install_port_down
        core.callDelayed(start, f, n1_dpid, n1_port_to_n2, 1, duration)
        core.callDelayed(start, f, n2_dpid, n2_port_to_n1, 1, duration)
        core.callDelayed(end, f, n1_dpid, n1_port_to_n2, 0, duration, True)
        core.callDelayed(end, f, n2_dpid, n2_port_to_n1, 0, duration)
        if reroute:
            core.callDelayed(start + reroute, self._install_failover_entries,
                             n1)
            core.callDelayed(start + reroute, self._install_failover_entries,
                             n2)
        if reroute and restore_reroute:
            core.callDelayed(end + reroute, self._delete_failover_entries, n1)
            core.callDelayed(end + reroute, self._delete_failover_entries, n2)
        core.callDelayed(end + reroute + 1, self._flush_logs)
Exemplo n.º 54
0
 def _handle_ConnectionUp (self, event):
   sw = self.switches.get(event.dpid)
   if sw is None:
     # New switch
     self.switches[event.dpid] = {}
     core.callDelayed(1, self._handle_timer, event.dpid)
Exemplo n.º 55
0
def launch():
    log.info("####Starting...####")
    core.callDelayed(10, install_flows)
    log.info("### Waiting for switches to connect.. ###")
Exemplo n.º 56
0
def alive_querry():
    msg = of.ofp_stats_request(body=of.ofp_flow_stats_request())
    for switch in switches: 
        switch.connection.send(msg)
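    # Check for flow-stats replies two seconds after the requests go out.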
    core.callDelayed(2, checkResponses)
    self.update_distances()
Exemplo n.º 57
0
def launch():
    core.openflow.addListenerByName("ConnectionUp", _handle_ConnectionUp)
    core.openflow.addListenerByName("PacketIn", _handle_PacketIn)
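    # Push the flow mods 45 seconds after the controller starts.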
    core.callDelayed(45, sendFlowMod)