def register_chassis(self):
        # Get all chassis from the NB DB and load them into the local cache
        # (db_store).
        for c in self.nb_api.get_all(core.Chassis):
            self.db_store.update(c)

        # Fetch this host's chassis from the cache, if it is already there.
        old_chassis = self.db_store.get_one(core.Chassis(id=self.chassis_name))

        # Rebuild this host's chassis from the current configuration.
        chassis = core.Chassis(
            id=self.chassis_name,
            ip=self.ip,
            tunnel_types=self.tunnel_types,
        )
        if cfg.CONF.df.external_host_ip:
            chassis.external_host_ip = cfg.CONF.df.external_host_ip

        # Update the cache with the new chassis.
        self.db_store.update(chassis)

        # REVISIT (dimak) Remove skip_send_event once there is no bind conflict
        # between the publisher service and the controller, see bug #1651643
        # Trigger a create or update event in the NB DB.
        if old_chassis is None:
            self.nb_api.create(chassis, skip_send_event=True)
        elif old_chassis != chassis:
            self.nb_api.update(chassis, skip_send_event=True)
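A self-contained sketch of the create-vs-update decision above, using
unittest.mock stand-ins instead of the real nb_api, db_store and Chassis
objects (all names below are illustrative, not Dragonflow code):

from unittest import mock


def sync_chassis(nb_api, db_store, new_chassis):
    # Mimics register_chassis(): create when the chassis is unknown,
    # update only when the cached copy differs from the new one.
    old_chassis = db_store.get_one(new_chassis)
    db_store.update(new_chassis)
    if old_chassis is None:
        nb_api.create(new_chassis)
    elif old_chassis != new_chassis:
        nb_api.update(new_chassis)


nb_api, db_store = mock.MagicMock(), mock.MagicMock()

db_store.get_one.return_value = None           # chassis not registered yet
sync_chassis(nb_api, db_store, ('10.0.0.1',))
nb_api.create.assert_called_once()             # first registration -> create

nb_api.reset_mock()
db_store.get_one.return_value = ('10.0.0.1',)  # cached copy differs
sync_chassis(nb_api, db_store, ('10.0.0.2',))
nb_api.update.assert_called_once()             # changed config -> update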
Example #2
    def test__is_physical_chassis(self, get_one):
        # real chassis
        chassis_real = core.Chassis(id='ch1', ip='10.0.0.3')
        self.assertTrue(self.controller._is_physical_chassis(chassis_real))

        self.db_store = mock.MagicMock()
        get_one.return_value = core.Chassis(id='ch2', ip='10.0.0.4')
        chassis_ref = model_proxy.create_reference(core.Chassis, 'ch2')
        self.assertTrue(self.controller._is_physical_chassis(chassis_ref))

        get_one.return_value = None
        chassis_bad_ref = model_proxy.create_reference(core.Chassis, 'ch3')
        self.assertFalse(self.controller._is_physical_chassis(chassis_bad_ref))
Example #3
    def _get_external_ip_by_host(self, host):
        chassis = self.nb_api.get(core.Chassis(id=host))
        if not chassis:
            LOG.warning('Unable to find chassis %s', host)
            return

        # If chassis's external_host_ip is not specified,
        # fall back to chassis's ip. This is based on the assumption
        # that they are routable to each other.
        return chassis.external_host_ip or chassis.ip
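The fallback itself is just "external_host_ip or ip"; a minimal standalone
illustration with a namedtuple stand-in (not the Dragonflow model):

from collections import namedtuple

# Stand-in exposing only the two fields the fallback reads.
FakeChassis = namedtuple('FakeChassis', 'ip external_host_ip')

with_override = FakeChassis(ip='10.0.0.5', external_host_ip='172.24.4.100')
without_override = FakeChassis(ip='10.0.0.5', external_host_ip=None)

assert (with_override.external_host_ip or with_override.ip) == '172.24.4.100'
assert (without_override.external_host_ip or without_override.ip) == '10.0.0.5'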
    def test_delete_chassis(self, mock_db_store_delete, mock_get_ports,
                            mock_controller_delete):
        lport_id = 'fake_lport_id'
        chassis = core.Chassis(id='fake_chassis_id')
        lport = mock.Mock()
        lport.id = lport_id
        mock_get_ports.return_value = [lport]

        self.controller.delete(chassis)
        mock_controller_delete.assert_called_once_with(lport)
        mock_db_store_delete.assert_called_once_with(chassis)
    def test_generate_service_id(self):
        with mock.patch.object(uuid, 'uuid5') as uuid5:
            service_model.generate_service_id('test_host1', 'test_binary')
            uuid5.assert_called_once_with(service_model.SERVICE_ID_NAMESPACE,
                                          'test_host1test_binary')
            uuid5.reset_mock()

            chassis = core.Chassis(id='test_host2')
            service_model.generate_service_id(chassis, 'test_binary')
            uuid5.assert_called_once_with(service_model.SERVICE_ID_NAMESPACE,
                                          'test_host2test_binary')
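For reference, a standalone sketch of the host + binary naming scheme the
test above exercises; FAKE_NAMESPACE and sketch_service_id are illustrative
stand-ins, not Dragonflow's SERVICE_ID_NAMESPACE or generate_service_id:

import uuid

FAKE_NAMESPACE = uuid.UUID('12345678-1234-5678-1234-567812345678')


def sketch_service_id(host, binary):
    # Deterministic: the same (host, binary) pair always yields the same id.
    return uuid.uuid5(FAKE_NAMESPACE, host + binary)


assert (sketch_service_id('test_host1', 'test_binary') ==
        sketch_service_id('test_host1', 'test_binary'))
assert (sketch_service_id('test_host1', 'test_binary') !=
        sketch_service_id('test_host2', 'test_binary'))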
    def test_register_chassis(self):
        cfg.CONF.set_override('external_host_ip', '172.24.4.100', group='df')
        self.controller.register_chassis()
        expected_chassis = core.Chassis(
            id=self.controller.chassis_name,
            ip=self.controller.ip,
            external_host_ip="172.24.4.100",
            tunnel_types=self.controller.tunnel_types,
        )

        self.assertIn(expected_chassis, self.controller.db_store2)
        self.nb_api.update.assert_called_once_with(expected_chassis)
Example #7
    def register_chassis(self):
        # Get all chassis from nb db to db store.
        for c in self.nb_api.get_all(core.Chassis):
            self.db_store.update(c)

        old_chassis = self.db_store.get_one(core.Chassis(id=self.chassis_name))

        chassis = core.Chassis(
            id=self.chassis_name,
            ip=self.ip,
            tunnel_types=self.tunnel_types,
        )
        if cfg.CONF.df.external_host_ip:
            chassis.external_host_ip = cfg.CONF.df.external_host_ip

        self.db_store.update(chassis)

        if old_chassis is None:
            self.nb_api.create(chassis)
        elif old_chassis != chassis:
            self.nb_api.update(chassis)
Example #8
                                         id='fake_external_switch1')

external_switch1_subnets = [
    l2.Subnet(name="public-subnet",
              enable_dhcp=False,
              topic="fake_tenant1",
              gateway_ip="172.24.4.1",
              cidr="172.24.4.0/24",
              id="fake_external_subnet1",
              version=1,
              lswitch='fake_external_switch1')
]

fake_chassis1 = core.Chassis(
    id='fakehost',
    ip='172.24.4.50',
    tunnel_types=('vxlan', ),
)

fake_chassis2 = core.Chassis(
    id='fake_host2',
    ip='172.24.4.51',
    tunnel_types=('vxlan', ),
)


def chassis_binding(chassis):
    return l2.PortBinding(
        type=l2.BINDING_CHASSIS,
        chassis=chassis,
    )
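A possible way the helper above is used in test fixtures (a sketch; the
assertion assumes PortBinding exposes its fields as plain attributes):

# Build one binding per fake chassis and reuse it across test ports.
binding1 = chassis_binding(fake_chassis1)
binding2 = chassis_binding(fake_chassis2)

assert binding1.type == binding2.type == l2.BINDING_CHASSIS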
Example #9
class RyuDFAdapter(ofp_handler.OFPHandler):
    OFP_VERSIONS = [ofproto_v1_3.OFP_VERSION]
    OF_AUTO_PORT_DESC_STATS_REQ_VER = 0x04

    call_on_datapath_set = None
    ctrl = None
    chassis = core.Chassis(
        id='whoami',
        ip='172.24.4.50',
        tunnel_types=('vxlan', ),
    )

    local_binding = l2.PortBinding(
        type=l2.BINDING_CHASSIS,
        chassis=chassis,
    )
    fake_lswitch_default_subnets = [
        l2.Subnet(dhcp_ip="192.168.123.0",
                  name="private-subnet",
                  enable_dhcp=True,
                  topic="fake_tenant1",
                  gateway_ip="192.168.123.1",
                  cidr="192.168.123.0/24",
                  id="fake_subnet1")
    ]

    def __init__(self, vswitch_api, nb_api, neutron_server_notifier=None):
        super(RyuDFAdapter, self).__init__()
        self.dispatcher = dispatcher.AppDispatcher(cfg.CONF.df.apps_list)
        # TODO: Remove vswitch api for standalone version
        self.vswitch_api = vswitch_api
        self.nb_api = nb_api
        self.neutron_server_notifier = neutron_server_notifier
        self._datapath = None
        self.table_handlers = {}
        self.first_connect = True

    @property
    def datapath(self):
        return self._datapath

    def start(self):
        super(RyuDFAdapter, self).start()

        self.load(self,
                  vswitch_api=self.vswitch_api,
                  nb_api=self.nb_api,
                  neutron_server_notifier=self.neutron_server_notifier)
        #self.wait_until_ready()

    def load(self, *args, **kwargs):
        self.dispatcher.load(*args, **kwargs)

    def is_ready(self):
        return self.datapath is not None

    def wait_until_ready(self):
        while not self.is_ready():
            LOG.debug("Not ready. Going to sleep 3")
            time.sleep(3)

    def register_table_handler(self, table_id, handler):
        if table_id in self.table_handlers:
            raise RuntimeError(
                _('Cannot register handler {new_handler} for table {table}, '
                  'occupied by {existing_handler}').format(
                      table=table_id,
                      new_handler=handler,
                      existing_handler=self.table_handlers[table_id],
                  ), )
        self.table_handlers[table_id] = handler

    def unregister_table_handler(self, table_id, handler):
        self.table_handlers.pop(table_id, None)

    def notify_ovs_sync_finished(self):
        self.dispatcher.dispatch('ovs_sync_finished')

    def notify_ovs_sync_started(self):
        self.dispatcher.dispatch('ovs_sync_started')

    @handler.set_ev_cls(ofp_event.EventOFPStateChange,
                        [handler.MAIN_DISPATCHER, handler.DEAD_DISPATCHER])
    def state_change_handler(self, ev):
        dp = ev.datapath

    @handler.set_ev_handler(ofp_event.EventOFPSwitchFeatures,
                            handler.CONFIG_DISPATCHER)
    def switch_features_handler(self, ev):
        # TODO(oanson) is there a better way to get the datapath?
        datapath = ev.msg.datapath
        ofproto = datapath.ofproto
        parser = datapath.ofproto_parser
        self._datapath = datapath
        super(RyuDFAdapter, self).switch_features_handler(ev)
        version = self.datapath.ofproto.OFP_VERSION
        if version < RyuDFAdapter.OF_AUTO_PORT_DESC_STATS_REQ_VER:
            # Otherwise, this is done automatically by OFPHandler
            self._send_port_desc_stats_request(self.datapath)

        self.get_sw_async_msg_config()

        self.dispatcher.dispatch('switch_features_handler', ev)

        if not self.first_connect:
            # When reconnecting to the Ryu controller, df needs a full sync
            # in case any resources were added during the disconnection.
            self.nb_api.db_change_callback(None, None,
                                           constants.CONTROLLER_REINITIALIZE,
                                           None)
        self.first_connect = False
        #self.vswitch_api.initialize(self.nb_api)
        if RyuDFAdapter.call_on_datapath_set is not None:
            RyuDFAdapter.call_on_datapath_set(RyuDFAdapter.ctrl)
        # install table miss flow
        # match = parser.OFPMatch()
        # actions = [parser.OFPActionOutput(ofproto.OFPP_CONTROLLER,
        #                                   ofproto.OFPCML_NO_BUFFER)]
        # self.add_flow(datapath, 0, match, actions)

    def _send_port_desc_stats_request(self, datapath):
        ofp_parser = datapath.ofproto_parser
        req = ofp_parser.OFPPortDescStatsRequest(datapath, 0)
        datapath.send_msg(req)

    @handler.set_ev_handler(ofp_event.EventOFPPortDescStatsReply,
                            handler.MAIN_DISPATCHER)
    def port_desc_stats_reply_handler(self, ev):
        self.dispatcher.dispatch('port_desc_stats_reply_handler', ev)

    @handler.set_ev_handler(ofp_event.EventOFPPacketIn,
                            handler.MAIN_DISPATCHER)
    def OF_packet_in_handler(self, event):
        msg = event.msg
        table_id = msg.table_id
        if table_id in self.table_handlers:
            handler = self.table_handlers[table_id]
            handler(event)
        else:
            LOG.info(
                "No handler for table id %(table)s with message "
                "%(msg)", {
                    'table': table_id,
                    'msg': msg
                })

    @handler.set_ev_handler(ofp_event.EventOFPErrorMsg,
                            handler.MAIN_DISPATCHER)
    def OF_error_msg_handler(self, event):
        msg = event.msg
        try:
            (version, msg_type, msg_len, xid) = ofproto_parser.header(msg.data)
            ryu_msg = ofproto_parser.msg(
                self._datapath, version, msg_type,
                msg_len - ofproto_common.OFP_HEADER_SIZE, xid, msg.data)
            LOG.error('OFPErrorMsg received: %s', ryu_msg)
        except Exception:
            LOG.error(
                'Unrecognized OFPErrorMsg received: '
                'type=0x%(type)02x code=0x%(code)02x '
                'message=%(msg)s', {
                    'type': msg.type,
                    'code': msg.code,
                    'msg': utils.hex_array(msg.data)
                })

    @handler.set_ev_cls(ofp_event.EventOFPGetAsyncReply,
                        handler.MAIN_DISPATCHER)
    def get_async_reply_handler(self, event):
        msg = event.msg
        LOG.debug(
            'OFPGetAsyncReply received: packet_in_mask=0x%08x:0x%08x '
            'port_status_mask=0x%08x:0x%08x '
            'flow_removed_mask=0x%08x:0x%08x', msg.packet_in_mask[0],
            msg.packet_in_mask[1], msg.port_status_mask[0],
            msg.port_status_mask[1], msg.flow_removed_mask[0],
            msg.flow_removed_mask[1])
        self.set_sw_async_msg_config_for_ttl(msg)

    def get_sw_async_msg_config(self):
        """Get the configuration of current switch"""
        ofp_parser = self._datapath.ofproto_parser
        req = ofp_parser.OFPGetAsyncRequest(self._datapath)
        self._datapath.send_msg(req)

    def set_sw_async_msg_config_for_ttl(self, cur_config):
        """Configure switch for TTL

        Configure the switch to packet-in TTL invalid packets to controller.
        Note that this method only works with OFP 1.3; however, this Ryu app
        declares that it only supports ofproto_v1_3.OFP_VERSION, so no
        version check is made here.
        """
        dp = self._datapath
        parser = dp.ofproto_parser
        ofproto = dp.ofproto

        if cur_config.packet_in_mask[0] & 1 << ofproto.OFPR_INVALID_TTL != 0:
            LOG.info('SW config for TTL error packet in has already '
                     'been set')
            return

        packet_in_mask = (cur_config.packet_in_mask[0]
                          | 1 << ofproto.OFPR_INVALID_TTL)
        m = parser.OFPSetAsync(
            dp, [packet_in_mask, cur_config.packet_in_mask[1]],
            [cur_config.port_status_mask[0], cur_config.port_status_mask[1]],
            [cur_config.flow_removed_mask[0], cur_config.flow_removed_mask[1]])
        dp.send_msg(m)
        LOG.info('Set SW config for TTL error packet in.')
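The mask handling above reduces to plain bit arithmetic. A standalone
illustration, assuming OFPR_INVALID_TTL is 2 as in the OpenFlow 1.3 spec
(this is not Ryu code):

OFPR_INVALID_TTL = 2                   # assumed value from the OF 1.3 spec

current_mask = 0b011                   # e.g. packet-in on no-match and action
already_set = current_mask & (1 << OFPR_INVALID_TTL) != 0
assert not already_set                 # bit 2 is clear -> reconfigure

new_mask = current_mask | (1 << OFPR_INVALID_TTL)
assert new_mask == 0b111               # invalid-TTL packet-ins now enabled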
Example #10
class SimpleSwitch13(app_manager.RyuApp):
    OFP_VERSIONS = [ofproto_v1_3.OFP_VERSION]

    chassis = core.Chassis(
        id='whoami',
        ip='172.24.4.50',
        tunnel_types=('vxlan', ),
    )

    local_binding = l2.PortBinding(
        type=l2.BINDING_CHASSIS,
        chassis=chassis,
    )
    # This cache will be shared with other apps in future development
    cache_ports_by_datapath_id = {}

    USE_CACHE = True

    def __init__(self, *args, **kwargs):
        super(SimpleSwitch13, self).__init__(*args, **kwargs)
        # self.mac_to_port = {}
        db_store._instance = None
        self.fake_lswitch_default_subnets = [
            l2.Subnet(dhcp_ip="192.168.123.0",
                      name="private-subnet",
                      enable_dhcp=True,
                      topic="fake_tenant1",
                      gateway_ip="192.168.123.1",
                      cidr="192.168.123.0/24",
                      id="fake_subnet1")
        ]
        #print (self.fake_lswitch_default_subnets[0].dhcp_ip)

        #common_config.init(sys.argv[1:3])
        #common_config.setup_logging()
        self.nb_api = api_nb.NbApi.get_instance(False)

        self.controller = controller_concept.DfStandaloneController(
            'df_standalone', self.nb_api)
        self.db_store = db_store.get_instance()
        self.controller.on_datapath_set()

        self.nb_api.on_db_change.append(self.db_change_callback)

        if self.USE_CACHE:
            self.sync_with_database()

    def sync_with_database(self):
        """
        After controller start/restart synchronize cache with db
        :return:
        """
        # learn from db
        lports = self.nb_api.get_all(l2.LogicalPort)
        lswitches = self.nb_api.get_all(l2.LogicalSwitch)
        for lswitch in lswitches:
            dpid = "{}".format(lswitch.id)
            for lport in lports:
                if lport.lswitch.id == dpid:
                    self.cache_ports_by_datapath_id.setdefault(dpid, {})
                    self.cache_ports_by_datapath_id[dpid][lport.id] = lport
                    # TODO Controller name as topic
                    # self.controller.register_topic("fake_tenant1")

    def db_change_callback(self, table, key, action, value, topic=None):
        """
        Called from nb_api on db update.

        :param table:
        :param key:
        :param action:
        :param value:
        :param topic:
        """
        print(
            "L2 App: Received Update for table {} and key {} action {}".format(
                table, key, action))
        # These updates are only required if data is cached locally
        if self.USE_CACHE:
            if table == 'lport' and (action == 'create' or action == 'update'):
                # check if datapath of port can be found in cache
                cache_dpid = None
                for dpid, port_dict in self.cache_ports_by_datapath_id.items():
                    if key in port_dict:
                        # This entry needs to be updated. Mutating the cache
                        # while iterating is bad practice, so exit the loop
                        # and apply the update afterwards.
                        cache_dpid = dpid
                        break
                if cache_dpid is not None:
                    # Value was already in the cache -> refresh it from the db.
                    self.cache_ports_by_datapath_id[cache_dpid][
                        key] = self.nb_api.get(l2.LogicalPort(id=key))
                else:
                    # Port not in the cache yet -> fetch it and add it.
                    lport = self.nb_api.get(l2.LogicalPort(id=key))
                    dpid = lport.lswitch.id
                    self.cache_ports_by_datapath_id.setdefault(dpid, {})
                    self.cache_ports_by_datapath_id[dpid][lport.id] = lport

    @set_ev_cls(ofp_event.EventOFPSwitchFeatures, CONFIG_DISPATCHER)
    def switch_features_handler(self, ev):
        datapath = ev.msg.datapath
        ofproto = datapath.ofproto
        parser = datapath.ofproto_parser

        # install table-miss flow entry
        #
        # We specify NO BUFFER to max_len of the output action due to
        # OVS bug. At this moment, if we specify a lesser number, e.g.,
        # 128, OVS will send Packet-In with invalid buffer_id and
        # truncated packet data. In that case, we cannot output packets
        # correctly.  The bug has been fixed in OVS v2.1.0.
        match = parser.OFPMatch()
        actions = [
            parser.OFPActionOutput(ofproto.OFPP_CONTROLLER,
                                   ofproto.OFPCML_NO_BUFFER)
        ]
        self.add_flow(datapath, 0, match, actions)

    def add_flow(self, datapath, priority, match, actions, buffer_id=None):
        ofproto = datapath.ofproto
        parser = datapath.ofproto_parser

        inst = [
            parser.OFPInstructionActions(ofproto.OFPIT_APPLY_ACTIONS, actions)
        ]
        if buffer_id:
            mod = parser.OFPFlowMod(datapath=datapath,
                                    buffer_id=buffer_id,
                                    priority=priority,
                                    match=match,
                                    instructions=inst)
        else:
            mod = parser.OFPFlowMod(datapath=datapath,
                                    priority=priority,
                                    match=match,
                                    instructions=inst)
        datapath.send_msg(mod)

    @set_ev_cls(ofp_event.EventOFPPacketIn, MAIN_DISPATCHER)
    def _packet_in_handler(self, ev):
        # If you hit this you might want to increase
        # the "miss_send_length" of your switch
        if ev.msg.msg_len < ev.msg.total_len:
            self.logger.debug("packet truncated: only %s of %s bytes",
                              ev.msg.msg_len, ev.msg.total_len)
        msg = ev.msg
        datapath = msg.datapath
        ofproto = datapath.ofproto
        parser = datapath.ofproto_parser
        in_port = msg.match['in_port']

        pkt = packet.Packet(msg.data)
        eth = pkt.get_protocols(ethernet.ethernet)[0]

        if eth.ethertype == ether_types.ETH_TYPE_LLDP:
            # ignore lldp packet
            return
        dst = eth.dst
        src = eth.src

        dpid = datapath.id
        # self.mac_to_port.setdefault(dpid, {})

        self.logger.info("packet in %s %s %s %s", dpid, src, dst, in_port)

        # self.print_cache_ports_by_datapath_id()

        # learn a mac address to avoid FLOOD next time.
        # self.mac_to_port[dpid][src] = in_port
        self.update_mac_to_port(dpid, src, in_port, self.USE_CACHE)

        #        if dst in self.mac_to_port[dpid]:
        #            out_port = self.mac_to_port[dpid][dst]
        #        else:
        #            out_port = ofproto.OFPP_FLOOD

        out_port = self.get_port_from_mac(dpid, dst, self.USE_CACHE)
        if out_port is None:
            # unknown
            out_port = ofproto.OFPP_FLOOD

        actions = [parser.OFPActionOutput(out_port)]

        # install a flow to avoid packet_in next time
        if out_port != ofproto.OFPP_FLOOD:
            match = parser.OFPMatch(in_port=in_port, eth_dst=dst)
            # check whether we have a valid buffer_id; if so, avoid sending
            # both a flow_mod and a packet_out
            if msg.buffer_id != ofproto.OFP_NO_BUFFER:
                self.add_flow(datapath, 1, match, actions, msg.buffer_id)
                return
            else:
                self.add_flow(datapath, 1, match, actions)
        data = None
        if msg.buffer_id == ofproto.OFP_NO_BUFFER:
            data = msg.data

        out = parser.OFPPacketOut(datapath=datapath,
                                  buffer_id=msg.buffer_id,
                                  in_port=in_port,
                                  actions=actions,
                                  data=data)
        datapath.send_msg(out)

    @set_ev_cls(event.EventSwitchEnter)
    def get_topology_data(self, ev):
        """
        Called when a new switch connects to the controller.
        Creates switches and ports in the db (if they do not already exist).

        :param ev:
        """
        switch_list = get_switch(self, None)  # .topology_api_app
        # switches = [switch.dp.id for switch in switch_list]
        # print "switches: ", switches

        # links_list = get_link(self, switches[0])  # .topology_api_app ,None
        # links = [(link.src.dpid, link.dst.dpid, {'port': link.src.port_no}) for link in links_list]
        # print "links_list: ", links_list  # [0]
        # print "links", links

        # update load monitoring
        override_load_file(load=len(switch_list))

        for switch in switch_list:
            self.create_switch(switch)
            for p in switch.ports:
                #print 'create port {}'.format(p)
                self.create_port(switch.dp.id, "", p.port_no, p.hw_addr)

        print("L2 App: Switch ENTER Done")

    @set_ev_cls(event.EventSwitchLeave)
    def on_switch_leave(self, ev):
        dpid = "{}".format(ev.switch.dp.id)
        print "L2 App: Switch {} left".format(dpid)
        # Removing Switch from DB and Cache (optional)
        db_switch = self.nb_api.get(l2.LogicalSwitch(id=dpid))
        self.nb_api.delete(db_switch)
        lports = self.nb_api.get_all(l2.LogicalPort)
        for port in lports:
            if str(port.lswitch.id) == dpid:
                self.nb_api.delete(port)
        # Remove switch and ports from cache if caching is enabled
        if self.USE_CACHE:
            self.cache_ports_by_datapath_id.pop(dpid, None)

        switch_list = get_switch(self, None)
        # update load monitoring
        override_load_file(load=len(switch_list))

    # DATABASE and CACHE Access

    def create_port_id(self, dpid, port_no):
        """
        Create the id of a port with its datapath/switch id
        :rtype: Created ID: Used for key in store
        """
        return "{}:{}".format(dpid, port_no)

    def get_port_from_mac(self, dpid, mac, use_cache=True):
        """

        Can be inconsistent with db
        :param dpid:
        :param mac:
        :return:
        """

        dpid = str(dpid)
        if use_cache:
            if dpid in self.cache_ports_by_datapath_id:
                for lport in self.cache_ports_by_datapath_id[dpid].values():
                    if mac in lport.macs:
                        return int(lport.port_no)
        else:
            try:
                lports = self.nb_api.get_all(l2.LogicalPort)
                for port in lports:
                    if port.lswitch.id == dpid and mac in port.macs:
                        return int(port.port_no)
            except DBKeyNotFound:
                return None
        # if nothing was found in cache
        return None

    def update_mac_to_port(self, dpid, mac, port, use_cache=True):
        """
        Can be inconsistent with db
        :param dpid:
        :param mac:
        :param port:
        """
        port_id = self.create_port_id(dpid, port)
        dpid = str(dpid)
        # TODO: check for host migration
        if use_cache:
            # check cache:
            if dpid in self.cache_ports_by_datapath_id:
                if port_id in self.cache_ports_by_datapath_id[dpid]:
                    cport = self.cache_ports_by_datapath_id[dpid][port_id]
                    if mac not in cport.macs:
                        # update cache
                        cport.macs.append(mac)
                        # update db
                        self.nb_api.update(cport)
            else:
                # new learned port!
                # write to database
                self.cache_ports_by_datapath_id.setdefault(
                    dpid, {})  # create an empty entry if the key does not exist
                self.cache_ports_by_datapath_id[dpid][
                    port_id] = self.create_port(dpid, mac, port)
        else:
            try:
                lport = self.nb_api.get(l2.LogicalPort(id=port_id))
                if lport is not None and mac not in lport.macs:
                    lport.macs.append(mac)
                    self.nb_api.update(lport)
            except DBKeyNotFound:
                self.create_port(dpid, mac, port)

    def create_port(self, dpid, mac, port_no, hw_addr=""):
        """
        Creates port in db if not exist
        :param dpid:
        :param mac:
        :param port_no:
        :return:
        """
        ips = ('0.0.0.0', )
        p_id = self.create_port_id(dpid, port_no)
        dpid = str(dpid)
        macs = []
        if mac:
            macs.append(mac)

        if not self.nb_api.get(l2.LogicalPort(id=p_id)):
            new_port = l2.LogicalPort(
                id=p_id,
                port_no=str(port_no),
                topic="debug-topic",
                name='logical_port',
                unique_key=2,
                version=2,
                hw_addr=hw_addr,
                #ips=ips,
                subnets=None,
                macs=macs,
                binding=self.local_binding,
                lswitch='{}'.format(dpid),
                security_groups=['fake_security_group_id1'],
                allowed_address_pairs=[],
                port_security_enabled=False,
                device_owner='whoami',
                device_id='fake_device_id',
                # binding_vnic_type=binding_vnic_type,
                dhcp_params={},
            )
            self.cache_ports_by_datapath_id.setdefault(dpid, {})
            self.cache_ports_by_datapath_id[dpid][p_id] = new_port
            self.nb_api.create(new_port)
            new_port.emit_created()
            return new_port

    def create_switch(self, switch):
        """
        Creates switch in db if not exist
        :rtype: LogicalSwitch
        :param switch:
        """
        if not self.nb_api.get(l2.LogicalSwitch(id='{}'.format(switch.dp.id))):
            # switch does not exist in the db yet
            local_switch = l2.LogicalSwitch(
                subnets=self.fake_lswitch_default_subnets,
                network_type='local',
                id='{}'.format(switch.dp.id),
                segmentation_id=41,
                mtu=1500,
                topic='fake_tenant1',
                unique_key=int(switch.dp.id),
                is_external=False,
                name='private')
            self.nb_api.create(local_switch)
            return local_switch

    # Debug utils

    def print_cache_ports_by_datapath_id(self):
        for dpid, port_dict in self.cache_ports_by_datapath_id.items():
            for port in port_dict.values():
                print("\ndpid:{}".format(dpid))
                print("\nPort_id: {}, macs:{}".format(port.id, port.macs))
    def update_migration_flows(self, lport):
        # This method processes the migration event sent from the source node.
        # The event is handled differently on three kinds of nodes: the source
        # node, the destination node, and the other nodes related to the
        # migrating VM's topic, depending on the chassis ID in the lport and
        # the local chassis.
        port_id = lport.id
        migration = self.nb_api.get_lport_migration(port_id)
        original_lport = self.db_store2.get_one(lport)

        if migration:
            dest_chassis = migration['migration']
        else:
            LOG.info("last lport deleted of this topic, do nothing %s", lport)
            return

        if not self._set_lport_external_values(lport):
            return

        if dest_chassis == self.chassis_name:
            # destination node
            ofport = self.vswitch_api.get_port_ofport_by_id(port_id)
            lport.ofport = ofport
            lport.is_local = True
            self.db_store2.update(lport)

            LOG.info(
                "dest process migration event port = %(port)s"
                "original_port = %(original_port)s"
                "chassis = %(chassis)s"
                "self_chassis = %(self_chassis)s", {
                    'port': lport,
                    'original_port': original_lport,
                    'chassis': dest_chassis,
                    'self_chassis': self.chassis_name
                })
            if original_lport:
                original_lport.emit_remote_deleted()
            lport.emit_local_created()
            return

        # Here it could be either the source node or another related node,
        # so get the ofport from the chassis.
        ofport = self.vswitch_api.get_vtp_ofport(lport.network_type)
        lport.ofport = ofport
        remote_chassis = self.db_store2.get_one(core.Chassis(id=dest_chassis))
        if not remote_chassis:
            # chassis has not been online yet.
            return
        lport.peer_vtep_address = remote_chassis.ip

        LOG.info(
            "src process migration event port = %(port)s"
            "original_port = %(original_port)s"
            "chassis = %(chassis)s", {
                'port': lport,
                'original_port': original_lport,
                'chassis': dest_chassis
            })

        # source node and other related nodes
        if original_lport and lport.chassis.id != self.chassis_name:
            original_lport.emit_remote_deleted()

        lport.emit_remote_created()