def proxy_request(self, req):
    headers = self.get_headers(req)
    url = urlparse.urlunsplit((
        self.get_scheme(req),
        self.get_host(req),
        self.get_path_info(req),
        self.get_query_string(req),
        ''))
    h = self.create_http_client(req)
    resp, content = h.request(url, method=req.method, headers=headers,
                              body=req.body)
    if resp.status == 200:
        LOG.debug(str(resp))
        return self.create_response(req, resp, content)
    elif resp.status == 403:
        LOG.warning(_LW(
            'The remote metadata server responded with Forbidden. This '
            'response usually occurs when shared secrets do not match.'))
        return webob.exc.HTTPForbidden()
    elif resp.status == 400:
        return webob.exc.HTTPBadRequest()
    elif resp.status == 404:
        return webob.exc.HTTPNotFound()
    elif resp.status == 409:
        return webob.exc.HTTPConflict()
    elif resp.status == 500:
        msg = _LW(
            'Remote metadata server experienced an internal server error.')
        LOG.warning(msg)
        explanation = six.text_type(msg)
        return webob.exc.HTTPInternalServerError(explanation=explanation)
    else:
        raise Exception(_('Unexpected response code: %s') % resp.status)
def _check_ofport(port_name, ofport):
    if ofport is None:
        LOG.warning(_LW("Can't find ofport for port %s."), port_name)
        return False
    if ofport < OFPORT_RANGE_MIN or ofport > OFPORT_RANGE_MAX:
        LOG.warning(_LW("ofport %(ofport)s for port %(port)s is invalid."),
                    {'ofport': ofport, 'port': port_name})
        return False
    return True
def run_db_poll(self):
    try:
        self.nb_api.sync()
        self.register_chassis()
        self.create_tunnels()
        if not self.enable_selective_topo_dist:
            self.read_switches()
            self.read_security_groups()
            self.port_mappings()
            self.read_routers()
            self.read_floatingip()
        self.sync_finished = True
    except Exception as e:
        self.sync_finished = False
        LOG.warning(_LW("run_db_poll - suppressing exception"))
        LOG.exception(e)
def run(self):
    if self.multiproc_subscriber:
        self.multiproc_subscriber.daemonize()
    self.db.initialize(
        db_ip=cfg.CONF.df.remote_db_ip,
        db_port=cfg.CONF.df.remote_db_port,
        config=cfg.CONF.df)
    self._register_as_publisher()
    self._publishers_table_monitor = pub_sub_api.StalePublisherMonitor(
        self.db,
        self.publisher,
        cfg.CONF.df.publisher_timeout)
    self._publishers_table_monitor.daemonize()
    # TODO(oanson) TableMonitor daemonize will go here
    while True:
        try:
            event = self._queue.get()
            self.publisher.send_event(event)
            if event.table != pub_sub_api.PUBLISHER_TABLE:
                self._update_timestamp_in_db()
            eventlet.sleep(0)
        except Exception as e:
            LOG.warning(_LW("Exception in main loop: {}, {}").format(
                e, traceback.format_exc()))
def run_db_poll(self):
    try:
        self.nb_api.sync()
        self.vswitch_api.sync()
        self.register_chassis()
        self.create_tunnels()
        self.read_switches()
        self.read_security_groups()
        self.port_mappings()
        self.read_routers()
        self.read_floatingip()
        self.sync_finished = True
    except Exception as e:
        self.sync_finished = False
        LOG.warning(_LW("run_db_poll - suppressing exception"))
        LOG.warning(e)
def _is_dhcp_enabled_on_network(self, lport, net_id):
    subnet = self._get_subnet_by_port(lport)
    if subnet:
        return subnet.enable_dhcp()
    LOG.warning(_LW("No subnet found for port <%s>") % lport.get_id())
    return True
def _test_and_create_object(id):
    try:
        session = db_api.get_session()
        with session.begin():
            row = session.query(models.DFLockedObjects).filter_by(
                object_uuid=id).one()
            # test ttl
            if row.lock and timeutils.is_older_than(
                    row.created_at, cfg.CONF.df.distributed_lock_ttl):
                # reset the lock if it is timeout
                LOG.warning(_LW('The lock for object %(id)s is reset '
                                'due to timeout.'), {'id': id})
                _lock_free_update(session, id, lock_state=True,
                                  session_id=row.session_id)
    except orm_exc.NoResultFound:
        try:
            session = db_api.get_session()
            with session.begin():
                _create_db_row(session, oid=id)
        except db_exc.DBDuplicateEntry:
            # the lock is concurrently created.
            pass
def get_cluster_topology_by_all_nodes(self):
    # get redis cluster topology from local nodes cached in initialization
    new_nodes = {}
    for host, info in six.iteritems(self.cluster_nodes):
        ip_port = host.split(':')
        try:
            node = self._init_node(ip_port[0], ip_port[1])
            info = self._get_cluster_info(node)
            if info['cluster_state'] != 'ok':
                LOG.warning(_LW("redis cluster state failed"))
            else:
                new_nodes.update(self._get_cluster_nodes(node))
            self._release_node(node)
            break
        except Exception:
            LOG.exception(_LE("exception happened "
                              "when get cluster topology, %(ip)s:"
                              "%(port)s")
                          % {'ip': ip_port[0], 'port': ip_port[1]})
    return new_nodes
def start_detect_for_failover(self):
    # only start in NB plugin
    if self.redis_mgt is not None:
        self.redis_mgt.daemonize()
    else:
        LOG.warning(_LW("redis mgt is none"))
def _logical_port_process(self, lport, original_lport=None):
    chassis = lport.get_chassis()
    if chassis in (None, '', constants.DRAGONFLOW_VIRTUAL_PORT):
        LOG.debug(("Port %s has not been bound or it is a vPort ") %
                  lport.get_id())
        return
    chassis_to_ofport, lport_to_ofport = (
        self.vswitch_api.get_local_ports_to_ofport_mapping())
    network = self.get_network_id(lport.get_lswitch_id())
    lport.set_external_value('local_network_id', network)

    if chassis == self.chassis_name:
        lport.set_external_value('is_local', True)
        ofport = lport_to_ofport.get(lport.get_id(), 0)
        if ofport != 0:
            lport.set_external_value('ofport', ofport)
            if original_lport is None:
                LOG.info(_LI("Adding new local logical port = %s") %
                         str(lport))
                self.open_flow_app.notify_add_local_port(lport)
            else:
                LOG.info(_LI("Updating local logical port = %(port)s, "
                             "original port = %(original_port)s") %
                         {'port': str(lport),
                          'original_port': str(original_lport)})
                self.open_flow_app.notify_update_local_port(lport,
                                                            original_lport)
            self.db_store.set_port(lport.get_id(), lport, True)
        else:
            LOG.info(_LI("Local logical port %s was not created yet") %
                     str(lport))
    else:
        lport.set_external_value('is_local', False)
        ofport = chassis_to_ofport.get(chassis, 0)
        if ofport != 0:
            lport.set_external_value('ofport', ofport)
            if original_lport is None:
                LOG.info(_LI("Adding new remote logical port = %s") %
                         str(lport))
                self.open_flow_app.notify_add_remote_port(lport)
            else:
                LOG.info(_LI("Updating remote logical port = %(port)s, "
                             "original port = %(original_port)s") %
                         {'port': str(lport),
                          'original_port': str(original_lport)})
                self.open_flow_app.notify_update_remote_port(
                    lport, original_lport)
            self.db_store.set_port(lport.get_id(), lport, False)
        else:
            # TODO(gampel) add handling for this use case
            # remote port but no tunnel to remote Host
            # if this should never happen raise an exception
            LOG.warning(_LW("No tunnel for remote logical port %s") %
                        str(lport))
def run_db_poll(self):
    try:
        self.nb_api.sync()
        self.register_chassis()
        self.create_tunnels()
        if not self.enable_selective_topo_dist:
            # The order of the items here is meaningful; it follows the
            # objects' dependencies on each other
            items = [
                df_db_objects_refresh.DfObjectRefresher(
                    'Switches',
                    self.db_store.get_lswitch_keys,
                    self.nb_api.get_all_logical_switches,
                    self.logical_switch_updated,
                    self.logical_switch_deleted),
                df_db_objects_refresh.DfObjectRefresher(
                    'Security Groups',
                    self.db_store.get_security_group_keys,
                    self.nb_api.get_security_groups,
                    self.security_group_updated,
                    self.security_group_deleted),
                df_db_objects_refresh.DfObjectRefresher(
                    'Ports',
                    self.db_store.get_port_keys,
                    self.nb_api.get_all_logical_ports,
                    self.logical_port_updated,
                    self.logical_port_deleted),
                df_db_objects_refresh.DfObjectRefresher(
                    'Routers',
                    self.db_store.get_router_keys,
                    self.nb_api.get_routers,
                    self.router_updated,
                    self.router_deleted),
                df_db_objects_refresh.DfObjectRefresher(
                    'Floating IPs',
                    self.db_store.get_floatingip_keys,
                    self.nb_api.get_floatingips,
                    self.floatingip_updated,
                    self.floatingip_deleted),
            ]

            # Refresh all the objects and find which ones should be removed
            for item in items:
                item.read()
                item.update()

            # Remove obsolete objects in reverse order
            for item in reversed(items):
                item.delete()

        self.sync_finished = True
    except Exception as e:
        self.sync_finished = False
        LOG.warning(_LW("run_db_poll - suppressing exception"))
        LOG.exception(e)
def _logical_port_process(self, lport, original_lport=None):
    if lport.get_chassis() is None or (
            lport.get_chassis() == constants.DRAGONFLOW_VIRTUAL_PORT):
        return
    chassis_to_ofport, lport_to_ofport = (
        self.vswitch_api.get_local_ports_to_ofport_mapping())
    network = self.get_network_id(lport.get_lswitch_id())
    lport.set_external_value('local_network_id', network)

    if lport.get_chassis() == self.chassis_name:
        lport.set_external_value('is_local', True)
        ofport = lport_to_ofport.get(lport.get_id(), 0)
        if ofport != 0:
            lport.set_external_value('ofport', ofport)
            if original_lport is None:
                LOG.info(_LI("Adding new local logical port = %s") %
                         str(lport))
                self.open_flow_app.notify_add_local_port(lport)
            else:
                LOG.info(_LI("Updating local logical port = %(port)s, "
                             "original port = %(original_port)s") %
                         {'port': str(lport),
                          'original_port': str(original_lport)})
                self.open_flow_app.notify_update_local_port(lport,
                                                            original_lport)
        else:
            LOG.info(_LI("Local logical port %s was not created yet") %
                     str(lport))
        self.db_store.set_port(lport.get_id(), lport, True)
    else:
        lport.set_external_value('is_local', False)
        ofport = chassis_to_ofport.get(lport.get_chassis(), 0)
        if ofport != 0:
            lport.set_external_value('ofport', ofport)
            if original_lport is None:
                LOG.info(_LI("Adding new remote logical port = %s") %
                         str(lport))
                self.open_flow_app.notify_add_remote_port(lport)
            else:
                LOG.info(_LI("Updating remote logical port = %(port)s, "
                             "original port = %(original_port)s") %
                         {'port': str(lport),
                          'original_port': str(original_lport)})
                self.open_flow_app.notify_update_remote_port(
                    lport, original_lport)
        else:
            # TODO(gampel) add handling for this use case
            # remote port but no tunnel to remote Host
            # if this should never happen raise an exception
            LOG.warning(_LW("No tunnel for remote logical port %s") %
                        str(lport))
        self.db_store.set_port(lport.get_id(), lport, False)
def _execute_cmd(self, oper, local_key, value=None):
    if not self._is_oper_valid(oper):
        LOG.warning(_LW("invalid oper: %(oper)s") % {'oper': oper})
        return None

    ip_port = self.redis_mgt.get_ip_by_key(local_key)
    client = self._get_client(local_key)
    if client is None:
        return None

    arg = self._gen_args(local_key, value)

    ttl = self.RequestRetryTimes
    asking = False
    while ttl > 0:
        ttl -= 1
        try:
            if asking:
                client.execute_command('ASKING')
                asking = False
            return client.execute_command(oper, *arg)
        except ConnectionError as e:
            self._handle_db_conn_error(ip_port, local_key)
            LOG.exception(_LE("connection error while sending "
                              "request to db: %(e)s") % {'e': e})
            raise e
        except ResponseError as e:
            resp = str(e).split(' ')
            if 'ASK' in resp[0]:
                # one-time flag to force a node to serve a query about an
                # IMPORTING slot
                asking = True

            if 'ASK' in resp[0] or 'MOVE' in resp[0]:
                # MOVED/ASK XXX X.X.X.X:X
                # do redirection
                client = self._get_client(host=resp[2])
                if client is None:
                    # maybe there is a fast failover
                    self._handle_db_conn_error(ip_port, local_key)
                    LOG.exception(_LE("no client available: "
                                      "%(ip_port)s, %(e)s") %
                                  {'ip_port': resp[2], 'e': e})
                    raise e
            else:
                LOG.exception(_LE("error not handled: %(e)s") % {'e': e})
                raise e
        except Exception as e:
            self._handle_db_conn_error(ip_port, local_key)
            LOG.exception(_LE("exception while sending request to "
                              "db: %(e)s") % {'e': e})
            raise e
def router_deleted(self, lrouter_id):
    router = self.db_store.get_router(lrouter_id)
    if router is None:
        LOG.warning(_LW("Try to delete a nonexistent router(%s)"),
                    lrouter_id)
        return
    LOG.info(_LI("Removing router = %s"), lrouter_id)
    self.open_flow_app.notify_delete_router(router)
    self.db_store.delete_router(lrouter_id)
def find_first_network(nclient, params):
    networks = nclient.list_networks(**params)['networks']
    networks_count = len(networks)
    if networks_count == 0:
        return None
    if networks_count > 1:
        message = _LW("More than one network (%(count)d) found matching: "
                      "%(args)s")
        LOG.warning(message % {'args': params, 'count': networks_count})
    return networks[0]
def logical_switch_deleted(self, lswitch_id):
    lswitch = self.db_store.get_lswitch(lswitch_id)
    LOG.info(_LI("Removing Logical Switch = %s") % lswitch_id)
    if lswitch is None:
        LOG.warning(_LW("Try to delete a nonexistent lswitch(%s)") %
                    lswitch_id)
        return
    self.open_flow_app.notify_remove_logical_switch(lswitch)
    self.db_store.del_lswitch(lswitch_id)
    self.db_store.del_network_id(lswitch_id)
def _find_first_network(self, **kwargs):
    networks = self.neutron.list_networks(**kwargs)['networks']
    networks_count = len(networks)
    if networks_count == 0:
        return None
    if networks_count > 1:
        message = _LW("More than one network (%(count)d) found matching: "
                      "%(args)s")
        LOG.warning(message % {'args': kwargs, 'count': networks_count})
    return networks[0]
def _create_db_version_row(session, obj_id):
    try:
        row = models.DFVersionObjects(object_uuid=obj_id, version=0)
        session.add(row)
        session.flush()
        return 0
    except db_exc.DBDuplicateEntry:
        LOG.warning(_LW('DuplicateEntry in Neutron DB when '
                        'creating version for object_id: %(id)s'),
                    {'id': obj_id})
        return 0
def _check_nodes_change(self, old_nodes, new_nodes):
    changed = RET_CODE.NOT_CHANGE
    if len(old_nodes) < len(new_nodes):
        changed = RET_CODE.NODES_CHANGE
    elif len(old_nodes) == len(new_nodes):
        if 0 == len(old_nodes) and 0 == len(new_nodes):
            return changed
        cnt = 0
        master_cnt = 0
        slave_cnt = 0
        slot_changed = False
        for host, info in six.iteritems(old_nodes):
            for new_host, new_info in six.iteritems(new_nodes):
                if host == new_host and info['role'] == new_info['role']:
                    if info['slots'] != new_info['slots']:
                        # scale-up reshard
                        slot_changed = True
                    cnt += 1
                    if new_info['role'] == 'master':
                        master_cnt += 1
                    else:
                        slave_cnt += 1
                    break
        if master_cnt != slave_cnt:
            # this means a tmp status
            # one master one slave
            changed = RET_CODE.NODES_CHANGE
            LOG.info(_LI("master nodes not equals to slave nodes"))
        else:
            if cnt != len(old_nodes):
                changed = RET_CODE.NODES_CHANGE
            elif slot_changed:
                changed = RET_CODE.SLOTS_CHANGE
    else:
        # This scenario can be considered as an exception and
        # should be recovered manually. Assumed that no scale-down in
        # cluster.
        # Do not have to notify changes.
        LOG.warning(_LW("redis cluster nodes less than local, "
                        "maybe there is a partition in db "
                        "cluster, nodes:%(new)s, "
                        "local nodes:%(local)s") %
                    {'new': new_nodes, 'local': old_nodes})
    return changed
def run(self):
    cache = {}
    while True:
        try:
            eventlet.sleep(self._polling_time)
            cache = self._update_cache(cache)
        except Exception as e:
            LOG.warning(_LW("Error when polling table {}: {} {}").format(
                self._table_name,
                repr(e),
                sys.exc_info()[2],
            ))
def run(self):
    if self.multiproc_subscriber:
        self.multiproc_subscriber.daemonize()
    # TODO(oanson) TableMonitor daemonize will go here
    while True:
        try:
            event = self._queue.get()
            self.publisher.send_event(event)
            eventlet.sleep(0)
        except Exception as e:
            LOG.warning(_LW("Exception in main loop: {}, {}").format(
                e, traceback.format_exc()))
def _update_db_version_row(session, obj_id):
    try:
        row = session.query(models.DFVersionObjects).filter_by(
            object_uuid=obj_id).one()
        new_version = row.version + 1
        if new_version == sys.maxsize:
            new_version = 0
        row.version = new_version
        session.merge(row)
        session.flush()
        return new_version
    except orm_exc.NoResultFound:
        LOG.warning(_LW('NoResultFound in Neutron DB when '
                        'updating version for object_id: %(id)s'),
                    {'id': obj_id})
        return _create_db_version_row(session, obj_id)
def packet_in_handler(self, event): msg = event.msg pkt = ryu_packet.Packet(msg.data) is_pkt_ipv4 = pkt.get_protocol(ipv4.ipv4) is not None if is_pkt_ipv4: pkt_ip = pkt.get_protocol(ipv4.ipv4) else: LOG.error(_LE("No support for non IPv4 protocol")) return if pkt_ip is None: LOG.error(_LE("Received None IP Packet")) return port_tunnel_key = msg.match.get('metadata') if port_tunnel_key not in self.local_tunnel_to_pid_map: LOG.error( _LE("No lport found for tunnel_id %s for dhcp req"), port_tunnel_key) return (port_rate_limiter, ofport_num, lport_id) = self.local_tunnel_to_pid_map[port_tunnel_key] if port_rate_limiter(): self._block_port_dhcp_traffic( ofport_num, self.block_hard_timeout) LOG.warning(_LW("pass rate limit for %(port_id)s blocking DHCP" " traffic for %(time)s sec") % {'port_id': lport_id, 'time': self.block_hard_timeout}) return lport = self.db_store.get_port(lport_id) if lport is None: LOG.error( _LE("No lport found for tunnel_id %s for dhcp req"), port_tunnel_key) return try: self._handle_dhcp_request(msg, pkt, lport) except Exception as exception: LOG.exception(_LE( "Unable to handle packet %(msg)s: %(e)s") % {'msg': msg, 'e': exception} )
def logical_port_updated(self, lport):
    if self.db_store.get_port(lport.get_id()) is not None:
        # TODO(gsagie) support updating port
        return
    if lport.get_chassis() is None or (
            lport.get_chassis() == constants.DRAGONFLOW_VIRTUAL_PORT):
        return
    chassis_to_ofport, lport_to_ofport = (
        self.vswitch_api.get_local_ports_to_ofport_mapping())
    network = self.get_network_id(lport.get_lswitch_id(),
                                  lport.get_topic())
    lport.set_external_value('local_network_id', network)

    if lport.get_chassis() == self.chassis_name:
        ofport = lport_to_ofport.get(lport.get_id(), 0)
        self.db_store.set_port(lport.get_id(), lport, True)
        if ofport != 0:
            lport.set_external_value('ofport', ofport)
            lport.set_external_value('is_local', True)
            LOG.info(_LI("Adding new local Logical Port = %s") %
                     lport.__str__())
            self.open_flow_app.notify_add_local_port(lport)
            self.db_store.set_port(lport.get_id(), lport, True)
        else:
            LOG.info(_LI("Logical Local Port %s was not created yet ") %
                     lport.__str__())
    else:
        ofport = chassis_to_ofport.get(lport.get_chassis(), 0)
        self.db_store.set_port(lport.get_id(), lport, False)
        if ofport != 0:
            lport.set_external_value('ofport', ofport)
            lport.set_external_value('is_local', False)
            LOG.info(_LI("Adding new remote Logical Port = %s") %
                     lport.__str__())
            self.open_flow_app.notify_add_remote_port(lport)
            self.db_store.set_port(lport.get_id(), lport, False)
        else:
            # TODO(gampel) add handling for this use case
            # remote port but no tunnel to remote Host
            # if this should never happen raise an exception
            LOG.warning(_LW("No tunnel for Logical Remote Port %s ") %
                        lport.__str__())
def add_local_port(self, lport):
    network_id = lport.get_external_value('local_network_id')
    if self.get_datapath() is None:
        return

    if not netaddr.valid_ipv4(lport.get_ip()):
        LOG.warning(_LW("No support for non IPv4 protocol"))
        return

    lport_id = lport.get_id()
    tunnel_key = lport.get_tunnel_key()
    ofport = lport.get_external_value('ofport')
    port_rate_limiter = df_utils.RateLimiter(
        max_rate=self.conf.df_dhcp_max_rate_per_sec,
        time_unit=1)
    self.local_tunnel_to_pid_map[tunnel_key] = (port_rate_limiter,
                                                ofport,
                                                lport_id)

    if not self._is_dhcp_enabled_on_network(lport, network_id):
        return

    if not self._is_port_a_vm(lport):
        return

    LOG.info(_LI("Register VM as DHCP client::port <%s>") %
             lport.get_id())

    ofport = lport.get_external_value('ofport')
    parser = self.get_datapath().ofproto_parser
    ofproto = self.get_datapath().ofproto
    match = parser.OFPMatch()
    match.set_in_port(ofport)
    actions = []
    actions.append(parser.OFPActionSetField(metadata=tunnel_key))
    actions.append(parser.OFPActionOutput(ofproto.OFPP_CONTROLLER,
                                          ofproto.OFPCML_NO_BUFFER))
    inst = [self.get_datapath().ofproto_parser.OFPInstructionActions(
        ofproto.OFPIT_APPLY_ACTIONS, actions)]
    self.mod_flow(
        self.get_datapath(),
        inst=inst,
        table_id=const.DHCP_TABLE,
        priority=const.PRIORITY_MEDIUM,
        match=match)
def add_local_port(self, lport):
    if self.get_datapath() is None:
        LOG.error(_LE("datapath is none"))
        return

    secgroups = lport.get_security_groups()
    if not secgroups:
        return

    if not netaddr.valid_ipv4(lport.get_ip()):
        LOG.warning(_LW("No support for non IPv4 protocol"))
        return

    for secgroup_id in secgroups:
        self._add_local_port_associating(lport, secgroup_id)

    # install ct table
    self._install_connection_track_flows(lport)
def _get_rule_flows_match_except_net_addresses(self, secgroup_rule):
    """Create the match object for the security group rule given in
    secgroup_rule (type SecurityGroupRule).
    """
    result_base = {}
    ethertype = secgroup_rule.get_ethertype()
    if ethertype == n_const.IPv4:
        result_base['eth_type'] = ether.ETH_TYPE_IP
    elif ethertype == n_const.IPv6:
        LOG.warning(
            _LW("IPv6 in security group rules is not yet supported"))
        result_base['eth_type'] = ether.ETH_TYPE_IPV6
        return [result_base]
    protocol_name = secgroup_rule.get_protocol()
    if not protocol_name:
        return [result_base]
    protocol = self._protocol_number_by_name(protocol_name)
    result_base["ip_proto"] = protocol
    port_range_min = secgroup_rule.get_port_range_min()
    port_range_max = secgroup_rule.get_port_range_max()
    if protocol == n_const.PROTO_NUM_ICMP:
        if port_range_min:
            result_base['icmpv4_type'] = int(port_range_min)
        if port_range_max:
            result_base['icmpv4_code'] = int(port_range_max)
        results = [result_base]
    elif ((not port_range_min and not port_range_max) or
          (int(port_range_min) == const.MIN_PORT and
           int(port_range_max) == const.MAX_PORT)):
        results = [result_base]
    else:
        port_range = netaddr.IPRange(port_range_min, port_range_max)
        key = DEST_FIELD_NAME_BY_PROTOCOL_NUMBER[protocol]
        results = []
        for cidr in port_range.cidrs():
            result = result_base.copy()
            port_num = int(cidr.network) & 0xffff
            mask = int(cidr.netmask) & 0xffff
            result[key] = (port_num, mask)
            results.append(result)
    return results
def remove_local_port(self, lport):
    if not netaddr.valid_ipv4(lport.get_ip()):
        LOG.warning(_LW("No support for non IPv4 protocol"))
        return

    tunnel_key = lport.get_tunnel_key()
    if tunnel_key in self.local_tunnel_to_pid_map:
        self.local_tunnel_to_pid_map.pop(tunnel_key, None)

    # Remove ingress classifier for port
    ofport = lport.get_external_value('ofport')
    parser = self.get_datapath().ofproto_parser
    ofproto = self.get_datapath().ofproto
    match = parser.OFPMatch()
    match.set_in_port(ofport)
    self.mod_flow(
        datapath=self.get_datapath(),
        table_id=const.DHCP_TABLE,
        command=ofproto.OFPFC_DELETE,
        priority=const.PRIORITY_MEDIUM,
        match=match)
def run(self):
    if self.multiproc_subscriber:
        self.multiproc_subscriber.daemonize()
    self.db.initialize(
        db_ip=cfg.CONF.df.remote_db_ip,
        db_port=cfg.CONF.df.remote_db_port,
        config=cfg.CONF.df)
    self._register_as_publisher()
    self._start_db_table_monitors()
    while True:
        try:
            event = self._queue.get()
            self.publisher.send_event(event)
            if event.table != pub_sub_api.PUBLISHER_TABLE:
                self._update_timestamp_in_db()
            eventlet.sleep(0)
        except Exception as e:
            LOG.warning(_LW("Exception in main loop: {}, {}").format(
                e, traceback.format_exc()))
def _vm_port_updated(self, ovs_port): lport_id = ovs_port.get_iface_id() lport = self._get_lport(lport_id) if lport is None: LOG.warning(_LW("No logical port found for ovs port: %s") % str(ovs_port)) return topic = lport.get_topic() self._add_to_topic_subscribed(topic, lport_id) # update lport, notify apps ovs_port_id = ovs_port.get_id() self.ovs_to_lport_mapping[ovs_port_id] = {'lport_id': lport_id, 'topic': topic} LOG.info(_LI("A local logical port(%s) is online") % str(lport)) try: self.controller.logical_port_updated(lport) except Exception: LOG.exception(_LE('Failed to process logical port online ' 'event: %s') % str(lport))
def _vm_port_updated(self, ovs_port): lport_id = ovs_port.get_iface_id() lport = self._get_lport(lport_id) if lport is None: LOG.warning(_LW("No lport found for ovs port %s ") % str(ovs_port)) return topic = lport.get_topic() self._add_to_topic_subscribed(topic, lport_id) # update lport, notify apps ovs_port_id = ovs_port.get_id() self.ovs_to_lport_mapping[ovs_port_id] = {'lport_id': lport_id, 'topic': topic} LOG.info(_LI("Adding new local Logical Port = %s") % str(lport)) try: self.controller.logical_port_updated(lport) except Exception: LOG.exception(_LE('App failed to process vm port online event %s') % str(lport)) finally: self.db_store.set_port(lport.get_id(), lport, True)
def apply_db_change(self, table, key, action, value):
    # determine if the action is allowed or not
    if action not in DB_ACTION_LIST:
        LOG.warning(_LW('Unknown action %(action)s for table '
                        '%(table)s'),
                    {'action': action, 'table': table})
        return

    if action == 'sync':
        self.controller.run_sync()
        return

    if 'secgroup' == table:
        if action == 'set' or action == 'create':
            secgroup = SecurityGroup(value)
            self.controller.security_group_updated(secgroup)
        elif action == 'delete':
            secgroup_id = key
            self.controller.security_group_deleted(secgroup_id)
    elif 'lport' == table:
        if action == 'set' or action == 'create':
            lport = LogicalPort(value)
            self.controller.logical_port_updated(lport)
        elif action == 'delete':
            lport_id = key
            self.controller.logical_port_deleted(lport_id)
    elif 'lrouter' == table:
        if action == 'set' or action == 'create':
            lrouter = LogicalRouter(value)
            self.controller.router_updated(lrouter)
        elif action == 'delete':
            lrouter_id = key
            self.controller.router_deleted(lrouter_id)
    elif 'chassis' == table:
        if action == 'set' or action == 'create':
            chassis = Chassis(value)
            self.controller.chassis_created(chassis)
        elif action == 'delete':
            chassis_id = key
            self.controller.chassis_deleted(chassis_id)
    elif 'lswitch' == table:
        if action == 'set' or action == 'create':
            lswitch = LogicalSwitch(value)
            self.controller.logical_switch_updated(lswitch)
        elif action == 'delete':
            lswitch_id = key
            self.controller.logical_switch_deleted(lswitch_id)
    elif 'floatingip' == table:
        if action == 'set' or action == 'create':
            floatingip = Floatingip(value)
            self.controller.floatingip_updated(floatingip)
        elif action == 'delete':
            floatingip_id = key
            self.controller.floatingip_deleted(floatingip_id)
    elif pub_sub_api.PUBLISHER_TABLE == table:
        if action == 'set' or action == 'create':
            publisher = Publisher(value)
            self.controller.publisher_updated(publisher)
        elif action == 'delete':
            self.controller.publisher_deleted(key)
    elif 'ovsinterface' == table:
        if action == 'set' or action == 'create':
            ovs_port = OvsPort(value)
            self.controller.ovs_port_updated(ovs_port)
        elif action == 'sync_finished':
            self.controller.ovs_sync_finished()
        elif action == 'sync_started':
            self.controller.ovs_sync_started()
        elif action == 'delete':
            ovs_port_id = key
            self.controller.ovs_port_deleted(ovs_port_id)
    elif 'log' == action:
        message = _LI('Log event (Info): '
                      'table: %(table)s '
                      'key: %(key)s '
                      'action: %(action)s '
                      'value: %(value)s')
        LOG.info(message, {'table': str(table),
                           'key': str(key),
                           'action': str(action),
                           'value': str(value)})
    else:
        LOG.warning(_LW('Unknown table %s'), table)
def _logical_port_process(self, lport, original_lport=None):
    chassis = lport.get_chassis()
    local_network_id = self.get_network_id(lport.get_lswitch_id())
    lswitch = self.db_store.get_lswitch(lport.get_lswitch_id())
    if lswitch is not None:
        network_type = lswitch.get_network_type()
        segment_id = lswitch.get_segment_id()
        physical_network = lswitch.get_physical_network()

        lport.set_external_value('network_type', network_type)
        if segment_id is not None:
            lport.set_external_value('segmentation_id', int(segment_id))
        if physical_network:
            lport.set_external_value('physical_network', physical_network)

    lport.set_external_value('local_network_id', local_network_id)

    if chassis == self.chassis_name:
        lport.set_external_value('is_local', True)
        ofport = self.vswitch_api.get_port_ofport_by_id(lport.get_id())
        if ofport:
            lport.set_external_value('ofport', ofport)
            self.db_store.set_port(lport.get_id(), lport, True)
            if original_lport is None:
                LOG.info(_LI("Adding new local logical port = %s") %
                         str(lport))
                self.open_flow_app.notify_add_local_port(lport)
            else:
                LOG.info(_LI("Updating local logical port = %(port)s, "
                             "original port = %(original_port)s") %
                         {'port': str(lport),
                          'original_port': str(original_lport)})
                self.open_flow_app.notify_update_local_port(
                    lport, original_lport)
        else:
            LOG.info(_LI("Local logical port %s was not created yet") %
                     str(lport))
    else:
        lport.set_external_value('is_local', False)
        ofport = self.vswitch_api.get_chassis_ofport(chassis)
        if ofport:
            lport.set_external_value('ofport', ofport)
            self.db_store.set_port(lport.get_id(), lport, False)
            if original_lport is None:
                LOG.info(_LI("Adding new remote logical port = %s") %
                         str(lport))
                self.open_flow_app.notify_add_remote_port(lport)
            else:
                LOG.info(_LI("Updating remote logical port = %(port)s, "
                             "original port = %(original_port)s") %
                         {'port': str(lport),
                          'original_port': str(original_lport)})
                self.open_flow_app.notify_update_remote_port(
                    lport, original_lport)
        else:
            # TODO(gampel) add handling for this use case
            # remote port but no tunnel to remote Host
            # if this should never happen raise an exception
            LOG.warning(_LW("No tunnel for remote logical port %s") %
                        str(lport))
def register_hamsg_for_db(self):
    if self.redis_mgt is not None:
        self.redis_mgt.register_ha_topic()
    else:
        LOG.warning(_LW("redis mgt is none"))
def run(self):
    while True:
        eventlet.sleep(0)
        try:
            if self.pub_sub is not None:
                for data in self.pub_sub.listen():
                    if 'subscribe' == data['type']:
                        continue
                    elif 'unsubscribe' == data['type']:
                        continue
                    elif 'message' == data['type']:
                        message = pub_sub_api.unpack_message(data['data'])
                        if message['table'] != 'ha':
                            self.db_changes_callback(
                                message['table'],
                                message['key'],
                                message['action'],
                                message['value'],
                                message['topic'])
                        else:
                            # redis ha message
                            value = jsonutils.loads(message['value'])
                            self.redis_mgt.redis_failover_callback(value)
                    else:
                        LOG.warning(_LW("receive unknown message in "
                                        "subscriber %(type)s"),
                                    {'type': data['type']})
            else:
                LOG.warning(_LW("pubsub lost connection %(ip)s:"
                                "%(port)s"),
                            {'ip': self.ip,
                             'port': self.plugin_updates_port})
                eventlet.sleep(1)
        except Exception as e:
            LOG.warning(_LW("subscriber listening task lost "
                            "connection "
                            "%(e)s"), {'e': e})
            try:
                connection = self.pub_sub.connection
                connection.connect()
                self.pub_sub.on_connect(connection)
                # self.db_changes_callback(None, None, 'sync', None, None)
                # notify restart
                self.db_changes_callback(None, None, 'dbrestart', False,
                                         None)
            except Exception:
                self.redis_mgt.remove_node_from_master_list(self.remote)
                self._update_client()
                # if pubsub not none notify restart
                if self.remote is not None:
                    # to re-subscribe
                    self.register_hamsg_for_db()
                    self.db_changes_callback(None, None, 'dbrestart', True,
                                             None)
                else:
                    LOG.warning(_LW("there is no more db node "
                                    "available"))
                LOG.exception(_LE("reconnect error %(ip)s:%(port)s"),
                              {'ip': self.ip,
                               'port': self.plugin_updates_port})