def get_key(self, table, key, topic=None):
    if topic is None:
        local_key = self.uuid_to_key(table, key, '*')
        try:
            for client in six.itervalues(self.clients):
                local_keys = client.keys(local_key)
                if len(local_keys) == 1:
                    return client.get(local_keys[0])
        except Exception as e:
            LOG.exception(_LE("exception %(key)s: %(e)s")
                          % {'key': local_key, 'e': e})
            raise df_exceptions.DBKeyNotFound(key=local_key)
    else:
        local_key = self.uuid_to_key(table, key, topic)
        try:
            client = self._get_client(local_key)
            # return nil if not found
            return client.get(local_key)
        except Exception as e:
            LOG.exception(_LE("exception %(key)s: %(e)s")
                          % {'key': local_key, 'e': e})
            raise df_exceptions.DBKeyNotFound(key=local_key)
def send_event(self, update, topic=None):
    if topic:
        update.topic = topic
    local_topic = update.topic
    event_json = jsonutils.dumps(update.to_dict())
    local_topic = local_topic.encode('utf8')
    data = pub_sub_api.pack_message(event_json)
    try:
        if self.client is not None:
            self.client.publish(local_topic, data)
    except Exception as e:
        LOG.exception(_LE("publish connection got exception "
                          "%(e)s") % {'e': e})
        self.redis_mgt.remove_node_from_master_list(self.remote)
        self._update_client()
        try:
            if self.client is not None:
                self.client.publish(local_topic, data)
        except Exception:
            self.redis_mgt.remove_node_from_master_list(self.remote)
            self._update_client()
            LOG.exception(_LE("publish error remote:%(remote)s")
                          % {'remote': self.remote})
def ovs_port_deleted(self, ovs_port_id):
    """Process an OVS port offline event.

    Changes in OVS port status are monitored by the ovsdb monitor
    thread and notified to the topology. This method is the entry
    point for processing a port offline event.

    @param ovs_port_id:
    @return: None
    """
    ovs_port = self.ovs_ports.get(ovs_port_id)
    if ovs_port is None:
        return
    port_type = ovs_port.get_type()
    if port_type not in self.ovs_port_type:
        LOG.error(_LE("Unknown port offline: %s") % str(ovs_port))
        return

    handler_name = '_' + port_type + '_port_deleted'
    try:
        handler = getattr(self, handler_name, None)
        if handler is not None:
            handler(ovs_port)
        else:
            LOG.info(_LI("%s is None.") % handler_name)
    except Exception:
        LOG.exception(_LE("Exception occurred when handling "
                          "ovs port delete event"))
    finally:
        del self.ovs_ports[ovs_port_id]
def get_all_entries(self, table, topic=None):
    res = []
    ip_port = None
    if topic is None:
        local_key = self.uuid_to_key(table, '*', '*')
        try:
            for host, client in six.iteritems(self.clients):
                local_keys = client.keys(local_key)
                if len(local_keys) > 0:
                    for tmp_key in local_keys:
                        res.append(self._execute_cmd("GET", tmp_key))
            return res
        except Exception:
            LOG.exception(_LE("exception when get_all_entries: "
                              "%(key)s ") % {'key': local_key})
    else:
        local_key = self.uuid_to_key(table, '*', topic)
        try:
            ip_port = self.redis_mgt.get_ip_by_key(local_key)
            client = self._get_client(local_key)
            if client is None:
                return res
            local_keys = client.keys(local_key)
            if len(local_keys) > 0:
                res.extend(client.mget(local_keys))
            return res
        except Exception as e:
            self._handle_db_conn_error(ip_port, local_key)
            LOG.exception(_LE("exception when mget: %(key)s, %(e)s")
                          % {'key': local_key, 'e': e})
def get_all_keys(self, table, topic=None):
    if topic is None:
        res = []
        local_key = self.uuid_to_key(table, '*', '*')
        try:
            for client in six.itervalues(self.clients):
                res.extend(client.keys(local_key))
            return [self._strip_table_name_from_key(key) for key in res]
        except Exception as e:
            LOG.exception(_LE("exception %(key)s: %(e)s")
                          % {'key': local_key, 'e': e})
            raise df_exceptions.DBKeyNotFound(key=local_key)
    else:
        local_key = self.uuid_to_key(table, '*', topic)
        try:
            client = self._get_client(local_key)
            res = client.keys(local_key)
            return [self._strip_table_name_from_key(key) for key in res]
        except Exception as e:
            LOG.exception(_LE("exception %(key)s: %(e)s")
                          % {'key': local_key, 'e': e})
            raise df_exceptions.DBKeyNotFound(key=local_key)
def get_all_entries(self, table, topic=None):
    res = []
    if topic is None:
        local_key = self.uuid_to_key(table, '*', '*')
        try:
            for client in six.itervalues(self.clients):
                local_keys = client.keys(local_key)
                if len(local_keys) > 0:
                    for tmp_key in local_keys:
                        res.append(client.get(tmp_key))
            return res
        except Exception as e:
            LOG.exception(_LE("exception %(key)s: %(e)s")
                          % {'key': local_key, 'e': e})
            raise df_exceptions.DBKeyNotFound(key=local_key)
    else:
        local_key = self.uuid_to_key(table, '*', topic)
        try:
            client = self._get_client(local_key)
            local_keys = client.keys(local_key)
            if len(local_keys) > 0:
                res.extend(client.mget(local_keys))
            return res
        except Exception as e:
            LOG.exception(_LE("exception %(key)s: %(e)s")
                          % {'key': local_key, 'e': e})
            raise df_exceptions.DBKeyNotFound(key=local_key)
def get_all_keys(self, table, topic=None):
    res = []
    ip_port = None
    if topic is None:
        local_key = self.uuid_to_key(table, '*', '*')
        try:
            for host, client in six.iteritems(self.clients):
                ip_port = host
                res.extend(client.keys(local_key))
            return [self._strip_table_name_from_key(key) for key in res]
        except Exception as e:
            self._handle_db_conn_error(ip_port, local_key)
            LOG.exception(_LE("exception when get_all_keys: "
                              "%(key)s, %(e)s")
                          % {'key': local_key, 'e': e})
    else:
        local_key = self.uuid_to_key(table, '*', topic)
        try:
            ip_port = self.redis_mgt.get_ip_by_key(local_key)
            client = self._get_client(local_key)
            if client is None:
                return res
            res = client.keys(local_key)
            return [self._strip_table_name_from_key(key) for key in res]
        except Exception as e:
            self._handle_db_conn_error(ip_port, local_key)
            LOG.exception(_LE("exception when get_all_keys: "
                              "%(key)s, %(e)s")
                          % {'key': local_key, 'e': e})
def _execute_cmd(self, oper, local_key, value=None):
    if not self._is_oper_valid(oper):
        LOG.warning(_LW("invalid oper: %(oper)s") % {'oper': oper})
        return None

    ip_port = self.redis_mgt.get_ip_by_key(local_key)
    client = self._get_client(local_key)
    if client is None:
        return None

    arg = self._gen_args(local_key, value)
    ttl = self.RequestRetryTimes
    asking = False
    while ttl > 0:
        ttl -= 1
        try:
            if asking:
                client.execute_command('ASKING')
                asking = False
            return client.execute_command(oper, *arg)
        except ConnectionError as e:
            self._handle_db_conn_error(ip_port, local_key)
            LOG.exception(_LE("connection error while sending "
                              "request to db: %(e)s") % {'e': e})
            raise e
        except ResponseError as e:
            resp = str(e).split(' ')
            if 'ASK' in resp[0]:
                # one-time flag to force a node to serve a query about an
                # IMPORTING slot
                asking = True
            if 'ASK' in resp[0] or 'MOVE' in resp[0]:
                # MOVED/ASK XXX X.X.X.X:X
                # do redirection
                client = self._get_client(host=resp[2])
                if client is None:
                    # maybe there is a fast failover
                    self._handle_db_conn_error(ip_port, local_key)
                    LOG.exception(_LE("no client available: "
                                      "%(ip_port)s, %(e)s")
                                  % {'ip_port': resp[2], 'e': e})
                    raise e
            else:
                LOG.exception(_LE("error not handled: %(e)s")
                              % {'e': e})
                raise e
        except Exception as e:
            self._handle_db_conn_error(ip_port, local_key)
            LOG.exception(_LE("exception while sending request to "
                              "db: %(e)s") % {'e': e})
            raise e
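# A minimal illustration (not part of the driver) of the redirection
# parsing in _execute_cmd above: when a command hits the wrong node, a
# Redis cluster node answers with "MOVED <slot> <host:port>" (permanent
# slot migration) or "ASK <slot> <host:port>" (one-off redirect during
# resharding), so splitting the error text on spaces puts the owning
# node's address at index 2.
resp = "MOVED 3999 127.0.0.1:6381".split(' ')
assert resp[0] == 'MOVED'
assert resp[2] == '127.0.0.1:6381'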
def packet_in_handler(self, event):
    msg = event.msg
    pkt = ryu_packet.Packet(msg.data)
    is_pkt_ipv4 = pkt.get_protocol(ipv4.ipv4) is not None

    if is_pkt_ipv4:
        pkt_ip = pkt.get_protocol(ipv4.ipv4)
    else:
        LOG.error(_LE("No support for non IPv4 protocol"))
        return

    if pkt_ip is None:
        LOG.error(_LE("Received None IP Packet"))
        return

    port_tunnel_key = msg.match.get('metadata')
    if port_tunnel_key not in self.local_tunnel_to_pid_map:
        LOG.error(_LE("No lport found for tunnel_id %s for dhcp req"),
                  port_tunnel_key)
        return

    (port_rate_limiter,
     ofport_num,
     lport_id) = self.local_tunnel_to_pid_map[port_tunnel_key]
    if port_rate_limiter():
        self._block_port_dhcp_traffic(ofport_num,
                                      self.block_hard_timeout)
        LOG.warning(_LW("pass rate limit for %(port_id)s blocking DHCP"
                        " traffic for %(time)s sec")
                    % {'port_id': lport_id,
                       'time': self.block_hard_timeout})
        return

    lport = self.db_store.get_port(lport_id)
    if lport is None:
        LOG.error(_LE("No lport found for tunnel_id %s for dhcp req"),
                  port_tunnel_key)
        return

    try:
        self._handle_dhcp_request(msg, pkt, lport)
    except Exception as exception:
        LOG.exception(_LE("Unable to handle packet %(msg)s: %(e)s")
                      % {'msg': msg, 'e': exception})
def pack_message(message):
    data = None
    try:
        data = msgpack.packb(message, encoding='utf-8')
    except Exception:
        LOG.exception(_LE("Error in pack_message: "))
    return data
def run(self):
    while True:
        eventlet.sleep(0)
        try:
            for data in self.pub_sub.listen():
                if 'subscribe' == data['type']:
                    continue
                if 'unsubscribe' == data['type']:
                    continue
                if 'message' == data['type']:
                    entry = pub_sub_api.unpack_message(data['data'])
                    entry_json = jsonutils.loads(entry)
                    self.db_changes_callback(entry_json['table'],
                                             entry_json['key'],
                                             entry_json['action'],
                                             entry_json['value'],
                                             entry_json['topic'])
        except Exception as e:
            LOG.warning(e)
            try:
                connection = self.pub_sub.connection
                connection.connect()
                self.db_changes_callback(None, None, 'sync', None, None)
            except Exception as e:
                LOG.exception(_LE("reconnect error %(ip)s:%(port)s")
                              % {'ip': self.ip,
                                 'port': self.plugin_updates_port})
def wrapper(*args, **kwargs):
    next_interval = self.retry_interval
    remaining = self.max_retries

    while True:
        try:
            return f(*args, **kwargs)
        except Exception as e:
            with excutils.save_and_reraise_exception() as ectxt:
                if remaining > 0:
                    ectxt.reraise = not self._is_exception_expected(e)
                else:
                    LOG.exception(_LE('Function exceeded '
                                      'retry limit.'))
            LOG.debug("Performing retry for function %s",
                      reflection.get_callable_name(f))
            # NOTE(vsergeyev): We are using patched time module, so
            #                  this effectively yields the execution
            #                  context to another green thread.
            time.sleep(next_interval)
            if self.inc_retry_interval:
                next_interval = min(next_interval * 2,
                                    self.max_retry_interval)
            remaining -= 1
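# Hedged usage sketch for the retry decorator whose inner wrapper is
# shown above. The outer name `wrap_db_retry` and its keyword arguments
# are assumptions modeled on oslo.db's helper of the same shape; the
# wrapped function is purely illustrative.
@wrap_db_retry(max_retries=5, retry_interval=1,
               inc_retry_interval=True, max_retry_interval=10)
def create_lport(context, lport):
    # Any expected (e.g. deadlock) exception raised here is retried
    # with an exponentially growing sleep, up to max_retries attempts.
    ...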
def _get_rule_flows_match_except_net_addresses(secgroup_rule):
    protocol = secgroup_rule.get_protocol()
    port_range_max = secgroup_rule.get_port_range_max()
    port_range_min = secgroup_rule.get_port_range_min()
    ethertype = secgroup_rule.get_ethertype()
    match_list = []
    dl_type_match = {}
    protocol_match = {}
    port_match_list = [{}]
    if ethertype == 'IPv4':
        dl_type_match["eth_type"] = ether.ETH_TYPE_IP
        if protocol is not None:
            if protocol == 'icmp':
                protocol = 1
            elif protocol == 'tcp':
                protocol = 6
            elif protocol == 'udp':
                protocol = 17
            else:
                protocol = int(protocol)

            protocol_match["ip_proto"] = protocol
            port_match_name = SGApp._get_port_match_name(protocol)
            if (port_range_min is not None) and \
                    (port_match_name is not None):
                port_match_list = []
                if protocol == 1:
                    icmpv4_match = {port_match_name: int(port_range_min)}
                    if port_range_max is not None:
                        icmpv4_match["icmpv4_code"] = int(port_range_max)
                    port_match_list.append(icmpv4_match)
                elif (int(port_range_min) == 1 and
                        int(port_range_max) == 65535):
                    port_match_list.append(protocol_match)
                else:
                    split_port_range = SGApp._split_range(
                        int(port_range_min),
                        int(port_range_max),
                        0xffff
                    )
                    for port_item in split_port_range:
                        port_match_list.append(
                            {port_match_name:
                                SGApp._get_port_range_match(port_item)}
                        )
    elif ethertype == 'IPv6':
        # not supported yet
        dl_type_match["eth_type"] = ether.ETH_TYPE_IPV6
    else:
        LOG.error(_LE("wrong ethernet type"))

    for port_match in port_match_list:
        parameters_merge = dl_type_match.copy()
        parameters_merge.update(protocol_match)
        parameters_merge.update(port_match)
        match_list.append(parameters_merge)

    return match_list
def allocate_unique_key(self):
    try:
        return self._allocate_unique_key()
    except Exception as e:
        LOG.error(_LE("allocate_unique_key exception: %(e)s")
                  % {'e': e})
        return
def _vm_port_deleted(self, ovs_port):
    ovs_port_id = ovs_port.get_id()
    lport_id = ovs_port.get_iface_id()
    lport = self.db_store.get_port(lport_id)
    if lport is None:
        lport = self.ovs_to_lport_mapping.get(ovs_port_id)
        if lport is None:
            return
        topic = lport.get('topic')
        del self.ovs_to_lport_mapping[ovs_port_id]
        self._del_from_topic_subscribed(topic, lport_id)
        return

    topic = lport.get_topic()
    LOG.info(_LI("The logical port(%s) is offline") % str(lport))
    try:
        self.controller.logical_port_deleted(lport_id)
    except Exception:
        LOG.exception(_LE('Failed to process logical port offline '
                          'event %s') % lport_id)
    finally:
        # TODO(duankebo) publish vm port offline later
        # currently we will not publish vm port offline event.
        # lport = self.nb_api.get_logical_port(lport_id)
        # if lport.get_chassis() == self.chassis_name:
        #     self.nb_api.update_lport(lport.get_id(), chassis=None,
        #                              status='DOWN')
        del self.ovs_to_lport_mapping[ovs_port_id]
        self._del_from_topic_subscribed(topic, lport_id)
def update_local_port(self, lport, original_lport):
    if self.get_datapath() is None:
        LOG.error(_LE("datapath is none"))
        return

    secgroups = lport.get_security_groups()
    original_secgroups = original_lport.get_security_groups()
    added_secgroups, removed_secgroups = \
        self._get_added_and_removed_secgroups(secgroups,
                                              original_secgroups)

    if self._is_security_groups_none_or_empty(secgroups) and \
            (not self._is_security_groups_none_or_empty(
                original_secgroups)):
        # uninstall ct table
        self._uninstall_connection_track_flows(lport)

    for secgroup_id in added_secgroups:
        self._add_local_port_associating(lport, secgroup_id)

    for secgroup_id in removed_secgroups:
        self._remove_local_port_associating(lport, secgroup_id)

    if (not self._is_security_groups_none_or_empty(secgroups)) and \
            self._is_security_groups_none_or_empty(original_secgroups):
        # install ct table
        self._install_connection_track_flows(lport)
def remove_security_group_rule(self, secgroup, secgroup_rule):
    LOG.info(_LI("remove a rule %(rule)s from security group "
                 "%(secgroup)s")
             % {'rule': secgroup_rule, 'secgroup': secgroup.get_id()})
    if self.get_datapath() is None:
        LOG.error(_LE("datapath is none"))
        return

    conj_id, priority = \
        self._get_secgroup_conj_id_and_priority(secgroup.get_id())
    if conj_id is None:
        # this security group wasn't associated with a local port
        LOG.info(_LI("this security group %s wasn't associated with"
                     " a local port"), secgroup.get_id())
        return

    # update the record of rules each of which specifies a same security
    # group as its parameter of remote group.
    remote_group_id = secgroup_rule.get_remote_group_id()
    if remote_group_id is not None:
        associate_rules = self.remote_secgroup_ref.get(remote_group_id)
        if associate_rules is not None:
            del associate_rules[secgroup_rule.get_id()]
            if len(associate_rules) == 0:
                del self.remote_secgroup_ref[remote_group_id]

    self._uninstall_security_group_rule_flows(secgroup_rule)
def _uninstall_associating_flow_by_direction(self, security_group_id,
                                             lport, direction):
    parser = self.get_datapath().ofproto_parser
    ofproto = self.get_datapath().ofproto

    if direction == 'ingress':
        table_id = const.INGRESS_SECURITY_GROUP_TABLE
        tunnel_key = lport.get_tunnel_key()
        lport_classify_match = {"reg7": tunnel_key}
    else:
        table_id = const.EGRESS_SECURITY_GROUP_TABLE
        ofport = lport.get_external_value('ofport')
        lport_classify_match = {"in_port": ofport}

    conj_id, priority = \
        self._get_secgroup_conj_id_and_priority(security_group_id)
    if conj_id is None:
        LOG.error(_LE("the conj_id of the security group %s is none"),
                  security_group_id)
        return

    match = parser.OFPMatch(ct_state=(const.CT_STATE_TRK |
                                      const.CT_STATE_NEW,
                                      SG_CT_STATE_MASK),
                            **lport_classify_match)

    self.mod_flow(
        datapath=self.get_datapath(),
        table_id=table_id,
        priority=priority,
        match=match,
        command=ofproto.OFPFC_DELETE_STRICT,
        out_port=ofproto.OFPP_ANY,
        out_group=ofproto.OFPG_ANY)
def _uninstall_associating_flow_by_direction(self, security_group_id,
                                             lport, direction):
    parser = self.get_datapath().ofproto_parser
    ofproto = self.get_datapath().ofproto

    if direction == 'ingress':
        table_id = const.INGRESS_SECURITY_GROUP_TABLE
        tunnel_key = lport.get_tunnel_key()
        lport_classify_match = {"reg7": tunnel_key}
    else:
        table_id = const.EGRESS_SECURITY_GROUP_TABLE
        ofport = lport.get_external_value('ofport')
        lport_classify_match = {"in_port": ofport}

    conj_id, priority = \
        self._get_secgroup_conj_id_and_priority(security_group_id)
    if conj_id is None:
        LOG.error(_LE("the conj_id of the security group %s is none"),
                  security_group_id)
        return

    match = parser.OFPMatch(
        ct_state=(const.CT_STATE_TRK | const.CT_STATE_NEW,
                  SG_CT_STATE_MASK),
        **lport_classify_match)

    self.mod_flow(datapath=self.get_datapath(),
                  table_id=table_id,
                  priority=priority,
                  match=match,
                  command=ofproto.OFPFC_DELETE_STRICT)
def _uninstall_security_group_permit_flow_by_direction(self,
                                                       security_group_id,
                                                       direction):
    if direction == 'ingress':
        table_id = const.INGRESS_SECURITY_GROUP_TABLE
    else:
        table_id = const.EGRESS_SECURITY_GROUP_TABLE

    parser = self.get_datapath().ofproto_parser
    ofproto = self.get_datapath().ofproto

    conj_id, priority = \
        self._get_secgroup_conj_id_and_priority(security_group_id)
    if conj_id is None:
        LOG.error(_LE("the conj_id of the security group %s is none"),
                  security_group_id)
        return

    match = parser.OFPMatch(eth_type=ether.ETH_TYPE_IP, conj_id=conj_id)
    self.mod_flow(
        datapath=self.get_datapath(),
        table_id=table_id,
        match=match,
        command=ofproto.OFPFC_DELETE,
        out_port=ofproto.OFPP_ANY,
        out_group=ofproto.OFPG_ANY)
def add_remote_port(self, lport):
    if self.get_datapath() is None:
        LOG.error(_LE("datapath is none"))
        return

    secgroups = lport.get_security_groups()
    if secgroups is None:
        return

    ip = lport.get_ip()
    for secgroup_id in secgroups:
        # update the record of aggregate addresses of ports associated
        # with this security group.
        aggregate_addresses_range = \
            self.secgroup_aggregate_addresses.get(secgroup_id)
        if aggregate_addresses_range is None:
            aggregate_addresses_range = []
        new_cidr_array, added_cidr, removed_cidr = \
            SGApp._add_one_address(
                aggregate_addresses_range,
                SGApp._get_integer_value_from_address(ip)
            )
        self.secgroup_aggregate_addresses[secgroup_id] = new_cidr_array

        # update the flows representing those rules each of which
        # specifies this security group as its parameter of remote group.
        secrules = self.remote_secgroup_ref.get(secgroup_id)
        if secrules is not None:
            for rule_info in secrules.values():
                self._update_security_group_rule_flows_by_addresses(
                    rule_info.security_group_id,
                    rule_info,
                    added_cidr,
                    removed_cidr
                )
def unpack_message(message):
    entry = None
    try:
        entry = msgpack.unpackb(message, encoding='utf-8')
    except Exception:
        LOG.exception(_LE("Error in unpack_message: "))
    return entry
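# Round-trip sketch for the two msgpack helpers above: publishers call
# pack_message() before pushing an event onto the Redis channel, and
# subscribers call unpack_message() on the raw bytes they receive. The
# sample event dict is illustrative only.
event = {'table': 'lport', 'key': 'uuid-1', 'action': 'set'}
data = pack_message(event)   # -> bytes suitable for client.publish()
assert unpack_message(data) == event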
def remove_security_group_rule(self, secgroup, secgroup_rule):
    LOG.info(_LI("remove a rule %(rule)s from security group "
                 "%(secgroup)s")
             % {'rule': secgroup_rule, 'secgroup': secgroup.name})
    if self.get_datapath() is None:
        LOG.error(_LE("datapath is none"))
        return

    conj_id, priority = \
        self._get_secgroup_conj_id_and_priority(secgroup.name)
    if conj_id is None:
        # this security group wasn't associated with a local port
        LOG.info(_LI("this security group %s wasn't associated with"
                     " a local port"), secgroup.name)
        return

    # update the record of rules each of which specifies a same security
    # group as its parameter of remote group.
    remote_group_id = secgroup_rule.remote_group_id
    if remote_group_id is not None:
        associate_rules = self.remote_secgroup_ref.get(remote_group_id)
        if associate_rules is not None:
            del associate_rules[secgroup_rule.id]
            if len(associate_rules) == 0:
                del self.remote_secgroup_ref[remote_group_id]

    self._uninstall_security_group_rule_flows(secgroup_rule)
def _handle_db_conn_error(self, ip_port, local_key=None):
    self.redis_mgt.remove_node_from_master_list(ip_port)
    self._update_server_list()

    if local_key is not None:
        LOG.exception(_LE("update server list, key: %(key)s")
                      % {'key': local_key})
def _get_rule_flows_match_except_net_addresses(secgroup_rule):
    protocol = secgroup_rule.protocol
    port_range_max = secgroup_rule.port_range_max
    port_range_min = secgroup_rule.port_range_min
    ethertype = secgroup_rule.ethertype
    match_list = []
    dl_type_match = {}
    protocol_match = {}
    port_match_list = [{}]
    if ethertype == 'IPv4':
        dl_type_match["eth_type"] = ether.ETH_TYPE_IP
        if protocol is not None:
            if protocol == 'icmp':
                protocol = 1
            elif protocol == 'tcp':
                protocol = 6
            elif protocol == 'udp':
                protocol = 17
            else:
                protocol = int(protocol)

            protocol_match["ip_proto"] = protocol
            port_match_name = SGApp._get_port_match_name(protocol)
            if (port_range_min is not None) and \
                    (port_match_name is not None):
                port_match_list = []
                if protocol == 1:
                    icmpv4_match = {port_match_name: int(port_range_min)}
                    if port_range_max is not None:
                        icmpv4_match["icmpv4_code"] = int(port_range_max)
                    port_match_list.append(icmpv4_match)
                elif (int(port_range_min) == 1 and
                        int(port_range_max) == 65535):
                    port_match_list.append(protocol_match)
                else:
                    split_port_range = SGApp._split_range(
                        int(port_range_min),
                        int(port_range_max),
                        0xffff
                    )
                    for port_item in split_port_range:
                        port_match_list.append(
                            {port_match_name:
                                SGApp._get_port_range_match(port_item)}
                        )
    elif ethertype == 'IPv6':
        # not supported yet
        dl_type_match["eth_type"] = ether.ETH_TYPE_IPV6
    else:
        LOG.error(_LE("wrong ethernet type"))

    for port_match in port_match_list:
        parameters_merge = dl_type_match.copy()
        parameters_merge.update(protocol_match)
        parameters_merge.update(port_match)
        match_list.append(parameters_merge)

    return match_list
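# Independent sketch (an assumption, not the project's actual
# SGApp._split_range) of the power-of-two decomposition such a helper
# performs: a TCP/UDP port range is broken into (value, mask) pairs,
# each of which OpenFlow can match with a single masked
# tcp_dst/udp_dst entry.
def split_port_range(lo, hi, mask=0xffff):
    results = []
    while lo <= hi:
        # Largest power-of-two block that starts at `lo`: limited both
        # by the alignment of `lo` and by how much of the range is left.
        size = lo & -lo if lo else mask + 1
        while size > hi - lo + 1:
            size //= 2
        results.append((lo, (mask + 1 - size) & mask))
        lo += size
    return results

# e.g. ports 1000-1003 collapse into a single masked match:
assert split_port_range(1000, 1003) == [(1000, 0xfffc)]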
def update_local_port(self, lport, original_lport):
    if self.get_datapath() is None:
        LOG.error(_LE("datapath is none"))
        return

    secgroups = lport.get_security_groups()
    original_secgroups = original_lport.get_security_groups()
    added_secgroups, removed_secgroups = \
        self._get_added_and_removed_secgroups(secgroups,
                                              original_secgroups)

    if not secgroups and original_secgroups:
        # uninstall ct table
        self._uninstall_connection_track_flows(lport)

    for secgroup_id in added_secgroups:
        self._add_local_port_associating(lport, secgroup_id)

    for secgroup_id in removed_secgroups:
        self._remove_local_port_associating(lport, secgroup_id)

    if secgroups and not original_secgroups:
        # install ct table
        self._install_connection_track_flows(lport)
def _vm_port_deleted(self, ovs_port):
    ovs_port_id = ovs_port.get_id()
    lport_id = ovs_port.get_iface_id()
    lport = self.db_store.get_port(lport_id)
    if lport is None:
        lport = self.ovs_to_lport_mapping.get(ovs_port_id)
        if lport is None:
            return
        topic = lport.get('topic')
        del self.ovs_to_lport_mapping[ovs_port_id]
        self._del_from_topic_subscribed(topic, lport_id)
        return

    topic = lport.get_topic()
    LOG.info(_LI("The logical port(%s) is offline") % str(lport))
    try:
        self.controller.logical_port_deleted(lport_id)
    except Exception:
        LOG.exception(_LE('Failed to process logical port offline '
                          'event %s') % lport_id)
    finally:
        # publish vm port down event.
        if cfg.CONF.df.enable_port_status_notifier:
            self.port_status_reporter.notify_port_status(
                ovs_port, constants.PORT_STATUS_DOWN)
        del self.ovs_to_lport_mapping[ovs_port_id]
        self._del_from_topic_subscribed(topic, lport_id)
def _bridge_port_updated(self, ovs_port):
    try:
        self.controller.bridge_port_updated(ovs_port)
    except Exception:
        LOG.exception(_LE('Failed to process bridge port online '
                          'event: %s') % str(ovs_port))
def get_cluster_topology_by_all_nodes(self):
    # get redis cluster topology from local nodes cached in initialization
    new_nodes = {}
    for host, info in six.iteritems(self.cluster_nodes):
        ip_port = host.split(':')
        try:
            node = self._init_node(ip_port[0], ip_port[1])
            info = self._get_cluster_info(node)
            if info['cluster_state'] != 'ok':
                LOG.warning(_LW("redis cluster state failed"))
            else:
                new_nodes.update(self._get_cluster_nodes(node))

            self._release_node(node)
            break
        except Exception:
            LOG.exception(_LE("exception happened "
                              "when getting cluster topology, %(ip)s:"
                              "%(port)s")
                          % {'ip': ip_port[0], 'port': ip_port[1]})

    return new_nodes
def delete_floatingip(self, context, id):
    floatingip = self.get_floatingip(context, id)
    super(DFL3RouterPlugin, self).delete_floatingip(context, id)
    try:
        self.nb_api.delete_floatingip(id=id,
                                      topic=floatingip['tenant_id'])
    except df_exceptions.DBKeyNotFound:
        LOG.exception(_LE("floatingip %s is not found in DF DB") % id)
def update_subnet_postcommit(self, context):
    new_subnet = context.current
    old_subnet = context.original
    network = context.network.current
    plugin_context = context._plugin_context
    dhcp_ip = None
    dhcp_port = None

    try:
        dhcp_ip, dhcp_port = self._handle_update_subnet_dhcp(
            plugin_context, old_subnet, new_subnet)
    except Exception:
        LOG.exception(_LE("Failed to create dhcp port for subnet %s"),
                      new_subnet['id'])
        return None

    self.nb_api.update_subnet(
        new_subnet['id'],
        new_subnet['network_id'],
        new_subnet['tenant_id'],
        name=new_subnet.get('name', df_const.DF_SUBNET_DEFAULT_NAME),
        nw_version=network['revision_number'],
        enable_dhcp=new_subnet['enable_dhcp'],
        cidr=new_subnet['cidr'],
        dhcp_ip=dhcp_ip,
        gateway_ip=new_subnet['gateway_ip'],
        dns_nameservers=new_subnet.get('dns_nameservers', []),
        host_routes=new_subnet.get('host_routes', []))

    LOG.info(_LI("DFMechDriver: update subnet %s"), new_subnet['id'])
    return new_subnet
def redis_set_master_list_to_syncstring(self, master_list):
    try:
        RedisMgt.global_sharedlist.raw = msgpack.packb(master_list)
    except Exception:
        LOG.exception(_LE("exception happened "
                          "when setting new master to syncstring"))
def _handle_dhcp_request(self, msg, pkt, lport):
    packet = ryu_packet.Packet(data=msg.data)
    in_port = msg.match.get("in_port")

    if isinstance(packet[3], str):
        dhcp_packet = dhcp.dhcp.parser(packet[3])[0]
    else:
        dhcp_packet = packet[3]

    dhcp_message_type = self._get_dhcp_message_type_opt(dhcp_packet)
    send_packet = None
    if dhcp_message_type == DHCP_DISCOVER:
        # DHCP DISCOVER
        send_packet = self._create_dhcp_offer(pkt, dhcp_packet, lport)
        LOG.info(_LI("sending DHCP offer for port IP %(port_ip)s"
                     " port id %(port_id)s")
                 % {'port_ip': lport.get_ip(),
                    'port_id': lport.get_id()})
    elif dhcp_message_type == DHCP_REQUEST:
        # DHCP REQUEST
        send_packet = self._create_dhcp_ack(pkt, dhcp_packet, lport)
        LOG.info(_LI("sending DHCP ACK for port IP %(port_ip)s"
                     " port id %(tunnel_id)s")
                 % {'port_ip': lport.get_ip(),
                    'tunnel_id': lport.get_id()})
    else:
        LOG.error(_LE("DHCP message type %d not handled"),
                  dhcp_message_type)
    if send_packet:
        self._send_packet(self.get_datapath(), in_port, send_packet)
def init_default_node(self, host, port):
    try:
        self.default_node = redis.StrictRedis(host, port)
        RedisMgt.check_connection(self.default_node)
    except Exception as e:
        LOG.exception(_LE("exception happened "
                          "when connecting to default node, %s"), e)
def get_qos_policy(self, policy_id, topic=None):
    try:
        qospolicy_value = self.driver.get_key('qospolicy',
                                              policy_id,
                                              topic)
        return QosPolicy(qospolicy_value)
    except Exception:
        LOG.exception(_LE('Could not get qos policy %s'), policy_id)
        return None
def OF_error_msg_handler(self, event):
    msg = event.msg
    try:
        (version, msg_type, msg_len, xid) = ofproto_parser.header(msg.data)
        ryu_msg = ofproto_parser.msg(
            self._datapath, version, msg_type,
            msg_len - ofproto_common.OFP_HEADER_SIZE, xid, msg.data)
        LOG.error(_LE('OFPErrorMsg received: %s'), ryu_msg)
    except Exception:
        LOG.error(_LE('Unrecognized OFPErrorMsg received: '
                      'type=0x%(type)02x code=0x%(code)02x '
                      'message=%(msg)s'),
                  {'type': msg.type, 'code': msg.code,
                   'msg': utils.hex_array(msg.data)})
def load_driver(driver_cfg, namespace):
    try:
        # Try to resolve by alias
        mgr = driver.DriverManager(namespace, driver_cfg)
        class_to_load = mgr.driver
    except RuntimeError:
        e1_info = sys.exc_info()
        # try with name
        try:
            class_to_load = importutils.import_class(driver_cfg)
        except (ImportError, ValueError):
            LOG.error(_LE("Error loading class %(class)s by alias: %(e)s")
                      % {'class': driver_cfg, 'e': e1_info},
                      exc_info=e1_info)
            LOG.error(_LE("Error loading class by class name"),
                      exc_info=True)
            raise ImportError(_("Class not found."))
    return class_to_load()
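# Hedged usage sketch for load_driver: resolution is first attempted as
# a stevedore alias inside `namespace`, then as a dotted class path.
# The namespace and both driver strings below are illustrative
# assumptions, not values the project necessarily defines.
nb_driver = load_driver('redis', 'dragonflow.nb_db_driver')
# Equivalent fallback path, loading by fully qualified class name:
nb_driver = load_driver(
    'dragonflow.db.drivers.redis_db_driver.RedisDbDriver',
    'dragonflow.nb_db_driver')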
def run(self):
    cache = {}
    while True:
        try:
            eventlet.sleep(self._polling_time)
            cache = self._poll_once(cache)
        except Exception:
            LOG.exception(_LE("Error when polling table %s"),
                          self._table_name)
def remove_local_port(self, lport):
    if self.get_datapath() is None:
        LOG.error(_LE("datapath is none"))
        return

    secgroups = lport.get_security_groups()
    if secgroups is None:
        return

    # uninstall ct table
    self._uninstall_connection_track_flows(lport)

    ip = lport.get_ip()
    for secgroup_id in secgroups:
        # uninstall associating flow
        self._uninstall_associating_flows(secgroup_id, lport)

        # update the record of aggregate addresses of ports associated
        # with this security group.
        aggregate_addresses_range = \
            self.secgroup_aggregate_addresses.get(secgroup_id)
        if aggregate_addresses_range is not None:
            new_cidr_array, added_cidr, removed_cidr = \
                SGApp._remove_one_address(
                    aggregate_addresses_range,
                    SGApp._get_integer_value_from_address(ip)
                )
            if len(new_cidr_array) == 0:
                del self.secgroup_aggregate_addresses[secgroup_id]
            else:
                self.secgroup_aggregate_addresses[secgroup_id] = \
                    new_cidr_array

            # update the flows representing those rules each of which
            # specifies this security group as its
            # parameter of remote group.
            secrules = self.remote_secgroup_ref.get(secgroup_id)
            if secrules is not None:
                for rule_info in secrules.values():
                    self._update_security_group_rule_flows_by_addresses(
                        rule_info.security_group_id,
                        rule_info,
                        added_cidr,
                        removed_cidr
                    )

        # update the record of ports associated with this security group.
        associate_ports = \
            self.secgroup_associate_local_ports.get(secgroup_id)
        if associate_ports is not None:
            if lport.get_id() in associate_ports:
                associate_ports.remove(lport.get_id())
                if len(associate_ports) == 0:
                    self._uninstall_security_group_flow(secgroup_id)
                    self._release_security_group_id(secgroup_id)
                    del self.secgroup_associate_local_ports[secgroup_id]
def packet_in_handler(self, event):
    msg = event.msg
    pkt = packet.Packet(msg.data)
    pkt_ip = pkt.get_protocol(ipv4.ipv4)
    if pkt_ip is None:
        pkt_ip = pkt.get_protocol(ipv6.ipv6)
    if pkt_ip is None:
        LOG.error(_LE("Received Non IP Packet"))
        return

    network_id = msg.match.get('metadata')
    pkt_ethernet = pkt.get_protocol(ethernet.ethernet)
    try:
        self.get_route(pkt_ip, pkt_ethernet, network_id, msg)
    except Exception as e:
        LOG.error(_LE("L3 App PacketIn exception raised"))
        LOG.error(e)
def add_exception(self, exception):
    """Exception handler. Record this exception to be read later by
    the caller.

    :param exception: The exception to record
    :type exception:  Exception
    """
    LOG.exception(_LE('Adding exception:'))
    self.exceptions.append(exception)
    self.stop()
def _send_msg(self, msg, reply_cls=None, reply_multi=False):
    timeout_sec = 20  # TODO(heshan) should be configured in cfg file
    timeout = eventlet.timeout.Timeout(seconds=timeout_sec)
    result = None
    try:
        result = ofctl_api.send_msg(self._app, msg, reply_cls,
                                    reply_multi)
    except ryu_exc.RyuException as e:
        m = _LE("ofctl request %(request)s error %(error)s") % {
            "request": msg,
            "error": e,
        }
        LOG.error(_LE("exception occurred, %s"), m)
    except eventlet.timeout.Timeout as e:
        LOG.error(_LE("exception occurred, %s"), e)
    finally:
        timeout.cancel()
    LOG.debug("ofctl request %(request)s result %(result)s",
              {"request": msg, "result": result})
    return result
def load(self, *args, **kwargs):
    for app in self.apps_list:
        app_class_name = self.apps_location_prefix + "." + app
        try:
            app_class = importutils.import_class(app_class_name)
            app = app_class(*args, **kwargs)
            self.apps.append(app)
        except ImportError as e:
            LOG.exception(_LE("Error loading application by class, %s"),
                          e)
            raise ImportError(_("Application class not found."))
def delete_key(self, table, key, topic=None):
    local_topic = topic
    local_key = self.uuid_to_key(table, key, local_topic)
    try:
        client = self._get_client(local_key)
        return client.delete(local_key)
    except Exception as e:
        LOG.exception(_LE("exception %(key)s: %(e)s")
                      % {'key': local_key, 'e': e})
        raise df_exceptions.DBKeyNotFound(key=local_key)
def daemonize(self, run):
    if self.is_daemonize:
        LOG.error(_LE("already daemonized"))
        return

    self.is_daemonize = True
    if self.is_not_light:
        self.thread = self.pool.spawn(run)
    else:
        self.thread = self.pool.spawn_n(run)
    eventlet.sleep(0)
    return self.thread