def recv(self):
    """Try to receive one complete JSON-RPC message from the stream.

    Returns (0, msg) on success, (errno, None) on error (EAGAIN when no
    complete message is available yet), or (EOF, None) when the peer closed
    the connection.  Once self.status is nonzero the connection is dead and
    that status is returned immediately.
    """
    if self.status:
        return self.status, None
    while True:
        if not self.input:
            # Buffer is empty: pull more bytes off the stream.
            error, data = self.stream.recv(4096)
            if error:
                if error == errno.EAGAIN:
                    # Nothing available right now; caller should retry later.
                    return error, None
                else:
                    # XXX rate-limit
                    vlog.warn("%s: receive error: %s"
                              % (self.name, os.strerror(error)))
                    self.error(error)
                    return self.status, None
            elif not data:
                # Zero-length read means the peer closed the connection.
                self.error(EOF)
                return EOF, None
            else:
                self.input += data
        else:
            # Feed buffered input to the incremental JSON parser; keep any
            # bytes the parser did not consume for the next message.
            if self.parser is None:
                self.parser = ovs.json.Parser()
            self.input = self.input[self.parser.feed(self.input):]
            if self.parser.is_done():
                msg = self.__process_msg()
                if msg:
                    return 0, msg
                else:
                    # __process_msg() failed and set self.status.
                    return self.status, None
def get_config(idl_cfg):
    """Walk through the rows in the config table (if any) looking for a row
    with type == startup.  If found, set global variable saved_config to the
    content of the "config" field in that row.
    """
    global saved_config
    # Note: You can't tell the difference between the config table not
    # existing (that is the configdb is not there) or just that there
    # are no rows in the config table.
    tbl_found = False
    # NOTE(review): itervalues() is Python-2-only; this block predates py3.
    for ovs_rec in idl_cfg.tables["config"].rows.itervalues():
        tbl_found = True
        if ovs_rec.type:
            if ovs_rec.type == type_startup_config:
                if ovs_rec.config:
                    saved_config = ovs_rec.config
                else:
                    vlog.warn("startup config row does not have config column")
                # Startup row located; no need to scan further.
                return
    if not tbl_found:
        vlog.info("No rows found in the config table")
def run(self):
    """Service this unixctl connection's JSON-RPC layer.

    Flushes pending rpc work, then receives and dispatches up to 10 incoming
    messages.  Returns 0 when healthy, otherwise a positive errno value.
    """
    self._rpc.run()
    error = self._rpc.get_status()
    if error or self._rpc.get_backlog():
        # Connection broken, or output still queued: don't read more yet.
        return error
    for _ in range(10):
        # Stop early on error or while a command is already being processed.
        if error or self._request_id:
            break
        error, msg = self._rpc.recv()
        if msg:
            if msg.type == Message.T_REQUEST:
                self._process_command(msg)
            else:
                # Only requests are valid from a unixctl client.
                # XXX: rate-limit
                vlog.warn("%s: received unexpected %s message"
                          % (self._rpc.name,
                             Message.type_to_string(msg.type)))
                error = errno.EINVAL
    if not error:
        error = self._rpc.get_status()
    return error
def _create_external_vip(self, namespace, external_ip, ips, port,
                         target_port, protocol):
    """Add external_ip:port as a VIP in the external-IP gateway's
    load-balancer for the given protocol.

    'ips' are the backend endpoints; 'namespace' is passed through so the
    VIP helper can resolve named target ports.  Returns silently when no
    gateway or load-balancer can be found.
    """
    # With external_ip:port as the VIP, create an entry in a gateway
    # load-balancer.

    # Get the gateway where we can add external_ip:port as a VIP.
    physical_gateway = self._get_ovn_external_ip_gateway()
    if not physical_gateway:
        return

    try:
        # Get the load-balancer instantiated in the gateway.
        external_id_key = protocol + "_lb_gateway_router"
        load_balancer = ovn_nbctl("--data=bare", "--no-heading",
                                  "--columns=_uuid", "find", "load_balancer",
                                  "external_ids:" + external_id_key + "=" +
                                  physical_gateway).strip('"')
    except Exception as e:
        vlog.err("_create_external_vip: get failed for"
                 " %s (%s)" % (physical_gateway, str(e)))
        return

    if not load_balancer:
        vlog.warn("physical gateway %s does not have a load_balancer"
                  % (physical_gateway))
        # BUG FIX: previously fell through after the warning and called
        # _create_load_balancer_vip with an empty load_balancer.  Bail out
        # instead, matching the sibling _create_gateways_vip behavior.
        return

    # With external_ip:port as VIP, add an entry in 'load_balancer'.
    self._create_load_balancer_vip(namespace, load_balancer, external_ip,
                                   ips, port, target_port, protocol)
def __process_insert_reply(self, insert, ops):
    """Record the real UUID assigned by the server for one "insert" op.

    'insert' carries the index of its operation in the reply array 'ops'.
    On success stores the parsed UUID in insert.real and returns True;
    returns False on any malformed reply.
    """
    if insert.op_index >= len(ops):
        # XXX rate-limit
        vlog.warn("reply does not contain enough operations "
                  "for insert (has %d, needs %d)" % (len(ops),
                                                     insert.op_index))
        return False

    # We know that this is a JSON object because the loop in
    # __process_reply() already checked.
    reply = ops[insert.op_index]
    json_uuid = reply.get("uuid")
    # An OVSDB <uuid> is a two-element JSON array ["uuid", "..."].
    if not Transaction.__check_json_type(json_uuid, (tuple, list),
                                         '"insert" reply "uuid"'):
        return False

    try:
        uuid_ = ovs.ovsuuid.from_json(json_uuid)
    except error.Error:
        # XXX rate-limit
        vlog.warn('"insert" reply "uuid" is not a JSON UUID')
        return False

    insert.real = uuid_
    return True
def run(self):
    """Drive this unixctl connection: flush rpc output, then pull in and
    dispatch at most ten incoming messages.  Returns 0 while healthy,
    otherwise a positive errno value."""
    self._rpc.run()
    error = self._rpc.get_status()
    if error or self._rpc.get_backlog():
        return error

    attempts = 10
    while attempts and not (error or self._request_id):
        attempts -= 1
        error, msg = self._rpc.recv()
        if not msg:
            continue
        if msg.type != Message.T_REQUEST:
            # Anything other than a request is a protocol violation.
            # XXX: rate-limit
            vlog.warn(
                "%s: received unexpected %s message"
                % (self._rpc.name,
                   Message.type_to_string(msg.type)))
            error = errno.EINVAL
        else:
            self._process_command(msg)

    if not error:
        error = self._rpc.get_status()
    return error
def _create_external_vip(self, external_ip, ips, port,
                         target_port, protocol):
    """Add external_ip:port as a VIP in the external-IP gateway's
    load-balancer for the given protocol.

    'ips' are the backend endpoints.  Returns silently when no gateway or
    load-balancer can be found.
    """
    # With external_ip:port as the VIP, create an entry in a gateway
    # load-balancer.

    # Get the gateway where we can add external_ip:port as a VIP.
    physical_gateway = self._get_ovn_external_ip_gateway()
    if not physical_gateway:
        return

    try:
        # Get the load-balancer instantiated in the gateway.
        external_id_key = protocol + "_lb_gateway_router"
        load_balancer = ovn_nbctl(
            "--data=bare", "--no-heading",
            "--columns=_uuid", "find",
            "load_balancer",
            "external_ids:" + external_id_key + "=" +
            physical_gateway).strip('"')
    except Exception as e:
        vlog.err("_create_external_vip: get failed for"
                 " %s (%s)" % (physical_gateway, str(e)))
        return

    if not load_balancer:
        vlog.warn("physical gateway %s does not have a load_balancer"
                  % (physical_gateway))
        # BUG FIX: previously fell through after the warning and called
        # _create_load_balancer_vip with an empty load_balancer.  Bail out
        # instead, matching the sibling _create_gateways_vip behavior.
        return

    # With external_ip:port as VIP, add an entry in 'load_balancer'.
    self._create_load_balancer_vip(load_balancer, external_ip, ips, port,
                                   target_port, protocol)
def __row_update(self, table, row, row_json): changed = False for column_name, datum_json in row_json.iteritems(): column = table.columns.get(column_name) if not column: # XXX rate-limit vlog.warn("unknown column %s updating table %s" % (column_name, table.name)) continue try: datum = ovs.db.data.Datum.from_json(column.type, datum_json) except error.Error, e: # XXX rate-limit vlog.warn("error parsing column %s in table %s: %s" % (column_name, table.name, e)) continue if datum != row._data[column_name]: row._data[column_name] = datum if column.alert: changed = True else: # Didn't really change but the OVSDB monitor protocol always # includes every value in a row. pass
def compat_read_unix_socket(path):
    """Read and return the integer recorded on the first line of the compat
    file at 'path'.

    Raises socket.error(ENOENT) if the file cannot be opened (the open error
    is also logged).
    """
    try:
        file_handle = open(path, "r+")
    except IOError as e:
        vlog.warn("%s: open: %s" % (path, e.strerror))
        raise socket.error(errno.ENOENT)
    # BUG FIX: the handle was previously leaked; close it deterministically.
    with file_handle:
        return int(file_handle.readline())
def compat_write_unix_socket(path, port):
    """Record 'port' (as its decimal string) in the compat file at 'path'.

    Raises socket.error(ENOENT) if the file cannot be opened (the open error
    is also logged).
    """
    try:
        file_handle = open(path, "w")
    except IOError as e:
        vlog.warn("%s: open: %s" % (path, e.strerror))
        raise socket.error(errno.ENOENT)
    # BUG FIX: the handle was previously never closed, so the write could
    # sit in the buffer indefinitely and the descriptor leaked.
    with file_handle:
        file_handle.write(str(port))
def unlink_file_now(file):
    """Like fatal_signal_remove_file_to_unlink(), but also unlinks 'file'.

    Returns 0 if successful, otherwise a positive errno value."""
    err = _unlink(file)
    if err:
        vlog.warn("could not unlink \"%s\" (%s)" % (file, os.strerror(err)))
    # Whether or not the unlink worked, stop tracking the file for
    # unlink-on-fatal-signal.
    remove_file_to_unlink(file)
    return err
def _create_load_balancer_vip(self, namespace, load_balancer, service_ip,
                              ips, port, target_port, protocol):
    """Create or update the VIP "service_ip:port" in 'load_balancer'.

    When 'ips' is empty the VIP is removed instead.  'target_port' may be a
    numeric string or a named port; named ports are resolved through
    self.port_name_cache per-namespace, per-ip.
    """
    # With service_ip:port as a VIP, create an entry in 'load_balancer'
    vlog.dbg(
        "received event to create/modify load_balancer (%s) vip "
        "service_ip=%s, ips=%s, port=%s, target_port=%s, protocol=%s"
        % (load_balancer, service_ip, ips, port, target_port, protocol))
    if not port or not target_port or not protocol or not load_balancer:
        return

    # key is of the form "IP:port" (with quotes around)
    key = "\"" + service_ip + ":" + str(port) + "\""
    if not ips:
        # No endpoints left: delete the VIP from the load-balancer.
        try:
            ovn_nbctl("remove", "load_balancer", load_balancer, "vips", key)
        except Exception as e:
            vlog.err("_create_load_balancer_vip remove: (%s)" % (str(e)))
        return

    # NOTE(review): assumes target_port is a str (isdigit would raise on an
    # int) — confirm against callers.
    if target_port.isdigit():
        # target is of the form "IP1:port, IP2:port, IP3:port"
        target_endpoints = ",".join(
            ["%s:%s" % (ip, target_port) for ip in ips])
    else:
        # 'target_port' is a string.  We should get its number
        # from the cache.
        if not self.port_name_cache.get(namespace):
            vlog.warn("targetPort of %s in ns %s does not have an "
                      "associated port. Ignoring endpoint creation."
                      % (target_port, namespace))
            return
        target_endpoint_list = []
        for ip in ips:
            # Skip endpoints whose ip has no cached named-port mapping.
            if not self.port_name_cache[namespace].get(ip):
                continue
            num_port = self.port_name_cache[namespace][ip].get(target_port)
            if not num_port:
                continue
            target_endpoint_list.append("%s:%s" % (ip, num_port))
        if not target_endpoint_list:
            vlog.warn("targetPort of %s in ns %s does not have any "
                      "associated ports. Ignoring endpoint creation."
                      % (target_port, namespace))
            return
        target_endpoints = ",".join(target_endpoint_list)

    target = "\"" + target_endpoints + "\""
    try:
        ovn_nbctl("set", "load_balancer", load_balancer,
                  "vips:" + key + "=" + target)
    except Exception as e:
        vlog.err("_create_load_balancer_vip add: (%s)" % (str(e)))
def _create_load_balancer_vip(self, namespace, load_balancer, service_ip,
                              ips, port, target_port, protocol):
    """Create or update the VIP "service_ip:port" in 'load_balancer'.

    Removes the VIP when 'ips' is empty.  A non-numeric 'target_port' is a
    named port and is resolved via self.port_name_cache[namespace][ip].
    """
    # With service_ip:port as a VIP, create an entry in 'load_balancer'
    vlog.dbg("received event to create/modify load_balancer (%s) vip "
             "service_ip=%s, ips=%s, port=%s, target_port=%s, protocol=%s"
             % (load_balancer, service_ip, ips, port, target_port, protocol))
    if not port or not target_port or not protocol or not load_balancer:
        return

    # key is of the form "IP:port" (with quotes around)
    key = "\"" + service_ip + ":" + str(port) + "\""
    if not ips:
        # No endpoints: remove the VIP instead of setting it.
        try:
            ovn_nbctl("remove", "load_balancer", load_balancer, "vips", key)
        except Exception as e:
            vlog.err("_create_load_balancer_vip remove: (%s)" % (str(e)))
        return

    # NOTE(review): assumes target_port is a str (isdigit would raise on an
    # int) — confirm against callers.
    if target_port.isdigit():
        # target is of the form "IP1:port, IP2:port, IP3:port"
        target_endpoints = ",".join(["%s:%s" % (ip, target_port)
                                     for ip in ips])
    else:
        # 'target_port' is a string.  We should get its number
        # from the cache.
        if not self.port_name_cache.get(namespace):
            vlog.warn("targetPort of %s in ns %s does not have an "
                      "associated port. Ignoring endpoint creation."
                      % (target_port, namespace))
            return
        target_endpoint_list = []
        for ip in ips:
            # Endpoints without a cached mapping are silently skipped.
            if not self.port_name_cache[namespace].get(ip):
                continue
            num_port = self.port_name_cache[namespace][ip].get(target_port)
            if not num_port:
                continue
            target_endpoint_list.append("%s:%s" % (ip, num_port))
        if not target_endpoint_list:
            vlog.warn("targetPort of %s in ns %s does not have any "
                      "associated ports. Ignoring endpoint creation."
                      % (target_port, namespace))
            return
        target_endpoints = ",".join(target_endpoint_list)

    target = "\"" + target_endpoints + "\""
    try:
        ovn_nbctl("set", "load_balancer", load_balancer,
                  "vips:" + key + "=" + target)
    except Exception as e:
        vlog.err("_create_load_balancer_vip add: (%s)" % (str(e)))
def _process_func(watcher, watcher_recycle_func):
    """Run 'watcher.process()' forever, rebuilding the watcher with
    'watcher_recycle_func' whenever processing raises.  Never returns."""
    while True:
        try:
            watcher.process()
        except Exception as exc:
            # Recycle watcher
            vlog.warn(
                "Regenerating watcher because of %s and reconnecting to "
                "stream using function %s"
                % (str(exc), watcher_recycle_func.__name__))
            watcher = watcher_recycle_func()
def _process_reply(self, msg):
    """Digest the JSON-RPC reply to this transaction's "transact" request
    and set self._status accordingly (ERROR, NOT_LOCKED, AGAIN_WAIT,
    AGAIN_NOW, or SUCCESS).
    """
    if msg.type == ovs.jsonrpc.Message.T_ERROR:
        self._status = Transaction.ERROR
    elif type(msg.result) not in (list, tuple):
        # XXX rate-limit
        vlog.warn('reply to "transact" is not JSON array')
    else:
        hard_errors = False
        soft_errors = False
        lock_errors = False

        ops = msg.result
        for op in ops:
            if op is None:
                # This isn't an error in itself but indicates that some
                # prior operation failed, so make sure that we know about
                # it.
                soft_errors = True
            elif type(op) == dict:
                error = op.get("error")
                if error is not None:
                    # Classify per-operation errors: retryable, lock-related,
                    # ignorable ("aborted"), or hard failures.
                    if error == "timed out":
                        soft_errors = True
                    elif error == "not owner":
                        lock_errors = True
                    elif error == "aborted":
                        pass
                    else:
                        hard_errors = True
                        self.__set_error_json(op)
            else:
                hard_errors = True
                self.__set_error_json(op)
                # XXX rate-limit
                vlog.warn("operation reply is not JSON null or object")

        if not soft_errors and not hard_errors and not lock_errors:
            # Only a fully clean reply is worth post-processing.
            if self._inc_table and not self.__process_inc_reply(ops):
                hard_errors = True
            for insert in self._inserted_rows.itervalues():
                if not self.__process_insert_reply(insert, ops):
                    hard_errors = True

        if hard_errors:
            self._status = Transaction.ERROR
        elif lock_errors:
            self._status = Transaction.NOT_LOCKED
        elif soft_errors:
            # Retry now if the database changed since we committed,
            # otherwise wait for a change first.
            if self._commit_seqno == self.idl.change_seqno:
                self._status = Transaction.AGAIN_WAIT
            else:
                self._status = Transaction.AGAIN_NOW
        else:
            self._status = Transaction.SUCCESS
def __check_json_type(json, types, name):
    """Return True when 'json' is present and its type is one of 'types';
    otherwise log a warning naming 'name' and return False.

    NOTE(review): falsy-but-present values (0, "", []) are reported as
    "missing" because of the truthiness test — confirm callers rely on this.
    """
    if not json:
        # XXX rate-limit
        vlog.warn("%s is missing" % name)
        return False
    if type(json) not in types:
        # XXX rate-limit
        vlog.warn("%s has unexpected type %s" % (name, type(json)))
        return False
    return True
def process_stream(data_stream, event_callback):
    """Pull one line from 'data_stream', JSON-decode it, and hand the result
    to 'event_callback'.  Blank lines are ignored; invalid JSON is logged.

    StopIteration should be caught in the routine that sets up the stream
    and reconnects it.
    """
    line = next(data_stream)
    if line:
        try:
            event_callback(json.loads(line))
        except ValueError:
            vlog.warn("Invalid JSON data from response stream:%s" % line)
def _process_func(watcher, watcher_recycle_func):
    """Run 'watcher.process()' forever; on any exception log it and replace
    the watcher via 'watcher_recycle_func'.  Never returns."""
    while True:
        try:
            watcher.process()
        except Exception as exc:
            # Recycle watcher
            vlog.exception("Failure in watcher %s" % type(watcher).__name__)
            vlog.warn("Regenerating watcher because of \"%s\" and "
                      "reconnecting to stream using function %s"
                      % (str(exc), watcher_recycle_func.__name__))
            watcher = watcher_recycle_func()
def create(path):
    """Connect to the unixctl server socket at 'path'.

    Returns (0, UnixctlClient) on success or (errno, None) on failure."""
    assert isinstance(path, str)

    unix = "unix:%s" % ovs.util.abs_file_name(ovs.dirs.RUNDIR, path)
    raw_stream = ovs.stream.Stream.open(unix)
    error, stream = ovs.stream.Stream.open_block(raw_stream)
    if error:
        vlog.warn("failed to connect to %s" % path)
        return error, None

    conn = ovs.jsonrpc.Connection(stream)
    return 0, UnixctlClient(conn)
def create(path):
    """Open a blocking unixctl client connection to the socket at 'path'.

    Returns (0, UnixctlClient) on success or (errno, None) on failure."""
    assert isinstance(path, str)

    unix = "unix:%s" % ovs.util.abs_file_name(ovs.dirs.RUNDIR, path)
    pending = ovs.stream.Stream.open(unix)
    error, stream = ovs.stream.Stream.open_block(pending)
    if error:
        vlog.warn("failed to connect to %s" % path)
        return error, None

    return 0, UnixctlClient(ovs.jsonrpc.Connection(stream))
def process_stream(data_stream, event_callback):
    """Pull one line from 'data_stream', JSON-decode it, and hand the result
    to 'event_callback'.

    A stream that has ended (StopIteration) is reported as an API-server
    timeout; blank lines are ignored; invalid JSON is logged and dropped.
    """
    try:
        line = next(data_stream)
    except StopIteration:
        raise exceptions.APIServerTimeout()
    if not line:
        return
    try:
        event_callback(json.loads(line))
    except ValueError:
        vlog.warn("Invalid JSON data from response stream:%s" % line)
def _process_func(watcher, watcher_recycle_func):
    """Run 'watcher.process()' forever; on failure rebuild the watcher with
    'watcher_recycle_func'.  API-server timeouts are expected and therefore
    not logged as exceptions.  Never returns."""
    while True:
        try:
            watcher.process()
        except Exception as exc:
            # Recycle watcher
            if not isinstance(exc, exceptions.APIServerTimeout):
                vlog.exception("Failure in watcher %s"
                               % type(watcher).__name__)
            vlog.warn("Regenerating watcher because of \"%s\" and "
                      "reconnecting to stream using function %s"
                      % (str(exc), watcher_recycle_func.__name__))
            watcher = watcher_recycle_func()
def recv(self):
    """Try to receive one complete JSON-RPC message.

    Returns (0, msg), (errno, None) — EAGAIN when no full message is
    available yet — or (EOF, None) on peer close.  Incoming bytes are
    incrementally UTF-8 decoded so multi-byte characters split across
    recv() calls are handled correctly.
    """
    if self.status:
        return self.status, None

    # Incremental decoder keeps partial multi-byte sequences between reads.
    decoder = codecs.getincrementaldecoder('utf-8')()
    while True:
        if not self.input:
            error, data = self.stream.recv(4096)
            # Python 3 has separate types for strings and bytes.  We
            # received bytes from a socket.  We expect it to be string
            # data, so we convert it here as soon as possible.
            if data and not error:
                try:
                    if six.PY3 or ovs.json.PARSER == ovs.json.PARSER_PY:
                        data = decoder.decode(data)
                except UnicodeError:
                    error = errno.EILSEQ
            if error:
                if (sys.platform == "win32"
                        and error == errno.WSAEWOULDBLOCK):
                    # WSAEWOULDBLOCK would be the equivalent on Windows
                    # for EAGAIN on Unix.
                    error = errno.EAGAIN
                if error == errno.EAGAIN:
                    return error, None
                else:
                    # XXX rate-limit
                    vlog.warn("%s: receive error: %s"
                              % (self.name, os.strerror(error)))
                    self.error(error)
                    return self.status, None
            elif not data:
                # Zero-length read: peer closed the connection.
                self.error(EOF)
                return EOF, None
            else:
                self.input += data
                self.received_bytes += len(data)
        else:
            if self.parser is None:
                self.parser = ovs.json.Parser()
            if six.PY3 and ovs.json.PARSER == ovs.json.PARSER_C:
                # The C parser reports how many *bytes* it consumed, so the
                # buffered string must be sliced in byte space, not
                # character space.
                self.input = self.input.encode('utf-8')[
                    self.parser.feed(self.input):].decode()
            else:
                self.input = self.input[self.parser.feed(self.input):]
            if self.parser.is_done():
                msg = self.__process_msg()
                if msg:
                    return 0, msg
                else:
                    # __process_msg() failed and set self.status.
                    return self.status, None
def run(self):
    """Push as much of the queued output as the stream will accept.

    Stops on EAGAIN (try again later) or on a real send error, which is
    logged and recorded via self.error().  No-op once the connection has a
    nonzero status."""
    if self.status:
        return

    while self.output:
        sent = self.stream.send(self.output)
        if sent < 0:
            # Negative return is -errno from the stream layer.
            if sent != -errno.EAGAIN:
                vlog.warn("%s: send error: %s"
                          % (self.name, os.strerror(-sent)))
                self.error(-sent)
            break
        # Drop the bytes that were accepted and keep trying.
        self.output = self.output[sent:]
def _create_gateways_vip(self, namespace, ips, port, target_port, protocol):
    """Add physical_ip:port as a VIP in every gateway's per-protocol
    load-balancer, pointing at endpoints 'ips'.

    Gateways without a physical IP or load-balancer are warned about and
    skipped.
    """
    # Each gateway has a separate load-balancer for N/S traffic
    physical_gateways = self._get_ovn_gateways()
    if not physical_gateways:
        return

    for physical_gateway in physical_gateways:
        # Go through each gateway to get its physical_ip and load-balancer.
        try:
            physical_ip = ovn_nbctl(
                "get", "logical_router",
                physical_gateway,
                "external_ids:physical_ip").strip('"')
        except Exception as e:
            vlog.err("_create_gateways_vip: get failed for"
                     " %s (%s)" % (physical_gateway, str(e)))
            continue
        if not physical_ip:
            # BUG FIX: the warning previously interpolated 'physical_ip',
            # which is known to be empty here, instead of the gateway name.
            vlog.warn("physical gateway %s does not have physical ip"
                      % (physical_gateway))
            continue
        try:
            external_id_key = protocol + "_lb_gateway_router"
            load_balancer = ovn_nbctl(
                "--data=bare", "--no-heading",
                "--columns=_uuid", "find",
                "load_balancer",
                "external_ids:" + external_id_key + "=" + physical_gateway
                ).strip('"')
        except Exception as e:
            vlog.err("_create_gateways_vip: find failed for"
                     " %s (%s)" % (physical_gateway, str(e)))
            continue
        if not load_balancer:
            vlog.warn("physical gateway %s does not have load_balancer"
                      % (physical_gateway))
            continue

        # With the physical_ip:port as the VIP, add an entry in
        # 'load_balancer'.
        self._create_load_balancer_vip(namespace, load_balancer,
                                       physical_ip, ips, port, target_port,
                                       protocol)
def run(self):
    """Accept up to ten new unixctl connections, then service all existing
    connections, dropping any whose run() reports a hard error."""
    accepts_left = 10
    while accepts_left:
        accepts_left -= 1
        error, stream = self._listener.accept()
        if not error:
            rpc = ovs.jsonrpc.Connection(stream)
            self._conns.append(UnixctlConnection(rpc))
        elif error == errno.EAGAIN:
            # No more pending connections right now.
            break
        else:
            # XXX: rate-limit
            vlog.warn("%s: accept failed: %s" % (self._listener.name,
                                                 os.strerror(error)))

    # Iterate over a copy so that closed connections can be removed from
    # the live list mid-loop.
    for conn in copy.copy(self._conns):
        error = conn.run()
        if error and error != errno.EAGAIN:
            conn._close()
            self._conns.remove(conn)
def transact(self, command, argv):
    """Send unixctl 'command' with string arguments 'argv' and block for the
    reply.

    Returns (error, cmd_error, cmd_result): a communication errno, the
    server-reported error string, or the successful result string — exactly
    one of which is meaningful."""
    assert isinstance(command, strtypes)
    assert isinstance(argv, list)
    assert all(isinstance(arg, strtypes) for arg in argv)

    request = Message.create_request(command, argv)
    err, reply = self._conn.transact_block(request)

    if err:
        vlog.warn("error communicating with %s: %s"
                  % (self._conn.name, os.strerror(err)))
        return err, None, None

    if reply.error is not None:
        return 0, str(reply.error), None
    assert reply.result is not None
    return 0, None, str(reply.result)
def _monitor_daemon(daemon_pid):
    """Monitor the daemon child process and restart it if it dies abnormally.

    Restarts are throttled to at most one every 10 seconds.  Exits the
    monitor process when the daemon terminates normally or a restart fork
    hands control back to the child.
    """
    # XXX should log daemon's stderr output at startup time
    # XXX should use setproctitle module if available
    last_restart = None
    while True:
        retval, status = _waitpid(daemon_pid, 0)
        if retval < 0:
            sys.stderr.write("waitpid failed\n")
            sys.exit(1)
        elif retval == daemon_pid:
            status_msg = ("pid %d died, %s"
                          % (daemon_pid, ovs.process.status_msg(status)))

            if _should_restart(status):
                if os.WCOREDUMP(status) and sys.platform != "win32":
                    import resource
                    # Disable further core dumps to save disk space.
                    try:
                        resource.setrlimit(resource.RLIMIT_CORE, (0, 0))
                    except resource.error:
                        vlog.warn("failed to disable core dumps")

                # Throttle restarts to no more than once every 10 seconds.
                if (last_restart is not None and
                        ovs.timeval.msec() < last_restart + 10000):
                    vlog.warn("%s, waiting until 10 seconds since last "
                              "restart" % status_msg)
                    while True:
                        now = ovs.timeval.msec()
                        wakeup = last_restart + 10000
                        if now > wakeup:
                            break
                        sys.stdout.write("sleep %f\n" % (
                            (wakeup - now) / 1000.0))
                        time.sleep((wakeup - now) / 1000.0)
                last_restart = ovs.timeval.msec()

                vlog.err("%s, restarting" % status_msg)
                daemon_pid = fork_and_wait_for_startup()
                if not daemon_pid:
                    # We are the new child: leave the monitor loop and let
                    # the caller continue as the daemon.
                    break
            else:
                vlog.info("%s, exiting" % status_msg)
                sys.exit(0)
def __process_msg(self):
    """Convert the finished parser output into a Message.

    Returns the Message on success; on a parse or validation failure logs a
    warning, records EPROTO on the connection, and returns None.
    (Python 2 era code: checks for both str and unicode.)
    """
    json = self.parser.finish()
    self.parser = None
    if type(json) in [str, unicode]:
        # The parser reports errors as a plain string.
        # XXX rate-limit
        vlog.warn("%s: error parsing stream: %s" % (self.name, json))
        self.error(errno.EPROTO)
        return

    msg = Message.from_json(json)
    if not isinstance(msg, Message):
        # from_json() appears to return a diagnostic instead of a Message
        # on failure — presumably a string; verify against Message.from_json.
        # XXX rate-limit
        vlog.warn("%s: received bad JSON-RPC message: %s"
                  % (self.name, msg))
        self.error(errno.EPROTO)
        return

    self.__log_msg("received", msg)
    return msg
def _monitor_daemon(daemon_pid):
    """Monitor the daemon child process and restart it if it dies abnormally.

    Restarts are throttled to at most one every 10 seconds.  Exits the
    monitor process when the daemon terminates normally or a restart fork
    hands control back to the child.
    """
    # XXX should log daemon's stderr output at startup time
    # XXX should use setproctitle module if available
    last_restart = None
    while True:
        retval, status = _waitpid(daemon_pid, 0)
        if retval < 0:
            sys.stderr.write("waitpid failed\n")
            sys.exit(1)
        elif retval == daemon_pid:
            status_msg = ("pid %d died, %s"
                          % (daemon_pid, ovs.process.status_msg(status)))

            if _should_restart(status):
                if sys.platform != 'win32' and os.WCOREDUMP(status):
                    # Disable further core dumps to save disk space.
                    try:
                        resource.setrlimit(resource.RLIMIT_CORE, (0, 0))
                    except resource.error:
                        vlog.warn("failed to disable core dumps")

                # Throttle restarts to no more than once every 10 seconds.
                if (last_restart is not None and
                        ovs.timeval.msec() < last_restart + 10000):
                    vlog.warn("%s, waiting until 10 seconds since last "
                              "restart" % status_msg)
                    while True:
                        now = ovs.timeval.msec()
                        wakeup = last_restart + 10000
                        if now > wakeup:
                            break
                        sys.stdout.write("sleep %f\n"
                                         % ((wakeup - now) / 1000.0))
                        time.sleep((wakeup - now) / 1000.0)
                last_restart = ovs.timeval.msec()

                vlog.err("%s, restarting" % status_msg)
                daemon_pid = _fork_and_wait_for_startup()
                if not daemon_pid:
                    # We are the new child: leave the monitor loop and let
                    # the caller continue as the daemon.
                    break
            else:
                vlog.info("%s, exiting" % status_msg)
                sys.exit(0)
def recv(self):
    """Try to receive one complete JSON-RPC message.

    Returns (0, msg), (errno, None) — EAGAIN when no complete message is
    available yet — or (EOF, None) on peer close.  Maps Windows
    WSAEWOULDBLOCK onto EAGAIN for a uniform caller interface.
    """
    if self.status:
        return self.status, None

    while True:
        if not self.input:
            error, data = self.stream.recv(4096)
            # Python 3 has separate types for strings and bytes.  We
            # received bytes from a socket.  We expect it to be string
            # data, so we convert it here as soon as possible.
            if (data and not error
                    and not isinstance(data, six.string_types)):
                try:
                    data = data.decode('utf-8')
                except UnicodeError:
                    error = errno.EILSEQ
            if error:
                if sys.platform == "win32" and error == errno.WSAEWOULDBLOCK:
                    # Windows' equivalent of EAGAIN.
                    error = errno.EAGAIN
                if error == errno.EAGAIN:
                    return error, None
                else:
                    # XXX rate-limit
                    vlog.warn("%s: receive error: %s"
                              % (self.name, os.strerror(error)))
                    self.error(error)
                    return self.status, None
            elif not data:
                # Zero-length read: peer closed the connection.
                self.error(EOF)
                return EOF, None
            else:
                self.input += data
                self.received_bytes += len(data)
        else:
            # Feed buffered input to the incremental JSON parser; keep any
            # unconsumed tail for the next message.
            if self.parser is None:
                self.parser = ovs.json.Parser()
            self.input = self.input[self.parser.feed(self.input):]
            if self.parser.is_done():
                msg = self.__process_msg()
                if msg:
                    return 0, msg
                else:
                    # __process_msg() failed and set self.status.
                    return self.status, None
def __process_msg(self):
    """Finish the incremental JSON parse and turn the result into a Message.

    Returns the Message on success; on a parse or validation failure logs a
    warning, records EPROTO on the connection, and returns None."""
    parsed = self.parser.finish()
    self.parser = None
    if isinstance(parsed, six.string_types):
        # The parser reports errors as a plain string.
        # XXX rate-limit
        vlog.warn("%s: error parsing stream: %s" % (self.name, parsed))
        self.error(errno.EPROTO)
        return

    message = Message.from_json(parsed)
    if not isinstance(message, Message):
        # from_json presumably returned a diagnostic instead of a Message.
        # XXX rate-limit
        vlog.warn("%s: received bad JSON-RPC message: %s"
                  % (self.name, message))
        self.error(errno.EPROTO)
        return

    self.__log_msg("received", message)
    return message
def recv(self):
    """Try to receive one complete JSON-RPC message.

    Returns (0, msg), (errno, None) — EAGAIN when no complete message is
    available yet — or (EOF, None) when the peer closed the connection.
    """
    if self.status:
        return self.status, None

    while True:
        if not self.input:
            error, data = self.stream.recv(4096)
            # Python 3 has separate types for strings and bytes.  We
            # received bytes from a socket.  We expect it to be string
            # data, so we convert it here as soon as possible.
            if (data and not error
                    and not isinstance(data, six.string_types)):
                try:
                    data = data.decode('utf-8')
                except UnicodeError:
                    error = errno.EILSEQ
            if error:
                if error == errno.EAGAIN:
                    return error, None
                else:
                    # XXX rate-limit
                    vlog.warn("%s: receive error: %s"
                              % (self.name, os.strerror(error)))
                    self.error(error)
                    return self.status, None
            elif not data:
                # Zero-length read: peer closed the connection.
                self.error(EOF)
                return EOF, None
            else:
                self.input += data
                self.received_bytes += len(data)
        else:
            # Feed buffered input to the incremental JSON parser; keep any
            # unconsumed tail for the next message.
            if self.parser is None:
                self.parser = ovs.json.Parser()
            self.input = self.input[self.parser.feed(self.input):]
            if self.parser.is_done():
                msg = self.__process_msg()
                if msg:
                    return 0, msg
                else:
                    # __process_msg() failed and set self.status.
                    return self.status, None
def __read_pidfile(pidfile, delete_if_stale):
    """Read the pid recorded in 'pidfile'.

    Returns our own pid immediately when the pidfile is the one this process
    owns (identified by cached device/inode), 0 when the file is absent and
    'delete_if_stale' is set, or a negative errno on open failure.
    (Python 2 era code; this view may be a truncated excerpt of a longer
    function — the success path after open() is not visible here.)
    """
    if _pidfile_dev is not None:
        try:
            s = os.stat(pidfile)
            if s.st_ino == _pidfile_ino and s.st_dev == _pidfile_dev:
                # It's our own pidfile.  We can't afford to open it,
                # because closing *any* fd for a file that a process
                # has locked also releases all the locks on that file.
                #
                # Fortunately, we know the associated pid anyhow.
                return os.getpid()
        except OSError:
            pass

    try:
        file_handle = open(pidfile, "r+")
    except IOError, e:
        if e.errno == errno.ENOENT and delete_if_stale:
            # Missing pidfile counts as "no daemon" when the caller asked
            # for stale cleanup.
            return 0
        vlog.warn("%s: open: %s" % (pidfile, e.strerror))
        return -e.errno
def __process_update(self, table, uuid, old, new):
    """Returns True if a column changed, False otherwise.

    Applies a single monitor update for row 'uuid' in 'table': delete when
    'new' is empty, insert when 'old' is empty, otherwise modify in place.
    Inconsistent updates (deleting a missing row, inserting an existing one)
    are warned about and handled as gracefully as possible.
    """
    row = table.rows.get(uuid)
    changed = False
    if not new:
        # Delete row.
        if row:
            del table.rows[uuid]
            changed = True
        else:
            # XXX rate-limit
            vlog.warn("cannot delete missing row %s from table %s"
                      % (uuid, table.name))
    elif not old:
        # Insert row.
        if not row:
            row = self.__create_row(table, uuid)
            changed = True
        else:
            # XXX rate-limit
            vlog.warn("cannot add existing row %s to table %s"
                      % (uuid, table.name))
        if self.__row_update(table, row, new):
            changed = True
    else:
        # Modify row.
        if not row:
            # The row should exist; recreate it so the update can proceed.
            row = self.__create_row(table, uuid)
            changed = True
            # XXX rate-limit
            vlog.warn("cannot modify missing row %s in table %s"
                      % (uuid, table.name))
        if self.__row_update(table, row, new):
            changed = True
    return changed
def run(self):
    """Accept up to ten new unixctl connections and service existing ones.

    Connections whose run() reports a hard error are closed and dropped.
    Windows' WSAEWOULDBLOCK is normalized to EAGAIN before handling.
    """
    for _ in range(10):
        error, stream = self._listener.accept()
        if sys.platform == "win32" and error == errno.WSAEWOULDBLOCK:
            # WSAEWOULDBLOCK would be the equivalent on Windows
            # for EAGAIN on Unix.
            error = errno.EAGAIN
        if not error:
            rpc = ovs.jsonrpc.Connection(stream)
            self._conns.append(UnixctlConnection(rpc))
        elif error == errno.EAGAIN:
            # No more pending connections right now.
            break
        else:
            # XXX: rate-limit
            vlog.warn("%s: accept failed: %s" % (self._listener.name,
                                                 os.strerror(error)))

    # Iterate a copy so broken connections can be removed mid-loop.
    for conn in copy.copy(self._conns):
        error = conn.run()
        if error and error != errno.EAGAIN:
            conn._close()
            self._conns.remove(conn)
def write_fully(fd, buf):
    """Returns an (error, bytes_written) tuple where 'error' is 0 on success,
    otherwise a positive errno value, and 'bytes_written' is the number of
    bytes that were written before the error occurred.  'error' is 0 if and
    only if 'bytes_written' is len(buf)."""
    bytes_written = 0
    if len(buf) == 0:
        return 0, 0
    while True:
        try:
            retval = os.write(fd, buf)
            assert retval >= 0
            if retval == len(buf):
                return 0, bytes_written + len(buf)
            elif retval == 0:
                # POSIX write() should never return 0 for a nonzero count.
                vlog.warn("write returned 0")
                return errno.EPROTO, bytes_written
            else:
                bytes_written += retval
                # BUG FIX: was 'buf = buf[:retval]', which kept the
                # already-written prefix and discarded the unwritten tail —
                # partial writes then re-sent the wrong bytes forever.
                buf = buf[retval:]
        except OSError as e:
            # (Also modernized 'except OSError, e' to the 2.6+/3.x-
            # compatible 'as' form.)
            return e.errno, bytes_written
def update_vip(self, event):
    """Handle a kubernetes service event by updating the service cache and,
    on deletion, removing the service's VIP.

    Only ClusterIP and NodePort services are handled; others are ignored
    with a warning.  (This view may be a truncated excerpt — non-DELETED
    event handling is not visible here.)
    """
    service_data = event.metadata
    service_type = service_data['spec'].get('type')
    service_name = service_data['metadata']['name']
    vlog.dbg("update_vip: received service data %s" % (service_data))

    # We only care about services that are of type 'clusterIP' and
    # 'nodePort'.
    if service_type != "ClusterIP" and service_type != "NodePort":
        vlog.warn("ignoring unsupported service %s of type %s"
                  % (service_name, service_type))
        return

    event_type = event.event_type
    namespace = service_data['metadata']['namespace']
    cache_key = "%s_%s" % (namespace, service_name)
    self._update_service_cache(event_type, cache_key, service_data)

    if event.event_type == "DELETED":
        vlog.dbg("received service delete event.")
        # None endpoints signals VIP removal downstream.
        self._update_vip(service_data, None)
def write_fully(fd, buf):
    """Returns an (error, bytes_written) tuple where 'error' is 0 on success,
    otherwise a positive errno value, and 'bytes_written' is the number of
    bytes that were written before the error occurred.  'error' is 0 if and
    only if 'bytes_written' is len(buf)."""
    bytes_written = 0
    if len(buf) == 0:
        return 0, 0
    if sys.version_info[0] >= 3 and not isinstance(buf, bytes):
        # os.write() requires bytes on Python 3.  (six.binary_type is
        # exactly 'bytes' on Python 3 and this branch only runs there, so
        # the six alias was dropped with identical behavior.)
        buf = bytes(buf, 'utf-8')
    while True:
        try:
            retval = os.write(fd, buf)
            assert retval >= 0
            if retval == len(buf):
                return 0, bytes_written + len(buf)
            elif retval == 0:
                # POSIX write() should never return 0 for a nonzero count.
                vlog.warn("write returned 0")
                return errno.EPROTO, bytes_written
            else:
                bytes_written += retval
                # BUG FIX: was 'buf = buf[:retval]', which kept the
                # already-written prefix instead of the unwritten tail.
                buf = buf[retval:]
        except OSError as e:
            return e.errno, bytes_written
def write_fully(fd, buf):
    """Returns an (error, bytes_written) tuple where 'error' is 0 on success,
    otherwise a positive errno value, and 'bytes_written' is the number of
    bytes that were written before the error occurred.  'error' is 0 if and
    only if 'bytes_written' is len(buf)."""
    bytes_written = 0
    if len(buf) == 0:
        return 0, 0
    if sys.version_info[0] >= 3 and not isinstance(buf, bytes):
        # os.write() requires bytes on Python 3.  (six.binary_type is
        # exactly 'bytes' on Python 3 and this branch only runs there, so
        # the six alias was dropped with identical behavior.)
        buf = bytes(buf, "utf-8")
    while True:
        try:
            retval = os.write(fd, buf)
            assert retval >= 0
            if retval == len(buf):
                return 0, bytes_written + len(buf)
            elif retval == 0:
                # POSIX write() should never return 0 for a nonzero count.
                vlog.warn("write returned 0")
                return errno.EPROTO, bytes_written
            else:
                bytes_written += retval
                # BUG FIX: was 'buf = buf[:retval]', which kept the
                # already-written prefix instead of the unwritten tail.
                buf = buf[retval:]
        except OSError as e:
            return e.errno, bytes_written
def __process_inc_reply(self, ops):
    """Extract the post-increment column value from the "mutate"+"select"
    operation pair at self._inc_index in the reply array 'ops'.

    On success stores the value in self._inc_new_value and returns True;
    returns False on any malformed reply.  (Python 2 era code: keeps the
    (int, long) type tuples.)
    """
    if self._inc_index + 2 > len(ops):
        # XXX rate-limit
        vlog.warn("reply does not contain enough operations for "
                  "increment (has %d, needs %d)" %
                  (len(ops), self._inc_index + 2))
        # BUG FIX: the warning previously fell through without returning,
        # so the code below indexed 'ops' out of range.
        return False

    # We know that this is a JSON object because the loop in
    # __process_reply() already checked.
    mutate = ops[self._inc_index]
    count = mutate.get("count")
    if not Transaction.__check_json_type(count, (int, long),
                                         '"mutate" reply "count"'):
        return False
    if count != 1:
        # XXX rate-limit
        vlog.warn('"mutate" reply "count" is %d instead of 1' % count)
        return False

    select = ops[self._inc_index + 1]
    rows = select.get("rows")
    if not Transaction.__check_json_type(rows, (list, tuple),
                                         '"select" reply "rows"'):
        return False
    if len(rows) != 1:
        # XXX rate-limit
        vlog.warn('"select" reply "rows" has %d elements '
                  'instead of 1' % len(rows))
        return False

    row = rows[0]
    if not Transaction.__check_json_type(row, (dict, ),
                                         '"select" reply row'):
        return False

    column = row.get(self._inc_column)
    if not Transaction.__check_json_type(column, (int, long),
                                         '"select" reply inc column'):
        return False
    self._inc_new_value = column
    return True
def disconnected(self, now, error):
    """Tell this FSM that the connection dropped or that a connection
    attempt failed.

    'error' specifies the reason: a positive value represents an errno
    value, EOF indicates that the connection was closed by the peer (e.g.
    read() returned 0), and 0 indicates no specific error.

    The FSM will back off, then reconnect."""
    if self.state not in (Reconnect.Backoff, Reconnect.Void):
        # Report what happened
        if self.state in (Reconnect.Active, Reconnect.Idle):
            # We had an established connection.
            if error > 0:
                vlog.warn("%s: connection dropped (%s)"
                          % (self.name, os.strerror(error)))
            elif error == EOF:
                self.info_level("%s: connection closed by peer"
                                % self.name)
            else:
                self.info_level("%s: connection dropped" % self.name)
        elif self.state == Reconnect.Listening:
            if error > 0:
                vlog.warn("%s: error listening for connections (%s)"
                          % (self.name, os.strerror(error)))
            else:
                self.info_level("%s: error listening for connections"
                                % self.name)
        elif self.backoff < self.max_backoff:
            # A connect/listen attempt failed; only log while below the
            # backoff cap to avoid log spam.
            if self.passive:
                type_ = "listen"
            else:
                type_ = "connection"
            if error > 0:
                vlog.warn("%s: %s attempt failed (%s)"
                          % (self.name, type_, os.strerror(error)))
            else:
                self.info_level("%s: %s attempt timed out"
                                % (self.name, type_))

        if (self.state in (Reconnect.Active, Reconnect.Idle)):
            self.last_disconnected = now

        if not self.__may_retry():
            # Retry budget exhausted: give up permanently.
            self._transition(now, Reconnect.Void)
            return

        # Back off
        if self.backoff_free_tries > 1:
            # A free retry is available: reconnect immediately.
            self.backoff_free_tries -= 1
            self.backoff = 0
        elif (self.state in (Reconnect.Active, Reconnect.Idle)
                and (self.last_activity - self.last_connected >= self.backoff
                     or self.passive)):
            # The connection stayed up long enough to count as healthy, so
            # restart the backoff from its minimum.
            if self.passive:
                self.backoff = 0
            else:
                self.backoff = self.min_backoff
        else:
            if self.backoff < self.min_backoff:
                self.backoff = self.min_backoff
            elif self.backoff < self.max_backoff / 2:
                # Exponential backoff, announced while still growing.
                self.backoff *= 2
                if self.passive:
                    action = "trying to listen again"
                else:
                    action = "reconnect"
                self.info_level("%s: waiting %.3g seconds before %s"
                                % (self.name, self.backoff / 1000.0,
                                   action))
            else:
                if self.backoff < self.max_backoff:
                    # Cap reached: log once more, then go quiet.
                    if self.passive:
                        action = "try to listen"
                    else:
                        action = "reconnect"
                    self.info_level("%s: continuing to %s in the "
                                    "background but suppressing further "
                                    "logging" % (self.name, action))
                    self.backoff = self.max_backoff
        self._transition(now, Reconnect.Backoff)