def __set_user_password(self, username, password):
    """Set a local user's login password in the mgmtd config tree.

    The password is validated, encrypted with either SHA-512 or the
    legacy scheme (depending on the sha_password config flag), and
    written to the user's password node.

    Raises ServiceError if validation fails or the set is rejected.
    """
    # XXX/jshilkaitis: lame, should be an action on the mgmtd side
    # XXX/jshilkaitis: why doesn't the framework do validation?
    if not common.lc_password_validate(password):
        # XXX/jshilkaitis: hardcode the reason for now, since we know
        # length is the only criterion, but that may not be true in the
        # future.
        raise ServiceError('Password must contain at least 6 characters')

    # choose the hashing scheme based on appliance configuration
    if Mgmt.get_value('/rbt/support/config/sha_password/enable'):
        encrypted = common.sha_encrypt_password(False, password)
    else:
        encrypted = common.ltc_encrypt_password(password)

    node = '/auth/passwd/user/%s/password' % username
    code, msg = Mgmt.set((node, 'string', encrypted))
    if code != 0:
        raise ServiceError(msg)
def reboot(self):
    """
    reboot(auth) -> None

    Reboot the appliance.

    Exceptions:
    rebootFault - Unable to verify user permissions
    rebootFault - Authorization failure: unable to complete action for user
    """
    reboot_node = '/pm/actions/reboot'
    # raises ServiceError if the caller lacks action permission on the node
    check_rbm_permissions(reboot_node, RBM_ACTION)
    # XXX/jshilkaitis: _highly_ questionable implementation, but we need
    # something now. Flaws: relies on arbitrary 3 second sleep, and does
    # not report an error to the client if the reboot action fails. The
    # action failing should be rare enough to not worry about, though.
    if os.fork() == 0:
        # child process: delay, then trigger the reboot
        time.sleep(3) # give parent SOAP server time to respond
        # we fork so that the parent SOAP server can respond to the
        # client. The following action does not return if it is
        # successful.
        # NOTE(review): if the action DOES fail and return, the child
        # falls off the end and keeps running as a forked copy of the
        # SOAP server — presumably harmless but worth confirming.
        Mgmt.action(reboot_node)
def getTACACSAuthorizationCfged():
    """
    @returns true if TACACS+ is configured as an authorization method and
    that there is a valid TACACS+ server configured.
    """
    # check both configured command-authorization method slots
    methods = (Mgmt.get_value("/aaa/cmd_author_method/1/name"),
               Mgmt.get_value("/aaa/cmd_author_method/2/name"))
    return 'tacacs+' in methods
def getTACACSAccountingCfged():
    """
    @returns true if TACACS+ is configured as an accounting method and
    that there is a valid TACACS+ server configured.
    """
    # check both configured command-audit method slots
    methods = (Mgmt.get_value("/aaa/cmd_audit_method/1/name"),
               Mgmt.get_value("/aaa/cmd_audit_method/2/name"))
    return 'tacacs+' in methods
def __call__(self, *args, **kw):
    """Authenticate the SOAP caller via PAM, drop effective privileges
    to the caller's uid/gid, then invoke the wrapped function inside an
    open Mgmt session.

    args[1] is expected to be the auth_info structure (username,
    password); it is removed from the argument list before the wrapped
    function is called.

    Raises ServiceError if the management backend cannot be reached;
    re-raises any PAM authentication failure.
    """
    auth_info = args[1]

    # the (de-)serializer turns the empty string into None, which
    # ctypes will then turn into NULL. This will crash the
    # pam_conversation function, so we protect against it here.
    if auth_info.password is None:
        auth_info.password = ""

    try:
        # XXX/jshilkaitis: is using wsmd okay here?
        pamh = pam.PamHandle("wsmd", auth_info.username,
                             auth_info.password)
        # XXX/jshilkaitis: check to see if there are flags we want here
        pamh.authenticate(0)
        # XXX/jshilkaitis: flag check
        pamh.acct_mgmt(0)
    except:
        log(LOG_NOTICE, 'User %s failed to authenticate via the SOAP '
            'server.' % auth_info.username)
        raise
    else:
        # fix: message previously read "succesfully" (typo)
        log(LOG_NOTICE, 'User %s successfully authenticated via the '
            'SOAP server.' % auth_info.username)

    # XXX/jshilkaitis: We need to ask PAM for the local username
    # and use that to get the proper uid/gid.
    # XXX/jshilkaitis: is username the LOCAL username?
    # We need to make sure that Tac+ and RADIUS auths play
    # nicely with the setuid/setgid.
    pwd_db_entry = pwd.getpwnam(auth_info.username)
    uid = pwd_db_entry[2]
    gid = pwd_db_entry[3]
    # group must be changed before uid, or we may lose the privilege
    # needed to change it
    os.setegid(gid)
    os.seteuid(uid)

    # strip out auth_info
    args = list(args)
    args.pop(1)

    try:
        try:
            Mgmt.open()
        except:
            raise ServiceError("Unable to connect to the "
                               "management backend.")
        return self.func(*args, **kw)
    finally:
        Mgmt.close()
def is_interceptor_in_cluster():
    """Return True if any configured neighbor reports itself as an
    interceptor, False otherwise."""
    base = '/rbt/sport/intercept'
    neighbor_names = Mgmt.get_children(base + '/config/neighbor/name')[0]
    for neighbor in neighbor_names:
        node = '%s/neighbor/%s/is_interceptor' % (base, neighbor)
        if Mgmt.get_value(node) == 'true':
            return True
    return False
def __start_sport(self):
    """Restart the sport service unless it is already running.

    Raises ServiceError if the state cannot be read or the restart
    action fails.
    """
    state = Mgmt.get_value('/pm/monitor/process/sport/state')
    if state is None:
        raise ServiceError("Could not determine the service's state")
    if state == 'running':
        return
    code, msg, _ = Mgmt.action('/rbt/sport/main/action/restart_service')
    if code != 0:
        raise ServiceError(msg)
def start_vmware_vmx(path):
    """! Start vmware-vmx with given vm

    Remounts the VSP overhead ramfs fresh, optionally injects the
    performance-tweaks LD_PRELOAD library when memlock is enabled,
    selects the vmx binary based on the ESXi debug option, then runs
    it and blocks until it exits.

    path - path of the vm to start (passed to vmware-vmx -qx)
    """
    Logging.log(Logging.LOG_INFO, "Starting vm %s" % path)
    vsp_ramfs = RamFs.RamFs(vsp_ramfs_path)
    if vsp_ramfs.is_mounted():
        # we generally should not hit this path, we unmount the ramfs when
        # we stop vmware-vmx
        Logging.log(Logging.LOG_INFO,
                    "VSP ramfs is already mounted %s, unmounting" % \
                    vsp_ramfs_path)
        try:
            vsp_ramfs.unmount_ramfs()
        except RamFs.RamFsCmdException as e:
            # we'll proceed with starting vmx even if we can't unmount
            Logging.log(Logging.LOG_ERR, e.msg)
    if not vsp_ramfs.is_mounted():
        try:
            vsp_ramfs.mount_ramfs(vsp_ovhd_ramfs_min_size_mb)
        except (OSError, RamFs.RamFsCmdException) as e:
            Logging.log(Logging.LOG_ERR, str(e))
            Logging.log(Logging.LOG_ERR, "Unable to create ramfs %s" \
                        " not starting VMX" % vsp_ramfs_path)
            # skip starting VMX, the caller will look for vmx status
            return
    # Link in performance tweaks library
    env_dict = os.environ.copy()
    # short-lived Mgmt session: only needed for the memlock and debug
    # option queries below
    Mgmt.open()
    if Vsp.is_memlock_enabled():
        # prepend our library, preserving any existing LD_PRELOAD
        if env_dict.has_key("LD_PRELOAD"):
            env_dict["LD_PRELOAD"] = vmperf_path + " " + env_dict["LD_PRELOAD"]
        else:
            env_dict["LD_PRELOAD"] = vmperf_path
    # Check the ESXi debug option to see which binary we need to run
    vmx_option = get_debug_option()
    Mgmt.close()
    binary_path = option_to_path[vmx_option]
    Logging.log(Logging.LOG_DEBUG, "BINARY PATH: %s" % binary_path)
    # blocks until the vmx process exits
    pobj = subprocess.Popen([binary_path, "-qx", path], env = env_dict)
    pobj.wait()
def set_top_talkers_interval(self, interval):
    """
    set_top_talkers_interval(auth, interval) -> None

    Set the Top Talkers collection period.

    Parameters:
    interval (integer) - Interval in hours (must be 24 or 48)

    Exceptions:
    set_top_talkers_intervalFault - Interval must be 24 or 48 hours
    """
    if interval not in (24, 48):
        raise ServiceError('Top Talkers interval must be 24 or 48 hours')

    # convert collection period to snapshot interval (24->300, 48->600)
    snapshot_interval = interval / 24 * 300
    code, msg = Mgmt.set(
        ('/rbt/sport/netflow/config/top_talkers/snapshot_interval',
         'duration_sec', snapshot_interval))
    if code != 0:
        raise ServiceError(msg)
def get_stats_internal(self, report_name, num_sets, start_time, end_time,
                       subclass=None):
    """Run a stats report-generation action and decode its output.

    report_name - name of the report action under /stats/actions
    num_sets    - number of data-value sets expected in the results
    start_time, end_time - query window (converted to local time)
    subclass    - optional uint32 report subclass binding

    Raises ServiceError if the action fails.
    """
    action_bindings = [
        ('time_lb', 'datetime_sec', time_to_local_time(start_time)),
        ('time_ub', 'datetime_sec', time_to_local_time(end_time)),
    ]
    if subclass is not None:
        action_bindings.append(('subclass', 'uint32', subclass))

    code, msg, out_bindings = Mgmt.action(
        '/stats/actions/generate_report/%s' % report_name,
        *action_bindings)
    if code != 0:
        raise ServiceError(msg)

    # save off the file name so we can remove it in self.onReturn()
    report_results = out_bindings['results']
    return bulk_stats.bsp_to_data_val_sets(report_results, num_sets)
def get_pfs_stats(self, start_time, end_time, share_name):
    """
    get_pfs_stats(auth, start_time, end_time, share_name) ->
    [ [share size], [bytes received], [bytes sent] ]

    Fetch the system's PFS statistics.

    Parameters:
    start_time (datetime) - start of the stats query period
    end_time (datetime) - end of the stats query period
    share_name (string) - share whose stats are desired or 'all' for
    the sum of all shares

    Exceptions:
    get_pfs_statsFault - Datetime not in known format
    get_pfs_statsFault - Unknown share name
    """
    if share_name == 'all':
        # share id 0 aggregates all shares
        share_id = 0
    else:
        share_id = Mgmt.get_value('/rbt/rcu/share/%s/config/id' % share_name)
        if share_id is None:
            raise ServiceError('Unknown share name: %s' % share_name)
    return self.get_stats_internal('pfs', 3, start_time, end_time, share_id)
def __set_banner(self, node_list, motd):
    """Helper function for setting motd/login banner"""
    # write the same banner text to every node in one set call
    code, msg = Mgmt.set(*[(node, 'string', motd) for node in node_list])
    if code != 0:
        raise ServiceError(msg)
def check_rbm_permissions(node_name, write_needed):
    """
    Raises an exception if the user does not have at least the permissions
    specified by write_needed for the node specified by node_name.
    A questionable interface, but it works well for the SOAP server.
    """
    import pwd
    username = pwd.getpwuid(os.geteuid())[0]

    # XXX/jshilkaitis: a small hack to get around the fact that the RBM
    # nodes verify action doesn't do what I want for admin or monitor.
    # I should figure out a cleaner way to do this in the future.
    if username in ('admin', 'monitor'):
        return

    code, msg, bindings = Mgmt.action('/rbm/action/nodes/verify',
                                      ('username', 'string', username),
                                      ('nodenames', 'string', node_name))
    if code != 0:
        raise ServiceError(msg)

    try:
        perms = bindings['permissions']
    except:
        raise ServiceError('Server Error: Unable to authorize current user')

    # 'write' always suffices; 'read' suffices only when write is not needed
    allowed = perms == 'write' or (not write_needed and perms == 'read')
    if not allowed:
        # XXX/jshilkaitis: make this match the message returned by the backend
        # someday. Same goes for the CLI.
        raise ServiceError("Insufficient permissions for command execution.")
def generate_mgmt_event(event, reason, version_info):
    """! Generate Mgmt event.

    event        - READY_EVENT or DISCONNECTED_EVENT
    reason       - disconnect reason (used only for DISCONNECTED_EVENT)
    version_info - version string (used only for READY_EVENT)

    Exits the process (non-zero) if the event is unrecognized.
    """
    if event == READY_EVENT:
        binding_to_send = ("version_info", "string", version_info)
    elif event == DISCONNECTED_EVENT:
        binding_to_send = ("reason", "string", reason)
    else:
        Logging.log(Logging.LOG_ERR, "Unknown event")
        # suicide! fix: the original bare sys.exit() reported success
        # (exit status 0) on this error path; exit non-zero instead
        sys.exit(1)
    Mgmt.event(event, binding_to_send)
def main():
    """! Entry point to the watchdog.

    Initializes logging, waits for mgmtd to settle to a single pid,
    then opens a Mgmt session and monitors ESXi until terminated.
    """
    global g_mgmtd_pid
    g_mgmtd_pid = None

    Logging.log_init('esxi_watchdog', 'esxi_watchdog', 0,
                     Logging.component_id(Logging.LCI_VSP),
                     Logging.LOG_DEBUG, Logging.LOG_LOCAL0,
                     Logging.LCT_SYSLOG)
    Logging.log(Logging.LOG_INFO, "esxi watchdog started")

    # Bug 117274: It may happen that we get multiple pids for mgmtd process,
    # pidof ran between fork-exec call, retry to allow mgmtd to settle
    # (range(MAX) gives the full retry count; the original range(1, MAX)
    # silently did one fewer iteration)
    for _ in range(MAX_MGMTD_SETTLE_RETRY):
        mgmtd_pids = Vsp.get_pids('mgmtd')
        if len(mgmtd_pids) == 1:
            g_mgmtd_pid = mgmtd_pids[0]
            break
        # zero pids (mgmtd not started yet) or multiple pids (caught mid
        # fork/exec): sleep and retry. Fix: the original indexed
        # mgmtd_pids[0] even when the list was empty, raising IndexError.
        time.sleep(MGMTD_SETTLE_TIMEOUT)

    # Bug 112192: monitor mgmtd pid, if mgmtd crashes/exits
    # terminate watchdog as well
    if g_mgmtd_pid is None:
        # mgmtd not up kill watchdog process; exit non-zero so callers
        # can see the failure
        Logging.log(Logging.LOG_ERR, "Mgmtd is not ready, kill watchdog!")
        sys.exit(1)

    Mgmt.open()
    signal.signal(signal.SIGINT, terminate_handler)
    signal.signal(signal.SIGTERM, terminate_handler)
    signal.signal(signal.SIGQUIT, terminate_handler)

    # Invalidate the session file if it exists on startup
    if os.path.exists(SESSION_FILE):
        os.remove(SESSION_FILE)

    monitor_esxi()
    Mgmt.close()
def getNTPAuthEnabled():
    """
    @returns true if any enabled NTP server has a valid key configured.
    """
    for host in Mgmt.get_pattern('/ntp/server/address/*'):
        addr = host[2]
        key = Mgmt.get_value('/ntp/server/address/%s/key' % addr)
        enabled = Mgmt.get_value('/ntp/server/address/%s/enable' % addr)
        # key '0' means "no key"; the key must also exist under /ntp/keys
        if key != '0' and enabled == 'true' \
                and Mgmt.get_value('/ntp/keys/%s' % key):
            return True
    return False
def __set_alarm_threshold(self, alarm, rising_or_falling, error_or_clear,
                          val):
    """Set one alarm threshold config node.

    The node's mgmt type is looked up dynamically so the set call uses
    the correct type for this particular threshold.
    """
    alarm = alarm.lower()
    self.__assert_alarm_exists(alarm)

    node = '/stats/config/alarm/%s/%s/%s_threshold' % (
        alarm, rising_or_falling.lower(), error_or_clear.lower())

    # should never fail because we ensure that the alarm exists
    node_type = Mgmt.query(node)[0][1]
    code, msg = Mgmt.set((node, node_type, val))
    if code != 0:
        raise ServiceError(msg)
def is_memlock_enabled():
    """! Queries mgmtd to determine if workstation memlock is turned on

    Returns False (and logs an error) when the config node cannot be
    read.
    """
    value = Mgmt.get_value("/rbt/vsp/config/memlock/enable")
    if not value:
        # missing or empty node: treat as disabled
        Logging.log(Logging.LOG_ERR, "Cannot determine if memlock is enabled")
        return False
    return value == "true"
def set_snmp_password(self, username, password, auth_protocol):
    """
    set_snmp_password(auth, username, password, auth_protocol) -> None

    Configure a SNMP user.

    Parameters:
    username - username of user to configure
    password - user's new password
    auth_protocol - protocol used to encrypt the password

    Exceptions:
    set_snmp_passwordFault - password must contain at least 8 characters
    set_snmp_passwordFault - XXX/BUG 47861/48172 for bad auth_protocol value
    """
    # XXX/jshilkaitis: lame, should be an action on the mgmtd side
    # 8 is USM_LENGTH_P_MIN from net-snmp
    if len(password) < 8:
        raise ServiceError("password must contain at least 8 characters")

    proto = auth_protocol.upper()

    # derive the USM auth key from the password
    code, msg, bindings = Mgmt.action(
        '/snmp/usm/actions/generate_auth_key',
        ('password', 'string', password),
        ('hash_function', 'string', proto))
    if code != 0:
        raise ServiceError(msg)

    prefix = '/snmp/usm/users/%s' % username
    code, msg = Mgmt.set(
        (prefix, 'string', username),
        (prefix + '/auth_key', 'string', bindings['auth_key']),
        (prefix + '/hash_function', 'string', proto))
    if code != 0:
        raise ServiceError(msg)
def __stop_sport(self):
    """Terminate the sport process if it is currently running.

    Raises ServiceError if the state cannot be read or either action
    fails.
    """
    state = Mgmt.get_value('/pm/monitor/process/sport/state')
    if state is None:
        raise ServiceError("Could not determine the service's state")
    if state != 'running':
        return

    # clear the restart flag first — node name suggests this stops PM
    # from relaunching sport after termination (confirm against mgmtd)
    code, msg, _ = Mgmt.action('/rbt/sport/status/action/unset_restart_needed')
    if code != 0:
        raise ServiceError(msg)

    code, msg, _ = Mgmt.action('/pm/actions/terminate_process',
                               ('process_name', 'string', 'sport'))
    if code != 0:
        raise ServiceError(msg)
def get_node_value(node_name):
    """get value of a specified node

    takes one argument:
    node_name -> name of the node you want the value of

    returns:
    (value) -> value of the node_name
    """
    import Mgmt
    return Mgmt.get_value(node_name)
def write_config(self):
    """
    write_config(auth) -> None

    Save the current in-memory configuration to disk.

    Exceptions:
    None
    """
    code, msg, _ = Mgmt.action('/mgmtd/db/save')
    if code != 0:
        raise ServiceError(msg)
def get_alarm_status(self, alarm):
    """
    get_alarm_status(auth, alarm) -> string

    Get an alarm's status.

    Parameters:
    alarm (string) - name of the alarm whose status will be returned

    Exceptions:
    get_alarm_statusFault - Alarm does not exist
    get_alarm_statusFault - Server Error. Contact Support.
    """
    alarm = alarm.lower()
    alarm_enabled = Mgmt.get_value('/stats/config/alarm/%s/enable' % alarm)
    if alarm_enabled is None:
        # no enable node => no such alarm
        raise ServiceError, 'Alarm %s does not exist' % alarm
    elif alarm_enabled == 'false':
        return 'disabled'
    elif alarm_enabled == 'true':
        # XXX/jshilkaitis: use mdc_iterate_binding_pattern one day, but
        # need to expose it first, and since it's non-trivial, I'm
        # punting on that for now.
        alarm_pfx = '/stats/state/alarm/' + alarm
        alarm_nodes = Mgmt.iterate(alarm_pfx, subtree=True)
        # scan every rising/falling error leaf under the alarm's state
        # subtree; any 'true' value means the alarm is currently firing
        for node in alarm_nodes:
            if ((Mgmt.bn_name_pattern_match(
                node[0], alarm_pfx + '/node/*/rising/error') or
                 Mgmt.bn_name_pattern_match(
                node[0], alarm_pfx + '/node/*/falling/error')) and
                node[2] == 'true'):
                return 'ERROR'
    else:
        # enable node held an unexpected value
        raise ServiceError, 'Server error. Contact Support.'
    # enabled and no error leaves set
    return 'ok'
def sendVmnetEvent(self, eventName):
    """Emit a mgmtd event carrying this object's vmnet interface name,
    using a short-lived Mgmt session, then log the send."""
    event_binding = ("interface", "string", self.vmnetReal)
    Mgmt.open()
    Mgmt.event(eventName, event_binding)
    Mgmt.close()
    Logging.log(Logging.LOG_INFO,
                "Event %s on interface %s sent" % (eventName,
                                                   self.vmnetReal))
def schedule_tcpdump(self, interfaces, filename, duration, date_and_time,
                     max_size):
    """
    schedule_tcpdump(auth, interfaces, filename, duration, date_and_time,
    max_size) -> None

    Schedule a TCP dump to run at a later time.

    Parameters:
    interfaces (string): comma-separated list of interfaces or 'all'
    filename (string): id string added to the boilerplate TCP dump name
    duration (int): maximum number of seconds that the capture will run
    or 0 to run until stop_tcpdump is called
    date_and_time (datetime): date and time to start the TCP dump
    max_size (int): maximum size, in megabytes, of capture file. Settimg
    this to 0 causes the system default size to be used.

    Exceptions:
    schedule_tcpdumpFault - Invalid capture name
    schedule_tcpdumpFault - Invalid interfaces
    schedule_tcpdumpFault - Capture name too long
    schedule_tcpdumpFault - Duplicate interface name
    schedule_tcpdumpFault - No valid interfaces provided
    schedule_tcpdumpFault - Invalid file name (same as invalid capture name)
    schedule_tcpdumpFault - Invalid date or time
    """
    # reject path traversal in the capture name
    if filename.find('..') != -1:
        raise ServiceError("filename %s must not contain '..'" % filename)

    action_params = []
    if interfaces == 'all':
        action_params.append(('all_interfaces', 'bool', 'true'))
    else:
        for iface in interfaces.split(','):
            action_params.append(('interface', 'string', iface))
    action_params.append(('cap_name', 'string', filename))
    action_params.append(('duration', 'duration_sec', duration))
    action_params.append(('file_size', 'uint32', max_size))

    # split the local datetime into separate date and time bindings
    date_and_time = time_to_local_time(date_and_time)
    dump_date, dump_time = date_and_time.split()
    action_params.append(('sched_time', 'time_sec', dump_time))
    action_params.append(('sched_date', 'date', dump_date))

    # NOTE(review): action_params is passed as a single list argument,
    # while other call sites in this file (e.g. get_stats_internal)
    # unpack bindings with *args — confirm Mgmt.action accepts a list.
    code, msg, bindings = Mgmt.action('/rbt/tcpdump/action/start',
                                      action_params)
    if code != 0:
        raise ServiceError, msg
def revert_config(self):
    """
    revert_config(auth) -> None

    Revert the current in-memory configuration to the last saved
    configuration.

    Exceptions:
    None
    """
    code, msg, _ = Mgmt.action('/mgmtd/db/revert')
    if code != 0:
        raise ServiceError(msg)
def __get_top_talkers_list(self, node_path, value_type):
    """
    Internal function to support top talkers queries. Iterate subtree
    on node_path and use the results to generate a result list of
    value_type instances.
    """
    collected = {}
    leaf_pattern = node_path + '/*/*'
    for binding in Mgmt.iterate(node_path, subtree=True):
        if not Mgmt.bn_name_pattern_match(binding[0], leaf_pattern):
            continue
        parts = Mgmt.bn_name_to_parts(binding[0])
        idx = int(parts[3]) - 1 # iterate results are 1-based
        attr_name = parts[4]
        entry = collected.setdefault(idx, value_type())
        setattr(entry, attr_name, binding[2])
    # assumes indices from mgmtd are contiguous starting at 1 — confirm
    return [collected[i] for i in xrange(len(collected))]
def getValidServersCount(method):
    """
    @param method: tacacs or radius, the server type to check.
    @returns number of enabled servers configured for the specified method;
    0 for an unrecognized method.
    """
    if method == tacacs:
        servers = Mgmt.get_pattern('/tacacs/server/*/enable')
    elif method == radius:
        servers = Mgmt.get_pattern('/radius/server/*/enable')
    else:
        # fix: the original set servers = None here and then iterated
        # it, raising TypeError for any unrecognized method
        servers = []

    # binding tuples are (name, type, value); value 'true' == enabled
    count = 0
    for server in servers:
        if server[2] == 'true':
            count = count + 1
    return count
def get_rios_manage_esxi_ip():
    """! Queries mgmtd for the current RiOS manage ESXi IP address and
    returns it

    Any callers of this function MUST have an open Mgmt GCL session

    If failed, the function returns None.
    """
    ip_node = "/rbt/vsp/state/network/rios_manage_esxi/ip"
    ip = Mgmt.get_value(ip_node)
    # empty/missing value or the all-zeros placeholder both mean "no IP"
    if not ip or ip == "0.0.0.0":
        Logging.log(Logging.LOG_INFO,
                    "Failed to get RiOS manage ESXi IP address")
        return None
    return ip
def trigger_action(action_name,action_params):
    """invoke an action with its binding params

    takes 2 arguments:
    action_name -> name of the action to be triggered
    action_params -> varargs of bindings

    returns:
    (code, message, bindings)
    code -> non zero in case of failure, zero otherwise
    msg -> return message
    bindings -> dictionary keyed by parameter name
    """
    import Mgmt
    # NOTE(review): the docstring says action_params is "varargs of
    # bindings" but it is forwarded as ONE positional argument, not
    # unpacked with *action_params as other call sites do — confirm
    # what Mgmt.action expects before relying on this wrapper.
    code, msg, bindings = Mgmt.action(action_name,action_params)
    return code, msg, bindings