def reboot(self):
    """
    reboot(auth) -> None

    Reboot the appliance.

    Exceptions:
    rebootFault - Unable to verify user permissions
    rebootFault - Authorization failure: unable to complete action for user
    """
    reboot_node = '/pm/actions/reboot'
    check_rbm_permissions(reboot_node, RBM_ACTION)

    # XXX/jshilkaitis: _highly_ questionable implementation, but we need
    # something now.  Flaws: relies on arbitrary 3 second sleep, and does
    # not report an error to the client if the reboot action fails.  The
    # action failing should be rare enough to not worry about, though.
    #
    # We fork so that the parent SOAP server can respond to the client.
    if os.fork() == 0:
        # Child: give the parent SOAP server time to respond.
        time.sleep(3)

        # The following action does not return if it is successful.
        Mgmt.action(reboot_node)

        # BUG FIX: if the reboot action fails, the child must not fall
        # back into the SOAP server's event loop and serve requests
        # alongside its parent.  Exit immediately without running any
        # inherited cleanup handlers.
        os._exit(1)
def check_rbm_permissions(node_name, write_needed):
    """
    Raise ServiceError if the user does not have at least the permissions
    specified by write_needed for the node specified by node_name.

    A questionable interface, but it works well for the SOAP server.
    """
    import pwd
    username = pwd.getpwuid(os.geteuid())[0]

    # XXX/jshilkaitis: a small hack to get around the fact that the RBM
    # nodes verify action doesn't do what I want for admin or monitor.
    # I should figure out a cleaner way to do this in the future.
    if username == 'admin' or username == 'monitor':
        return

    code, msg, bindings = Mgmt.action('/rbm/action/nodes/verify',
                                      ('username', 'string', username),
                                      ('nodenames', 'string', node_name))
    if code != 0:
        raise ServiceError(msg)

    try:
        perms = bindings['permissions']
    except KeyError:
        # Narrowed from a bare except: only a missing binding can occur here.
        raise ServiceError('Server Error: Unable to authorize current user')

    if ((write_needed and perms != 'write') or
        (not write_needed and perms != 'write' and perms != 'read')):
        # XXX/jshilkaitis: make this match the message returned by the backend
        # someday.  Same goes for the CLI.
        raise ServiceError("Insufficient permissions for command execution.")
def get_stats_internal(self, report_name, num_sets, start_time, end_time,
                       subclass=None):
    """
    Generate the named stats report over [start_time, end_time] and
    return num_sets data/value sets parsed from the report's output.

    Raises ServiceError if the report generation action fails.
    """
    # The backend expects local-time datetimes.
    lower = time_to_local_time(start_time)
    upper = time_to_local_time(end_time)

    params = [('time_lb', 'datetime_sec', lower),
              ('time_ub', 'datetime_sec', upper)]
    if subclass is not None:
        params.append(('subclass', 'uint32', subclass))

    code, msg, bindings = Mgmt.action(
        '/stats/actions/generate_report/%s' % report_name, *params)
    if code != 0:
        raise ServiceError(msg)

    # save off the file name so we can remove it in self.onReturn()
    results = bindings['results']
    return bulk_stats.bsp_to_data_val_sets(results, num_sets)
def __stop_sport(self):
    """Terminate the optimization service (sport) if it is running."""
    state = Mgmt.get_value('/pm/monitor/process/sport/state')
    if state is None:
        raise ServiceError("Could not determine the service's state")

    # Nothing to do unless the process is actually up.
    if state != 'running':
        return

    # Clear the restart-needed flag before terminating the process.
    code, msg, bindings = Mgmt.action(
        '/rbt/sport/status/action/unset_restart_needed')
    if code != 0:
        raise ServiceError(msg)

    code, msg, bindings = Mgmt.action(
        '/pm/actions/terminate_process',
        ('process_name', 'string', 'sport'))
    if code != 0:
        raise ServiceError(msg)
def __start_sport(self):
    """Start the optimization service (sport) unless it is already running."""
    state = Mgmt.get_value('/pm/monitor/process/sport/state')
    if state is None:
        raise ServiceError("Could not determine the service's state")

    # Already up -- nothing to do.
    if state == 'running':
        return

    code, msg, bindings = Mgmt.action(
        '/rbt/sport/main/action/restart_service')
    if code != 0:
        raise ServiceError(msg)
def write_config(self):
    """
    write_config(auth) -> None

    Save the current in-memory configuration to disk.

    Exceptions:
    None
    """
    code, msg, _bindings = Mgmt.action('/mgmtd/db/save')
    if code != 0:
        raise ServiceError(msg)
def schedule_tcpdump(self, interfaces, filename, duration,
                     date_and_time, max_size):
    """
    schedule_tcpdump(auth, interfaces, filename, duration, date_and_time,
                     max_size) -> None

    Schedule a TCP dump to run at a later time.

    Parameters:
    interfaces (string): comma-separated list of interfaces or 'all'
    filename (string): id string added to the boilerplate TCP dump name
    duration (int): maximum number of seconds that the capture will run
                    or 0 to run until stop_tcpdump is called
    date_and_time (datetime): date and time to start the TCP dump
    max_size (int): maximum size, in megabytes, of capture file.  Setting
                    this to 0 causes the system default size to be used.

    Exceptions:
    schedule_tcpdumpFault - Invalid capture name
    schedule_tcpdumpFault - Invalid interfaces
    schedule_tcpdumpFault - Capture name too long
    schedule_tcpdumpFault - Duplicate interface name
    schedule_tcpdumpFault - No valid interfaces provided
    schedule_tcpdumpFault - Invalid file name (same as invalid capture name)
    schedule_tcpdumpFault - Invalid date or time
    """
    # Reject path traversal in the capture name.
    if filename.find('..') != -1:
        raise ServiceError("filename %s must not contain '..'" % filename)

    action_params = []

    if interfaces == 'all':
        action_params.append(('all_interfaces', 'bool', 'true'))
    else:
        for iface in interfaces.split(','):
            action_params.append(('interface', 'string', iface))

    action_params.append(('cap_name', 'string', filename))
    action_params.append(('duration', 'duration_sec', duration))
    action_params.append(('file_size', 'uint32', max_size))

    date_and_time = time_to_local_time(date_and_time)
    dump_date, dump_time = date_and_time.split()

    action_params.append(('sched_time', 'time_sec', dump_time))
    action_params.append(('sched_date', 'date', dump_date))

    # BUG FIX: Mgmt.action takes the bindings as varargs (see every other
    # call site in this file); the list must be unpacked, not passed as a
    # single positional argument.
    code, msg, bindings = Mgmt.action('/rbt/tcpdump/action/start',
                                      *action_params)
    if code != 0:
        raise ServiceError(msg)
def revert_config(self):
    """
    revert_config(auth) -> None

    Revert the current in-memory configuration to the last saved
    configuration.

    Exceptions:
    None
    """
    code, msg, _bindings = Mgmt.action('/mgmtd/db/revert')
    if code != 0:
        raise ServiceError(msg)
def trigger_action(action_name, action_params):
    """
    Invoke a mgmtd action with its binding params.

    Takes 2 arguments:
    action_name   -> name of the action to be triggered
    action_params -> sequence of binding tuples, each (name, type, value)

    Returns (code, message, bindings):
    code     -> non zero in case of failure, zero otherwise
    msg      -> return message
    bindings -> dictionary keyed by parameter name
    """
    import Mgmt
    # BUG FIX: Mgmt.action takes bindings as varargs (the docstring already
    # said so); forwarding the sequence un-unpacked handed it one malformed
    # binding instead of the individual (name, type, value) tuples.
    code, msg, bindings = Mgmt.action(action_name, *action_params)
    return code, msg, bindings
def set_snmp_password(self, username, password, auth_protocol):
    """
    set_snmp_password(auth, username, password, auth_protocol) -> None

    Configure a SNMP user.

    Parameters:
    username - username of user to configure
    password - user's new password
    auth_protocol - protocol used to encrypt the password

    Exceptions:
    set_snmp_passwordFault - password must contain at least 8 characters
    set_snmp_passwordFault - XXX/BUG 47861/48172 for bad auth_protocol value
    """
    # XXX/jshilkaitis: lame, should be an action on the mgmtd side
    # 8 is USM_LENGTH_P_MIN from net-snmp
    min_password_len = 8
    if len(password) < min_password_len:
        raise ServiceError("password must contain at least 8 characters")

    auth_protocol = auth_protocol.upper()

    # Derive the auth key from the password on the backend.
    code, msg, bindings = Mgmt.action(
        '/snmp/usm/actions/generate_auth_key',
        ('password', 'string', password),
        ('hash_function', 'string', auth_protocol))
    if code != 0:
        raise ServiceError(msg)

    auth_key = bindings['auth_key']

    # Store the key and hash function under the user's config prefix.
    user_node = '/snmp/usm/users/%s' % username
    code, msg = Mgmt.set(
        (user_node, 'string', username),
        (user_node + '/auth_key', 'string', auth_key),
        (user_node + '/hash_function', 'string', auth_protocol))
    if code != 0:
        raise ServiceError(msg)
def stop_tcpdump(self, filename):
    """
    stop_tcpdump(auth, filename) -> None

    Stop a TCP dump

    Parameter:
    filename (string): name of capture to stop.  This should be the same
                       as the filename parameter passed to start_tcpdump
                       or schedule_tcpdump.

    Exceptions:
    stop_tcpdumpFault - Unknown capture name
    """
    code, msg, _bindings = Mgmt.action(
        '/rbt/tcpdump/action/stop',
        ('cap_name', 'string', filename))
    if code != 0:
        raise ServiceError(msg)
def get_config(self, config_name, target_url):
    """
    get_config(config_name, target_url) -> None

    Download a configuration to the location specified by target_url.

    Parameters:
    config_name (string) - name of config to download
    target_url (string) - location to which the config will be downloaded

    Exceptions:
    get_configFault - database file does not exist
    get_configFault - unsupported transfer protocol (scp/ftp support only)
    get_configFault - upload failed for unknown reasons. Check logs.
    """
    upload_bindings = [('db_name', 'string', config_name),
                       ('remote_url', 'string', target_url)]
    code, msg, _bindings = Mgmt.action('/mgmtd/db/upload', *upload_bindings)
    if code != 0:
        raise ServiceError(msg)
def restart_optimization_service(self, clean_datastore):
    """
    start_optimization_service(auth, clean_datastore) -> None

    Restart the optimization service, potentially cleaning the Data Store.

    Parameters:
    clean_datastore (bool) - true to clean the datastore, false to leave
                             the datastore as is

    Exceptions:
    restart_optimization_serviceFault - Could not determine service state
    """
    self.__stop_sport()

    if clean_datastore:
        # XXX/jshilkaitis: should we error if "touch" fails?
        # Close the handles immediately rather than leaking them to the
        # GC (the original used the py2 file() builtin and dropped the
        # handle on the floor).
        open('/var/opt/rbt/.clean', 'w').close()  # approximates /bin/touch
        open('/var/opt/rbt/.datastore_notif', 'w').close()

        # Only regenerate the store id when the marker file exists
        # (replaces an os.stat wrapped in a bare except).
        if os.path.exists('/var/opt/rbt/.gen_store_id'):
            code, msg, bindings = Mgmt.action(
                '/rbt/sport/datastore/action/generate_store_id')
            if code != 0:
                raise ServiceError(msg)
    else:
        # Best-effort removal: the marker may legitimately be absent.
        try:
            os.remove('/var/opt/rbt/.gen_store_id')
        except OSError:
            pass

    self.__start_sport()
def get_qos_stats(self, start_time, end_time, qos_class):
    """
    get_qos_stats(auth, start_time, end_time, qos_class) ->
        [ [packets sent], [packets dropped], [bits sent], [bits dropped] ]

    Fetch the system's QoS statistics.

    Parameters:
    start_time (datetime) - start of the stats query period
    end_time (datetime) - end of the stats query period
    qos_class (string) - name of qos class whose stats are desired or 'all'

    Exceptions:
    get_qos_statsFault - Datetime not in known format
    get_qos_statsFault - QoS class does not exist
    get_qos_statsFault - Internal error. Failed to map class to id.
    """
    classid = None

    # the default class is guaranteed to exist, so this is always safe
    check_rbm_permissions('/rbt/hfsc/config/class/default/params/classid',
                          RBM_READ)

    if qos_class == 'all':
        classid = "0"
    # elif qos_class == 'unknown':
    #     XXX/jshilkaitis: figure out how to deal with this
    #     pass
    else:
        # convert class name to class id
        classid = Mgmt.get_value(
            '/rbt/hfsc/config/class/%s/params/classid' % qos_class)
        if classid is None:
            raise ServiceError('QoS class "%s" does not exist' % qos_class)

    check_rbm_permissions('/rbt/hfsc/action/get_family_ids', RBM_ACTION)

    # handle hierarchical QoS classes
    code, msg, bindings = Mgmt.action('/rbt/hfsc/action/get_family_ids',
                                      ('parent_id', 'uint16', classid))
    if code != 0:
        raise ServiceError('Unable to map class name "%s" to class id.'
                           % qos_class)

    if bindings == {}:
        family_ids = classid
    else:
        family_ids = bindings['family_ids']

    data_dict = {0: {}, 1: {}, 2: {}, 3: {}}

    # all datapoints with the same timestamp have equal durations
    gran_dict = {}

    # merge the datapoints for each class into a summary dict
    for cid in family_ids.split(','):
        stats_array = self.get_stats_internal('qos', 4, start_time,
                                              end_time, cid)
        for i in range(4):
            curr_dict = data_dict[i]
            sub_array = stats_array[i]
            for dp in sub_array:
                curr_dict[dp.time] = curr_dict.get(dp.time, 0) + dp.value
                gran_dict[dp.time] = dp.duration

    results = []
    # sorted() instead of keys() + .sort(): same order, py2/py3 safe.
    sorted_times = sorted(gran_dict.keys())

    # turn the summary dict into a list of datapoints, sorted by time
    for i in range(4):
        curr_dict = data_dict[i]
        curr_result_array = []
        for t in sorted_times:
            d = Datapoint()
            d.time = t
            d.value = curr_dict[t]
            d.duration = gran_dict[t]
            curr_result_array.append(d)
        results.append(curr_result_array)

    return results