def sendVmnetEvent(self, eventName):
    """Emit a management event that names the real vmnet interface.

    Opens a Mgmt session just long enough to send the event, then logs
    that the event went out.
    """
    # The event payload is a single typed binding carrying the
    # underlying (real) vmnet interface name.
    iface_binding = ("interface", "string", self.vmnetReal)
    Mgmt.open()
    Mgmt.event(eventName, iface_binding)
    Mgmt.close()
    Logging.log(Logging.LOG_INFO,
                "Event %s on interface %s sent" % (eventName, self.vmnetReal))
def __call__(self, *args, **kw): auth_info = args[1] # the (de-)serializer turns the empty string into None, which # ctypes will then turn into NULL. This will crash the # pam_conversation function, so we protect against it here. if auth_info.password is None: auth_info.password = "" try: # XXX/jshilkaitis: is using wsmd okay here? pamh = pam.PamHandle("wsmd", auth_info.username, auth_info.password) # XXX/jshilkaitis: check to see if there are flags we want here pamh.authenticate(0) # XXX/jshilkaitis: flag check pamh.acct_mgmt(0) except: log(LOG_NOTICE, 'User %s failed to authenticate via the SOAP ' 'server.' % auth_info.username) raise else: log(LOG_NOTICE, 'User %s succesfully authenticated via the ' 'SOAP server.' % auth_info.username) # XXX/jshilkaitis: We need to ask PAM for the local username # and use that to get the proper uid/gid. # XXX/jshilkaitis: is username the LOCAL username? # We need to make sure that Tac+ and RADIUS auths play # nicely with the setuid/setgid. pwd_db_entry = pwd.getpwnam(auth_info.username) uid = pwd_db_entry[2] gid = pwd_db_entry[3] os.setegid(gid) os.seteuid(uid) # strip out auth_info args = list(args) args.pop(1) try: try: Mgmt.open() except: raise ServiceError, "Unable to connect to the " \ "management backend." return self.func(*args, **kw) finally: Mgmt.close()
def start_vmware_vmx(path):
    """! Start vmware-vmx with given vm

    Ensures the VSP ramfs is freshly mounted (unmounting a stale mount
    first), optionally preloads the performance-tweaks library, then
    runs the configured vmx binary on *path* and waits for it to exit.
    """
    Logging.log(Logging.LOG_INFO, "Starting vm %s" % path)

    vsp_ramfs = RamFs.RamFs(vsp_ramfs_path)
    if vsp_ramfs.is_mounted():
        # we generally should not hit this path, we unmount the ramfs when
        # we stop vmware-vmx
        Logging.log(Logging.LOG_INFO,
                    "VSP ramfs is already mounted %s, unmounting" % \
                    vsp_ramfs_path)
        try:
            vsp_ramfs.unmount_ramfs()
        except RamFs.RamFsCmdException as e:
            # we'll proceed with starting vmx even if we can't unmount
            Logging.log(Logging.LOG_ERR, e.msg)

    if not vsp_ramfs.is_mounted():
        try:
            vsp_ramfs.mount_ramfs(vsp_ovhd_ramfs_min_size_mb)
        except (OSError, RamFs.RamFsCmdException) as e:
            Logging.log(Logging.LOG_ERR, str(e))
            Logging.log(Logging.LOG_ERR, "Unable to create ramfs %s" \
                        " not starting VMX" % vsp_ramfs_path)
            # skip starting VMX, the caller will look for vmx status
            return

    # Link in performance tweaks library
    env_dict = os.environ.copy()
    Mgmt.open()
    if Vsp.is_memlock_enabled():
        # FIX: dict.has_key() is deprecated (removed in Python 3); the
        # "in" operator is equivalent and works on both 2.x and 3.x.
        if "LD_PRELOAD" in env_dict:
            env_dict["LD_PRELOAD"] = vmperf_path + " " + env_dict["LD_PRELOAD"]
        else:
            env_dict["LD_PRELOAD"] = vmperf_path

    # Check the ESXi debug option to see which binary we need to run
    vmx_option = get_debug_option()
    Mgmt.close()

    binary_path = option_to_path[vmx_option]
    Logging.log(Logging.LOG_DEBUG, "BINARY PATH: %s" % binary_path)
    # Block until vmx exits; the caller monitors vmx status separately.
    pobj = subprocess.Popen([binary_path, "-qx", path], env = env_dict)
    pobj.wait()
def main():
    """! Entry point to the watchdog. Initialize logger and starts
    attempting to communicate with ESXi
    """
    global g_mgmtd_pid
    g_mgmtd_pid = None
    mgmtd_pids = []

    Logging.log_init('esxi_watchdog', 'esxi_watchdog', 0,
                     Logging.component_id(Logging.LCI_VSP),
                     Logging.LOG_DEBUG, Logging.LOG_LOCAL0,
                     Logging.LCT_SYSLOG)
    Logging.log(Logging.LOG_INFO, "esxi watchdog started")

    # Bug 117274: It may happen that we get multiple pids for mgmtd process,
    # pidof ran between fork-exec call, retry to allow mgmtd to settle
    for i in range(1, MAX_MGMTD_SETTLE_RETRY):
        mgmtd_pids = Vsp.get_pids('mgmtd')
        if len(mgmtd_pids) == 1:
            g_mgmtd_pid = mgmtd_pids[0]
            break
        # FIX: the original indexed mgmtd_pids[0] whenever there were
        # not multiple pids, which raised IndexError when the list was
        # empty (mgmtd not started yet). Treat both "no pid yet" and
        # "multiple pids" as not-settled: sleep and retry.
        time.sleep(MGMTD_SETTLE_TIMEOUT)

    # Bug 112192: monitor mgmtd pid, if mgmtd crashes/exits
    # terminate watchdog as well
    if g_mgmtd_pid is None:
        # mgmtd not up kill watchdog process
        Logging.log(Logging.LOG_ERR, "Mgmtd is not ready, kill watchdog!")
        sys.exit()

    Mgmt.open()
    signal.signal(signal.SIGINT, terminate_handler)
    signal.signal(signal.SIGTERM, terminate_handler)
    signal.signal(signal.SIGQUIT, terminate_handler)

    # Invalidate the session file if it exists on startup
    if os.path.exists(SESSION_FILE):
        os.remove(SESSION_FILE)

    # Main monitoring loop; returns when the watchdog is done.
    monitor_esxi()
    Mgmt.close()
def main(args):
    """Main function

    * parse input args
    * start easy auth Auto-Conf

    Parses the command-line options, builds the InputArgs bundle, runs
    the AutoConfEasyAuth flow inside a Mgmt session, rolls back on
    error, prints the accumulated result and (on invalid options)
    the usage text.
    """
    output = ''
    result = ''
    fail_string = 'Auto-conf FAILED\n'
    success_string = 'Auto-conf Successfully completed\n'
    ret_code = AutoConfEasyAuth.success
    usage = 'Usage: %prog options [logfile]'
    parser = optparse.OptionParser(usage=usage,
                                   description ='Easy Auth Auto-Conf utility')
    parser.add_option('-t', '--testname', dest = 'test_name',
                      help = 'DomainHealth Test name',
                      default = 'autoconf_easyauth')
    parser.add_option('-C', '--Conf-type', dest = 'conf_type',
                      help = 'Auto-Configuration type: emapi/smbsigning/smb2signing/emapi,smbsigning/emapi,smb2signing/smbsigning,smb2signing/emapi,smbsigning,smb2signing/all',
                      default = None)
    parser.add_option('-D', '--Domain', dest = 'join_domain',
                      help = 'Join-Domain name', default = None)
    parser.add_option('-A', '--AdminUser', dest = 'admin_user',
                      help = 'Domain Administrator', default = None)
    parser.add_option('-P', '--password', dest = 'admin_password',
                      help = 'Admin User password', default = None)
    parser.add_option('-d', '--DC' , dest ='dc',
                      help = 'Domain Controller hostname', default = None)
    parser.add_option('-j', '--Join-type', dest = 'join_type',
                      help = 'Join type: bdc/rodc', default = None )
    parser.add_option('-s', '--ShortDomain', dest = 'shortdom',
                      help = 'Short Domain name', default = None )
    parser.add_option('-u', '--ReplicationUser', dest = 'repl_user_name',
                      help = 'Replication User name', default = None )
    parser.add_option('-p', '--ReplicationPassword', dest = 'repl_user_pass',
                      help = 'Replication User password', default = None )
    parser.add_option('-R', '--ReplicationDomain', dest = 'repl_user_dom',
                      help = 'Replication User domain', default = None )
    parser.add_option('-r', '--IsRodc', dest = 'repl_isrodc',
                      help = 'Is RODC', default = 'false' )
    parser.add_option('-l', '--loglevel', dest = 'log_level',
                      help = 'Set Minimum logging level', default = 'info' )
    parser.add_option('-m', '--test_mode', action="store_true",
                      dest = 'test_mode',
                      help = 'Run script in test mode', default = False)
    options, arguments = parser.parse_args()
    # First positional argument (if present) overrides the default
    # log file location.
    if len(arguments) == 0 :
        param_logfile = '/var/log/domaind/autoconf-easyauth'
    else :
        param_logfile = arguments[0]
    inp_args = InputArgs(options.conf_type, options.admin_user,
                         options.admin_password, options.join_domain,
                         options.dc, options.join_type, options.shortdom,
                         options.repl_user_dom, options.repl_user_name,
                         options.repl_user_pass, options.repl_isrodc,
                         options.test_mode)
    log_handle = 'domaind/configure/' + options.test_name
    log_file = CustomLogger(log_handle, param_logfile, options.log_level)
    easyauth = AutoConfEasyAuth(inp_args, log_file)
    #open Mgmt session
    Mgmt.open()
    try:
        ret_code = easyauth.run()
        if ret_code == AutoConfEasyAuth.success :
            output = easyauth.inp_args.conf_type + ' ' + success_string
            easyauth.log_file.notice(output)
        else :
            if ret_code == AutoConfEasyAuth.invalid :
                output = 'Options validation for test failed, testname: ' \
                         + options.test_name + '\n' + fail_string
                easyauth.log_file.error(output)
    except:
        # Any unexpected exception maps to the generic error code; the
        # exception text is kept in the result for the operator.
        ret_code = AutoConfEasyAuth.error
        output = 'Caught exception while running ' + options.test_name
        output += ': ' + str(sys.exc_info()[1]) + '\n'
        easyauth.log_file.error(output)
        easyauth.result += output
    if ret_code == AutoConfEasyAuth.error:
        # Undo any partially-applied configuration.
        easyauth.rollback()
        if easyauth.setconfig.restart_needed :
            output = 'You may need to restart the optimization' + \
                     ' service for the changes to be reverted properly.\n'
            easyauth.result += output
        output = easyauth.inp_args.conf_type + ' ' + fail_string
        easyauth.log_file.error(output)
        easyauth.result += output
    #close Mgmt session
    Mgmt.close()
    print >> sys.stdout, easyauth.result
    if ret_code == AutoConfEasyAuth.invalid :
        parser.print_help() # display usage
def terminate_term_handler(signum, frame):
    """! Signal handler for SIGTERM and SIGINT.

    Whenever one of the signals occur, we will attempt to make the
    vicfg-hostops call to gracefully power down the host.

    If there is an issue with the password, we will immediately power
    down the host with vmrun stop. If the issue is connection related,
    we will not do anything and will let PM retry again (if possible)
    when the next signal is sent.
    """
    do_forceful = False
    Logging.log(Logging.LOG_DEBUG, "Wrapper: got TERM signal")

    # We used to open and close the session in main(). However, if mgmtd
    # crashes and is restarted by PM, the session we created will not be
    # stale and any queries will fail miserably. To mitigate this issue,
    # we'll open and close the session as tightly as possible
    Mgmt.open()
    env_vars = Vsp.make_vicfg_env_vars()
    Logging.log(Logging.LOG_DEBUG, "Wrapper env: %s" % env_vars)
    Mgmt.close()

    # Send signal to active VM migration task so it can clean up
    stop_migrate_deploy()

    if env_vars is None or not os.path.exists(CONNECTED_MARKER):
        Logging.log(Logging.LOG_NOTICE,
                    "Cannot get password or currently disconnected from ESXi")
        do_forceful = True

    # Do one last check for connectivity just in case the watchdog
    # says we are connected when really we are not due to a change of
    # IP or password on the ESXi side (but our sessionfile is still valid)
    # XXX/rcenteno Enhance with the session file
    if not do_forceful and not check_connectivity(env_vars):
        Logging.log(Logging.LOG_NOTICE, "ESXi connectivity not found")
        do_forceful = True

    if do_forceful:
        Logging.log(Logging.LOG_NOTICE, "Performing forceful power off")
        # We cannot get the ESXi password or cannot connect to ESXi,
        # so we must forcibly power down
        stop_vmware_vmx()
    else:
        global g_shutdown_requested

        # update IQN cache value
        iqn = Vsp.get_iqn(env_vars, RUNFOR_TIMEOUT)
        if iqn:
            try:
                iqn_cache = open(iqn_cache_path, 'w')
                iqn_cache.write(iqn)
                iqn_cache.close()
            except Exception as e:
                # FIX: the original log message string literal was broken
                # by a raw newline (a syntax error as written); rejoined
                # here, and the exception text is included for diagnosis.
                Logging.log(Logging.LOG_ERR,
                            "Exception while updating IQN cache file: %s"
                            % str(e))

        # Create the shutdown marker that the hpn will use to know it should
        # wait for ESXi shutdown first
        open(SHUTDOWN_MARKER, 'w').close()

        # Enable SSH on the host
        pobj = subprocess.Popen(ENABLE_SSH_COMMAND, env=env_vars,
                                stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE)
        ret_code = pobj.wait()
        if ret_code == 0:
            # Save the state on the host
            pobj = subprocess.Popen(SAVE_STATE_COMMAND, env=env_vars,
                                    stdout=subprocess.PIPE,
                                    stderr=subprocess.PIPE)
            ret_code = pobj.wait()
            if ret_code == 0:
                Logging.log(Logging.LOG_INFO, "Saved state on the host")
            # Disable SSH on the host
            pobj = subprocess.Popen(DISABLE_SSH_COMMAND, env=env_vars,
                                    stdout=subprocess.PIPE,
                                    stderr=subprocess.PIPE)
            ret_code = pobj.wait()

        # Regardless of what happened earlier, send the host operations command
        Logging.log(Logging.LOG_INFO, "ESXi graceful shutdown in progress")
        pobj = subprocess.Popen(SHUTDOWN_COMMAND, env=env_vars,
                                stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE)
        ret_code = pobj.wait()
        if ret_code == 0:
            g_shutdown_requested = True
        # the command timed out
        elif ret_code == 99:
            Logging.log(Logging.LOG_INFO,
                        "Timed out trying to send graceful shutdown request")
# NOTE(review): this fragment begins mid-scope -- "full", "minimal",
# "options", "do_install" and later the remaining *_default values are
# defined in surrounding code not visible here.
# Reinstall the CRON configuration whenever either interval setting has
# drifted from the currently-configured value.
if full and full != options.full_interval:
    if options.verbose:
        print "Updating CRON config due to interval changes"
    do_install()
elif minimal and minimal != options.min_interval:
    if options.verbose:
        print "Updating CRON config due to interval changes"
    do_install()

# Command line & Config.  Configuration settings (or defaults) come
# from mgmt.  The command line can override.
HC = '/rbt/support/healthcheck'
# mgmt hands back booleans as the strings 'true'/'false'; map them to
# Python booleans.
md_bool_map = {'true':True, 'false':False}
try:
    Mgmt.open()
    enable_default=md_bool_map[Mgmt.get_value(HC + '/enable')]
    transport_default=Mgmt.get_value(HC + '/transport')
    url_default=Mgmt.get_value(HC + '/url')
    recipient_default=Mgmt.get_value(HC + '/recipient')
    proxy_default=Mgmt.get_value(HC + '/proxy')
    full_interval_default=Mgmt.get_value(HC + '/full_interval')
    min_interval_default=Mgmt.get_value(HC + '/min_interval')
    level_default=Mgmt.get_value(HC + '/level')
    Mgmt.close()
except:
    # Mgmt cannot be accessed at boot so we muddle along with command
    # line only to allow the boot install.  The first run after boot
    # will fix the scheduling to any user defined values or
    # md_support.xml defaults.
    enable_default=False
def initMgmt():
    # Open a Mgmt session explicitly bound to the 'mgmtd' GCL provider.
    Mgmt.open(gcl_provider='mgmtd')
def main(args):
    """Main function

    * start verification and backup

    Runs the TDB file verification/backup task, guarding against a
    concurrent run, wrapping the work in a Mgmt session (unless in test
    mode), and printing a human-readable result.  Returns the task's
    status code.
    """
    ret_code = Test.success
    task_name = "TDB file verification and backup utility"
    loghandle_name = "domain_auth/tdb_verify_backup"
    log_file = None
    success_msg = "TDB file verification and backup successfully completed."
    fail_msg = "TDB file verification and backup encountered problems."
    result = ""
    usage = "Usage: %prog [-l <log_level> logfile]"
    parser = optparse.OptionParser(usage=usage, description=task_name)
    parser.add_option("-l", "--loglevel", dest="loglevel",
                      help="Minimum log level", default="info")
    parser.add_option(
        "-m", "--test_mode", action="store_true", dest="test_mode",
        help="Run script in test mode", default=False
    )
    options, arguments = parser.parse_args()
    # Optional positional argument names the log file.
    if len(arguments) == 0:
        param_logfile = None
    else:
        param_logfile = arguments[0]
    log_file = CustomLogger(loghandle_name, param_logfile, options.loglevel)
    tdbmanager = TDBManager(log_file, loghandle_name)
    tdbmanager.unit_test_mode = options.test_mode
    # Refuse to run while another instance of this task is in progress.
    if TDBManager.is_duplicate_task(args[0]):
        output = "Cannot run " + task_name + " while an existing run is in progress.\n"
        result += output
        tdbmanager.log_file.error(output)
        ret_code = TDBManager.error
    else:
        if not tdbmanager.unit_test_mode:
            # Imported lazily so test mode never touches the backend.
            import Mgmt
            # open Mgmt session
            Mgmt.open()
        try:
            ret_code = tdbmanager.run()
            if ret_code == TDBManager.success:
                result += success_msg
                tdbmanager.log_file.notice(success_msg)
        except:
            # Map any unexpected exception to the generic error code.
            ret_code = TDBManager.error
            output = "Caught exception while running " + task_name + " : " + str(sys.exc_info()[1]) + "\n"
            tdbmanager.log_file.error(output)
        if not tdbmanager.unit_test_mode:
            # close Mgmt session
            Mgmt.close()
    if ret_code == TDBManager.error:
        result += fail_msg
        tdbmanager.log_file.error(fail_msg)
    print >>sys.stdout, result
    return ret_code
def getBridgeInterfaceMTU(self, bridgeName):
    """Return the MTU reported for the bridged interface of *bridgeName*.

    Looks up the value under the bridge's mgmt state subtree and returns
    whatever the backend stores there.
    """
    # State node holding the bridged interface's MTU for this bridge.
    mtu_node = "/rbt/vsp/state/interface/%s/bridged/interface/mtu" % bridgeName
    Mgmt.open()
    mtu = Mgmt.get_value(mtu_node)
    Mgmt.close()
    return mtu