Example #1
    def parseConfigFile(self):
        """
        function: parse the configuration file:
                  1. Get local installation path for restoration
                  2. Obtain user and group for restoration
                  3. Obtain the local node information for restoration
        input : NA
        output: NA
        """
        self.logger.log("Parsing the configuration file.")

        try:
            self.clusterInfo = dbClusterInfo()
            gaussHome = os.getenv("GAUSSHOME")
            if g_forceRestore and self.restoreBin:
                self.clusterInfo.appPath = gaussHome
            else:
                self.clusterInfo.initFromStaticConfig(self.user, g_staticFile)
                hostName = DefaultValue.GetHostIpOrName()
                self.dbNodeInfo = self.clusterInfo.getDbNodeByName(hostName)
                if self.dbNodeInfo is None:
                    self.logger.logExit(ErrorCode.GAUSS_516["GAUSS_51619"] %
                                        hostName)
            # Getting local installation path for restoration.
            self.logger.log("Getting local installation path for restoration.")
            self.installPath = os.path.realpath(self.clusterInfo.appPath)
            self.binExtractName = self.installPath.split("/")[-1]
            self.logger.debug("Local installation path: %s." %
                              self.installPath)
        except Exception as e:
            raise Exception(str(e))

        self.logger.log("Successfully parsed the configuration file.")
Example #2
    def checkLogFilePath(self):
        """
        function: Check log file path
        input : NA
        output: NA
        """
        clusterPath = []
        try:
            # get tool path
            clusterPath.append(DefaultValue.getClusterToolPath(self.user))
            # get tmp path
            tmpDir = DefaultValue.getTmpDirFromEnv()
            clusterPath.append(tmpDir)
            # get cluster path
            hostName = DefaultValue.GetHostIpOrName()
            dirs = self.clusterInfo.getClusterDirectorys(hostName, False)
            # loop all cluster path
            for checkdir in dirs.values():
                clusterPath.extend(checkdir)
            self.logger.debug("Cluster paths %s." % clusterPath)

            # check directory
            g_file.checkIsInDirectory(self.logFile, clusterPath)
        except Exception as e:
            self.logger.logExit(str(e))
Example #3
    def ReCleanEtcdPath(self):
        """
        function: make sure the etcd path is clean.
        input : NA
        output: NA
        """
        # check if need delete instance
        if (not self.cleanInstance):
            self.logger.debug("No need to redelete etcd path.")
            return

        if (self.localMode):
            for dbnode in self.clusterInfo.dbNodes:
                if (dbnode.name == DefaultValue.GetHostIpOrName()):
                    if (len(dbnode.etcds) > 0):
                        etcdDir = dbnode.etcds[0].datadir
                        self.logger.debug("Clean etcd path %s in node: %s." %
                                          (etcdDir, dbnode.name))
                        g_file.cleanDirectoryContent(etcdDir)
        else:
            for dbnode in self.clusterInfo.dbNodes:
                if (len(dbnode.etcds) > 0):
                    etcdDir = dbnode.etcds[0].datadir
                    cmd = g_file.SHELL_CMD_DICT["cleanDir4"] % etcdDir
                    self.logger.debug("Clean etcd path %s in node: %s." %
                                      (etcdDir, dbnode.name))
                    (status, output) = self.sshTool.getSshStatusOutput(
                        cmd, [dbnode.name], self.mpprcFile)
                    if (status[dbnode.name] != DefaultValue.SUCCESS):
                        self.logger.debug("Clean etcd failed: %s" % output)
Example #4
    def getDiskPath(self):
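        """
        function: collect the directories used on the local node
                  (PGHOST, GPHOME, GAUSSLOG, GAUSSHOME and datanode data
                  directories)
        input : NA
        output: nodeDirs
        """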
        nodeDirs = []
        # get PGHOST Dir
        tmpDir = DefaultValue.getEnv("PGHOST")
        nodeDirs.append(tmpDir)

        # get gphome dir
        gphome_path = DefaultValue.getEnv("GPHOME")
        nodeDirs.append(gphome_path)

        # get log dir
        log_path = DefaultValue.getEnv("GAUSSLOG")
        nodeDirs.append(log_path)

        # get gausshome dir
        gausshome_path = DefaultValue.getEnv("GAUSSHOME")
        nodeDirs.append(os.path.realpath(gausshome_path))

        hostName = DefaultValue.GetHostIpOrName()
        dbNode = self.cluster.getDbNodeByName(hostName)
        # including dn
        for dbInst in dbNode.datanodes:
            nodeDirs.append(dbInst.datadir)

        return nodeDirs
Example #5
    def cleanGroup(self):
        """
        function: clean group
        input : NA
        output: NA
        """
        self.logger.debug("Cleaning user group.")
        hostName = DefaultValue.GetHostIpOrName()
        groupname = self.user

        try:
            groupid = grp.getgrnam(groupname).gr_gid
        except Exception:
            self.logger.debug("group %s has been deleted." % groupname)
            sys.exit(0)

        cmd = "cat /etc/passwd | awk -F [:] '{print $1  \" \"$4}'" \
              "|grep ' %s$'" % groupid
        (status, output) = subprocess.getstatusoutput(cmd)
        if status == 0:
            self.logger.logExit(
                "Warning: There are other users in the group %s on %s,"
                " skip to delete group." % (groupname, hostName))
        elif status == 1:
            cmd = "groupdel %s" % groupname
            (status, output) = subprocess.getstatusoutput(cmd)
            if status != 0:
                self.logger.logExit("Warning: Failed to delete group "
                                    "%s by cmd:%s. Error: \n%s" %
                                    (groupname, cmd, output))
        else:
            self.logger.logExit("Warning: Failed to delete group "
                                "%s by cmd:%s. Error: \n%s" %
                                (groupname, cmd, output))
        self.logger.debug("Successfully cleaned user group.")
Example #6
def receiveFile(fileName, host, user, path, passwd=None):
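    """
    function: receive files from a remote host, via paramiko sftp when a
              password is given, otherwise via pssh/pscp as the given user
    input : fileName, host, user, path, passwd
    output: NA
    """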
    # Receive remote files
    t = None
    if (passwd):
        try:
            import paramiko
            t = paramiko.Transport((host, 22))
            t.connect(username=user, password=passwd)
            sftp = paramiko.SFTPClient.from_transport(t)
            if (type(fileName) == list):
                for fname in fileName:
                    sftp.get(fname, os.path.join(path,
                                                 os.path.basename(fname)))
            else:
                sftp.get(fileName, os.path.join(path, fileName))
        except Exception as e:
            raise Exception(str(e))
        finally:
            if (t):
                t.close()
    else:
        if "HOST_IP" not in list(os.environ.keys()):
            host = "%s@%s" % (user, host)
        cmd = "pssh -s -H %s 'pscp -H %s %s %s' " % (
            host, DefaultValue.GetHostIpOrName(), fileName, path)
        if (os.getuid() == 0):
            cmd = "su - %s -c \"%s\"" % (user, cmd)
        runShellCmd(cmd)
Example #7
 def __init__(self, logFile, user, clusterConf, dwsMode=False):
     """
     function: Constructor
     input : logFile, user, clusterConf, dwsMode
     output: NA
     """
     LocalBaseOM.__init__(self, logFile, user, clusterConf, dwsMode)
     if (self.clusterConfig == ""):
         # Read config from static config file
         self.readConfigInfo()
     else:
         self.clusterInfo = dbClusterInfo()
         self.clusterInfo.initFromXml(self.clusterConfig)
         hostName = DefaultValue.GetHostIpOrName()
         self.dbNodeInfo = self.clusterInfo.getDbNodeByName(hostName)
         if (self.dbNodeInfo is None):
             self.logger.logExit(ErrorCode.GAUSS_516["GAUSS_51619"] %
                                 hostName)
     # get user info
     self.getUserInfo()
     if (user != "" and self.user != user.strip()):
         self.logger.debug("User parameter : %s." % user)
         self.logger.logExit(ErrorCode.GAUSS_503["GAUSS_50315"] %
                             (self.user, self.clusterInfo.appPath))
     # init every component
     self.initComponent()
Example #8
    def readConfigInfo(self):
        """
        function: Read config from static config file
        input : NA
        output: NA
        """
        try:
            self.clusterInfo = dbClusterInfo()
            hostName = DefaultValue.GetHostIpOrName()
            dynamicFileExist = False
            if self.__class__.__name__ == "Start":
                dynamicFileExist = \
                    self.clusterInfo.dynamicConfigExists(self.user)
            if dynamicFileExist:
                self.clusterInfo.readDynamicConfig(self.user)
                self.dbNodeInfo = self.clusterInfo.getDbNodeByName(hostName)
            else:
                self.clusterInfo.initFromStaticConfig(self.user)
                self.dbNodeInfo = self.clusterInfo.getDbNodeByName(hostName)
            if self.dbNodeInfo is None:
                self.logger.logExit(ErrorCode.GAUSS_516["GAUSS_51619"] %
                                    hostName)
        except Exception as e:
            self.logger.logExit(str(e))

        self.logger.debug("Instance information on local node:\n%s" %
                          str(self.dbNodeInfo))
Example #9
    def cleanOtherNodesLog(self):
        """
        function: clean other nodes log
        input : NA
        output: NA
        """
        # check if local mode
        if self.localMode:
            return
        self.logger.log("Deleting logs of other nodes.")
        try:
            # get other nodes
            hostName = DefaultValue.GetHostIpOrName()
            otherNodes = self.clusterInfo.getClusterNodeNames()
            for otherNode in otherNodes:
                if (otherNode == hostName):
                    otherNodes.remove(otherNode)

            # clean log
            cmd = "rm -rf '%s/%s'; rm -rf /tmp/gauss_*;" % (
                self.clusterInfo.logPath, self.user)
            cmd += "rm -rf '%s/Python-2.7.9'" \
                   % DefaultValue.getClusterToolPath()
            self.logger.debug("Command for deleting logs of other nodes: %s" %
                              cmd)
            DefaultValue.execCommandWithMode(cmd, "delete user log directory",
                                             self.sshTool, self.localMode,
                                             self.mpprcFile, otherNodes)
            self.logger.debug("Successfully deleted logs of the nodes: %s." %
                              otherNodes)
        except Exception as e:
            self.logger.logExit(ErrorCode.GAUSS_502["GAUSS_50207"] %
                                "other nodes log" + " Error: \n%s." % str(e))
        self.logger.log("Successfully deleted logs of other nodes.")
Example #10
    def checkLogFilePath(self):
        """
        function: Check log file path
        input : NA
        output: NA
        """
        clusterPath = []

        try:
            self.logger.log("Check log file path.", "addStep")
            # get tool path
            clusterPath.append(DefaultValue.getClusterToolPath())

            # get tmp path
            tmpDir = DefaultValue.getTmpDir(self.user, self.xmlFile)
            clusterPath.append(tmpDir)

            # get cluster  path
            hostName = DefaultValue.GetHostIpOrName()
            dirs = self.clusterInfo.getClusterDirectorys(hostName, False)
            for checkdir in dirs.values():
                clusterPath.extend(checkdir)

            self.logger.debug("Cluster paths %s." % clusterPath)
            # check directory
            g_file.checkIsInDirectory(self.logFile, clusterPath)
            self.logger.log("Successfully checked log file path.", "constant")
        except Exception as e:
            self.logger.logExit(str(e))
Example #11
 def __init__(self, name):
     '''
     Constructor
     '''
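     # check item metadata and runtime context, filled in later (see initFrom)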
     self.name = name
     self.title = None
     self.set = False
     self.log = None
     self.suggestion = None
     self.standard = None
     self.threshold = {}
     self.category = 'other'
     self.permission = 'user'
     self.analysis = 'default'
     self.scope = 'all'
     self.cluster = None
     self.port = None
     self.user = None
     self.nodes = None
     self.mpprcFile = None
     self.thresholdDn = None
     self.context = None
     self.tmpPath = None
     self.outPath = None
     self.host = DefaultValue.GetHostIpOrName()
     self.result = LocalItemResult(name, self.host)
     self.routing = None
     self.skipSetItem = []
     self.ipAddr = None
     # self cluster name not only lc
     self.LCName = None
     self.ShrinkNodes = None
Example #12
def runSshCmd(cmd, host, user="", mpprcFile="", timeout=""):
    """
    function: run ssh cmd
    input  : cmd, host, user, mpprcFile, timeout
    output : str
    """
    if (timeout):
        timeout = "-o ConnectTimeout=%s" % timeout
    if (mpprcFile):
        cmd = "source '%s'; %s" % (mpprcFile, cmd)
    # Set the output LANG to English
    cmd = "export LC_ALL=C; %s" % cmd
    # RedHat does not automatically source /etc/profile when commands are
    # run remotely over ssh, but SuSE does. Some environment variables are
    # written in /etc/profile when no separate environment file is used.
    if (host == DefaultValue.GetHostIpOrName()):
        sshCmd = cmd
    else:
        sshCmd = "pssh -s -H %s %s 'source /etc/profile 2>/dev/null;%s'" % (
            host, timeout, cmd)
    if (user and user != getCurrentUser()):
        sshCmd = "su - %s -c \"%s\"" % (user, sshCmd)
    (status, output) = subprocess.getstatusoutput(sshCmd)
    if (status != 0):
        raise SshCommandException(host, sshCmd, output)
    return output
Example #13
    def __init__(self, dropnode):
        """
        function: Constructor
        input : dropnode
        output: NA
        """
        self.context = dropnode
        self.user = self.context.user
        self.userProfile = self.context.userProfile
        self.group = self.context.group
        self.backupFilePrimary = ''
        self.localhostname = DefaultValue.GetHostIpOrName()
        self.logger = self.context.logger
        self.resultDictOfPrimary = []
        self.replSlot = ''
        envFile = DefaultValue.getEnv("MPPDB_ENV_SEPARATE_PATH")
        if envFile:
            self.envFile = envFile
        else:
            self.envFile = "/etc/profile"
        gphomepath = DefaultValue.getEnv("GPHOME")
        if gphomepath:
            self.gphomepath = gphomepath
        else:
            (status, output) = subprocess.getstatusoutput("which gs_om")
            if "no gs_om in" in output:
                raise Exception(ErrorCode.GAUSS_518["GAUSS_51800"] % "$GPHOME")
            self.gphomepath = os.path.normpath(output.replace("/gs_om", ""))
        self.appPath = self.context.clusterInfo.appPath
        self.gsql_path = "source %s;%s/bin/gsql" % (self.userProfile,
                                                    self.appPath)

        currentTime = str(datetime.datetime.now()).replace(" ", "_").replace(
            ".", "_")
        self.dnIdForDel = []
        for hostDelName in self.context.hostMapForDel.keys():
            self.dnIdForDel += self.context.hostMapForDel[hostDelName]['dn_id']
        self.commonOper = OperCommon(dropnode)
Example #14
 def doStartCluster(self):
     """
     function: do start cluster
     input: NA
     output: NA
     """
     self.logger.debug("Operating: Starting.")
     # Specifies the stop node
     # Gets the specified node id
     startType = "node" if self.context.g_opts.nodeName != "" else "cluster"
     # Perform a start operation
     self.logger.log("Starting %s." % startType)
     self.logger.log("=========================================")
     hostName = DefaultValue.GetHostIpOrName()
      # get the newest dynamic config and send it to the other nodes
     self.clusterInfo.checkClusterDynamicConfig(self.context.user, hostName)
     if self.context.g_opts.nodeName == "":
         hostList = self.clusterInfo.getClusterNodeNames()
     else:
         hostList = []
         hostList.append(self.context.g_opts.nodeName)
     self.sshTool = SshTool(self.clusterInfo.getClusterNodeNames(), None,
                            DefaultValue.TIMEOUT_CLUSTER_START)
     if self.time_out is None:
         time_out = DefaultValue.TIMEOUT_CLUSTER_START
     else:
         time_out = self.time_out
      cmd = "source %s; %s -U %s -R %s -t %s --security-mode=%s" % (
          self.context.g_opts.mpprcFile,
          OMCommand.getLocalScript("Local_StartInstance"),
          self.context.user, self.context.clusterInfo.appPath, time_out,
          self.context.g_opts.security_mode)
     if self.dataDir != "":
         cmd += " -D %s" % self.dataDir
     starttime = time.time()
     (statusMap, output) = self.sshTool.getSshStatusOutput(cmd, hostList)
     for nodeName in hostList:
         if statusMap[nodeName] != 'Success':
             raise Exception(
                 ErrorCode.GAUSS_536["GAUSS_53600"] % (cmd, output))
     if re.search("another server might be running", output):
         self.logger.log(output)
     if startType == "cluster":
         cmd = "source %s; gs_om -t status|grep cluster_state|grep Normal" \
               % self.context.g_opts.mpprcFile
         while time.time() <= time_out + starttime:
             status = subprocess.getstatusoutput(cmd)[0]
             if status != 0:
                 self.logger.log("Waiting for check cluster state...")
                 time.sleep(5)
             else:
                 break
         if time.time() > time_out + starttime:
             raise Exception(ErrorCode.GAUSS_516["GAUSS_51610"] % "cluster"
                             + "Start timeout, please check the process"
                               " status manually")
     self.logger.log("=========================================")
     self.logger.log("Successfully started.")
     self.logger.debug("Operation succeeded: Start.")
Example #15
    def getScriptLogger():
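        """
        function: create a logger writing to a per-host log file
                  (../output/log/script_<hostname>.log relative to this file)
        input : NA
        output: log
        """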
        filePath = os.path.split(os.path.realpath(__file__))[0]
        afilename = "%s/../output/log/script_%s.log" % (
            filePath, DefaultValue.GetHostIpOrName())

        log = logging.getLogger()
        LoggerFactory._addFileHandle(log, afilename)
        return log
Example #16
    def initFrom(self, context):
        '''
        initialize the check item from context
        '''
        # find the matching check item; fall back to None if it is absent
        item = next((i for i in context.items
                     if i['name'] == self.name), None)
        if item:
            self.title = self.__getLocaleAttr(item, 'title')
            self.suggestion = self.__getLocaleAttr(item, 'suggestion')
            self.standard = self.__getLocaleAttr(item, 'standard')
            if (item.__contains__('category')):
                self.category = item['category']
            if (item.__contains__('threshold')):
                self.threshold = item['threshold']
            # set pre check method
            self.setScope(item['scope'])
            # set post analysis method
            self.setAnalysis(item['analysis'])

        self.context = context
        self.cluster = context.cluster
        self.user = context.user
        self.nodes = context.nodes
        self.mpprcFile = context.mpprc
        self.result.checkID = context.checkID
        self.result.user = context.user
        self.tmpPath = context.tmpPath
        self.outPath = context.outPath
        self.set = context.set
        self.log = context.log
        self.routing = context.routing
        self.skipSetItem = context.skipSetItem
        self.__getLocalIP(context.nodes)
        self.LCName = context.LCName
        self.ShrinkNodes = context.ShrinkNodes
        if not context.thresholdDn:
            self.thresholdDn = 90
        else:
            self.thresholdDn = context.thresholdDn
        # new host without cluster installed
        if (not self.user):
            self.host = DefaultValue.GetHostIpOrName()
            self.result.host = DefaultValue.GetHostIpOrName()
Example #17
 def checkNode(self):
     """
     function: check if the current node is to be uninstalled
     input : NA
     output: NA
     """
     if (len(self.context.g_opts.nodeInfo) != 0
             and self.context.g_opts.hostname
             == DefaultValue.GetHostIpOrName()):
         raise Exception(ErrorCode.GAUSS_516["GAUSS_51631"] % "coordinate" +
                         "\nPlease perform this operation on other nodes "
                         "because this node will be deleted.")
Example #18
 def distributeFiles(self):
     """
     function: distribute package to every host
     input : NA
     output: NA
     """
     self.logger.debug("Distributing files.")
     try:
         # get the all nodes
         hosts = self.clusterInfo.getClusterNodeNames()
         if DefaultValue.GetHostIpOrName() not in hosts:
             raise Exception(ErrorCode.GAUSS_516["GAUSS_51619"] %
                             DefaultValue.GetHostIpOrName())
         hosts.remove(DefaultValue.GetHostIpOrName())
         # Send xml file to every host
         DefaultValue.distributeXmlConfFile(self.sshTool, self.xmlFile,
                                            hosts, self.mpprcFile)
         # Successfully distributed files
         self.logger.debug("Successfully distributed files.")
     except Exception as e:
         # failed to distribute package to every host
         raise Exception(str(e))
Example #19
def is_local_node(host):
    """
    function: check whether the given host is the local node
    input  : host
    output : True or False
    """
    if (host == DefaultValue.GetHostIpOrName()):
        return True
    allNetworkInfo = g_network.getAllNetworkIp()
    for network in allNetworkInfo:
        if (host == network.ipAddress):
            return True
    return False
Example #20
 def doQuery(self):
     """
     function: do query
     input  : NA
     output : NA
     """
     hostName = DefaultValue.GetHostIpOrName()
     sshtool = SshTool(self.context.clusterInfo.getClusterNodeNames())
     cmd = queryCmd()
     if (self.context.g_opts.outFile != ""):
         cmd.outputFile = self.context.g_opts.outFile
     self.context.clusterInfo.queryClsInfo(hostName, sshtool,
                                           self.context.mpprcFile, cmd)
Example #21
    def cleanRemoteOsUser(self):
        """
        function: Clean remote os user
        input : NA
        output: NA
        """
        # check if local mode
        if (self.localMode):
            return

        if (not self.deleteUser):
            # clean static config file
            cmd = "rm -rf '%s'" % self.clusterInfo.appPath
            DefaultValue.execCommandWithMode(cmd, "delete install directory",
                                             self.sshTool, self.localMode,
                                             self.mpprcFile)
            return

        group = grp.getgrgid(pwd.getpwnam(self.user).pw_gid).gr_name

        # get other nodes
        hostName = DefaultValue.GetHostIpOrName()
        otherNodes = self.clusterInfo.getClusterNodeNames()
        for otherNode in otherNodes:
            if (otherNode == hostName):
                otherNodes.remove(otherNode)

        # clean remote user
        self.logger.log("Deleting remote OS user.")
        cmd = "%s -U %s -l %s" % (OMCommand.getLocalScript(
            "Local_Clean_OsUser"), self.user, self.localLog)
        self.logger.debug("Command for deleting remote OS user: %s" % cmd)
        DefaultValue.execCommandWithMode(cmd, "delete OS user", self.sshTool,
                                         self.localMode, self.mpprcFile,
                                         otherNodes)
        self.logger.log("Successfully deleted remote OS user.")

        if (self.deleteGroup):
            # clean remote group
            self.logger.debug("Deleting remote OS group.")
            cmd = "%s -t %s -u %s -l '%s' -X '%s'" % (
                OMCommand.getLocalScript("Local_UnPreInstall"),
                ACTION_DELETE_GROUP, group, self.localLog, self.xmlFile)
            self.logger.debug("Command for deleting remote OS group: %s" % cmd)
            status = self.sshTool.getSshStatusOutput(cmd, otherNodes,
                                                     self.mpprcFile)[0]
            outputMap = self.sshTool.parseSshOutput(otherNodes)
            for node in status.keys():
                if (status[node] != DefaultValue.SUCCESS):
                    self.logger.log((outputMap[node]).strip("\n"))
            self.logger.debug("Deleting remote group is completed.")
Example #22
    def checkRemoteFileExist(self, filepath):
        """
        function: check whether the file exists on the other cluster nodes
        input : filepath
        output: list of node names on which the file exists
        """
        existNodes = []
        for nodeName in self.context.clusterInfo.getClusterNodeNames():
            if (nodeName == DefaultValue.GetHostIpOrName()):
                continue
            if (self.sshTool.checkRemoteFileExist(nodeName, filepath, "")):
                existNodes.append(nodeName)

        return existNodes
Example #23
    def cleanDirectory(self):
        """
        function: clean install/instance/temp dirs
        input : NA
        output: NA
        """
        # clean instance path
        hostName = DefaultValue.GetHostIpOrName()
        dbNodeInfo = self.clusterInfo.getDbNodeByName(hostName)
        instanceDirs = []
        # get DB instance
        for dbInst in dbNodeInfo.datanodes:
            instanceDirs.append(dbInst.datadir)
            if (len(dbInst.ssdDir) != 0):
                instanceDirs.append(dbInst.ssdDir)
        # clean all instances
        if (len(instanceDirs) > 0):
            if (os.path.exists(instanceDirs[0])
                    and len(os.listdir(instanceDirs[0])) == 0):
                self.CleanInstanceDir()
            else:
                self.logger.debug("Instance directory [%s] is not empty. "
                                  "Skip to delete instance's directory." %
                                  instanceDirs[0])
        else:
            self.logger.debug("Instance's directory is not been found. "
                              "Skip to delete instance's directory.")

        # clean install path
        if (os.path.exists(self.clusterInfo.appPath)):
            self.logger.log("Deleting the installation directory.")
            cmd = "rm -rf '%s'" % self.clusterInfo.appPath
            self.logger.debug(
                "Command for deleting the installation path: %s" % cmd)
            DefaultValue.execCommandWithMode(cmd, "delete install path",
                                             self.sshTool, self.localMode,
                                             self.mpprcFile)
            self.logger.log("Successfully deleted the installation directory.")

        # clean tmp dir
        self.logger.log("Deleting the temporary directory.")
        tmpDir = DefaultValue.getTmpDir(self.user, self.xmlFile)
        cmd = "rm -rf '%s'; rm -rf /tmp/gs_checkos; rm -rf /tmp/gs_virtualip" \
              % tmpDir
        self.logger.debug("Command for deleting the temporary directory: %s" %
                          cmd)
        DefaultValue.execCommandWithMode(cmd, "delete the temporary directory",
                                         self.sshTool, self.localMode,
                                         self.mpprcFile)
        self.logger.log("Successfully deleted the temporary directory.")
Example #24
 def distributeEncryptFiles(self):
     """
     function: distribute encrypt files
     input: NA
     output: NA
     """
     # distribute encrypt files to remote host
     # get local hostname
     localHostName = DefaultValue.GetHostIpOrName()
     # get all node names
     hostList = self.context.clusterInfo.getClusterNodeNames()
     # remove the local hostname from hostList
     hostList.remove(localHostName)
     DefaultValue.distributeEncryptFiles(self.context.clusterInfo.appPath,
                                         hostList)
Example #25
 def doRefreshConf(self):
     """
     function: do refresh conf
     input  : NA
     output : NA
     """
     if self.context.clusterInfo.isSingleNode():
         self.logger.log(
             "No need to generate dynamic configuration file for one node.")
         return
     self.logger.log("Generating dynamic configuration file for all nodes.")
     hostName = DefaultValue.GetHostIpOrName()
     sshtool = SshTool(self.context.clusterInfo.getClusterNodeNames())
     self.context.clusterInfo.createDynamicConfig(self.context.user,
                                                  hostName, sshtool)
     self.logger.log("Successfully generated dynamic configuration file.")
Example #26
 def del_remote_pkgpath(self):
     """
     delete remote package path om scripts, lib and version.cfg
     :return:
     """
     if not self.context.is_new_root_path:
         current_path = self.get_package_path()
         script = os.path.join(current_path, "script")
         hostList = self.context.clusterInfo.getClusterNodeNames()
         hostList.remove(DefaultValue.GetHostIpOrName())
         if not self.context.localMode and hostList:
             cmd = "rm -f %s/gs_*" % script
             self.context.sshTool.executeCommand(cmd, "",
                                                 DefaultValue.SUCCESS,
                                                 hostList,
                                                 self.context.mpprcFile)
Example #27
    def __init__(self,
                 logFile,
                 user,
                 clusterConf,
                 dwsMode=False,
                 mpprcFile="",
                 installPath="",
                 alarmComponent="",
                 upgrade=False):
        """
        function: Constructor
        input : logFile, user, clusterConf, dwsMode, mpprcFile, installPath
                alarmComponent, upgrade
        output: NA
        """
        LocalBaseOM.__init__(self, logFile, user, clusterConf, dwsMode)

        if self.clusterConfig == "":
            # Read config from static config file
            self.readConfigInfo()
        else:
            self.clusterInfo = dbClusterInfo()
            self.clusterInfo.initFromXml(self.clusterConfig,
                                         g_opts.static_config_file)
            hostName = DefaultValue.GetHostIpOrName()
            self.dbNodeInfo = self.clusterInfo.getDbNodeByName(hostName)
            if self.dbNodeInfo is None:
                self.logger.logExit(ErrorCode.GAUSS_516["GAUSS_51619"] %
                                    hostName)
        # get user info
        self.getUserInfo()
        if user != "" and self.user != user.strip():
            self.logger.debug("User parameter : %s." % user)
            self.logger.logExit(ErrorCode.GAUSS_503["GAUSS_50315"] %
                                (self.user, self.clusterInfo.appPath))
        # init every component
        self.initComponent()

        self.mpprcFile = mpprcFile
        self.installPath = installPath
        self.alarmComponent = alarmComponent
        self.upgrade = upgrade
        # This script will be not validating the parameters.
        # Because this should be detected by which instance call
        #  this local script.
        self.productVersion = None
        self.time_out = None
Example #28
 def checkLogicCluster(self):
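      """
      function: look for *.cluster_static_config files and return the
                logic cluster name and local node information if found
      input : NA
      output: [lcName, dbnode] or ["", None]
      """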
     clusterInfo = dbClusterInfo()
     staticConfigDir = os.path.join(self.cluster.appPath, "bin")
      cmd = "find %s -name '*.cluster_static_config'" % staticConfigDir
     output = SharedFuncs.runShellCmd(cmd)
     if output:
         for staticConfigFile in output.splitlines():
             clusterInfo.initFromStaticConfig(self.user, staticConfigFile,
                                              True)
             lcName = os.path.splitext(
                 os.path.basename(staticConfigFile))[0]
             for dbnode in clusterInfo.dbNodes:
                 if (dbnode.name == DefaultValue.GetHostIpOrName()):
                     return [lcName, dbnode]
         return ["", None]
     else:
         return ["", None]
Example #29
    def doRemoteBackup(self):
        """
        function: Perform a remote backup of cluster files
        input : NA
        output: NA
        """
        self.context.logger.log("Performing remote backup.")
        localHostName = DefaultValue.GetHostIpOrName()
        tmp_backupDir = "%s/backupTemp_%d" % (
            DefaultValue.getTmpDirFromEnv(), os.getpid())
        cmd = "%s -U %s --nodeName %s -P %s -B %s  -l %s --ingore_miss" % \
              (OMCommand.getLocalScript("Local_Backup"),
               self.context.user,
               localHostName,
               tmp_backupDir,
               self.context.backupDir,
               self.context.localLog)

        if self.context.isParameter:
            cmd += " -p"
        if self.context.isBinary:
            cmd += " -b"
        self.context.logger.debug("Remote backup command is %s." % cmd)

        try:
            if (not os.path.exists(tmp_backupDir)):
                os.makedirs(tmp_backupDir,
                            DefaultValue.KEY_DIRECTORY_PERMISSION)

            (status, output) = self.context.sshTool.getSshStatusOutput(cmd)
            for node in status.keys():
                if (status[node] != DefaultValue.SUCCESS):
                    raise Exception(output)

            if self.context.isParameter:
                self.__distributeBackupFile(tmp_backupDir, "parameter")
            if self.context.isBinary:
                self.__distributeBackupFile(tmp_backupDir, "binary")

            DefaultValue.cleanFileDir(tmp_backupDir, self.context.sshTool)

            self.context.logger.log("Remote backup succeeded.")
            self.context.logger.log("Successfully backed up cluster files.")
        except Exception as e:
            DefaultValue.cleanFileDir(tmp_backupDir, self.context.sshTool)
            raise Exception(str(e))
Example #30
 def doStatus(self):
     """
      function: Get the status of cluster or node
      input : NA
      output: NA
     """
     hostName = DefaultValue.GetHostIpOrName()
     sshtool = SshTool(self.context.clusterInfo.getClusterNodeNames())
     nodeId = 0
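      # resolve the id of the node specified on the command line, if any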
     if (self.context.g_opts.nodeName != ""):
         for dbnode in self.context.clusterInfo.dbNodes:
             if dbnode.name == self.context.g_opts.nodeName:
                 nodeId = dbnode.id
         if (nodeId == 0):
             raise Exception(ErrorCode.GAUSS_516["GAUSS_51619"] %
                             self.context.g_opts.nodeName)
     cmd = queryCmd()
     if (self.context.g_opts.outFile != ""):
         cmd.outputFile = self.context.g_opts.outFile
     else:
         cmd.outputFile = self.logger.logFile
     if (self.context.g_opts.show_detail):
         if (self.context.clusterInfo.clusterType ==
                 DefaultValue.CLUSTER_TYPE_SINGLE_PRIMARY_MULTI_STANDBY):
             cmd.dataPathQuery = True
             cmd.azNameQuery = True
         else:
             cmd.dataPathQuery = True
     else:
         if (nodeId > 0):
             self.context.clusterInfo.queryNodeInfo(sshtool, hostName,
                                                    nodeId, cmd.outputFile)
             return
         az_name = self.context.g_opts.azName
         if (az_name):
             self.context.clusterInfo.queryNodeInfo(sshtool, hostName,
                                                    nodeId, cmd.outputFile,
                                                    az_name)
             return
         if (self.context.g_opts.showAll):
             self.context.clusterInfo.queryNodeInfo(sshtool, hostName,
                                                    nodeId, cmd.outputFile)
             return
         cmd.clusterStateQuery = True
     self.context.clusterInfo.queryClsInfo(hostName, sshtool,
                                           self.context.mpprcFile, cmd)