Example #1
    def __init__(self, Environment, randseed=None):
        self.Env = EnvFactory().getInstance()
        self.templates = PatternSelector(self.Env["Name"])
        self.__InitialConditions()
        self.logger = LogFactory()
        self.TestLoggingLevel = 0
        self.data = {}
        self.name = self.Env["Name"]

        self.rsh = RemoteFactory().getInstance()
        self.ShouldBeStatus = {}
        self.ns = NodeStatus(self.Env)
        self.OurNode = os.uname()[1].lower()
        self.__instance_errorstoignore = []

        self.fastfail = 0
        self.cib_installed = 0
        self.config = None
        self.cluster_monitor = 0
        self.use_short_names = 1

        if self.Env["DoBSC"]:
            del self.templates["Pat:They_stopped"]

        self._finalConditions()

        self.check_transitions = 0
        self.check_elections = 0
        self.CIBsync = {}
        self.CibFactory = ConfigFactory(self)
        self.cib = self.CibFactory.createConfig(self.Env["Schema"])
Example #2
class crm_ais(crm_lha):
    '''
    The crm version 3 cluster manager class.
    It implements the things we need to talk to and manipulate
    crm clusters running on top of openais
    '''
    def __init__(self, Environment, randseed=None, name=None):
        if not name: name="crm-ais"
        crm_lha.__init__(self, Environment, randseed=randseed, name=name)

        self.fullcomplist = {}
        self.templates = PatternSelector(self.name)

    def NodeUUID(self, node):
        return node

    def ais_components(self, extra={}):

        complist = []
        if not len(self.fullcomplist.keys()):
            for c in ["cib", "lrmd", "crmd", "attrd" ]:
                self.fullcomplist[c] = Process(
                    self, c, 
                    pats = self.templates.get_component(self.name, c),
                    badnews_ignore = self.templates.get_component(self.name, "%s-ignore" % c),
                    common_ignore = self.templates.get_component(self.name, "common-ignore"))

            # pengine uses dc_pats instead of pats
            self.fullcomplist["pengine"] = Process(
                self, "pengine", 
                dc_pats = self.templates.get_component(self.name, "pengine"),
                badnews_ignore = self.templates.get_component(self.name, "pengine-ignore"),
                common_ignore = self.templates.get_component(self.name, "common-ignore"))

            # stonith-ng's process name is different from its component name
            self.fullcomplist["stonith-ng"] = Process(
                self, "stonith-ng", process="stonithd", 
                pats = self.templates.get_component(self.name, "stonith"),
                badnews_ignore = self.templates.get_component(self.name, "stonith-ignore"),
                common_ignore = self.templates.get_component(self.name, "common-ignore"))

            # add (or replace) any extra components passed in
            self.fullcomplist.update(extra)

        # Processes running under valgrind can't be shot with "killall -9 processname",
        # so don't include them in the returned list
        vgrind = self.Env["valgrind-procs"].split()
        for key in list(self.fullcomplist.keys()):
            if self.Env["valgrind-tests"]:
                if key in vgrind:
                    self.log("Filtering %s from the component list as it is being profiled by valgrind" % key)
                    continue
            if key == "stonith-ng" and not self.Env["DoFencing"]:
                continue
            complist.append(self.fullcomplist[key])

        return complist
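A note on the valgrind filtering above: components whose process is being profiled by valgrind are skipped because "killall -9" cannot stop them cleanly, and the fencer is skipped when fencing is disabled. Below is a minimal, self-contained sketch of just that filtering step; the plain dictionaries and the filter_components helper are illustrative stand-ins, not part of the real EnvFactory/Process objects used in the example.

def filter_components(fullcomplist, env):
    """Return only the components that are safe to kill directly.

    Components profiled by valgrind are left out (killall -9 will not
    stop them cleanly), and the fencer is left out when fencing is off.
    """
    vgrind = env["valgrind-procs"].split()
    complist = []
    for key in list(fullcomplist.keys()):
        if env["valgrind-tests"] and key in vgrind:
            continue  # being profiled by valgrind; leave it alone
        if key == "stonith-ng" and not env["DoFencing"]:
            continue  # fencing disabled, so no fencer component to test
        complist.append(fullcomplist[key])
    return complist

# Toy usage with made-up data:
fake_env = {"valgrind-procs": "cib", "valgrind-tests": 1, "DoFencing": 0}
fake_components = {"cib": "cib", "crmd": "crmd", "stonith-ng": "stonithd"}
print(filter_components(fake_components, fake_env))   # ['crmd']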
Example #3
    def __init__(self, Environment, randseed=None, name=None):
        if not name: name = "crm-corosync"
        crm_common.__init__(self, Environment, randseed=randseed, name=name)

        self.fullcomplist = {}
        self.templates = PatternSelector(self.name)

        if self.Env["have_systemd"]:
            self.update({
                # When systemd is in use, we can look for this instead
                "Pat:We_stopped":
                "%s.*Corosync Cluster Engine exiting normally",
            })
Example #4
    def __init__(self, Environment, randseed=None):
        self.Env = EnvFactory().getInstance()
        self.templates = PatternSelector(self.Env["Name"])
        self.__InitialConditions()
        self.logger = LogFactory()
        self.TestLoggingLevel=0
        self.data = {}
        self.name = self.Env["Name"]

        self.rsh = RemoteFactory().getInstance()
        self.ShouldBeStatus={}
        self.ns = NodeStatus(self.Env)
        self.OurNode = os.uname()[1].lower()
        self.__instance_errorstoignore = []
Example #5
    def __init__(self, Environment, randseed=None, name=None):
        if not name:
            name = "crm-ais"
        crm_lha.__init__(self, Environment, randseed=randseed, name=name)

        self.fullcomplist = {}
        self.templates = PatternSelector(self.name)
class crm_corosync(crm_common):
    '''
    Corosync version 2 cluster manager class
    '''
    def __init__(self, Environment, randseed=None, name=None):
        if not name: name="crm-corosync"
        crm_common.__init__(self, Environment, randseed=randseed, name=name)

        self.fullcomplist = {}
        self.templates = PatternSelector(self.name)

    def Components(self):
        complist = []
        if not len(list(self.fullcomplist.keys())):
            for c in [ "pacemaker-based", "pacemaker-controld", "pacemaker-attrd", "pacemaker-execd", "pacemaker-fenced" ]:
                self.fullcomplist[c] = Process(
                    self, c, 
                    pats = self.templates.get_component(self.name, c),
                    badnews_ignore = self.templates.get_component(self.name, "%s-ignore" % c),
                    common_ignore = self.templates.get_component(self.name, "common-ignore"))

            # the scheduler uses dc_pats instead of pats
            self.fullcomplist["pacemaker-schedulerd"] = Process(
                self, "pacemaker-schedulerd", 
                dc_pats = self.templates.get_component(self.name, "pacemaker-schedulerd"),
                badnews_ignore = self.templates.get_component(self.name, "pacemaker-schedulerd-ignore"),
                common_ignore = self.templates.get_component(self.name, "common-ignore"))

            # add (or replace) extra components
            self.fullcomplist["corosync"] = Process(
                self, "corosync", 
                pats = self.templates.get_component(self.name, "corosync"),
                badnews_ignore = self.templates.get_component(self.name, "corosync-ignore"),
                common_ignore = self.templates.get_component(self.name, "common-ignore")
            )

        # Processes running under valgrind can't be shot with "killall -9 processname",
        # so don't include them in the returned list
        vgrind = self.Env["valgrind-procs"].split()
        for key in list(self.fullcomplist.keys()):
            if self.Env["valgrind-tests"]:
                if key in vgrind:
                    self.log("Filtering %s from the component list as it is being profiled by valgrind" % key)
                    continue
            if key == "pacemaker-fenced" and not self.Env["DoFencing"]:
                continue
            complist.append(self.fullcomplist[key])

        return complist
Example #7
class crm_corosync(ClusterManager):
    '''
    Corosync version 2 cluster manager class
    '''
    def __init__(self, Environment, randseed=None, name=None):
        if not name: name="crm-corosync"
        ClusterManager.__init__(self, Environment, randseed=randseed)

        self.fullcomplist = {}
        self.templates = PatternSelector(self.name)

    def Components(self):
        complist = []
        if not len(list(self.fullcomplist.keys())):
            for c in [ "pacemaker-based", "pacemaker-controld", "pacemaker-attrd", "pacemaker-execd", "pacemaker-fenced" ]:
                self.fullcomplist[c] = Process(
                    self, c, 
                    pats = self.templates.get_component(self.name, c),
                    badnews_ignore = self.templates.get_component(self.name, "%s-ignore" % c),
                    common_ignore = self.templates.get_component(self.name, "common-ignore"))

            # the scheduler uses dc_pats instead of pats
            self.fullcomplist["pacemaker-schedulerd"] = Process(
                self, "pacemaker-schedulerd", 
                dc_pats = self.templates.get_component(self.name, "pacemaker-schedulerd"),
                badnews_ignore = self.templates.get_component(self.name, "pacemaker-schedulerd-ignore"),
                common_ignore = self.templates.get_component(self.name, "common-ignore"))

            # add (or replace) extra components
            self.fullcomplist["corosync"] = Process(
                self, "corosync", 
                pats = self.templates.get_component(self.name, "corosync"),
                badnews_ignore = self.templates.get_component(self.name, "corosync-ignore"),
                common_ignore = self.templates.get_component(self.name, "common-ignore")
            )

        # Processes running under valgrind can't be shot with "killall -9 processname",
        # so don't include them in the returned list
        vgrind = self.Env["valgrind-procs"].split()
        for key in list(self.fullcomplist.keys()):
            if self.Env["valgrind-tests"]:
                if key in vgrind:
                    self.log("Filtering %s from the component list as it is being profiled by valgrind" % key)
                    continue
            if key == "pacemaker-fenced" and not self.Env["DoFencing"]:
                continue
            complist.append(self.fullcomplist[key])

        return complist
Example #8
    def __init__(self, Environment, randseed=None, name=None):
        if not name: name="crm-corosync"
        crm_common.__init__(self, Environment, randseed=randseed, name=name)

        self.fullcomplist = {}
        self.templates = PatternSelector(self.name)

        if self.Env["have_systemd"]:
            self.update({
                # When systemd is in use, we can look for this instead
                "Pat:We_stopped"   : "%s.*Corosync Cluster Engine exiting normally",
            })
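The "Pat:We_stopped" entry above is a log pattern template: its "%s" placeholder is later filled with a node name and the result is matched against log output (the same convention as the Pat:Fencing_* patterns used elsewhere in these examples). A tiny sketch of that fill-and-match step, using a made-up log line, follows.

import re

# Illustrative only: fill the "%s" placeholder with a node name and
# match against a made-up log line reporting a clean corosync stop.
pattern = "%s.*Corosync Cluster Engine exiting normally" % "node1"
log_line = "node1 corosync[1234]: [MAIN  ] Corosync Cluster Engine exiting normally"

if re.search(pattern, log_line):
    print("node1 reported a clean corosync shutdown")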
Example #9
class ClusterManager(UserDict):
    '''The Cluster Manager class.
    This is a subclass of the Python dictionary class.
    (This is because it contains lots of {name,value} pairs,
    not because its behavior is all that similar to a
    dictionary in other ways.)

    This is an abstract class which implements high-level
    operations on the cluster and/or its cluster managers.
    Actual cluster manager classes are subclassed from this type.

    One of the things we do is track the state we think every node should
    be in.
    '''
    def __InitialConditions(self):
        #if os.geteuid() != 0:
        #  raise ValueError("Must Be Root!")
        pass

    def _finalConditions(self):
        for key in list(self.keys()):
            if self[key] == None:
                raise ValueError("Improper derivation: self[" + key +
                                 "] must be overridden by subclass.")

    def __init__(self, Environment, randseed=None):
        self.Env = EnvFactory().getInstance()
        self.templates = PatternSelector(self.Env["Name"])
        self.__InitialConditions()
        self.logger = LogFactory()
        self.TestLoggingLevel = 0
        self.data = {}
        self.name = self.Env["Name"]

        self.rsh = RemoteFactory().getInstance()
        self.ShouldBeStatus = {}
        self.ns = NodeStatus(self.Env)
        self.OurNode = os.uname()[1].lower()
        self.__instance_errorstoignore = []

        self.fastfail = 0
        self.cib_installed = 0
        self.config = None
        self.cluster_monitor = 0
        self.use_short_names = 1

        if self.Env["DoBSC"]:
            del self.templates["Pat:They_stopped"]

        self._finalConditions()

        self.check_transitions = 0
        self.check_elections = 0
        self.CIBsync = {}
        self.CibFactory = ConfigFactory(self)
        self.cib = self.CibFactory.createConfig(self.Env["Schema"])

    def __getitem__(self, key):
        if key == "Name":
            return self.name

        print("FIXME: Getting %s from %s" % (key, repr(self)))
        if key in self.data:
            return self.data[key]

        return self.templates.get_patterns(self.Env["Name"], key)

    def __setitem__(self, key, value):
        print("FIXME: Setting %s=%s on %s" % (key, value, repr(self)))
        self.data[key] = value

    def key_for_node(self, node):
        return node

    def instance_errorstoignore_clear(self):
        '''Allows the test scenario to reset instance errors to ignore on each iteration.'''
        self.__instance_errorstoignore = []

    def instance_errorstoignore(self):
        '''Return list of errors which are 'normal' for a specific test instance'''
        return self.__instance_errorstoignore

    def log(self, args):
        self.logger.log(args)

    def debug(self, args):
        self.logger.debug(args)

    def upcount(self):
        '''How many nodes are up?'''
        count = 0
        for node in self.Env["nodes"]:
            if self.ShouldBeStatus[node] == "up":
                count = count + 1
        return count

    def install_support(self, command="install"):
        for node in self.Env["nodes"]:
            self.rsh(node, CTSvars.CRM_DAEMON_DIR + "/cts-support " + command)

    def prepare_fencing_watcher(self, name):
        # If we don't have quorum now but get it as a result of starting this node,
        # then a bunch of nodes might get fenced
        upnode = None
        if self.HasQuorum(None):
            self.debug("Have quorum")
            return None

        if not self.templates["Pat:Fencing_start"]:
            print("No start pattern")
            return None

        if not self.templates["Pat:Fencing_ok"]:
            print("No ok pattern")
            return None

        stonith = None
        stonithPats = []
        for peer in self.Env["nodes"]:
            if self.ShouldBeStatus[peer] != "up":
                stonithPats.append(self.templates["Pat:Fencing_ok"] % peer)
                stonithPats.append(self.templates["Pat:Fencing_start"] % peer)

        stonith = LogWatcher(self.Env["LogFileName"],
                             stonithPats,
                             "StartupFencing",
                             0,
                             hosts=self.Env["nodes"],
                             kind=self.Env["LogWatcher"])
        stonith.setwatch()
        return stonith

    def fencing_cleanup(self, node, stonith):
        peer_list = []
        peer_state = {}

        self.debug(
            "Looking for nodes that were fenced as a result of %s starting" %
            node)

        # If we just started a node, we may now have quorum (and permission to fence)
        if not stonith:
            self.debug("Nothing to do")
            return peer_list

        q = self.HasQuorum(None)
        if not q and len(self.Env["nodes"]) > 2:
            # We didn't gain quorum - we shouldn't have shot anyone
            self.debug("Quorum: %d Len: %d" % (q, len(self.Env["nodes"])))
            return peer_list

        for n in self.Env["nodes"]:
            peer_state[n] = "unknown"

        # Now see if any states need to be updated
        self.debug("looking for: " + repr(stonith.regexes))
        shot = stonith.look(0)
        while shot:
            line = repr(shot)
            self.debug("Found: " + line)
            del stonith.regexes[stonith.whichmatch]

            # Extract node name
            peer = None
            for n in self.Env["nodes"]:
                if re.search(self.templates["Pat:Fencing_ok"] % n, shot):
                    peer = n
                    peer_state[peer] = "complete"
                    self.__instance_errorstoignore.append(
                        self.templates["Pat:Fencing_ok"] % peer)

                elif peer_state[n] != "complete" and re.search(
                        self.templates["Pat:Fencing_start"] % n, shot):
                    # TODO: Correctly detect multiple fencing operations for the same host
                    peer = n
                    peer_state[peer] = "in-progress"
                    self.__instance_errorstoignore.append(
                        self.templates["Pat:Fencing_start"] % peer)

            if not peer:
                self.logger.log("ERROR: Unknown stonith match: %s" % line)

            elif not peer in peer_list:
                self.debug("Found peer: " + peer)
                peer_list.append(peer)

            # Get the next one
            shot = stonith.look(60)

        for peer in peer_list:

            self.debug("   Peer %s was fenced as a result of %s starting: %s" %
                       (peer, node, peer_state[peer]))
            if self.Env["at-boot"]:
                self.ShouldBeStatus[peer] = "up"
            else:
                self.ShouldBeStatus[peer] = "down"

            if peer_state[peer] == "in-progress":
                # Wait for any in-progress operations to complete
                shot = stonith.look(60)
                while len(stonith.regexes) and shot:
                    line = repr(shot)
                    self.debug("Found: " + line)
                    del stonith.regexes[stonith.whichmatch]
                    shot = stonith.look(60)

            # Now make sure the node is alive too
            self.ns.WaitForNodeToComeUp(peer, self.Env["DeadTime"])

            # Poll until it comes up
            if self.Env["at-boot"]:
                if not self.StataCM(peer):
                    time.sleep(self.Env["StartTime"])

                if not self.StataCM(peer):
                    self.logger.log(
                        "ERROR: Peer %s failed to restart after being fenced" %
                        peer)
                    return None

        return peer_list

    def StartaCM(self, node, verbose=False):
        '''Start up the cluster manager on a given node'''
        if verbose:
            self.logger.log("Starting %s on node %s" %
                            (self.templates["Name"], node))
        else:
            self.debug("Starting %s on node %s" %
                       (self.templates["Name"], node))
        ret = 1

        if not node in self.ShouldBeStatus:
            self.ShouldBeStatus[node] = "down"

        if self.ShouldBeStatus[node] != "down":
            return 1

        patterns = []
        # Technically we should always be able to notice ourselves starting
        patterns.append(self.templates["Pat:Local_started"] % node)
        if self.upcount() == 0:
            patterns.append(self.templates["Pat:DC_started"] % node)
        else:
            patterns.append(self.templates["Pat:NonDC_started"] % node)

        watch = LogWatcher(self.Env["LogFileName"],
                           patterns,
                           "StartaCM",
                           self.Env["StartTime"] + 10,
                           hosts=self.Env["nodes"],
                           kind=self.Env["LogWatcher"])

        self.install_config(node)

        self.ShouldBeStatus[node] = "any"
        if self.StataCM(node) and self.cluster_stable(self.Env["DeadTime"]):
            self.logger.log("%s was already started" % (node))
            return 1

        stonith = self.prepare_fencing_watcher(node)
        watch.setwatch()

        if self.rsh(node, self.templates["StartCmd"]) != 0:
            self.logger.log("Warn: Start command failed on node %s" % (node))
            self.fencing_cleanup(node, stonith)
            return None

        self.ShouldBeStatus[node] = "up"
        watch_result = watch.lookforall()

        if watch.unmatched:
            for regex in watch.unmatched:
                self.logger.log("Warn: Startup pattern not found: %s" %
                                (regex))

        if watch_result and self.cluster_stable(self.Env["DeadTime"]):
            #self.debug("Found match: "+ repr(watch_result))
            self.fencing_cleanup(node, stonith)
            return 1

        elif self.StataCM(node) and self.cluster_stable(self.Env["DeadTime"]):
            self.fencing_cleanup(node, stonith)
            return 1

        self.logger.log("Warn: Start failed for node %s" % (node))
        return None

    def StartaCMnoBlock(self, node, verbose=False):
        '''Start up the cluster manager on a given node in non-blocking mode'''

        if verbose:
            self.logger.log("Starting %s on node %s" % (self["Name"], node))
        else:
            self.debug("Starting %s on node %s" % (self["Name"], node))

        self.install_config(node)
        self.rsh(node, self.templates["StartCmd"], synchronous=0)
        self.ShouldBeStatus[node] = "up"
        return 1

    def StopaCM(self, node, verbose=False, force=False):
        '''Stop the cluster manager on a given node'''

        if verbose:
            self.logger.log("Stopping %s on node %s" % (self["Name"], node))
        else:
            self.debug("Stopping %s on node %s" % (self["Name"], node))

        if self.ShouldBeStatus[node] != "up" and force == False:
            return 1

        if self.rsh(node, self.templates["StopCmd"]) == 0:
            # Make sure we can continue even if corosync leaks
            # fdata-* is the old name
            #self.rsh(node, "rm -rf /dev/shm/qb-* /dev/shm/fdata-*")
            self.ShouldBeStatus[node] = "down"
            self.cluster_stable(self.Env["DeadTime"])
            return 1
        else:
            self.logger.log("ERROR: Could not stop %s on node %s" %
                            (self["Name"], node))

        return None

    def StopaCMnoBlock(self, node):
        '''Stop the cluster manager on a given node in non-blocking mode'''

        self.debug("Stopping %s on node %s" % (self["Name"], node))

        self.rsh(node, self.templates["StopCmd"], synchronous=0)
        self.ShouldBeStatus[node] = "down"
        return 1

    def RereadCM(self, node):
        '''Force the cluster manager on a given node to reread its config
           This may be a no-op on certain cluster managers.
        '''
        rc = self.rsh(node, self.templates["RereadCmd"])
        if rc == 0:
            return 1
        else:
            self.logger.log(
                "Could not force %s on node %s to reread its config" %
                (self["Name"], node))
        return None

    def startall(self, nodelist=None, verbose=False, quick=False):
        '''Start the cluster manager on every node in the cluster.
        We can do it on a subset of the cluster if nodelist is not None.
        '''
        map = {}
        if not nodelist:
            nodelist = self.Env["nodes"]

        for node in nodelist:
            if self.ShouldBeStatus[node] == "down":
                self.ns.WaitForAllNodesToComeUp(nodelist, 300)

        if not quick:
            # This is used for "basic sanity checks", so only start one node ...
            if not self.StartaCM(node, verbose=verbose):
                return 0
            return 1

        # Approximation of SimulStartList for --boot
        watchpats = []
        watchpats.append(self.templates["Pat:DC_IDLE"])
        for node in nodelist:
            watchpats.append(self.templates["Pat:InfraUp"] % node)
            watchpats.append(self.templates["Pat:PacemakerUp"] % node)
            watchpats.append(self.templates["Pat:Local_started"] % node)
            watchpats.append(self.templates["Pat:They_up"] %
                             (nodelist[0], node))

        #   Start all the nodes - at about the same time...
        watch = LogWatcher(self.Env["LogFileName"],
                           watchpats,
                           "fast-start",
                           self.Env["DeadTime"] + 10,
                           hosts=self.Env["nodes"],
                           kind=self.Env["LogWatcher"])
        watch.setwatch()

        if not self.StartaCM(nodelist[0], verbose=verbose):
            return 0
        for node in nodelist:
            self.StartaCMnoBlock(node, verbose=verbose)

        watch.lookforall()
        if watch.unmatched:
            for regex in watch.unmatched:
                self.logger.log("Warn: Startup pattern not found: %s" %
                                (regex))

        if not self.cluster_stable():
            self.logger.log("Cluster did not stabilize")
            return 0

        return 1

    def stopall(self, nodelist=None, verbose=False, force=False):
        '''Stop the cluster managers on every node in the cluster.
        We can do it on a subset of the cluster if nodelist is not None.
        '''

        ret = 1
        map = {}
        if not nodelist:
            nodelist = self.Env["nodes"]
        for node in self.Env["nodes"]:
            if self.ShouldBeStatus[node] == "up" or force == True:
                if not self.StopaCM(node, verbose=verbose, force=force):
                    ret = 0
        return ret

    def rereadall(self, nodelist=None):
        '''Force the cluster managers on every node in the cluster
        to reread their config files.  We can do it on a subset of the
        cluster if nodelist is not None.
        '''

        map = {}
        if not nodelist:
            nodelist = self.Env["nodes"]
        for node in self.Env["nodes"]:
            if self.ShouldBeStatus[node] == "up":
                self.RereadCM(node)

    def statall(self, nodelist=None):
        '''Return the status of the cluster managers in the cluster.
        We can do it on a subset of the cluster if nodelist is not None.
        '''

        result = {}
        if not nodelist:
            nodelist = self.Env["nodes"]
        for node in nodelist:
            if self.StataCM(node):
                result[node] = "up"
            else:
                result[node] = "down"
        return result

    def isolate_node(self, target, nodes=None):
        '''isolate the communication between the nodes'''
        if not nodes:
            nodes = self.Env["nodes"]

        for node in nodes:
            if node != target:
                rc = self.rsh(
                    target,
                    self.templates["BreakCommCmd"] % self.key_for_node(node))
                if rc != 0:
                    self.logger.log(
                        "Could not break the communication between %s and %s: %d"
                        % (target, node, rc))
                    return None
                else:
                    self.debug("Communication cut between %s and %s" %
                               (target, node))
        return 1

    def unisolate_node(self, target, nodes=None):
        '''fix the communication between the nodes'''
        if not nodes:
            nodes = self.Env["nodes"]

        for node in nodes:
            if node != target:
                restored = 0

                # Limit the amount of time we have asynchronous connectivity for
                # Restore both sides as simultaneously as possible
                self.rsh(target,
                         self.templates["FixCommCmd"] %
                         self.key_for_node(node),
                         synchronous=0)
                self.rsh(node,
                         self.templates["FixCommCmd"] %
                         self.key_for_node(target),
                         synchronous=0)
                self.debug("Communication restored between %s and %s" %
                           (target, node))

    def reducecomm_node(self, node):
        '''reduce the communication between the nodes'''
        rc = self.rsh(
            node, self.templates["ReduceCommCmd"] %
            (self.Env["XmitLoss"], self.Env["RecvLoss"]))
        if rc == 0:
            return 1
        else:
            self.logger.log(
                "Could not reduce the communication between the nodes from node: %s"
                % node)
        return None

    def restorecomm_node(self, node):
        '''restore the saved communication between the nodes'''
        rc = 0
        if float(self.Env["XmitLoss"]) != 0 or float(
                self.Env["RecvLoss"]) != 0:
            rc = self.rsh(node, self.templates["RestoreCommCmd"])
        if rc == 0:
            return 1
        else:
            self.logger.log(
                "Could not restore the communication between the nodes from node: %s"
                % node)
        return None

    def oprofileStart(self, node=None):
        if not node:
            for n in self.Env["oprofile"]:
                self.oprofileStart(n)

        elif node in self.Env["oprofile"]:
            self.debug("Enabling oprofile on %s" % node)
            self.rsh(node, "opcontrol --init")
            self.rsh(
                node,
                "opcontrol --setup --no-vmlinux --separate=lib --callgraph=20 --image=all"
            )
            self.rsh(node, "opcontrol --start")
            self.rsh(node, "opcontrol --reset")

    def oprofileSave(self, test, node=None):
        if not node:
            for n in self.Env["oprofile"]:
                self.oprofileSave(test, n)

        elif node in self.Env["oprofile"]:
            self.rsh(node, "opcontrol --dump")
            self.rsh(node, "opcontrol --save=cts.%d" % test)
            # Read back with: opreport -l session:cts.0 image:<directory>/c*
            if None:
                self.rsh(node, "opcontrol --reset")
            else:
                self.oprofileStop(node)
                self.oprofileStart(node)

    def oprofileStop(self, node=None):
        if not node:
            for n in self.Env["oprofile"]:
                self.oprofileStop(n)

        elif node in self.Env["oprofile"]:
            self.debug("Stopping oprofile on %s" % node)
            self.rsh(node, "opcontrol --reset")
            self.rsh(node, "opcontrol --shutdown 2>&1 > /dev/null")

    def StatsExtract(self):
        if not self.Env["stats"]:
            return

        for host in self.Env["nodes"]:
            log_stats_file = "%s/cts-stats.csv" % CTSvars.CRM_DAEMON_DIR
            if host in has_log_stats:
                self.rsh(
                    host,
                    '''bash %s %s stop''' % (log_stats_bin, log_stats_file))
                (rc, lines) = self.rsh(host,
                                       '''cat %s''' % log_stats_file,
                                       stdout=2)
                self.rsh(
                    host,
                    '''bash %s %s delete''' % (log_stats_bin, log_stats_file))

                fname = "cts-stats-%d-nodes-%s.csv" % (len(
                    self.Env["nodes"]), host)
                print("Extracted stats: %s" % fname)
                fd = open(fname, "a")
                fd.writelines(lines)
                fd.close()

    def StatsMark(self, testnum):
        '''Mark the test number in the stats log'''

        global has_log_stats
        if not self.Env["stats"]:
            return

        for host in self.Env["nodes"]:
            log_stats_file = "%s/cts-stats.csv" % CTSvars.CRM_DAEMON_DIR
            if not host in has_log_stats:

                global log_stats
                global log_stats_bin
                script = log_stats
                #script = re.sub("\\\\", "\\\\", script)
                script = re.sub('\"', '\\\"', script)
                script = re.sub("'", "\'", script)
                script = re.sub("`", "\`", script)
                script = re.sub("\$", "\\\$", script)

                self.debug("Installing %s on %s" % (log_stats_bin, host))
                self.rsh(host,
                         '''echo "%s" > %s''' % (script, log_stats_bin),
                         silent=True)
                self.rsh(
                    host,
                    '''bash %s %s delete''' % (log_stats_bin, log_stats_file))
                has_log_stats[host] = 1

            # Now mark it
            self.rsh(host,
                     '''bash %s %s mark %s''' %
                     (log_stats_bin, log_stats_file, testnum),
                     synchronous=0)

    def errorstoignore(self):
        # At some point implement a more elegant solution that
        #   also produces a report at the end
        """ Return a list of known error messages that should be ignored """
        return PatternSelector().get_patterns(self.name, "BadNewsIgnore")

    def install_config(self, node):
        if not self.ns.WaitForNodeToComeUp(node):
            self.log("Node %s is not up." % node)
            return None

        if not node in self.CIBsync and self.Env["ClobberCIB"] == 1:
            self.CIBsync[node] = 1
            self.rsh(node, "rm -f " + CTSvars.CRM_CONFIG_DIR + "/cib*")

            # Only install the CIB on the first node; the others will pick it up from there
            if self.cib_installed == 1:
                return None

            self.cib_installed = 1
            if self.Env["CIBfilename"] == None:
                self.log("Installing Generated CIB on node %s" % (node))
                self.cib.install(node)

            else:
                self.log("Installing CIB (%s) on node %s" %
                         (self.Env["CIBfilename"], node))
                if 0 != self.rsh.cp(
                        self.Env["CIBfilename"], "root@" +
                    (self.templates["CIBfile"] % node)):
                    raise ValueError("Can not scp file to %s %d" % (node))

            self.rsh(
                node, "chown " + CTSvars.CRM_DAEMON_USER + " " +
                CTSvars.CRM_CONFIG_DIR + "/cib.xml")

    def prepare(self):
        '''Finish the Initialization process. Prepare to test...'''

        self.partitions_expected = 1
        for node in self.Env["nodes"]:
            self.ShouldBeStatus[node] = ""
            if self.Env["experimental-tests"]:
                self.unisolate_node(node)
            self.StataCM(node)

    def test_node_CM(self, node):
        '''Report the status of the cluster manager on a given node'''

        watchpats = []
        watchpats.append("Current ping state: (S_IDLE|S_NOT_DC)")
        watchpats.append(self.templates["Pat:NonDC_started"] % node)
        watchpats.append(self.templates["Pat:DC_started"] % node)
        idle_watch = LogWatcher(self.Env["LogFileName"],
                                watchpats,
                                "ClusterIdle",
                                hosts=[node],
                                kind=self.Env["LogWatcher"])
        idle_watch.setwatch()

        out = self.rsh(node, self.templates["StatusCmd"] % node, 1)
        self.debug("Node %s status: '%s'" % (node, out))

        if not out or (out.find('ok') < 0):
            if self.ShouldBeStatus[node] == "up":
                self.log(
                    "Node status for %s is %s but we think it should be %s" %
                    (node, "down", self.ShouldBeStatus[node]))
            self.ShouldBeStatus[node] = "down"
            return 0

        if self.ShouldBeStatus[node] == "down":
            self.log(
                "Node status for %s is %s but we think it should be %s: %s" %
                (node, "up", self.ShouldBeStatus[node], out))

        self.ShouldBeStatus[node] = "up"

        # check the output first - because syslog-ng loses messages
        if out.find('S_NOT_DC') != -1:
            # Up and stable
            return 2
        if out.find('S_IDLE') != -1:
            # Up and stable
            return 2

        # fall back to syslog-ng and wait
        if not idle_watch.look():
            # just up
            self.debug("Warn: Node %s is unstable: %s" % (node, out))
            return 1

        # Up and stable
        return 2

    # Is the node up or is the node down
    def StataCM(self, node):
        '''Report the status of the cluster manager on a given node'''

        if self.test_node_CM(node) > 0:
            return 1
        return None

    # Being up and being stable is not the same question...
    def node_stable(self, node):
        '''Report the status of the cluster manager on a given node'''

        if self.test_node_CM(node) == 2:
            return 1
        self.log("Warn: Node %s not stable" % (node))
        return None

    def partition_stable(self, nodes, timeout=None):
        watchpats = []
        watchpats.append("Current ping state: S_IDLE")
        watchpats.append(self.templates["Pat:DC_IDLE"])
        self.debug("Waiting for cluster stability...")

        if timeout == None:
            timeout = self.Env["DeadTime"]

        if len(nodes) < 3:
            self.debug("Cluster is inactive")
            return 1

        idle_watch = LogWatcher(self.Env["LogFileName"],
                                watchpats,
                                "ClusterStable",
                                timeout,
                                hosts=nodes.split(),
                                kind=self.Env["LogWatcher"])
        idle_watch.setwatch()

        for node in nodes.split():
            # have each node dump its current state
            self.rsh(node, self.templates["StatusCmd"] % node, 1)

        ret = idle_watch.look()
        while ret:
            self.debug(ret)
            for node in nodes.split():
                if re.search(node, ret):
                    return 1
            ret = idle_watch.look()

        self.debug("Warn: Partition %s not IDLE after %ds" %
                   (repr(nodes), timeout))
        return None

    def cluster_stable(self, timeout=None, double_check=False):
        partitions = self.find_partitions()

        for partition in partitions:
            if not self.partition_stable(partition, timeout):
                return None

        if double_check:
            # Make sure we are really stable and that all resources,
            # including those that depend on transient node attributes,
            # are started if they were going to be
            time.sleep(5)
            for partition in partitions:
                if not self.partition_stable(partition, timeout):
                    return None

        return 1

    def is_node_dc(self, node, status_line=None):
        rc = 0

        if not status_line:
            status_line = self.rsh(node, self.templates["StatusCmd"] % node, 1)

        if not status_line:
            rc = 0
        elif status_line.find('S_IDLE') != -1:
            rc = 1
        elif status_line.find('S_INTEGRATION') != -1:
            rc = 1
        elif status_line.find('S_FINALIZE_JOIN') != -1:
            rc = 1
        elif status_line.find('S_POLICY_ENGINE') != -1:
            rc = 1
        elif status_line.find('S_TRANSITION_ENGINE') != -1:
            rc = 1

        return rc

    def active_resources(self, node):
        (rc, output) = self.rsh(node, """crm_resource -c""", None)
        resources = []
        for line in output:
            if re.search("^Resource", line):
                tmp = AuditResource(self, line)
                if tmp.type == "primitive" and tmp.host == node:
                    resources.append(tmp.id)
        return resources

    def ResourceLocation(self, rid):
        ResourceNodes = []
        for node in self.Env["nodes"]:
            if self.ShouldBeStatus[node] == "up":

                cmd = self.templates["RscRunning"] % (rid)
                (rc, lines) = self.rsh(node, cmd, None)

                if rc == 127:
                    self.log(
                        "Command '%s' failed. Binary or pacemaker-cts package not installed?"
                        % cmd)
                    for line in lines:
                        self.log("Output: " + line)
                elif rc == 0:
                    ResourceNodes.append(node)

        return ResourceNodes

    def find_partitions(self):
        ccm_partitions = []

        for node in self.Env["nodes"]:
            if self.ShouldBeStatus[node] == "up":
                partition = self.rsh(node, self.templates["PartitionCmd"], 1)

                if not partition:
                    self.log("no partition details for %s" % node)
                elif len(partition) > 2:
                    nodes = partition.split()
                    nodes.sort()
                    partition = ' '.join(nodes)

                    found = 0
                    for a_partition in ccm_partitions:
                        if partition == a_partition:
                            found = 1
                    if found == 0:
                        self.debug("Adding partition from %s: %s" %
                                   (node, partition))
                        ccm_partitions.append(partition)
                    else:
                        self.debug(
                            "Partition '%s' from %s is consistent with existing entries"
                            % (partition, node))

                else:
                    self.log("bad partition details for %s" % node)
            else:
                self.debug("Node %s is down... skipping" % node)

        self.debug("Found partitions: %s" % repr(ccm_partitions))
        return ccm_partitions

    def HasQuorum(self, node_list):
        # If we are auditing a partition, then one side will
        #   have quorum and the other not.
        # So the caller needs to tell us which we are checking
        # If no value for node_list is specified... assume all nodes
        if not node_list:
            node_list = self.Env["nodes"]

        for node in node_list:
            if self.ShouldBeStatus[node] == "up":
                quorum = self.rsh(node, self.templates["QuorumCmd"], 1)
                if quorum.find("1") != -1:
                    return 1
                elif quorum.find("0") != -1:
                    return 0
                else:
                    self.debug("WARN: Unexpected quorum test result from " +
                               node + ":" + quorum)

        return 0

    def Components(self):
        complist = []
        common_ignore = [
            "Pending action:",
            "(ERROR|error): crm_log_message_adv:",
            "(ERROR|error): MSG: No message to dump",
            "pending LRM operations at shutdown",
            "Lost connection to the CIB manager",
            "Connection to the CIB terminated...",
            "Sending message to the CIB manager FAILED",
            "Action A_RECOVER .* not supported",
            "(ERROR|error): stonithd_op_result_ready: not signed on",
            "pingd.*(ERROR|error): send_update: Could not send update",
            "send_ipc_message: IPC Channel to .* is not connected",
            "unconfirmed_actions: Waiting on .* unconfirmed actions",
            "cib_native_msgready: Message pending on command channel",
            r": Performing A_EXIT_1 - forcefully exiting ",
            r"Resource .* was active at shutdown.  You may ignore this error if it is unmanaged.",
        ]

        stonith_ignore = [
            r"Updating failcount for child_DoFencing",
            r"error.*: Fencer connection failed \(will retry\)",
            "pacemaker-execd.*(ERROR|error): stonithd_receive_ops_result failed.",
        ]

        stonith_ignore.extend(common_ignore)

        ccm = Process(
            self,
            "ccm",
            triggersreboot=self.fastfail,
            pats=[
                "State transition .* S_RECOVERY",
                "pacemaker-controld.*Action A_RECOVER .* not supported",
                r"pacemaker-controld.*: Input I_TERMINATE .*from do_recover",
                r"pacemaker-controld.*: Could not recover from internal error",
                "pacemaker-controld.*I_ERROR.*crmd_cib_connection_destroy",
                # these status numbers are likely wrong now
                r"pacemaker-controld.*exited with status 2",
                r"attrd.*exited with status 1",
                r"cib.*exited with status 2",

                # Not if it was fenced
                #                    "A new node joined the cluster",

                #                    "WARN: determine_online_status: Node .* is unclean",
                #                    "Scheduling Node .* for STONITH",
                #                    "Executing .* fencing operation",
                #                    "tengine_stonith_callback: .*result=0",
                #                    "Processing I_NODE_JOIN:.* cause=C_HA_MESSAGE",
                #                    "State transition S_.* -> S_INTEGRATION.*input=I_NODE_JOIN",
                "State transition S_STARTING -> S_PENDING",
            ],
            badnews_ignore=common_ignore)

        based = Process(
            self,
            "pacemaker-based",
            triggersreboot=self.fastfail,
            pats=[
                "State transition .* S_RECOVERY",
                "Lost connection to the CIB manager",
                "Connection to the CIB manager terminated",
                r"pacemaker-controld.*: Input I_TERMINATE .*from do_recover",
                "pacemaker-controld.*I_ERROR.*crmd_cib_connection_destroy",
                r"pacemaker-controld.*: Could not recover from internal error",
                # these status numbers are likely wrong now
                r"pacemaker-controld.*exited with status 2",
                r"attrd.*exited with status 1",
            ],
            badnews_ignore=common_ignore)

        execd = Process(
            self,
            "pacemaker-execd",
            triggersreboot=self.fastfail,
            pats=[
                "State transition .* S_RECOVERY",
                "LRM Connection failed",
                "pacemaker-controld.*I_ERROR.*lrm_connection_destroy",
                "State transition S_STARTING -> S_PENDING",
                r"pacemaker-controld.*: Input I_TERMINATE .*from do_recover",
                r"pacemaker-controld.*: Could not recover from internal error",
                # this status number is likely wrong now
                r"pacemaker-controld.*exited with status 2",
            ],
            badnews_ignore=common_ignore)

        controld = Process(
            self,
            "pacemaker-controld",
            triggersreboot=self.fastfail,
            pats=[
                #                    "WARN: determine_online_status: Node .* is unclean",
                #                    "Scheduling Node .* for STONITH",
                #                    "Executing .* fencing operation",
                #                    "tengine_stonith_callback: .*result=0",
                "State transition .* S_IDLE",
                "State transition S_STARTING -> S_PENDING",
            ],
            badnews_ignore=common_ignore)

        schedulerd = Process(
            self,
            "pacemaker-schedulerd",
            triggersreboot=self.fastfail,
            pats=[
                "State transition .* S_RECOVERY",
                r"pacemaker-controld.*: Input I_TERMINATE .*from do_recover",
                r"pacemaker-controld.*: Could not recover from internal error",
                r"pacemaker-controld.*CRIT.*: Connection to the scheduler failed",
                "pacemaker-controld.*I_ERROR.*save_cib_contents",
                # this status number is likely wrong now
                r"pacemaker-controld.*exited with status 2",
            ],
            badnews_ignore=common_ignore,
            dc_only=1)

        if self.Env["DoFencing"] == 1:
            complist.append(
                Process(
                    self,
                    "stoniths",
                    triggersreboot=self.fastfail,
                    dc_pats=[
                        r"pacemaker-controld.*CRIT.*: Fencing daemon connection failed",
                        "Attempting connection to fencing daemon",
                    ],
                    badnews_ignore=stonith_ignore))

        if self.fastfail == 0:
            ccm.pats.extend([
                # these status numbers are likely wrong now
                r"attrd.*exited with status 1",
                r"pacemaker-(based|controld).*exited with status 2",
            ])
            based.pats.extend([
                # these status numbers are likely wrong now
                r"attrd.*exited with status 1",
                r"pacemaker-controld.*exited with status 2",
            ])
            execd.pats.extend([
                # these status numbers are likely wrong now
                r"pacemaker-controld.*exited with status 2",
            ])

        complist.append(ccm)
        complist.append(based)
        complist.append(execd)
        complist.append(controld)
        complist.append(schedulerd)

        return complist

    def StandbyStatus(self, node):
        out = self.rsh(node, self.templates["StandbyQueryCmd"] % node, 1)
        if not out:
            return "off"
        out = out[:-1]
        self.debug("Standby result: " + out)
        return out

    # status == "on" : Enter Standby mode
    # status == "off": Enter Active mode
    def SetStandbyMode(self, node, status):
        current_status = self.StandbyStatus(node)
        cmd = self.templates["StandbyCmd"] % (node, status)
        ret = self.rsh(node, cmd)
        return True

    def AddDummyRsc(self, node, rid):
        rsc_xml = """ '<resources>
                <primitive class=\"ocf\" id=\"%s\" provider=\"pacemaker\" type=\"Dummy\">
                    <operations>
                        <op id=\"%s-interval-10s\" interval=\"10s\" name=\"monitor\"/>
                    </operations>
                </primitive>
            </resources>'""" % (rid, rid)
        constraint_xml = """ '<constraints>
                <rsc_location id=\"location-%s-%s\" node=\"%s\" rsc=\"%s\" score=\"INFINITY\"/>
            </constraints>'
            """ % (rid, node, node, rid)

        self.rsh(node, self.templates['CibAddXml'] % (rsc_xml))
        self.rsh(node, self.templates['CibAddXml'] % (constraint_xml))

    def RemoveDummyRsc(self, node, rid):
        constraint = "\"//rsc_location[@rsc='%s']\"" % (rid)
        rsc = "\"//primitive[@id='%s']\"" % (rid)

        self.rsh(node, self.templates['CibDelXpath'] % constraint)
        self.rsh(node, self.templates['CibDelXpath'] % rsc)
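As the class docstring says, one of the things ClusterManager does is track the state each node should be in, via the ShouldBeStatus dictionary that methods such as StartaCM, StopaCM and upcount consult. A minimal sketch of that bookkeeping, using a plain dict and placeholder node names instead of the real class, is shown below.

# Placeholder node names; the real code keys ShouldBeStatus by the
# node names taken from the test environment.
should_be_status = {"node1": "up", "node2": "down", "node3": "up"}

def upcount(status):
    """Count the nodes we believe are currently up."""
    return sum(1 for state in status.values() if state == "up")

# After "stopping" node1 we record the state we now expect it to be in:
should_be_status["node1"] = "down"
print(upcount(should_be_status))   # 1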
Example #10
class crm_corosync(crm_common):
    '''
    Corosync version 2 cluster manager class
    '''
    def __init__(self, Environment, randseed=None, name=None):
        if not name: name="crm-corosync"
        crm_common.__init__(self, Environment, randseed=randseed, name=name)

        self.fullcomplist = {}
        self.templates = PatternSelector(self.name)

        if self.Env["have_systemd"]:
            self.update({
                # When systemd is in use, we can look for this instead
                "Pat:We_stopped"   : "%s.*Corosync Cluster Engine exiting normally",
            })

    def Components(self):
        complist = []
        if not len(list(self.fullcomplist.keys())):
            for c in ["cib", "lrmd", "crmd", "attrd" ]:
                self.fullcomplist[c] = Process(
                    self, c, 
                    pats = self.templates.get_component(self.name, c),
                    badnews_ignore = self.templates.get_component(self.name, "%s-ignore" % c),
                    common_ignore = self.templates.get_component(self.name, "common-ignore"))

            # pengine uses dc_pats instead of pats
            self.fullcomplist["pengine"] = Process(
                self, "pengine", 
                dc_pats = self.templates.get_component(self.name, "pengine"),
                badnews_ignore = self.templates.get_component(self.name, "pengine-ignore"),
                common_ignore = self.templates.get_component(self.name, "common-ignore"))

            # stonith-ng's process name is different from its component name
            self.fullcomplist["stonith-ng"] = Process(
                self, "stonith-ng", process="stonithd", 
                pats = self.templates.get_component(self.name, "stonith"),
                badnews_ignore = self.templates.get_component(self.name, "stonith-ignore"),
                common_ignore = self.templates.get_component(self.name, "common-ignore"))

            # add (or replace) extra components
            self.fullcomplist["corosync"] = Process(
                self, "corosync", 
                pats = self.templates.get_component(self.name, "corosync"),
                badnews_ignore = self.templates.get_component(self.name, "corosync-ignore"),
                common_ignore = self.templates.get_component(self.name, "common-ignore")
            )

        # Processes running under valgrind can't be shot with "killall -9 processname",
        # so don't include them in the returned list
        vgrind = self.Env["valgrind-procs"].split()
        for key in list(self.fullcomplist.keys()):
            if self.Env["valgrind-tests"]:
                if key in vgrind:
                    self.log("Filtering %s from the component list as it is being profiled by valgrind" % key)
                    continue
            if key == "stonith-ng" and not self.Env["DoFencing"]:
                continue
            complist.append(self.fullcomplist[key])

        return complist
Example #11
class ClusterManager(UserDict):
    '''The Cluster Manager class.
    This is a subclass of the Python dictionary class.
    (This is because it contains lots of {name,value} pairs,
    not because its behavior is all that similar to a
    dictionary in other ways.)

    This is an abstract class which implements high-level
    operations on the cluster and/or its cluster managers.
    Actual cluster manager classes are subclassed from this type.

    One of the things we do is track the state we think every node should
    be in.
    '''

    def __InitialConditions(self):
        #if os.geteuid() != 0:
        #  raise ValueError("Must Be Root!")
        pass

    def _finalConditions(self):
        for key in list(self.keys()):
            if self[key] == None:
                raise ValueError("Improper derivation: self[" + key +   "] must be overridden by subclass.")

    def __init__(self, Environment, randseed=None):
        self.Env = EnvFactory().getInstance()
        self.templates = PatternSelector(self.Env["Name"])
        self.__InitialConditions()
        self.logger = LogFactory()
        self.clear_cache = 0
        self.TestLoggingLevel=0
        self.data = {}
        self.name = self.Env["Name"]

        self.rsh = RemoteFactory().getInstance()
        self.ShouldBeStatus={}
        self.ns = NodeStatus(self.Env)
        self.OurNode = os.uname()[1].lower()
        self.__instance_errorstoignore = []

    def __getitem__(self, key):
        if key == "Name":
            return self.name

        print("FIXME: Getting %s from %s" % (key, repr(self)))
        if key in self.data:
            return self.data[key]

        return self.templates.get_patterns(self.Env["Name"], key)

    def __setitem__(self, key, value):
        print("FIXME: Setting %s=%s on %s" % (key, value, repr(self)))
        self.data[key] = value

    def key_for_node(self, node):
        return node

    def instance_errorstoignore_clear(self):
        '''Allows the test scenario to reset instance errors to ignore on each iteration.'''
        self.__instance_errorstoignore = []

    def instance_errorstoignore(self):
        '''Return list of errors which are 'normal' for a specific test instance'''
        return self.__instance_errorstoignore

    def errorstoignore(self):
        '''Return list of errors which are 'normal' and should be ignored'''
        return []

    def log(self, args):
        self.logger.log(args)

    def debug(self, args):
        self.logger.debug(args)

    def prepare(self):
        '''Finish the Initialization process. Prepare to test...'''

        print(repr(self)+"prepare")
        for node in self.Env["nodes"]:
            if self.StataCM(node):
                self.ShouldBeStatus[node] = "up"
            else:
                self.ShouldBeStatus[node] = "down"

            self.unisolate_node(node)

    def upcount(self):
        '''How many nodes are up?'''
        count = 0
        for node in self.Env["nodes"]:
          if self.ShouldBeStatus[node] == "up":
            count = count + 1
        return count

    def install_helper(self, filename, destdir=None, nodes=None, sourcedir=None):
        if sourcedir == None:
            sourcedir = CTSvars.CTS_home
        file_with_path = "%s/%s" % (sourcedir, filename)
        if not nodes:
            nodes = self.Env["nodes"]

        if not destdir:
            destdir = CTSvars.CTS_home

        self.debug("Installing %s to %s on %s" % (filename, destdir, repr(self.Env["nodes"])))
        for node in nodes:
            self.rsh(node, "mkdir -p %s" % destdir)
            self.rsh.cp(file_with_path, "root@%s:%s/%s" % (node, destdir, filename))
        return file_with_path
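    # Hypothetical usage: self.install_helper("cts-helper.sh", destdir="/usr/libexec/cts")
    # would push that file from CTS_home to every node (the file name and target
    # directory here are illustrative only).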

    def install_config(self, node):
        return None

    def clear_all_caches(self):
        if self.clear_cache:
            for node in self.Env["nodes"]:
                if self.ShouldBeStatus[node] == "down":
                    self.debug("Removing cache file on: "+node)
                    self.rsh(node, "rm -f "+CTSvars.HA_VARLIBHBDIR+"/hostcache")
                else:
                    self.debug("NOT Removing cache file on: "+node)

    def prepare_fencing_watcher(self, name):
        # If we don't have quorum now but get it as a result of starting this node,
        # then a bunch of nodes might get fenced
        upnode = None
        if self.HasQuorum(None):
            self.debug("Have quorum")
            return None

        if not self.templates["Pat:Fencing_start"]:
            print("No start pattern")
            return None

        if not self.templates["Pat:Fencing_ok"]:
            print("No ok pattern")
            return None

        stonith = None
        stonithPats = []
        for peer in self.Env["nodes"]:
            if self.ShouldBeStatus[peer] != "up":
                stonithPats.append(self.templates["Pat:Fencing_ok"] % peer)
                stonithPats.append(self.templates["Pat:Fencing_start"] % peer)
            elif self.Env["Stack"] == "corosync (cman)":
                # There is a delay between gaining quorum and CMAN starting fencing
                # This can mean that even nodes that are fully up get fenced
                # There is no use fighting it, just look for everyone so that CTS doesn't get confused
                stonithPats.append(self.templates["Pat:Fencing_ok"] % peer)
                stonithPats.append(self.templates["Pat:Fencing_start"] % peer)

        stonith = LogWatcher(self.Env["LogFileName"], stonithPats, "StartupFencing", 0, hosts=self.Env["nodes"], kind=self.Env["LogWatcher"])
        stonith.setwatch()
        return stonith
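    # The LogWatcher returned above is consumed by fencing_cleanup() below,
    # which StartaCM() calls once the start command has been issued.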

    def fencing_cleanup(self, node, stonith):
        peer_list = []
        peer_state = {}

        self.debug("Looking for nodes that were fenced as a result of %s starting" % node)

        # If we just started a node, we may now have quorum (and permission to fence)
        if not stonith:
            self.debug("Nothing to do")
            return peer_list

        q = self.HasQuorum(None)
        if not q and len(self.Env["nodes"]) > 2:
            # We didn't gain quorum - we shouldn't have shot anyone
            self.debug("Quorum: %d Len: %d" % (q, len(self.Env["nodes"])))
            return peer_list

        for n in self.Env["nodes"]:
            peer_state[n] = "unknown"

        # Now see if any states need to be updated
        self.debug("looking for: " + repr(stonith.regexes))
        shot = stonith.look(0)
        while shot:
            # Reset for this log line; stays None if no node pattern matches below
            peer = None
            line = repr(shot)
            self.debug("Found: " + line)
            del stonith.regexes[stonith.whichmatch]

            # Extract node name
            for n in self.Env["nodes"]:
                if re.search(self.templates["Pat:Fencing_ok"] % n, shot):
                    peer = n
                    peer_state[peer] = "complete"
                    self.__instance_errorstoignore.append(self.templates["Pat:Fencing_ok"] % peer)

                elif peer_state[n] != "complete" and re.search(self.templates["Pat:Fencing_start"] % n, shot):
                    # TODO: Correctly detect multiple fencing operations for the same host
                    peer = n
                    peer_state[peer] = "in-progress"
                    self.__instance_errorstoignore.append(self.templates["Pat:Fencing_start"] % peer)

            if not peer:
                self.logger.log("ERROR: Unknown stonith match: %s" % line)

            elif not peer in peer_list:
                self.debug("Found peer: " + peer)
                peer_list.append(peer)

            # Get the next one
            shot = stonith.look(60)

        for peer in peer_list:

            self.debug("   Peer %s was fenced as a result of %s starting: %s" % (peer, node, peer_state[peer]))
            if self.Env["at-boot"]:
                self.ShouldBeStatus[peer] = "up"
            else:
                self.ShouldBeStatus[peer] = "down"

            if peer_state[peer] == "in-progress":
                # Wait for any in-progress operations to complete
                shot = stonith.look(60)
                while len(stonith.regexes) and shot:
                    line = repr(shot)
                    self.debug("Found: " + line)
                    del stonith.regexes[stonith.whichmatch]
                    shot = stonith.look(60)

            # Now make sure the node is alive too
            self.ns.WaitForNodeToComeUp(peer, self.Env["DeadTime"])

            # Poll until it comes up
            if self.Env["at-boot"]:
                if not self.StataCM(peer):
                    time.sleep(self.Env["StartTime"])

                if not self.StataCM(peer):
                    self.logger.log("ERROR: Peer %s failed to restart after being fenced" % peer)
                    return None

        return peer_list

    def StartaCM(self, node, verbose=False):

        '''Start up the cluster manager on a given node'''
        if verbose: self.logger.log("Starting %s on node %s" % (self.templates["Name"], node))
        else: self.debug("Starting %s on node %s" % (self.templates["Name"], node))
        ret = 1

        if not node in self.ShouldBeStatus:
            self.ShouldBeStatus[node] = "down"

        if self.ShouldBeStatus[node] != "down":
            return 1

        patterns = []
        # Technically we should always be able to notice ourselves starting
        patterns.append(self.templates["Pat:Local_started"] % node)
        if self.upcount() == 0:
            patterns.append(self.templates["Pat:Master_started"] % node)
        else:
            patterns.append(self.templates["Pat:Slave_started"] % node)

        watch = LogWatcher(
            self.Env["LogFileName"], patterns, "StartaCM", self.Env["StartTime"]+10, hosts=self.Env["nodes"], kind=self.Env["LogWatcher"])

        self.install_config(node)

        self.ShouldBeStatus[node] = "any"
        if self.StataCM(node) and self.cluster_stable(self.Env["DeadTime"]):
            self.logger.log ("%s was already started" % (node))
            return 1

        # Clear out the host cache so autojoin can be exercised
        if self.clear_cache:
            self.debug("Removing cache file on: "+node)
            self.rsh(node, "rm -f "+CTSvars.HA_VARLIBHBDIR+"/hostcache")

        if not(self.Env["valgrind-tests"]):
            startCmd = self.templates["StartCmd"]
        else:
            if self.Env["valgrind-prefix"]:
                prefix = self.Env["valgrind-prefix"]
            else:
                prefix = "cts"

            startCmd = """G_SLICE=always-malloc HA_VALGRIND_ENABLED='%s' VALGRIND_OPTS='%s --log-file=/tmp/%s-%s.valgrind' %s""" % (
                self.Env["valgrind-procs"], self.Env["valgrind-opts"], prefix, """%p""", self.templates["StartCmd"])

        stonith = self.prepare_fencing_watcher(node)

        watch.setwatch()

        if self.rsh(node, startCmd) != 0:
            self.logger.log ("Warn: Start command failed on node %s" % (node))
            self.fencing_cleanup(node, stonith)
            return None

        self.ShouldBeStatus[node] = "up"
        watch_result = watch.lookforall()

        if watch.unmatched:
            for regex in watch.unmatched:
                self.logger.log ("Warn: Startup pattern not found: %s" % (regex))

        if watch_result and self.cluster_stable(self.Env["DeadTime"]):
            #self.debug("Found match: "+ repr(watch_result))
            self.fencing_cleanup(node, stonith)
            return 1

        elif self.StataCM(node) and self.cluster_stable(self.Env["DeadTime"]):
            self.fencing_cleanup(node, stonith)
            return 1

        self.logger.log ("Warn: Start failed for node %s" % (node))
        return None

    def StartaCMnoBlock(self, node, verbose=False):

        '''Start up the cluster manager on a given node in non-blocking mode'''

        if verbose: self.logger.log("Starting %s on node %s" % (self["Name"], node))
        else: self.debug("Starting %s on node %s" % (self["Name"], node))

        # Clear out the host cache so autojoin can be exercised
        if self.clear_cache:
            self.debug("Removing cache file on: "+node)
            self.rsh(node, "rm -f "+CTSvars.HA_VARLIBHBDIR+"/hostcache")

        self.install_config(node)
        if not(self.Env["valgrind-tests"]):
            startCmd = self.templates["StartCmd"]
        else:
            if self.Env["valgrind-prefix"]:
                prefix = self.Env["valgrind-prefix"]
            else:
                prefix = "cts"

            startCmd = """G_SLICE=always-malloc HA_VALGRIND_ENABLED='%s' VALGRIND_OPTS='%s --log-file=/tmp/%s-%s.valgrind' %s""" % (
                self.Env["valgrind-procs"], self.Env["valgrind-opts"], prefix, """%p""", self.templates["StartCmd"])

        self.rsh(node, startCmd, synchronous=0)
        self.ShouldBeStatus[node] = "up"
        return 1

    def StopaCM(self, node, verbose=False, force=False):

        '''Stop the cluster manager on a given node'''

        if verbose: self.logger.log("Stopping %s on node %s" % (self["Name"], node))
        else: self.debug("Stopping %s on node %s" % (self["Name"], node))

        if self.ShouldBeStatus[node] != "up" and force == False:
            return 1

        if self.rsh(node, self.templates["StopCmd"]) == 0:
            # Make sure we can continue even if corosync leaks
            # fdata-* is the old name
            #self.rsh(node, "rm -f /dev/shm/qb-* /dev/shm/fdata-*")
            self.ShouldBeStatus[node] = "down"
            self.cluster_stable(self.Env["DeadTime"])
            return 1
        else:
            self.logger.log ("ERROR: Could not stop %s on node %s" % (self["Name"], node))

        return None

    def StopaCMnoBlock(self, node):

        '''Stop the cluster manager on a given node in non-blocking mode'''

        self.debug("Stopping %s on node %s" % (self["Name"], node))

        self.rsh(node, self.templates["StopCmd"], synchronous=0)
        self.ShouldBeStatus[node] = "down"
        return 1

    def cluster_stable(self, timeout=None):
        # The base class has no real stability check: it simply waits for
        # StableTime seconds and reports success. Concrete cluster managers
        # presumably override this with log-based checks.
        time.sleep(self.Env["StableTime"])
        return 1

    def node_stable(self, node):
        # Likewise a placeholder: the node is always reported as stable.
        return 1

    def RereadCM(self, node):

        '''Force the cluster manager on a given node to reread its config
           This may be a no-op on certain cluster managers.
        '''
        rc = self.rsh(node, self.templates["RereadCmd"])
        if rc == 0:
            return 1
        else:
            self.logger.log("Could not force %s on node %s to reread its config"
                            % (self["Name"], node))
        return None

    def StataCM(self, node):

        '''Report the status of the cluster manager on a given node'''

        out = self.rsh(node, self.templates["StatusCmd"] % node, 1)
        ret = (out.find('stopped') == -1)

        try:
            if ret:
                if self.ShouldBeStatus[node] == "down":
                    self.logger.log(
                        "Node status for %s is %s but we think it should be %s"
                        % (node, "up", self.ShouldBeStatus[node]))
            else:
                if self.ShouldBeStatus[node] == "up":
                    self.logger.log(
                        "Node status for %s is %s but we think it should be %s"
                        % (node, "down", self.ShouldBeStatus[node]))
        except KeyError:
            pass

        if ret:
            self.ShouldBeStatus[node] = "up"
        else:
            self.ShouldBeStatus[node] = "down"
        return ret

    def startall(self, nodelist=None, verbose=False, quick=False):

        '''Start the cluster manager on every node in the cluster.
        We can do it on a subset of the cluster if nodelist is not None.
        '''
        map = {}
        if not nodelist:
            nodelist = self.Env["nodes"]

        for node in nodelist:
            if self.ShouldBeStatus[node] == "down":
                self.ns.WaitForAllNodesToComeUp(nodelist, 300)

        if not quick:
            # This is used for "basic sanity checks", so only start one node ...
            if not self.StartaCM(node, verbose=verbose):
                return 0
            return 1

        # Approximation of SimulStartList for --boot 
        watchpats = [ ]
        watchpats.append(self.templates["Pat:DC_IDLE"])
        for node in nodelist:
            watchpats.append(self.templates["Pat:Local_started"] % node)
            watchpats.append(self.templates["Pat:InfraUp"] % node)
            watchpats.append(self.templates["Pat:PacemakerUp"] % node)

        #   Start all the nodes - at about the same time...
        watch = LogWatcher(self.Env["LogFileName"], watchpats, "fast-start", self.Env["DeadTime"]+10, hosts=self.Env["nodes"], kind=self.Env["LogWatcher"])
        watch.setwatch()

        if not self.StartaCM(nodelist[0], verbose=verbose):
            return 0
        for node in nodelist:
            self.StartaCMnoBlock(node, verbose=verbose)

        watch.lookforall()
        if watch.unmatched:
            for regex in watch.unmatched:
                self.logger.log ("Warn: Startup pattern not found: %s" % (regex))

        if not self.cluster_stable():
            self.logger.log("Cluster did not stabilize")
            return 0

        return 1

    def stopall(self, nodelist=None, verbose=False, force=False):

        '''Stop the cluster managers on every node in the cluster.
        We can do it on a subset of the cluster if nodelist is not None.
        '''

        ret = 1
        map = {}
        if not nodelist:
            nodelist = self.Env["nodes"]
        for node in nodelist:
            if self.ShouldBeStatus[node] == "up" or force == True:
                if not self.StopaCM(node, verbose=verbose, force=force):
                    ret = 0
        return ret

    def rereadall(self, nodelist=None):

        '''Force the cluster managers on every node in the cluster
        to reread their config files.  We can do it on a subset of the
        cluster if nodelist is not None.
        '''

        map = {}
        if not nodelist:
            nodelist = self.Env["nodes"]
        for node in nodelist:
            if self.ShouldBeStatus[node] == "up":
                self.RereadCM(node)

    def statall(self, nodelist=None):

        '''Return the status of the cluster managers in the cluster.
        We can do it on a subset of the cluster if nodelist is not None.
        '''

        result = {}
        if not nodelist:
            nodelist = self.Env["nodes"]
        for node in nodelist:
            if self.StataCM(node):
                result[node] = "up"
            else:
                result[node] = "down"
        return result

    def isolate_node(self, target, nodes=None):
        '''isolate the communication between the nodes'''
        if not nodes:
            nodes = self.Env["nodes"]

        for node in nodes:
            if node != target:
                rc = self.rsh(target, self.templates["BreakCommCmd"] % self.key_for_node(node))
                if rc != 0:
                    self.logger.log("Could not break the communication between %s and %s: %d" % (target, node, rc))
                    return None
                else:
                    self.debug("Communication cut between %s and %s" % (target, node))
        return 1

    def unisolate_node(self, target, nodes=None):
        '''fix the communication between the nodes'''
        if not nodes:
            nodes = self.Env["nodes"]

        for node in nodes:
            if node != target:
                restored = 0

                # Limit the amount of time we have asynchronous connectivity for
                # Restore both sides as simultaneously as possible
                self.rsh(target, self.templates["FixCommCmd"] % self.key_for_node(node), synchronous=0)
                self.rsh(node, self.templates["FixCommCmd"] % self.key_for_node(target), synchronous=0)
                self.debug("Communication restored between %s and %s" % (target, node))

    def reducecomm_node(self, node):
        '''reduce the communication between the nodes'''
        rc = self.rsh(node, self.templates["ReduceCommCmd"] % (self.Env["XmitLoss"], self.Env["RecvLoss"]))
        if rc == 0:
            return 1
        else:
            self.logger.log("Could not reduce the communication between the nodes from node: %s" % node)
        return None

    def restorecomm_node(self, node):
        '''restore the saved communication between the nodes'''
        rc = 0
        if float(self.Env["XmitLoss"]) != 0 or float(self.Env["RecvLoss"]) != 0:
            rc = self.rsh(node, self.templates["RestoreCommCmd"])
        if rc == 0:
            return 1
        else:
            self.logger.log("Could not restore the communication between the nodes from node: %s" % node)
        return None

    def HasQuorum(self, node_list):
        "Return TRUE if the cluster currently has quorum"
        # If we are auditing a partition, then one side will
        #   have quorum and the other not.
        # So the caller needs to tell us which we are checking
        # If no value for node_list is specified... assume all nodes
        raise ValueError("Abstract Class member (HasQuorum)")

    def Components(self):
        raise ValueError("Abstract Class member (Components)")

    def oprofileStart(self, node=None):
        if not node:
            for n in self.Env["oprofile"]:
                self.oprofileStart(n)

        elif node in self.Env["oprofile"]:
            self.debug("Enabling oprofile on %s" % node)
            self.rsh(node, "opcontrol --init")
            self.rsh(node, "opcontrol --setup --no-vmlinux --separate=lib --callgraph=20 --image=all")
            self.rsh(node, "opcontrol --start")
            self.rsh(node, "opcontrol --reset")

    def oprofileSave(self, test, node=None):
        if not node:
            for n in self.Env["oprofile"]:
                self.oprofileSave(test, n)

        elif node in self.Env["oprofile"]:
            self.rsh(node, "opcontrol --dump")
            self.rsh(node, "opcontrol --save=cts.%d" % test)
            # Read back with: opreport -l session:cts.0 image:/usr/lib/heartbeat/c*
            if False:  # disabled: would just reset the profiler in place
                self.rsh(node, "opcontrol --reset")
            else:
                self.oprofileStop(node)
                self.oprofileStart(node)

    def oprofileStop(self, node=None):
        if not node:
            for n in self.Env["oprofile"]:
                self.oprofileStop(n)

        elif node in self.Env["oprofile"]:
            self.debug("Stopping oprofile on %s" % node)
            self.rsh(node, "opcontrol --reset")
            self.rsh(node, "opcontrol --shutdown 2>&1 > /dev/null")


    def StatsExtract(self):
        if not self.Env["stats"]:
            return

        for host in self.Env["nodes"]:
            log_stats_file = "%s/cts-stats.csv" % CTSvars.CRM_DAEMON_DIR
            if host in has_log_stats:
                self.rsh(host, '''bash %s %s stop''' % (log_stats_bin, log_stats_file))
                (rc, lines) = self.rsh(host, '''cat %s''' % log_stats_file, stdout=2)
                self.rsh(host, '''bash %s %s delete''' % (log_stats_bin, log_stats_file))

                fname = "cts-stats-%d-nodes-%s.csv" % (len(self.Env["nodes"]), host)
                print("Extracted stats: %s" % fname)
                fd = open(fname, "a")
                fd.writelines(lines)
                fd.close()

    def StatsMark(self, testnum):
        '''Mark the test number in the stats log'''

        global has_log_stats
        if not self.Env["stats"]:
            return

        for host in self.Env["nodes"]:
            log_stats_file = "%s/cts-stats.csv" % CTSvars.CRM_DAEMON_DIR
            if not host in has_log_stats:

                global log_stats
                global log_stats_bin
                script=log_stats
                #script = re.sub("\\\\", "\\\\", script)
                script = re.sub('\"', '\\\"', script)
                script = re.sub("'", "\'", script)
                script = re.sub("`", "\`", script)
                script = re.sub("\$", "\\\$", script)

                self.debug("Installing %s on %s" % (log_stats_bin, host))
                self.rsh(host, '''echo "%s" > %s''' % (script, log_stats_bin), silent=True)
                self.rsh(host, '''bash %s %s delete''' % (log_stats_bin, log_stats_file))
                has_log_stats[host] = 1

            # Now mark it
            self.rsh(host, '''bash %s %s mark %s''' % (log_stats_bin, log_stats_file, testnum), synchronous=0)
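ClusterManager is only usable through a concrete subclass: HasQuorum() and Components() raise ValueError until they are overridden, and the command templates (StartCmd, StopCmd, StatusCmd, ...) have to resolve to something the remote shell can run. The fragment below is a minimal sketch of that contract rather than CTS code; DummyClusterManager and the driver lines in the trailing comment are invented for illustration.

class DummyClusterManager(ClusterManager):
    '''Hypothetical subclass sketch (not part of CTS): it only fills in the
    two abstract members the base class refuses to provide.'''

    def HasQuorum(self, node_list):
        # Pretend quorum follows a simple majority of the nodes we think are up
        return self.upcount() > len(self.Env["nodes"]) // 2

    def Components(self):
        # A real cluster manager returns Process objects to kill and watch
        return []

# A test driver would then use it roughly like this:
#   cm = DummyClusterManager(env)   # 'env' built elsewhere by EnvFactory
#   cm.prepare()                    # record which nodes are currently up
#   cm.startall()                   # bring the cluster manager up everywhere
#   print(cm.statall())             # e.g. {'node1': 'up', 'node2': 'up'}
#   cm.stopall()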
Exemple #13
0
    def __init__(self, Environment, randseed=None, name=None):
        if not name: name="crm-corosync"
        ClusterManager.__init__(self, Environment, randseed=randseed)

        self.fullcomplist = {}
        self.templates = PatternSelector(self.name)
Exemple #14
0
    def __init__(self, Environment, randseed=None, name=None):
        if not name: name = "crm-ais"
        crm_lha.__init__(self, Environment, randseed=randseed, name=name)

        self.fullcomplist = {}
        self.templates = PatternSelector(self.name)
Exemple #15
0
class crm_ais(crm_lha):
    '''
    The crm version 3 cluster manager class.
    It implements the things we need to talk to and manipulate
    crm clusters running on top of openais
    '''
    def __init__(self, Environment, randseed=None, name=None):
        if not name: name = "crm-ais"
        crm_lha.__init__(self, Environment, randseed=randseed, name=name)

        self.fullcomplist = {}
        self.templates = PatternSelector(self.name)

    def NodeUUID(self, node):
        return node

    def ais_components(self, extra={}):

        complist = []
        if not len(self.fullcomplist.keys()):
            for c in ["cib", "lrmd", "crmd", "attrd"]:
                self.fullcomplist[c] = Process(
                    self,
                    c,
                    pats=self.templates.get_component(self.name, c),
                    badnews_ignore=self.templates.get_component(
                        self.name, "%s-ignore" % c),
                    common_ignore=self.templates.get_component(
                        self.name, "common-ignore"))

            # pengine uses dc_pats instead of pats
            self.fullcomplist["pengine"] = Process(
                self,
                "pengine",
                dc_pats=self.templates.get_component(self.name, "pengine"),
                badnews_ignore=self.templates.get_component(
                    self.name, "pengine-ignore"),
                common_ignore=self.templates.get_component(
                    self.name, "common-ignore"))

            # stonith-ng's process name is different from its component name
            self.fullcomplist["stonith-ng"] = Process(
                self,
                "stonith-ng",
                process="stonithd",
                pats=self.templates.get_component(self.name, "stonith"),
                badnews_ignore=self.templates.get_component(
                    self.name, "stonith-ignore"),
                common_ignore=self.templates.get_component(
                    self.name, "common-ignore"))

            # add (or replace) any extra components passed in
            self.fullcomplist.update(extra)

        # Processes running under valgrind can't be shot with "killall -9 processname",
        # so don't include them in the returned list
        vgrind = self.Env["valgrind-procs"].split()
        for key in list(self.fullcomplist.keys()):
            if self.Env["valgrind-tests"]:
                if key in vgrind:
                    self.log(
                        "Filtering %s from the component list as it is being profiled by valgrind"
                        % key)
                    continue
            if key == "stonith-ng" and not self.Env["DoFencing"]:
                continue
            complist.append(self.fullcomplist[key])

        return complist
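The extra argument to ais_components() lets a caller add or replace entries in the component map before the valgrind and fencing filters are applied. Below is a hedged sketch of that hook, assuming a crm_ais instance named cm built as above; the "corosync" pattern names follow the same get_component() convention the method already uses for the other daemons.

# Hypothetical call: register corosync itself as an additional component.
extra = {
    "corosync": Process(
        cm,
        "corosync",
        pats=cm.templates.get_component(cm.name, "corosync"),
        badnews_ignore=cm.templates.get_component(cm.name, "corosync-ignore"),
        common_ignore=cm.templates.get_component(cm.name, "common-ignore")),
}
complist = cm.ais_components(extra=extra)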
Exemple #16
0
    def __init__(self, name=None):
        if not name: name = "crm-corosync"
        ClusterManager.__init__(self)

        self.fullcomplist = {}
        self.templates = PatternSelector(self.name)
Exemple #17
0
    def errorstoignore(self):
        # At some point implement a more elegant solution that
        #   also produces a report at the end
        """ Return a list of known error messages that should be ignored """
        return PatternSelector().get_patterns(self.name, "BadNewsIgnore")
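The strings returned here are regular expressions, so a log audit can apply them directly with re.search(). A minimal sketch under that assumption follows; filter_badnews() is a hypothetical helper, not part of CTS.

import re

def filter_badnews(cm, log_lines):
    '''Drop log lines matching the cluster manager's "normal" error patterns.'''
    ignore = cm.errorstoignore() + cm.instance_errorstoignore()
    return [line for line in log_lines
            if not any(re.search(pat, line) for pat in ignore)]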
Exemple #18
0
class crm_corosync(crm_common):
    '''
    Corosync version 2 cluster manager class
    '''
    def __init__(self, Environment, randseed=None, name=None):
        if not name: name = "crm-corosync"
        crm_common.__init__(self, Environment, randseed=randseed, name=name)

        self.fullcomplist = {}
        self.templates = PatternSelector(self.name)

        if self.Env["have_systemd"]:
            self.update({
                # When systemd is in use, we can look for this instead
                "Pat:We_stopped":
                "%s.*Corosync Cluster Engine exiting normally",
            })

    def Components(self):
        complist = []
        if not len(list(self.fullcomplist.keys())):
            for c in ["cib", "lrmd", "crmd", "attrd"]:
                self.fullcomplist[c] = Process(
                    self,
                    c,
                    pats=self.templates.get_component(self.name, c),
                    badnews_ignore=self.templates.get_component(
                        self.name, "%s-ignore" % c),
                    common_ignore=self.templates.get_component(
                        self.name, "common-ignore"))

            # pengine uses dc_pats instead of pats
            self.fullcomplist["pengine"] = Process(
                self,
                "pengine",
                dc_pats=self.templates.get_component(self.name, "pengine"),
                badnews_ignore=self.templates.get_component(
                    self.name, "pengine-ignore"),
                common_ignore=self.templates.get_component(
                    self.name, "common-ignore"))

            # stonith-ng's process name is different from its component name
            self.fullcomplist["stonith-ng"] = Process(
                self,
                "stonith-ng",
                process="stonithd",
                pats=self.templates.get_component(self.name, "stonith"),
                badnews_ignore=self.templates.get_component(
                    self.name, "stonith-ignore"),
                common_ignore=self.templates.get_component(
                    self.name, "common-ignore"))

            # add (or replace) extra components
            self.fullcomplist["corosync"] = Process(
                self,
                "corosync",
                pats=self.templates.get_component(self.name, "corosync"),
                badnews_ignore=self.templates.get_component(
                    self.name, "corosync-ignore"),
                common_ignore=self.templates.get_component(
                    self.name, "common-ignore"))

        # Processes running under valgrind can't be shot with "killall -9 processname",
        # so don't include them in the returned list
        vgrind = self.Env["valgrind-procs"].split()
        for key in list(self.fullcomplist.keys()):
            if self.Env["valgrind-tests"]:
                if key in vgrind:
                    self.log(
                        "Filtering %s from the component list as it is being profiled by valgrind"
                        % key)
                    continue
            if key == "stonith-ng" and not self.Env["DoFencing"]:
                continue
            complist.append(self.fullcomplist[key])

        return complist
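Components() feeds the tests that kill individual daemons and watch the cluster recover. The driver snippet below is hypothetical; env stands for a previously constructed Environment, and only the length of the returned list is inspected so no Process internals are assumed.

# Hypothetical driver snippet, not part of CTS.
cm = crm_corosync(env)          # 'env' built elsewhere by EnvFactory
components = cm.Components()
cm.log("Stack %s exposes %d killable components" % (cm.name, len(components)))
# A component-failure test would then stop each entry in turn and use its
# pats / badnews_ignore patterns to validate recovery.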