Example #1
    def loadTargetSegments(self):

        targetHost = self.options.targetHost
        targetRole = self.options.targetRole
        targetDbId = self.options.targetDbId

        if targetHost is None and targetDbId is None:
            raise ProgramArgumentValidationException(
                "neither --host nor --seg_dbid specified.  " "Exactly one should be specified."
            )
        if targetHost is not None and targetDbId is not None:
            raise ProgramArgumentValidationException(
                "both --host nor --seg_dbid specified.  " "Exactly one should be specified."
            )
        if targetHost is not None and targetRole is None:
            raise ProgramArgumentValidationException(
                "--role not specified when --host is specified.  " "Role is required when targeting a host."
            )
        if targetDbId is not None and targetRole is not None:
            raise ProgramArgumentValidationException(
                "--role specified when --seg_dbid is specified.  "
                "Role should not be specified when targeting a single dbid."
            )

        #
        # load from master db
        #
        masterPort = self.options.masterPort
        if masterPort is None:
            gpEnv = GpMasterEnvironment(self.options.masterDataDirectory, False)
            masterPort = gpEnv.getMasterPort()
        conf = configurationInterface.getConfigurationProvider().initializeProvider(masterPort)
        gpArray = conf.loadSystemConfig(useUtilityMode=True)
        segments = gpArray.getDbList()

        #
        # prune gpArray according to filter settings
        #
        segments = [seg for seg in segments if seg.isSegmentQE()]
        if targetHost is not None and targetHost != "ALL":
            segments = [seg for seg in segments if seg.getSegmentHostName() == targetHost]

        if targetDbId is not None:
            segments = gpArray.getDbList()
            dbId = int(targetDbId)
            segments = [seg for seg in segments if seg.getSegmentDbId() == dbId]

        if targetRole is not None:
            segments = [seg for seg in segments if self.isMatchingRole(targetRole, seg)]

        # error out if any of the remaining target segments are marked down
        downSegments = [seg for seg in segments if seg.getSegmentStatus() != "u"]
        if len(downSegments) > 0:
            downSegStr = "\n     Down Segment: "
            raise ExceptionNoStackTraceNeeded(
                "Unable to inject fault.  At least one segment is marked as down in the database.%s%s"
                % (downSegStr, downSegStr.join([str(downSeg) for downSeg in downSegments]))
            )

        return segments
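
The isMatchingRole helper called above is not part of this listing. A minimal sketch of what it plausibly does, assuming gppylib's usual single-letter role codes ('p' for primary, 'm' for mirror) and the Segment.getSegmentRole() accessor; this is an illustration, not the repository's verbatim helper:

    def isMatchingRole(self, targetRole, seg):
        # Hypothetical sketch: map the --role argument onto the
        # single-letter role codes ('p' = primary, 'm' = mirror).
        role = seg.getSegmentRole()
        if targetRole == "primary":
            return role == 'p'
        if targetRole == "mirror":
            return role == 'm'
        raise ProgramArgumentValidationException(
            "Invalid --role value: %s" % targetRole)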
Example #2
    def loadTargetSegments(self):

        targetHost              = self.options.targetHost
        targetRole              = self.options.targetRole
        targetRegistrationOrder = self.options.targetRegistrationOrder

        if targetHost is None and targetRegistrationOrder is None:
            raise ProgramArgumentValidationException(
                "neither --host nor --registration_order is specified.  "
                "Exactly one should be specified.")
        if targetHost is not None and targetRegistrationOrder is not None:
            raise ProgramArgumentValidationException(
                "both --host and --registration_order are specified.  "
                "Exactly one should be specified.")
        if targetHost is not None and targetRole is None:
            raise ProgramArgumentValidationException(
                "--role is not specified when --host is specified.  "
                "Role is required when targeting a host.")
        if targetRegistrationOrder is not None and targetRole is not None:
            raise ProgramArgumentValidationException(
                "--role is specified when --registration_order is specified.  "
                "Role should not be specified when targeting a single registration_order.")

        #
        # load from master db
        #
        masterPort = self.options.masterPort
        if masterPort is None:
            gpEnv = GpMasterEnvironment(self.options.masterDataDirectory, False)
            masterPort = gpEnv.getMasterPort()
        conf = configurationInterface.getConfigurationProvider().initializeProvider(masterPort)
        hawqArray = conf.loadSystemConfig(useUtilityMode=True)
        hawqdbs = hawqArray.getDbList()
        
        #
        # prune hawqArray according to filter settings
        #
        if targetHost is not None and targetHost != "ALL":
            hawqdbs = [hdb for hdb in hawqdbs if hdb.getHostName() == targetHost]

        if targetRegistrationOrder is not None:
            hawqdbs = hawqArray.getDbList()
            regorder = int(targetRegistrationOrder)
            hawqdbs = [hdb for hdb in hawqdbs if hdb.getRegistrationOrder() == regorder]

        if targetRole is not None:
            hawqdbs = [hdb for hdb in hawqdbs if self.isMatchingRole(targetRole, hdb)]

        # error out if any of the remaining target segments are marked down
        downhawqdbs = [hdb for hdb in hawqdbs if hdb.getStatus() != 'u']
        if len(downhawqdbs) > 0:
            downhawqdbStr = "\n     Down Segment: "
            raise ExceptionNoStackTraceNeeded(
                "Unable to inject fault.  At least one segment is marked as down in the database.%s%s" % 
                (downhawqdbStr, downhawqdbStr.join([str(downhdb) for downhdb in downhawqdbs])))

        print "### DEBUG: loadTargetSegments"
        print "### DEBUG: HAWQDBS "
        print hawqdbs
        return hawqdbs
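
The final guard in both variants keys off the single-letter status code: 'u' means up, anything else is treated as down, matching gp_segment_configuration. The same check, pulled out as a standalone sketch with a hypothetical helper name:

    def assertAllSegmentsUp(segments, getStatus=lambda seg: seg.getStatus()):
        # Sketch only: 'u' is the "up" status letter; anything else is down.
        # For Example #1's segments, pass getStatus=lambda s: s.getSegmentStatus().
        down = [seg for seg in segments if getStatus(seg) != 'u']
        if down:
            sep = "\n     Down Segment: "
            raise Exception(
                "Unable to inject fault.  At least one segment is marked "
                "as down in the database.%s%s"
                % (sep, sep.join(str(seg) for seg in down)))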
Example #3
    def run(self):
        if self.__options.parallelDegree < 1 or self.__options.parallelDegree > 64:
            raise ProgramArgumentValidationException(
                "Invalid parallelDegree provided with -B argument: %d" % self.__options.parallelDegree)

        self.__pool = base.WorkerPool(self.__options.parallelDegree)
        gpEnv = GpMasterEnvironment(self.__options.masterDataDirectory, True)

        faultProberInterface.getFaultProber().initializeProber(gpEnv.getMasterPort())
        confProvider = configInterface.getConfigurationProvider().initializeProvider(gpEnv.getMasterPort())
        gpArray = confProvider.loadSystemConfig(useUtilityMode=False)

        # check that heap_checksums is consistent across cluster, fail immediately if not
        self.validate_heap_checksums(gpArray)

        self.checkMirrorOffset(gpArray)
        
        # check that we actually have mirrors
        if gpArray.hasMirrors:
            raise ExceptionNoStackTraceNeeded(
                "GPDB physical mirroring cannot be added.  The cluster is already configured with Mirrors.")

        # figure out what needs to be done (AND update the gpArray!)
        mirrorBuilder = self.__getMirrorsToBuildBasedOnOptions(gpEnv, gpArray)
        mirrorBuilder.checkForPortAndDirectoryConflicts(gpArray)

        if self.__options.outputSampleConfigFile is not None:
            # just output config file and done
            self.__outputToFile(mirrorBuilder, self.__options.outputSampleConfigFile, gpArray)
            logger.info('Configuration file output to %s successfully.' % self.__options.outputSampleConfigFile)
        else:
            self.__displayAddMirrors(gpEnv, mirrorBuilder, gpArray)
            if self.__options.interactive:
                if not userinput.ask_yesno(None, "\nContinue with add mirrors procedure", 'N'):
                    raise UserAbortedException()

            self.config_primaries_for_replication(gpArray)
            if not mirrorBuilder.buildMirrors("add", gpEnv, gpArray):
                return 1

            logger.info("******************************************************************")
            logger.info("Mirror segments have been added; data synchronization is in progress.")
            logger.info("Data synchronization will continue in the background.")
            logger.info("Use  gpstate -s  to check the resynchronization progress.")
            logger.info("******************************************************************")

        return 0  # success -- exit code 0!
Example #4
    def run(self):
        if self.__options.parallelDegree < 1 or self.__options.parallelDegree > 64:
            raise ProgramArgumentValidationException(
                "Invalid parallelDegree provided with -B argument: %d" % self.__options.parallelDegree
            )

        self.__pool = base.WorkerPool(self.__options.parallelDegree)
        gpEnv = GpMasterEnvironment(self.__options.masterDataDirectory, True)

        faultProberInterface.getFaultProber().initializeProber(gpEnv.getMasterPort())
        confProvider = configInterface.getConfigurationProvider().initializeProvider(gpEnv.getMasterPort())
        gpArray = confProvider.loadSystemConfig(useUtilityMode=False)

        # check that we actually have mirrors
        if gpArray.getFaultStrategy() != gparray.FAULT_STRATEGY_NONE:
            raise ExceptionNoStackTraceNeeded(
                "GPDB physical mirroring cannot be added.  The cluster is already configured with %s."
                % gparray.getFaultStrategyLabel(gpArray.getFaultStrategy())
            )

        # figure out what needs to be done
        mirrorBuilder = self.__getMirrorsToBuildBasedOnOptions(gpEnv, gpArray)
        mirrorBuilder.checkForPortAndDirectoryConflicts(gpArray)

        if self.__options.outputSampleConfigFile is not None:
            # just output config file and done
            self.__outputToFile(mirrorBuilder, self.__options.outputSampleConfigFile, gpArray)
            logger.info("Configuration file output to %s successfully." % self.__options.outputSampleConfigFile)
        else:
            self.__displayAddMirrors(gpEnv, mirrorBuilder, gpArray)
            if self.__options.interactive:
                if not userinput.ask_yesno(None, "\nContinue with add mirrors procedure", "N"):
                    raise UserAbortedException()

            gpArray.setFaultStrategy(gparray.FAULT_STRATEGY_FILE_REPLICATION)
            mirrorBuilder.buildMirrors("add", gpEnv, gpArray)

            logger.info("******************************************************************")
            logger.info("Mirror segments have been added; data synchronization is in progress.")
            logger.info("Data synchronization will continue in the background.")
            logger.info("")
            logger.info("Use  gpstate -s  to check the resynchronization progress.")
            logger.info("******************************************************************")

        return 0  # success -- exit code 0!
Example #5
    def run(self):
        if self.__options.parallelDegree < 1 or self.__options.parallelDegree > 64:
            raise ProgramArgumentValidationException(
                "Invalid parallelDegree provided with -B argument: %d" % self.__options.parallelDegree)

        self.__pool = WorkerPool(self.__options.parallelDegree)
        gpEnv = GpMasterEnvironment(self.__options.masterDataDirectory, True)

        # verify "where to recover" options
        optionCnt = 0
        if self.__options.newRecoverHosts is not None:
            optionCnt += 1
        if self.__options.recoveryConfigFile is not None:
            optionCnt += 1
        if self.__options.rebalanceSegments:
            optionCnt += 1
        if optionCnt > 1:
            raise ProgramArgumentValidationException("Only one of -i, -p, and -r may be specified")

        faultProberInterface.getFaultProber().initializeProber(gpEnv.getMasterPort())

        confProvider = configInterface.getConfigurationProvider().initializeProvider(gpEnv.getMasterPort())

        gpArray = confProvider.loadSystemConfig(useUtilityMode=False)

        if not gpArray.hasMirrors:
            raise ExceptionNoStackTraceNeeded(
                'GPDB Mirroring replication is not configured for this Greenplum Database instance.')

        # We have phys-rep/filerep mirrors.

        if self.__options.newRecoverHosts is not None:
            try:
                uniqueHosts = []
                for h in self.__options.newRecoverHosts.split(','):
                    if h.strip() not in uniqueHosts:
                        uniqueHosts.append(h.strip())
                self.__options.newRecoverHosts = uniqueHosts
            except Exception as ex:
                raise ProgramArgumentValidationException(
                    "Invalid value for recover hosts: %s" % ex)
Example #6
    def loadTargetSegments(self):

        targetHost = self.options.targetHost
        targetRole = self.options.targetRole
        targetDbId = self.options.targetDbId

        if targetHost is None and targetDbId is None:
            raise ProgramArgumentValidationException(
                "neither --host nor --seg_dbid specified.  "
                "Exactly one should be specified.")
        if targetHost is not None and targetDbId is not None:
            raise ProgramArgumentValidationException(
                "both --host and --seg_dbid specified.  "
                "Exactly one should be specified.")
        if targetHost is not None and targetRole is None:
            raise ProgramArgumentValidationException(
                "--role not specified when --host is specified.  "
                "Role is required when targeting a host.")
        if targetDbId is not None and targetRole is not None:
            raise ProgramArgumentValidationException(
                "--role specified when --seg_dbid is specified.  "
                "Role should not be specified when targeting a single dbid.")

        #
        # load from master db
        #
        masterPort = self.options.masterPort
        if masterPort is None:
            gpEnv = GpMasterEnvironment(self.options.masterDataDirectory,
                                        False)
            masterPort = gpEnv.getMasterPort()
        conf = configurationInterface.getConfigurationProvider(
        ).initializeProvider(masterPort)
        gpArray = conf.loadSystemConfig(useUtilityMode=True)
        segments = gpArray.getDbList()

        #
        # prune gpArray according to filter settings
        #
        segments = [seg for seg in segments if seg.isSegmentQE()]
        if targetHost is not None and targetHost != "ALL":
            segments = [
                seg for seg in segments
                if seg.getSegmentHostName() == targetHost
            ]

        if targetDbId is not None:
            segments = gpArray.getDbList()
            dbId = int(targetDbId)
            segments = [
                seg for seg in segments if seg.getSegmentDbId() == dbId
            ]

        if targetRole is not None:
            segments = [
                seg for seg in segments
                if self.isMatchingRole(targetRole, seg)
            ]

        # error out if any of the remaining target segments are marked down
        downSegments = [
            seg for seg in segments if seg.getSegmentStatus() != 'u'
        ]
        if len(downSegments) > 0:
            downSegStr = "\n     Down Segment: "
            raise ExceptionNoStackTraceNeeded(
                "Unable to inject fault.  At least one segment is marked as down in the database.%s%s"
                % (downSegStr,
                   downSegStr.join([str(downSeg)
                                    for downSeg in downSegments])))

        return segments
Example #7
    def run(self):
        if self.__options.parallelDegree < 1 or self.__options.parallelDegree > 64:
            raise ProgramArgumentValidationException(
                "Invalid parallelDegree provided with -B argument: %d" %
                self.__options.parallelDegree)

        self.__pool = WorkerPool(self.__options.parallelDegree)
        gpEnv = GpMasterEnvironment(self.__options.masterDataDirectory, True)

        # verify "where to recover" options
        optionCnt = 0
        if self.__options.newRecoverHosts is not None:
            optionCnt += 1
        if self.__options.recoveryConfigFile is not None:
            optionCnt += 1
        if self.__options.rebalanceSegments:
            optionCnt += 1
        if optionCnt > 1:
            raise ProgramArgumentValidationException(
                "Only one of -i, -p, and -r may be specified")

        faultProberInterface.getFaultProber().initializeProber(
            gpEnv.getMasterPort())

        confProvider = configInterface.getConfigurationProvider(
        ).initializeProvider(gpEnv.getMasterPort())

        gpArray = confProvider.loadSystemConfig(useUtilityMode=False)

        if not gpArray.hasMirrors:
            raise ExceptionNoStackTraceNeeded(
                'GPDB Mirroring replication is not configured for this Greenplum Database instance.'
            )

        # We have phys-rep/filerep mirrors.

        if self.__options.newRecoverHosts is not None:
            try:
                uniqueHosts = []
                for h in self.__options.newRecoverHosts.split(','):
                    if h.strip() not in uniqueHosts:
                        uniqueHosts.append(h.strip())
                self.__options.newRecoverHosts = uniqueHosts
            except Exception as ex:
                raise ProgramArgumentValidationException(
                    "Invalid value for recover hosts: %s" % ex)

        # If it's a rebalance operation, make sure we are in an acceptable state to do that
        # Acceptable state is:
        #    - No segments down
        #    - No segments in change tracking or unsynchronized state
        if self.__options.rebalanceSegments:
            if len(gpArray.get_invalid_segdbs()) > 0:
                raise Exception(
                    "Down segments still exist.  All segments must be up to rebalance."
                )
            if len(gpArray.get_synchronized_segdbs()) != len(
                    gpArray.getSegDbList()):
                raise Exception(
                    "Some segments are not yet synchronized.  All segments must be synchronized to rebalance."
                )

        # retain list of hosts that were existing in the system prior to getRecoverActions...
        # this will be needed for later calculations that determine whether
        # new hosts were added into the system
        existing_hosts = set(gpArray.getHostList())

        # figure out what needs to be done
        mirrorBuilder = self.getRecoveryActionsBasedOnOptions(gpEnv, gpArray)

        if self.__options.outputSampleConfigFile is not None:
            # just output config file and done
            self.outputToFile(mirrorBuilder, gpArray,
                              self.__options.outputSampleConfigFile)
            self.logger.info('Configuration file output to %s successfully.' %
                             self.__options.outputSampleConfigFile)
        elif self.__options.rebalanceSegments:
            assert isinstance(mirrorBuilder, GpSegmentRebalanceOperation)

            # Make sure we have work to do
            if len(gpArray.get_unbalanced_segdbs()) == 0:
                self.logger.info(
                    "No segments are running in their non-preferred role and need to be rebalanced."
                )
            else:
                self.displayRecovery(mirrorBuilder, gpArray)

                if self.__options.interactive:
                    self.logger.warn(
                        "This operation will cancel queries that are currently executing."
                    )
                    self.logger.warn(
                        "Connections to the database however will not be interrupted."
                    )
                    if not userinput.ask_yesno(
                            None,
                            "\nContinue with segment rebalance procedure",
                            'N'):
                        raise UserAbortedException()

                fullRebalanceDone = mirrorBuilder.rebalance()
                self.logger.info(
                    "******************************************************************"
                )
                if fullRebalanceDone:
                    self.logger.info(
                        "The rebalance operation has completed successfully.")
                else:
                    self.logger.info(
                        "The rebalance operation has completed with WARNINGS."
                        " Please review the output in the gprecoverseg log.")
                self.logger.info(
                    "There is a resynchronization running in the background to bring all"
                )
                self.logger.info("segments in sync.")
                self.logger.info(
                    "Use gpstate -e to check the resynchronization progress.")
                self.logger.info(
                    "******************************************************************"
                )

        elif len(mirrorBuilder.getMirrorsToBuild()) == 0:
            self.logger.info('No segments to recover')
        else:
            mirrorBuilder.checkForPortAndDirectoryConflicts(gpArray)
            self.validate_heap_checksum_consistency(gpArray, mirrorBuilder)

            self.displayRecovery(mirrorBuilder, gpArray)
            self.__displayRecoveryWarnings(mirrorBuilder)

            if self.__options.interactive:
                if not userinput.ask_yesno(
                        None, "\nContinue with segment recovery procedure",
                        'N'):
                    raise UserAbortedException()

            # sync packages
            current_hosts = set(gpArray.getHostList())
            new_hosts = current_hosts - existing_hosts
            if new_hosts:
                self.syncPackages(new_hosts)

            if not mirrorBuilder.buildMirrors("recover", gpEnv, gpArray):
                sys.exit(1)

            self.trigger_fts_probe(port=gpEnv.getMasterPort())

            self.logger.info(
                "******************************************************************"
            )
            self.logger.info("Updating segments for streaming is completed.")
            self.logger.info(
                "For segments updated successfully, streaming will continue in the background."
            )
            self.logger.info(
                "Use  gpstate -s  to check the streaming progress.")
            self.logger.info(
                "******************************************************************"
            )

        sys.exit(0)
Example #8
    def run(self):
        if self.__options.parallelDegree < 1 or self.__options.parallelDegree > 64:
            raise ProgramArgumentValidationException(
                "Invalid parallelDegree provided with -B argument: %d" %
                self.__options.parallelDegree)

        self.__pool = base.WorkerPool(self.__options.parallelDegree)
        gpEnv = GpMasterEnvironment(self.__options.masterDataDirectory, True)

        faultProberInterface.getFaultProber().initializeProber(
            gpEnv.getMasterPort())
        confProvider = configInterface.getConfigurationProvider(
        ).initializeProvider(gpEnv.getMasterPort())
        gpArray = confProvider.loadSystemConfig(useUtilityMode=False)

        # check that heap_checksums is consistent across cluster, fail immediately if not
        self.validate_heap_checksums(gpArray)

        self.checkMirrorOffset(gpArray)

        # check that we actually have mirrors
        if gpArray.hasMirrors:
            raise ExceptionNoStackTraceNeeded(
                "GPDB physical mirroring cannot be added.  The cluster is already configured with Mirrors.")

        # figure out what needs to be done (AND update the gpArray!)
        mirrorBuilder = self.__getMirrorsToBuildBasedOnOptions(gpEnv, gpArray)
        mirrorBuilder.checkForPortAndDirectoryConflicts(gpArray)

        if self.__options.outputSampleConfigFile is not None:
            # just output config file and done
            self.__outputToFile(mirrorBuilder,
                                self.__options.outputSampleConfigFile, gpArray)
            logger.info('Configuration file output to %s successfully.' %
                        self.__options.outputSampleConfigFile)
        else:
            self.__displayAddMirrors(gpEnv, mirrorBuilder, gpArray)
            if self.__options.interactive:
                if not userinput.ask_yesno(
                        None, "\nContinue with add mirrors procedure", 'N'):
                    raise UserAbortedException()

            config_primaries_for_replication(gpArray,
                                             self.__options.hba_hostnames)
            if not mirrorBuilder.buildMirrors("add", gpEnv, gpArray):
                return 1

            logger.info(
                "******************************************************************"
            )
            logger.info(
                "Mirror segments have been added; data synchronization is in progress."
            )
            logger.info(
                "Data synchronization will continue in the background.")
            logger.info(
                "Use  gpstate -s  to check the resynchronization progress.")
            logger.info(
                "******************************************************************"
            )

        return 0  # success -- exit code 0!
Example #9
    def loadTargetSegments(self):

        targetHost = self.options.targetHost
        targetRole = self.options.targetRole
        targetRegistrationOrder = self.options.targetRegistrationOrder

        if targetHost is None and targetRegistrationOrder is None:
            raise ProgramArgumentValidationException(
                "neither --host nor --registration_order is specified.  "
                "Exactly one should be specified.")
        if targetHost is not None and targetRegistrationOrder is not None:
            raise ProgramArgumentValidationException(
                "both --host and --registration_order are specified.  "
                "Exactly one should be specified.")
        if targetHost is not None and targetRole is None:
            raise ProgramArgumentValidationException(
                "--role is not specified when --host is specified.  "
                "Role is required when targeting a host.")
        if targetRegistrationOrder is not None and targetRole is not None:
            raise ProgramArgumentValidationException(
                "--role is specified when --registration_order is specified.  "
                "Role should not be specified when targeting a single registration_order.")

        #
        # load from master db
        #
        masterPort = self.options.masterPort
        if masterPort is None:
            gpEnv = GpMasterEnvironment(self.options.masterDataDirectory,
                                        False)
            masterPort = gpEnv.getMasterPort()
        conf = configurationInterface.getConfigurationProvider(
        ).initializeProvider(masterPort)
        hawqArray = conf.loadSystemConfig(useUtilityMode=True)
        hawqdbs = hawqArray.getDbList()

        #
        # prune hawqArray according to filter settings
        #
        hawqdbs = [hdb for hdb in hawqdbs if hdb.isSegment()]
        if targetHost is not None and targetHost != "ALL":
            hawqdbs = [
                hdb for hdb in hawqdbs if hdb.getHostName() == targetHost
            ]

        if targetRegistrationOrder is not None:
            hawqdbs = hawqArray.getDbList()
            regorder = int(targetRegistrationOrder)
            hawqdbs = [
                hdb for hdb in hawqdbs
                if hdb.getRegistrationOrder() == regorder
            ]

        if targetRole is not None:
            hawqdbs = [
                hdb for hdb in hawqdbs if self.isMatchingRole(targetRole, hdb)
            ]

        # error out if any of the remaining target segments are marked down
        downhawqdbs = [hdb for hdb in hawqdbs if hdb.getStatus() != 'u']
        if len(downhawqdbs) > 0:
            downhawqdbStr = "\n     Down Segment: "
            raise ExceptionNoStackTraceNeeded(
                "Unable to inject fault.  At least one segment is marked as down in the database.%s%s"
                %
                (downhawqdbStr,
                 downhawqdbStr.join([str(downhdb)
                                     for downhdb in downhawqdbs])))

        print "### DEBUG: loadTargetSegments"
        print "### DEBUG: HAWQDBS "
        print hawqdbs
        return hawqdbs