Code example #1
    def test_both_seg_unreachable(self, mock_entries, mock_update):
        pair0, pair1 = self.gparray.getSegmentList()
        pair0.primaryDB.unreachable = True
        pair1.primaryDB.unreachable = True
        os.environ["GPHOME"] = "/usr/local/gpdb"
        expected_batch_size = 16
        update_pg_hba_on_segments(self.gparray, False, expected_batch_size)

        self.logger.info.assert_any_call(
            "None of the reachable segments require update to pg_hba.conf")
        self.assertEqual(mock_update.call_count, 0)
Code example #2
    def test_both_seg_unreachable(self, mock_entries, mock_update):
        pair0, pair1 = self.gparray.getSegmentList()
        pair0.primaryDB.unreachable = True
        pair1.primaryDB.unreachable = True
        os.environ["GPHOME"] = "/usr/local/gpdb"
        expected_batch_size = 16
        update_pg_hba_on_segments(self.gparray, False, expected_batch_size)

        self.logger.warning.assert_any_call(
            "Not updating pg_hba.conf for segments on unreachable hosts: sdw1, sdw2."
            "You can manually update pg_hba.conf once you make the hosts reachable."
        )
        self.logger.info.assert_any_call(
            "None of the reachable segments require update to pg_hba.conf")
        self.assertEqual(mock_update.call_count, 0)
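
Examples #1 and #2 show only the test methods; the fixture that supplies self.logger, self.gparray, mock_entries and mock_update is not part of the listing. The skeleton below is hypothetical (the patched helpers are local placeholders, not gpdb functions) and only illustrates why the mocks arrive in the (mock_entries, mock_update) order: @patch decorators are applied bottom-up, so the lowest decorator supplies the first mock argument.

from unittest import TestCase
from unittest.mock import Mock, patch

# Local stand-ins for whatever helpers the real tests patch; names are illustrative only.
def create_entries(*args, **kwargs):
    raise NotImplementedError

def dispatch_updates(*args, **kwargs):
    raise NotImplementedError

class UpdatePgHbaTestCase(TestCase):
    def setUp(self):
        self.logger = Mock()  # collects the info/warning calls asserted in the examples
        # Two primary/mirror pairs; only the attributes the examples touch are modelled.
        pair0, pair1 = Mock(), Mock()
        pair0.primaryDB.unreachable = False
        pair1.primaryDB.unreachable = False
        self.gparray = Mock()
        self.gparray.getSegmentList.return_value = [pair0, pair1]

    # Decorators apply bottom-up: the lower patch becomes the first mock argument.
    @patch(__name__ + '.dispatch_updates')  # -> mock_update
    @patch(__name__ + '.create_entries')    # -> mock_entries
    def test_decorator_order(self, mock_entries, mock_update):
        create_entries("pair")
        dispatch_updates(["cmd"], 16)
        mock_entries.assert_called_once_with("pair")
        mock_update.assert_called_once_with(["cmd"], 16)
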
Code example #3
    def test_one_seg_to_update(self, mock_entries, mock_update):
        pair0, pair1 = self.gparray.getSegmentList()
        os.environ["GPHOME"] = "/usr/local/gpdb"
        expected_batch_size = 16
        update_pg_hba_on_segments(self.gparray, False, expected_batch_size,
                                  [0])

        self.assertEqual(mock_update.call_count, 1)
        mock_call_args = mock_update.call_args[0]

        result_cmds = mock_call_args[0]
        result_batch_size = mock_call_args[1]

        expected_string = "$GPHOME/sbin/seg_update_pg_hba.py --data-dir /data/primary0 --entries 'entry0\nentry1'"

        self.assertEqual(len(result_cmds), 1)
        self.assertEqual(result_batch_size, expected_batch_size)
        self.assertEqual(result_cmds[0].cmdStr, expected_string)
Code example #4
    def test_update_pg_hba_on_segments_updated_successfully_all_failed_segments(
            self, mock_entry, mock_update):
        os.environ["GPHOME"] = "/usr/local/gpdb"
        expected_batch_size = 16
        update_pg_hba_on_segments(self.gparray, False, expected_batch_size)
        self.logger.info.assert_any_call(
            "Starting to create new pg_hba.conf on primary segments")
        self.logger.info.assert_any_call(
            "Successfully modified pg_hba.conf on primary segments to allow replication connections"
        )

        self.assertEqual(mock_update.call_count, 1)
        mock_call_args = mock_update.call_args[0]

        result_cmds = mock_call_args[0]
        result_batch_size = mock_call_args[1]

        expected_string0 = "$GPHOME/sbin/seg_update_pg_hba.py --data-dir /data/primary0 --entries 'entry0\nentry1'"
        expected_string1 = "$GPHOME/sbin/seg_update_pg_hba.py --data-dir /data/primary1 --entries 'entry1\nentry2'"

        self.assertEqual(len(result_cmds), 2)
        self.assertEqual(result_batch_size, expected_batch_size)
        self.assertEqual(result_cmds[0].cmdStr, expected_string0)
        self.assertEqual(result_cmds[1].cmdStr, expected_string1)
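
Taken together, examples #1 through #4 pin down the observable contract of update_pg_hba_on_segments: it builds one $GPHOME/sbin/seg_update_pg_hba.py command per reachable primary (optionally restricted to the content ids passed as the fourth argument), warns about and skips primaries on unreachable hosts, and hands the resulting command list plus the batch size to a dispatcher (the mock_update call checked above). The sketch below is only a reading of those assertions, not the gpdb implementation: build_entries and run_commands are hypothetical stand-ins, the Segment accessors are assumed from the wider gppylib API, and the real commands are objects exposing cmdStr rather than plain strings.

def update_pg_hba_sketch(gparray, hba_hostnames, batch_size, contents_to_update=None,
                         logger=None, build_entries=None, run_commands=None):
    """Sketch of the behaviour the tests above assert; not the gpdb code."""
    cmds, unreachable = [], []
    for pair in gparray.getSegmentList():
        primary = pair.primaryDB
        if contents_to_update is not None and primary.getSegmentContentId() not in contents_to_update:
            continue  # example #3 passes [0] and expects a single command
        if primary.unreachable:
            unreachable.append(primary.getSegmentHostName())
            continue
        entries = build_entries(primary, hba_hostnames)  # e.g. "entry0\nentry1"
        cmds.append("$GPHOME/sbin/seg_update_pg_hba.py --data-dir %s --entries '%s'"
                    % (primary.getSegmentDataDirectory(), entries))

    if unreachable:
        logger.warning("Not updating pg_hba.conf for segments on unreachable hosts: %s."
                       % ", ".join(unreachable))
    if not cmds:
        # Examples #1 and #2: nothing reachable needs updating, so the dispatcher is never called.
        logger.info("None of the reachable segments require update to pg_hba.conf")
        return

    # Examples #3 and #4: the dispatcher receives the command list and the batch size.
    run_commands(cmds, batch_size)
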
Code example #5
    def run(self):
        if self.__options.batch_size < 1 or self.__options.batch_size > gp.MAX_COORDINATOR_NUM_WORKERS:
            raise ProgramArgumentValidationException(
                "Invalid batch_size provided with -B argument: %d" %
                self.__options.batch_size)
        if self.__options.segment_batch_size < 1 or self.__options.segment_batch_size > gp.MAX_SEGHOST_NUM_WORKERS:
            raise ProgramArgumentValidationException(
                "Invalid segment_batch_size provided with -b argument: %d" %
                self.__options.segment_batch_size)

        self.__pool = base.WorkerPool(self.__options.batch_size)
        gpEnv = GpCoordinatorEnvironment(
            self.__options.coordinatorDataDirectory, True)

        faultProberInterface.getFaultProber().initializeProber(
            gpEnv.getCoordinatorPort())
        confProvider = configInterface.getConfigurationProvider(
        ).initializeProvider(gpEnv.getCoordinatorPort())
        gpArray = confProvider.loadSystemConfig(useUtilityMode=False)

        # check that heap_checksums is consistent across cluster, fail immediately if not
        self.validate_heap_checksums(gpArray)

        if self.__options.mirrorConfigFile is None:
            self.checkMirrorOffset(gpArray)

        # ensure the cluster does not already have mirrors configured
        if gpArray.hasMirrors:
            raise ExceptionNoStackTraceNeeded( \
                "GPDB physical mirroring cannot be added.  The cluster is already configured with Mirrors.")

        # figure out what needs to be done (AND update the gpArray!)
        mirrorBuilder = self.__getMirrorsToBuildBasedOnOptions(gpEnv, gpArray)
        mirrorBuilder.checkForPortAndDirectoryConflicts(gpArray)

        if self.__options.outputSampleConfigFile is not None:
            # just output config file and done
            self.__outputToFile(mirrorBuilder,
                                self.__options.outputSampleConfigFile, gpArray)
            logger.info('Configuration file output to %s successfully.' %
                        self.__options.outputSampleConfigFile)
        else:
            self.__displayAddMirrors(gpEnv, mirrorBuilder, gpArray)
            if self.__options.interactive:
                if not userinput.ask_yesno(
                        None, "\nContinue with add mirrors procedure", 'N'):
                    raise UserAbortedException()

            update_pg_hba_on_segments(gpArray, self.__options.hba_hostnames,
                                      self.__options.batch_size)
            if not mirrorBuilder.buildMirrors("add", gpEnv, gpArray):
                return 1

            logger.info(
                "******************************************************************"
            )
            logger.info(
                "Mirror segments have been added; data synchronization is in progress."
            )
            logger.info(
                "Data synchronization will continue in the background.")
            logger.info(
                "Use  gpstate -s  to check the resynchronization progress.")
            logger.info(
                "******************************************************************"
            )

        return 0  # success -- exit code 0!
Code example #6
File: clsRecoverSegment.py  Project: petersky/gpdb
    def run(self):
        if self.__options.parallelDegree < 1 or self.__options.parallelDegree > gp.MAX_COORDINATOR_NUM_WORKERS:
            raise ProgramArgumentValidationException(
                "Invalid parallelDegree value provided with -B argument: %d" % self.__options.parallelDegree)
        if self.__options.parallelPerHost < 1 or self.__options.parallelPerHost > gp.MAX_SEGHOST_NUM_WORKERS:
            raise ProgramArgumentValidationException(
                "Invalid parallelPerHost value provided with -b argument: %d" % self.__options.parallelPerHost)

        self.__pool = WorkerPool(self.__options.parallelDegree)
        gpEnv = GpCoordinatorEnvironment(self.__options.coordinatorDataDirectory, True)

        # verify "where to recover" options
        optionCnt = 0
        if self.__options.newRecoverHosts is not None:
            optionCnt += 1
        if self.__options.recoveryConfigFile is not None:
            optionCnt += 1
        if self.__options.rebalanceSegments:
            optionCnt += 1
        if optionCnt > 1:
            raise ProgramArgumentValidationException("Only one of -i, -p, and -r may be specified")

        faultProberInterface.getFaultProber().initializeProber(gpEnv.getCoordinatorPort())

        confProvider = configInterface.getConfigurationProvider().initializeProvider(gpEnv.getCoordinatorPort())

        gpArray = confProvider.loadSystemConfig(useUtilityMode=False)

        if not gpArray.hasMirrors:
            raise ExceptionNoStackTraceNeeded(
                'GPDB Mirroring replication is not configured for this Greenplum Database instance.')

        num_workers = min(len(gpArray.get_hostlist()), self.__options.parallelDegree)
        hosts = set(gpArray.get_hostlist(includeCoordinator=False))
        unreachable_hosts = get_unreachable_segment_hosts(hosts, num_workers)
        update_unreachable_flag_for_segments(gpArray, unreachable_hosts)

        # We have phys-rep/filerep mirrors.

        if self.__options.newRecoverHosts is not None:
            try:
                uniqueHosts = []
                for h in self.__options.newRecoverHosts.split(','):
                    if h.strip() not in uniqueHosts:
                        uniqueHosts.append(h.strip())
                self.__options.newRecoverHosts = uniqueHosts
            except Exception as ex:
                raise ProgramArgumentValidationException(
                    "Invalid value for recover hosts: %s" % ex)

        # retain list of hosts that were existing in the system prior to getRecoverActions...
        # this will be needed for later calculations that determine whether
        # new hosts were added into the system
        existing_hosts = set(gpArray.getHostList())

        # figure out what needs to be done
        mirrorBuilder = self.getRecoveryActionsBasedOnOptions(gpEnv, gpArray)

        if self.__options.outputSampleConfigFile is not None:
            # just output config file and done
            self.outputToFile(mirrorBuilder, gpArray, self.__options.outputSampleConfigFile)
            self.logger.info('Configuration file output to %s successfully.' % self.__options.outputSampleConfigFile)
        elif self.__options.rebalanceSegments:
            assert (isinstance(mirrorBuilder, GpSegmentRebalanceOperation))

            # Make sure we have work to do
            if len(gpArray.get_unbalanced_segdbs()) == 0:
                self.logger.info("No segments are running in their non-preferred role and need to be rebalanced.")
            else:
                self.displayRecovery(mirrorBuilder, gpArray)

                if self.__options.interactive:
                    self.logger.warn("This operation will cancel queries that are currently executing.")
                    self.logger.warn("Connections to the database however will not be interrupted.")
                    if not userinput.ask_yesno(None, "\nContinue with segment rebalance procedure", 'N'):
                        raise UserAbortedException()

                fullRebalanceDone = mirrorBuilder.rebalance()
                self.logger.info("******************************************************************")
                if fullRebalanceDone:
                    self.logger.info("The rebalance operation has completed successfully.")
                else:
                    self.logger.info("The rebalance operation has completed with WARNINGS."
                                     " Please review the output in the gprecoverseg log.")
                self.logger.info("******************************************************************")

        elif len(mirrorBuilder.getMirrorsToBuild()) == 0:
            self.logger.info('No segments to recover')
        else:
            #TODO this already happens in buildMirrors function
            mirrorBuilder.checkForPortAndDirectoryConflicts(gpArray)
            self.validate_heap_checksum_consistency(gpArray, mirrorBuilder)

            self.displayRecovery(mirrorBuilder, gpArray)
            self.__displayRecoveryWarnings(mirrorBuilder)

            if self.__options.interactive:
                if not userinput.ask_yesno(None, "\nContinue with segment recovery procedure", 'N'):
                    raise UserAbortedException()

            # sync packages
            current_hosts = set(gpArray.getHostList())
            new_hosts = current_hosts - existing_hosts
            if new_hosts:
                self.syncPackages(new_hosts)

            contentsToUpdate = [seg.getLiveSegment().getSegmentContentId() for seg in mirrorBuilder.getMirrorsToBuild()]
            update_pg_hba_on_segments(gpArray, self.__options.hba_hostnames, self.__options.parallelDegree, contentsToUpdate)
            if not mirrorBuilder.recover_mirrors(gpEnv, gpArray):
                self.logger.error("gprecoverseg failed. Please check the output for more details.")
                sys.exit(1)

            self.logger.info("********************************")
            self.logger.info("Segments successfully recovered.")
            self.logger.info("********************************")

            self.logger.info("Recovered mirror segments need to sync WAL with primary segments.")
            self.logger.info("Use 'gpstate -e' to check progress of WAL sync remaining bytes")

        sys.exit(0)