Example #1
0
    def run_cmd_list(self, cmd_list, logger, options, pool):
        """Run every command in *cmd_list* through the worker *pool* and exit.

        Exits the process with status 0 when every command succeeded.
        Otherwise the collected errors are serialized, printed to stderr,
        logged, and the process exits with status 1.
        """
        for command in cmd_list:
            pool.addCommand(command)
        pool.join()

        failures = []
        for completed in pool.getCompletedItems():
            results = completed.get_results()
            if results.wasSuccessful():
                continue
            raw_stderr = results.stderr
            try:
                # Segment-side failures are expected to arrive as JSON on stderr.
                parsed_error = json.loads(raw_stderr)
            except ValueError:
                #TODO Do we need this except and can we set dbid to None or should we rely on item.recovery_info?
                parsed_error = recoveryinfo.RecoveryError(
                    recoveryinfo.RecoveryErrorType.DEFAULT_ERROR, raw_stderr,
                    None, None, None, None)
            failures.append(parsed_error)

        if not failures:
            sys.exit(0)

        str_error = recoveryinfo.serialize_list(failures)
        print(str_error, file=sys.stderr)
        if options.verbose:
            logger.exception(str_error)
        logger.error(str_error)
        sys.exit(1)
Example #2
0
 def _do_setup_for_recovery(self, recovery_info_by_host):
     """Dispatch one GpSegSetupRecovery command per host and wait for all.

     Each command validates and prepares the data directories for the
     segments being recovered on its host.  Returns the completed command
     items; the pool's completed list is drained before returning.
     """
     self.__logger.info('Setting up the required segments for recovery')
     setup_cmds = [
         gp.GpSegSetupRecovery(
             'Run validation checks and setup data directories for recovery',
             recoveryinfo.serialize_list(host_recovery_list),
             gplog.get_logger_dir(),
             verbose=gplog.logging_is_verbose(),
             batchSize=self.__parallelPerHost,
             remoteHost=host,
             forceoverwrite=self.__forceoverwrite)
         for host, host_recovery_list in recovery_info_by_host.items()
     ]
     for setup_cmd in setup_cmds:
         self.__pool.addCommand(setup_cmd)
     # Quiet mode waits silently; otherwise show a progress indicator.
     if self.__quiet:
         self.__pool.join()
     else:
         base.join_and_indicate_progress(self.__pool)
     results = self.__pool.getCompletedItems()
     self.__pool.empty_completed_items()
     return results
Example #3
0
    def _do_recovery(self, recovery_info_by_host, gpEnv):
        """
        Recover and start segments by dispatching gp.GpSegRecovery to each
        host; that utility internally calls either pg_basebackup or
        pg_rewind.  gprecoverseg generates a per-run (timestamp-based)
        progress log filename, passed via the confinfo parameter, which it
        tails to show recovery progress to the user and removes when done.
        :param recovery_info_by_host: dict of host name -> list of recovery
            info objects for the segments to recover on that host
        :param gpEnv: environment providing the coordinator data directory
        :return: completed command items from the worker pool
        """
        self.__logger.info(
            'Initiating segment recovery. Upon completion, will start the successfully recovered segments'
        )
        recovery_cmds = []
        progress_cmds = []
        era = read_era(gpEnv.getCoordinatorDataDir(), logger=self.__logger)
        for host, host_recovery_list in recovery_info_by_host.items():
            # Optionally one progress-tailing command per segment on this host.
            for recovery_info in host_recovery_list:
                progress_cmd = self._get_progress_cmd(
                    recovery_info.progress_file,
                    recovery_info.target_segment_dbid,
                    host)
                if progress_cmd:
                    progress_cmds.append(progress_cmd)

            recovery_cmds.append(
                gp.GpSegRecovery(
                    'Recover segments',
                    recoveryinfo.serialize_list(host_recovery_list),
                    gplog.get_logger_dir(),
                    verbose=gplog.logging_is_verbose(),
                    batchSize=self.__parallelPerHost,
                    remoteHost=host,
                    era=era,
                    forceoverwrite=self.__forceoverwrite))

        return self.__runWaitAndCheckWorkerPoolForErrorsAndClear(
            recovery_cmds, suppressErrorCheck=True, progressCmds=progress_cmds)