def create_parser():
    """Build and return the option parser for the Greenplum Package Manager."""
    parser = OptParser(option_class=OptChecker,
                       description="Greenplum Package Manager",
                       version='%prog version $Revision: #1 $')
    parser.setHelp([])

    addStandardLoggingAndHelpOptions(parser, includeNonInteractiveOption=True)
    # The standard -q/-l options conflict with gppkg's own flags; drop them.
    parser.remove_option('-q')
    parser.remove_option('-l')

    general = OptionGroup(parser, 'General Options')
    parser.add_option_group(general)
    addMasterDirectoryOptionForSingleClusterProgram(general)

    # TODO: AK: Eventually, these options may need to be flexible enough to accept
    # multiple packages in one invocation. If so, the structure of this parser may
    # need to change.
    general.add_option('-i', '--install', help='install the given gppkg', metavar='<package>')
    general.add_option('-u', '--update', help='update the given gppkg', metavar='<package>')
    general.add_option('-r', '--remove', help='remove the given gppkg', metavar='<name>-<version>')
    general.add_option('-q', '--query', help='query the gppkg database or a particular gppkg',
                       action='store_true')
    general.add_option('-b', '--build', help='build a gppkg', metavar='<directory>')
    general.add_option('-c', '--clean', help='clean the cluster of the given gppkg',
                       action='store_true')
    general.add_option('--migrate', help='migrate gppkgs from a separate $GPHOME',
                       metavar='<from_gphome> <to_gphome>', action='store_true', default=False)

    query = OptionGroup(parser, 'Query Options')
    parser.add_option_group(query)
    query.add_option('--info', action='store_true',
                     help='print information about the gppkg including name, version, description')
    query.add_option('--list', action='store_true',
                     help='print all the files present in the gppkg')
    query.add_option('--all', action='store_true',
                     help='print all the gppkgs installed by gppkg')

    return parser
def createParser():
    """Build the parser for the internal single-host segment status query tool."""
    parser = OptParser(option_class=OptChecker,
                       description="Gets status from segments on a single host "
                                   "using a transition message. Internal-use only.",
                       version="%prog version $Revision: #1 $")
    parser.setHelp([])
    addStandardLoggingAndHelpOptions(parser, True)

    parser.add_option("-s", None, type="string", dest="statusQueryRequests",
                      metavar="<statusQueryRequests>", help="Status Query Message")
    parser.add_option("-D", "--dblist", type="string", action="append",
                      dest="dirList", metavar="<dirList>", help="Directory List")

    parser.set_defaults()
    return parser
def parseargs():
    """Parse command line arguments and return the validated options object.

    Requires --entry, and exactly one of --value / --removeonly; exits with
    status 1 when the requirements are not met.
    """
    parser = OptParser(option_class=OptChecker)
    parser.setHelp(_help)
    parser.remove_option('-h')
    parser.add_option('-h', '-?', '--help', action='help',
                      help='show this help message and exit')
    parser.add_option('--entry', type='string')
    parser.add_option('--value', type='string')
    parser.add_option('--removeonly', action='store_true')
    parser.set_defaults(removeonly=False)

    # Parse the command line arguments
    (options, args) = parser.parse_args()

    # sanity check
    # FIX: use the parenthesized print form — the original bare `print "..."`
    # statements are a syntax error under Python 3; this form works on both 2 and 3.
    if not options.entry:
        print("--entry is required")
        sys.exit(1)
    if (not options.value) and (not options.removeonly):
        print("Select either --value or --removeonly")
        sys.exit(1)

    return options
def createParser():
    """Construct the option parser for the internal segment-status utility."""
    parser = OptParser(option_class=OptChecker,
                       description="Gets status from segments on a single host "
                                   "using a transition message. Internal-use only.",
                       version='%prog version $Revision: #1 $')
    parser.setHelp([])
    addStandardLoggingAndHelpOptions(parser, True)

    target = parser
    target.add_option("-s", None,
                      type="string",
                      dest="statusQueryRequests",
                      metavar="<statusQueryRequests>",
                      help="Status Query Message")
    target.add_option("-D", "--dblist",
                      type="string",
                      action="append",
                      dest="dirList",
                      metavar="<dirList>",
                      help="Directory List")

    parser.set_defaults()
    return parser
def create_parser():
    """Create the command line parser object for gpkill"""
    help_text = []
    parser = OptParser(option_class=OptChecker,
                       description='Check or Terminate a Greenplum Database process.',
                       version='%prog version $Revision: #1 $')
    parser.setHelp(help_text)

    addStandardLoggingAndHelpOptions(parser, True)
    # The standard -l and -a options are not meaningful for gpkill.
    parser.remove_option('-l')
    parser.remove_option('-a')

    check_group = OptionGroup(parser, 'Check Options')
    parser.add_option_group(check_group)
    check_group.add_option('--check', metavar='pid', action='store_true',
                           help='Only returns status 0 if pid may be killed without gpkill, status 1 otherwise.')

    return parser
def createParser():
    """Build the option parser for the segment shutdown utility."""
    parser = OptParser(option_class=OptChecker,
                       description=' '.join(description.split()),
                       version='%prog version $Revision: #12 $')
    parser.setHelp([])
    addStandardLoggingAndHelpOptions(parser, includeNonInteractiveOption=False)

    parser.add_option("-D", "--db",
                      dest="dblist", action="append", type="string")
    parser.add_option("-V", "--gp-version",
                      dest="gpversion", metavar="GP_VERSION",
                      help="expected software version")
    parser.add_option("-m", "--mode",
                      dest="mode", metavar="<MODE>",
                      help="how to shutdown. modes are smart,fast, or immediate")
    parser.add_option("-t", "--timeout",
                      dest="timeout", type="int",
                      default=SEGMENT_STOP_TIMEOUT_DEFAULT,
                      help="seconds to wait")

    return parser
def createParser():
    """Build the option parser for the internal clean-segment utility."""
    description = (""" Clean segment directories. """)
    help = [""" To be used internally only. """]

    parser = OptParser(option_class=OptChecker,
                       description=' '.join(description.split()),
                       version='%prog version $Revision: #1 $')
    parser.setHelp(help)
    addStandardLoggingAndHelpOptions(parser, True)

    clean_group = OptionGroup(parser, "Clean Segment Options")
    parser.add_option_group(clean_group)
    clean_group.add_option('-p', None,
                           dest="pickledArguments",
                           type='string',
                           default=None,
                           metavar="<pickledArguments>",
                           help="The arguments passed from the original script")

    parser.set_defaults()
    return parser
def createParser():
    """
    Create parser expected by simple_main
    """
    parser = OptParser(option_class=OptChecker,
                       description=' '.join(DESCRIPTION.split()),
                       version='%prog version main build dev')
    parser.setHelp(HELP)

    # Note that this mirroringmode parameter should only be either mirrorless or
    # quiescent.  If quiescent then it is implied that there is pickled transition
    # data that will be provided (using -p) to immediately convert to a primary or
    # a mirror.
    addStandardLoggingAndHelpOptions(parser, includeNonInteractiveOption=False)

    parser.add_option("-C", "--collation", type="string",
                      help="values for lc_collate, lc_monetary, lc_numeric separated by :")
    parser.add_option("-D", "--dblist", dest="dblist", action="append", type="string")
    parser.add_option("-M", "--mirroringmode", dest="mirroringMode", type="string")
    parser.add_option("-p", "--pickledTransitionData", dest="pickledTransitionData", type="string")
    parser.add_option("-V", "--gp-version", dest="gpversion", metavar="GP_VERSION",
                      help="expected software version")
    parser.add_option("-n", "--numsegments", dest="num_cids",
                      help="number of distinct content ids in cluster")
    parser.add_option("", "--era", dest="era", help="master era")
    parser.add_option("-t", "--timeout", dest="timeout", type="int",
                      default=gp.SEGMENT_TIMEOUT_DEFAULT, help="seconds to wait")
    parser.add_option('-U', '--specialMode', type='choice',
                      choices=['upgrade', 'maintenance'],
                      metavar='upgrade|maintenance', action='store', default=None,
                      help='start the instance in upgrade or maintenance mode')
    parser.add_option('', '--wrapper', dest="wrapper", default=None, type='string')
    parser.add_option('', '--wrapper-args', dest="wrapper_args", default=None, type='string')

    return parser
def parseargs():
    """Parse the command line; returns (options, args), exiting on any positional args."""
    parser = OptParser(option_class=OptChecker,
                       description=' '.join(DESCRIPTION.split()),
                       version='%prog version $Revision: #12 $')
    parser.setHelp(_help)
    parser.set_usage('%prog ' + _usage)
    parser.remove_option('-h')

    parser.add_option('-f', '--file', default='',
                      help='the name of a file containing the re-sync file list.')
    parser.add_option('-v', '--verbose', action='store_true', default=False,
                      help='debug output.')
    parser.add_option('-h', '-?', '--help', action='help', default=False,
                      help='show this help message and exit.')
    parser.add_option('--usage', action="briefhelp")
    parser.add_option('-d', '--master_data_directory', type='string',
                      dest="masterDataDirectory",
                      metavar="<master data directory>",
                      default=get_masterdatadir(),
                      help="Optional. The master host data directory. If not specified, the value set for $MASTER_DATA_DIRECTORY will be used.")
    parser.add_option('-a', dest='confirm', default=True, action='store_false',
                      help='don\'t ask to confirm repairs')

    # Parse the command line arguments
    (options, args) = parser.parse_args()

    # This tool accepts no positional arguments.
    if len(args) > 0:
        logger.error('Unknown argument %s' % args[0])
        parser.exit()

    return options, args
def createParser():
    """
    Create parser expected by simple_main
    """
    parser = OptParser(option_class=OptChecker,
                       description=' '.join(DESCRIPTION.split()),
                       version='%prog version main build dev')
    parser.setHelp(HELP)

    # Note that this mirroringmode parameter should only be either mirrorless or
    # quiescent.  If quiescent then it is implied that there is pickled transition
    # data that will be provided (using -p) to immediately convert to a primary or
    # a mirror.
    addStandardLoggingAndHelpOptions(parser, includeNonInteractiveOption=False)

    parser.add_option("-D", "--dblist", dest="dblist", action="append", type="string")
    parser.add_option("-M", "--mirroringmode", dest="mirroringMode", type="string")
    parser.add_option("-p", "--pickledTransitionData", dest="pickledTransitionData",
                      type="string")
    parser.add_option("-V", "--gp-version", dest="gpversion", metavar="GP_VERSION",
                      help="expected software version")
    parser.add_option("-n", "--numsegments", dest="num_cids",
                      help="number of distinct content ids in cluster")
    parser.add_option("", "--era", dest="era", help="master era")
    parser.add_option("-t", "--timeout", dest="timeout", type="int",
                      default=gp.SEGMENT_TIMEOUT_DEFAULT, help="seconds to wait")
    parser.add_option('-U', '--specialMode', type='choice',
                      choices=['upgrade', 'maintenance'],
                      metavar='upgrade|maintenance', action='store', default=None,
                      help='start the instance in upgrade or maintenance mode')
    parser.add_option('', '--wrapper', dest="wrapper", default=None, type='string')
    parser.add_option('', '--wrapper-args', dest="wrapper_args", default=None, type='string')
    parser.add_option('', '--master-checksum-version', dest="master_checksum_version",
                      default=None, type='string', action="store")
    parser.add_option('-B', '--parallel', type="int", dest="parallel",
                      default=gp.DEFAULT_GPSTART_NUM_WORKERS,
                      help='maximum size of a threadpool to start segments')

    return parser
def parseargs():
    """Parse command line options and hand them to validate_args()."""
    parser = OptParser(option_class=OptChecker)
    parser.setHelp(_help)
    parser.remove_option('-h')

    parser.add_option('-h', '-?', '--help', action='help',
                      help='show this help message and exit')
    parser.add_option('--file', type='string',
                      help='Required: The absolute path of postgresql.conf')
    parser.add_option('--add-parameter', type='string',
                      help='The configuration parameter to add. --value is required.')
    parser.add_option('--value', type='string',
                      help='The configuration value to add when using --add-parameter.')
    parser.add_option('--get-parameter', type='string',
                      help='The configuration parameter value to return.')
    parser.add_option('--remove-parameter', type='string',
                      help='The configuration parameter value to disable.')

    (options, args) = parser.parse_args()
    return validate_args(options)
def create_parser():
    """Assemble the gppkg command line parser (general + query option groups)."""
    parser = OptParser(option_class=OptChecker,
                       description="Greenplum Package Manager",
                       version='%prog version $Revision: #1 $')
    parser.setHelp([])

    addStandardLoggingAndHelpOptions(parser, includeNonInteractiveOption=True)
    # -q and -l from the standard set collide with gppkg's own short options.
    parser.remove_option('-q')
    parser.remove_option('-l')

    group = OptionGroup(parser, 'General Options')
    parser.add_option_group(group)
    addMasterDirectoryOptionForSingleClusterProgram(group)

    # TODO: AK: Eventually, these options may need to be flexible enough to accept
    # multiple packages in one invocation. If so, the structure of this parser may
    # need to change.
    group.add_option('-i', '--install', help='install the given gppkg',
                     metavar='<package>')
    group.add_option('-u', '--update', help='update the given gppkg',
                     metavar='<package>')
    group.add_option('-r', '--remove', help='remove the given gppkg',
                     metavar='<name>-<version>')
    group.add_option('-q', '--query', action='store_true',
                     help='query the gppkg database or a particular gppkg')
    group.add_option('-b', '--build', help='build a gppkg', metavar='<directory>')
    group.add_option('-c', '--clean', action='store_true',
                     help='clean the cluster of the given gppkg')
    group.add_option('--migrate', help='migrate gppkgs from a separate $GPHOME',
                     metavar='<from_gphome> <to_gphome>', action='store_true',
                     default=False)
    group.add_option('-f', '--filename', help='set specific package name',
                     metavar='<name>')

    group = OptionGroup(parser, 'Query Options')
    parser.add_option_group(group)
    group.add_option('--info', action='store_true',
                     help='print information about the gppkg including name, version, description')
    group.add_option('--list', action='store_true',
                     help='print all the files present in the gppkg')
    group.add_option('--all', action='store_true',
                     help='print all the gppkgs installed by gppkg')

    return parser
def createParser():
    """Build the option parser for segment recovery (coordinator-directory variant)."""
    description = ("Recover a failed segment")
    help_text = [""]

    parser = OptParser(option_class=OptChecker,
                       description=' '.join(description.split()),
                       version='%prog version $Revision$')
    parser.setHelp(help_text)

    loggingGroup = addStandardLoggingAndHelpOptions(parser, True)
    loggingGroup.add_option("-s", None, default=None, action='store_false',
                            dest='showProgressInplace',
                            help='Show pg_basebackup/pg_rewind progress sequentially instead of inplace')
    loggingGroup.add_option("--no-progress", dest="showProgress", default=True,
                            action="store_false",
                            help="Suppress pg_basebackup/pg_rewind progress output")

    grp = OptionGroup(parser, "Connection Options")
    parser.add_option_group(grp)
    addCoordinatorDirectoryOptionForSingleClusterProgram(grp)

    grp = OptionGroup(parser, "Recovery Source Options")
    parser.add_option_group(grp)
    grp.add_option("-i", None, type="string", dest="recoveryConfigFile",
                   metavar="<configFile>", help="Recovery configuration file")
    grp.add_option("-o", None, dest="outputSampleConfigFile",
                   metavar="<configFile>", type="string",
                   help="Sample configuration file name to output; "
                        "this file can be passed to a subsequent call using -i option")

    grp = OptionGroup(parser, "Recovery Destination Options")
    parser.add_option_group(grp)
    grp.add_option("-p", None, type="string", dest="newRecoverHosts",
                   metavar="<targetHosts>",
                   help="Spare new hosts to which to recover segments")

    grp = OptionGroup(parser, "Recovery Options")
    parser.add_option_group(grp)
    grp.add_option('-F', None, default=False, action='store_true',
                   dest="forceFullResynchronization",
                   metavar="<forceFullResynchronization>",
                   help="Force full segment resynchronization")
    # NOTE(review): the help string below contains an embedded newline in the
    # original source — preserved here via \n.
    grp.add_option("-B", None, type="int", default=16, dest="parallelDegree",
                   metavar="<parallelDegree>",
                   help="Max # of workers to use for building recovery segments. \n[default: %default]")
    grp.add_option("-r", None, default=False, action='store_true',
                   dest='rebalanceSegments', help='Rebalance synchronized segments.')
    grp.add_option('', '--hba-hostnames', action='store_true', dest='hba_hostnames',
                   help='use hostnames instead of CIDR in pg_hba.conf')

    parser.set_defaults()
    return parser
def createParser():
    """Build the option parser for segment recovery (master-directory variant)."""
    description = ("Recover a failed segment")
    help_text = [""]

    parser = OptParser(option_class=OptChecker,
                       description=' '.join(description.split()),
                       version='%prog version $Revision$')
    parser.setHelp(help_text)

    loggingGroup = addStandardLoggingAndHelpOptions(parser, True)
    loggingGroup.add_option("-s", None, default=None, action='store_false',
                            dest='showProgressInplace',
                            help='Show pg_basebackup progress sequentially instead of inplace')
    loggingGroup.add_option("--no-progress", dest="showProgress", default=True,
                            action="store_false",
                            help="Suppress pg_basebackup progress output")

    group = OptionGroup(parser, "Connection Options")
    parser.add_option_group(group)
    addMasterDirectoryOptionForSingleClusterProgram(group)

    group = OptionGroup(parser, "Recovery Source Options")
    parser.add_option_group(group)
    group.add_option("-i", None, type="string", dest="recoveryConfigFile",
                     metavar="<configFile>", help="Recovery configuration file")
    group.add_option("-o", None, dest="outputSampleConfigFile",
                     metavar="<configFile>", type="string",
                     help="Sample configuration file name to output; "
                          "this file can be passed to a subsequent call using -i option")

    group = OptionGroup(parser, "Recovery Destination Options")
    parser.add_option_group(group)
    group.add_option("-p", None, type="string", dest="newRecoverHosts",
                     metavar="<targetHosts>",
                     help="Spare new hosts to which to recover segments")

    group = OptionGroup(parser, "Recovery Options")
    parser.add_option_group(group)
    group.add_option('-F', None, default=False, action='store_true',
                     dest="forceFullResynchronization",
                     metavar="<forceFullResynchronization>",
                     help="Force full segment resynchronization")
    # NOTE(review): the help string below contains an embedded newline in the
    # original source — preserved here via \n.
    group.add_option("-B", None, type="int", default=16, dest="parallelDegree",
                     metavar="<parallelDegree>",
                     help="Max # of workers to use for building recovery segments. \n[default: %default]")
    group.add_option("-r", None, default=False, action='store_true',
                     dest='rebalanceSegments', help='Rebalance synchronized segments.')

    parser.set_defaults()
    return parser
def createParser():
    """Build the option parser for adding mirrors to a system (with --hba-hostnames)."""
    description = ("Add mirrors to a system")
    help_text = [""]

    parser = OptParser(option_class=OptChecker,
                       description=' '.join(description.split()),
                       version='%prog version $Revision$')
    parser.setHelp(help_text)
    addStandardLoggingAndHelpOptions(parser, True)

    group = OptionGroup(parser, "Connection Options")
    parser.add_option_group(group)
    addMasterDirectoryOptionForSingleClusterProgram(group)

    group = OptionGroup(parser, "Mirroring Options")
    parser.add_option_group(group)
    group.add_option("-i", None, type="string", dest="mirrorConfigFile",
                     metavar="<configFile>", help="Mirroring configuration file")
    group.add_option("-o", None, dest="outputSampleConfigFile",
                     metavar="<configFile>", type="string",
                     help="Sample configuration file name to output; "
                          "this file can be passed to a subsequent call using -i option")
    group.add_option("-m", None, type="string", dest="mirrorDataDirConfigFile",
                     metavar="<dataDirConfigFile>",
                     help="Mirroring data directory configuration file")
    group.add_option('-s', default=False, action='store_true', dest="spreadMirroring",
                     help="use spread mirroring for placing mirrors on hosts")
    group.add_option("-p", None, type="int", default=1000, dest="mirrorOffset",
                     metavar="<mirrorOffset>",
                     help="Mirror port offset. The mirror port offset will be used multiple times "
                          "to derive three sets of ports [default: %default]")
    group.add_option("-B", None, type="int", default=16, dest="parallelDegree",
                     metavar="<parallelDegree>",
                     help="Max # of workers to use for building recovery segments. [default: %default]")
    group.add_option('', '--hba-hostnames', action='store_true', dest='hba_hostnames',
                     help='use hostnames instead of CIDR in pg_hba.conf')

    parser.set_defaults()
    return parser
def createParser():
    """Build the option parser for adding mirrors to a system."""
    description = ("Add mirrors to a system")
    help_text = [""]

    parser = OptParser(option_class=OptChecker,
                       description=' '.join(description.split()),
                       version='%prog version $Revision$')
    parser.setHelp(help_text)
    addStandardLoggingAndHelpOptions(parser, True)

    conn = OptionGroup(parser, "Connection Options")
    parser.add_option_group(conn)
    addMasterDirectoryOptionForSingleClusterProgram(conn)

    mirroring = OptionGroup(parser, "Mirroring Options")
    parser.add_option_group(mirroring)
    mirroring.add_option("-i", None, type="string", dest="mirrorConfigFile",
                         metavar="<configFile>", help="Mirroring configuration file")
    mirroring.add_option("-o", None, dest="outputSampleConfigFile",
                         metavar="<configFile>", type="string",
                         help="Sample configuration file name to output; "
                              "this file can be passed to a subsequent call using -i option")
    mirroring.add_option("-m", None, type="string", dest="mirrorDataDirConfigFile",
                         metavar="<dataDirConfigFile>",
                         help="Mirroring data directory configuration file")
    mirroring.add_option('-s', default=False, action='store_true',
                         dest="spreadMirroring",
                         help="use spread mirroring for placing mirrors on hosts")
    mirroring.add_option("-p", None, type="int", default=1000, dest="mirrorOffset",
                         metavar="<mirrorOffset>",
                         help="Mirror port offset. The mirror port offset will be used multiple times "
                              "to derive three sets of ports [default: %default]")
    mirroring.add_option("-B", None, type="int", default=16, dest="parallelDegree",
                         metavar="<parallelDegree>",
                         help="Max # of workers to use for building recovery segments. [default: %default]")

    parser.set_defaults()
    return parser
def createParser():
    """Build the option parser for segment recovery."""
    description = ("Recover a failed segment")
    help_text = [""]

    parser = OptParser(option_class=OptChecker,
                       description=' '.join(description.split()),
                       version='%prog version $Revision$')
    parser.setHelp(help_text)
    addStandardLoggingAndHelpOptions(parser, True)

    grp = OptionGroup(parser, "Connection Options")
    parser.add_option_group(grp)
    addMasterDirectoryOptionForSingleClusterProgram(grp)

    grp = OptionGroup(parser, "Recovery Source Options")
    parser.add_option_group(grp)
    grp.add_option("-i", None, type="string", dest="recoveryConfigFile",
                   metavar="<configFile>", help="Recovery configuration file")
    grp.add_option("-o", None, dest="outputSampleConfigFile",
                   metavar="<configFile>", type="string",
                   help="Sample configuration file name to output; "
                        "this file can be passed to a subsequent call using -i option")

    grp = OptionGroup(parser, "Recovery Destination Options")
    parser.add_option_group(grp)
    grp.add_option("-p", None, type="string", dest="newRecoverHosts",
                   metavar="<targetHosts>",
                   help="Spare new hosts to which to recover segments")

    grp = OptionGroup(parser, "Recovery Options")
    parser.add_option_group(grp)
    grp.add_option('-F', None, default=False, action='store_true',
                   dest="forceFullResynchronization",
                   metavar="<forceFullResynchronization>",
                   help="Force full segment resynchronization")
    grp.add_option("-B", None, type="int", default=16, dest="parallelDegree",
                   metavar="<parallelDegree>",
                   help="Max # of workers to use for building recovery segments. [default: %default]")
    grp.add_option("-r", None, default=False, action='store_true',
                   dest='rebalanceSegments', help='Rebalance synchronized segments.')

    parser.set_defaults()
    return parser
def parseargs():
    """Parse sys.argv and return (options, args); positional args are rejected."""
    parser = OptParser(option_class=OptChecker,
                       description=' '.join(DESCRIPTION.split()),
                       version='%prog version $Revision: #12 $')
    parser.setHelp(_help)
    parser.set_usage('%prog ' + _usage)
    parser.remove_option('-h')

    parser.add_option('-f', '--file',
                      default='',
                      help='the name of a file containing the re-sync file list.')
    parser.add_option('-v', '--verbose',
                      action='store_true',
                      default=False,
                      help='debug output.')
    parser.add_option('-h', '-?', '--help',
                      action='help',
                      default=False,
                      help='show this help message and exit.')
    parser.add_option('--usage', action="briefhelp")
    parser.add_option('-d', '--master_data_directory',
                      type='string',
                      dest="masterDataDirectory",
                      metavar="<master data directory>",
                      default=get_masterdatadir(),
                      help="Optional. The master host data directory. If not specified, the value set for $MASTER_DATA_DIRECTORY will be used.")
    parser.add_option('-a',
                      dest='confirm',
                      default=True,
                      action='store_false',
                      help='don\'t ask to confirm repairs')

    # Parse the command line arguments
    (options, args) = parser.parse_args()

    if len(args) > 0:
        # No positional arguments are accepted.
        logger.error('Unknown argument %s' % args[0])
        parser.exit()

    return options, args
def parse_command_line():
    """Parse the Performance Monitor web server control options.

    Returns (options, args).  Exits after printing version info for --version,
    and prints help and exits when more than one action flag is given.
    """
    parser = OptParser(option_class=OptChecker,
                       description=' '.join(_description.split()))
    parser.setHelp(_help)
    parser.set_usage('%prog ' + _usage)
    parser.remove_option('-h')

    parser.add_option('--start', action='store_true',
                      help='Start the Greenplum Performance Monitor web server.')
    parser.add_option('--stop', action='store_true',
                      help='Stop the Greenplum Performance Monitor web server.')
    parser.add_option('--restart', action='store_true',
                      help='Restart the Greenplum Performance Monitor web server.')
    # FIX: corrected typo "Gerrnplum" -> "Greenplum" in the user-facing help text.
    parser.add_option('--status', action='store_true',
                      help='Display the status of the Greenplum Performance Monitor web server.')
    parser.add_option('--setup', action='store_true',
                      help='Setup the Greenplum Performance Monitor web server.')
    parser.add_option('--version', action='store_true',
                      help='Display version information')
    parser.add_option('--upgrade', action='store_true',
                      help='Upgrade a previous installation of the Greenplum Performance Monitors web UI')

    parser.set_defaults(verbose=False, filters=[], slice=(None, None))

    # Parse the command line arguments
    (options, args) = parser.parse_args()

    if options.version:
        version()
        sys.exit(0)

    # The action flags are mutually exclusive.
    # FIX: --restart was missing from this count, so combinations such as
    # "--restart --start" were silently accepted.
    opt_count = 0
    for flag in (options.start, options.stop, options.restart,
                 options.setup, options.upgrade, options.status):
        if flag:
            opt_count += 1
    if opt_count > 1:
        parser.print_help()
        parser.exit()

    return options, args
def createParser():
    """Construct the option parser for the segment shutdown utility."""
    parser = OptParser(option_class=OptChecker,
                       description=' '.join(description.split()),
                       version='%prog version $Revision: #12 $')
    parser.setHelp([])
    addStandardLoggingAndHelpOptions(parser, includeNonInteractiveOption=False)

    parser.add_option("-D", "--db", dest="dblist",
                      action="append", type="string")
    parser.add_option("-V", "--gp-version", dest="gpversion",
                      metavar="GP_VERSION", help="expected software version")
    parser.add_option("-m", "--mode", dest="mode",
                      metavar="<MODE>",
                      help="how to shutdown. modes are smart,fast, or immediate")
    parser.add_option("-t", "--timeout", dest="timeout",
                      type="int", default=SEGMENT_STOP_TIMEOUT_DEFAULT,
                      help="seconds to wait")

    return parser
def createParser():
    """
    Constructs and returns an option parser. Called by simple_main()
    """
    parser = OptParser(option_class=OptChecker,
                       version="%prog version $Revision: $")
    parser.setHelp(__help__)
    addStandardLoggingAndHelpOptions(parser, False)

    required = OptionGroup(parser, "Required Options")
    required.add_option("-d", "--directory", type="string")
    required.add_option("-i", "--dbid", type="int")
    parser.add_option_group(required)

    parser.set_defaults()
    return parser
def parseargs():
    """Parse the command line and return the result of validate_args(options)."""
    parser = OptParser(option_class=OptChecker,
                       description=' '.join(description.split()))
    parser.setHelp([])

    parser.add_option('-d', '--data-dir',
                      dest='datadir',
                      metavar='<data dir of the segment>',
                      help='Data dir of the segment to update pg_hba.conf')
    parser.add_option("-e", "--entries",
                      dest="entries",
                      metavar="<entries to be added>",
                      help="entries to be added to pg_hba.conf")

    options, args = parser.parse_args()
    return validate_args(options)
def createParser():
    """
    Constructs and returns an option parser. Called by simple_main()
    """
    parser = OptParser(option_class=OptChecker,
                       version='%prog version $Revision: $')
    parser.setHelp(__help__)
    addStandardLoggingAndHelpOptions(parser, False)

    opts_group = OptionGroup(parser, "Required Options")
    opts_group.add_option('-d', '--directory', type='string')
    opts_group.add_option('-i', '--dbid', type='int')
    parser.add_option_group(opts_group)

    parser.set_defaults()
    return parser
def create_parser():
    """Create the command line parser object for gpkill"""
    help = []
    parser = OptParser(option_class=OptChecker,
                       description='Check or Terminate a Greenplum Database process.',
                       version='%prog version $Revision: #1 $')
    parser.setHelp(help)
    addStandardLoggingAndHelpOptions(parser, True)

    # Standard -l / -a options are not applicable here.
    parser.remove_option('-l')
    parser.remove_option('-a')

    checks = OptionGroup(parser, 'Check Options')
    parser.add_option_group(checks)
    checks.add_option('--check',
                      metavar='pid',
                      action='store_true',
                      help='Only returns status 0 if pid may be killed without gpkill, status 1 otherwise.')

    return parser
def createParser():
    """
    Constructs and returns an option parser. Called by simple_main()
    """
    parser = OptParser(option_class=OptChecker,
                       description=' '.join(description.split()),
                       version='%prog version $Revision: #1 $')
    parser.setHelp([])
    addStandardLoggingAndHelpOptions(parser, includeNonInteractiveOption=False)

    parser.add_option("-C", "--collation", type="string",
                      help="values for lc_collate, lc_monetary, lc_numeric separated by :")
    parser.add_option("-D", "--datadir", dest="dblist", action="append", type="string")
    parser.add_option("-p", "--pickledTransitionData", dest="pickledTransitionData",
                      type="string")
    parser.add_option("-M", "--mirroringMode", dest="mirroringMode", type="string")
    parser.add_option("-V", "--gp-version", dest="gpversion", metavar="GP_VERSION",
                      help="expected software version")

    parser.set_defaults(verbose=False, filters=[], slice=(None, None))
    return parser
def create_parser():
    """Return a fully configured option parser for the Greenplum Package Manager."""
    parser = OptParser(option_class=OptChecker,
                       description="Greenplum Package Manager",
                       version="%prog version $Revision: #1 $")
    parser.setHelp([])

    addStandardLoggingAndHelpOptions(parser, includeNonInteractiveOption=True)
    # Free up -q and -l for gppkg's own use.
    parser.remove_option("-q")
    parser.remove_option("-l")

    general_opts = OptionGroup(parser, "General Options")
    parser.add_option_group(general_opts)
    addMasterDirectoryOptionForSingleClusterProgram(general_opts)

    # TODO: AK: Eventually, these options may need to be flexible enough to accept
    # multiple packages in one invocation. If so, the structure of this parser may
    # need to change.
    general_opts.add_option("-i", "--install", metavar="<package>",
                            help="install the given gppkg")
    general_opts.add_option("-u", "--update", metavar="<package>",
                            help="update the given gppkg")
    general_opts.add_option("-r", "--remove", metavar="<name>-<version>",
                            help="remove the given gppkg")
    general_opts.add_option("-q", "--query", action="store_true",
                            help="query the gppkg database or a particular gppkg")
    general_opts.add_option("-b", "--build", metavar="<directory>",
                            help="build a gppkg")
    general_opts.add_option("-c", "--clean", action="store_true",
                            help="clean the cluster of the given gppkg")
    general_opts.add_option("--migrate", action="store_true", default=False,
                            metavar="<from_gphome> <to_gphome>",
                            help="migrate gppkgs from a separate $GPHOME")

    query_opts = OptionGroup(parser, "Query Options")
    parser.add_option_group(query_opts)
    query_opts.add_option("--info", action="store_true",
                          help="print information about the gppkg including name, version, description")
    query_opts.add_option("--list", action="store_true",
                          help="print all the files present in the gppkg")
    query_opts.add_option("--all", action="store_true",
                          help="print all the gppkgs installed by gppkg")

    return parser
def create_parser():
    """
    Build the option parser for the persistent-tables backup/restore tool.

    Returns an OptParser exposing the master-connection options plus the
    backup/restore option group. Option names, dests and types are unchanged.
    """
    parser = OptParser(option_class=OptChecker,
                       version='%prog version $Revision: #1 $',
                       # BUG FIX: description read "backp" (typo) before.
                       description='Persistent tables backup and restore')
    addStandardLoggingAndHelpOptions(parser, includeNonInteractiveOption=True)

    addTo = OptionGroup(parser, 'Connection opts')
    parser.add_option_group(addTo)
    addMasterDirectoryOptionForSingleClusterProgram(addTo)

    addTo = OptionGroup(parser, 'Persistent tables backup and restore options')
    # BUG FIX: this group was never passed to parser.add_option_group(), so
    # (with optparse semantics) none of the options below were recognized on
    # the command line or shown in --help.
    parser.add_option_group(addTo)
    addTo.add_option('--backup', metavar="<pickled dbid info>", type="string",
                     help="A list of dbid info where backups need to be done in pickled format")
    addTo.add_option('--restore', metavar="<pickled dbid info>", type="string",
                     help="A list of dbid info where restore needs to be done in pickled format")
    addTo.add_option('--validate-backups', metavar="<pickled dbid info>", type="string",
                     help="A list of dbid info where validation needs to be done in pickled format")
    addTo.add_option('--validate-backup-dir', metavar="<pickled dbid info>", type="string",
                     help="A list of dbid info where validation needs to be done in pickled format")
    addTo.add_option('--timestamp', metavar="<timestamp of backup>", type="string",
                     help="A timestamp for the backup that needs to be validated")
    addTo.add_option('--batch-size', metavar="<batch size for the worker pool>", type="int",
                     help="Batch size for parallelism in worker pool")
    addTo.add_option('--backup-dir', metavar="<backup directory>", type="string",
                     help="Backup directory for persistent tables and transaction logs")
    addTo.add_option('--perdbpt', metavar="<per database pt filename>", type="string",
                     help="Filenames for per database persistent files")
    addTo.add_option('--globalpt', metavar="<global pt filenames>", type="string",
                     help="Filenames for global persistent files")
    addTo.add_option('--validate-source-file-only', action='store_true', default=False,
                     help="validate that required source files existed for backup and restore")

    parser.setHelp(["""
    This tool is used to backup persistent table files.
    """])
    return parser
def createParser():
    """
    Construct the command-line option parser.

    Called by simple_main().
    """
    p = OptParser(option_class=OptChecker,
                  description=' '.join(description.split()),
                  version='%prog version $Revision: #1 $')
    p.setHelp([])
    addStandardLoggingAndHelpOptions(p, includeNonInteractiveOption=False)

    p.add_option("-C", "--collation", type="string",
                 help="values for lc_collate, lc_monetary, lc_numeric separated by :")
    # Repeatable: each -D appends another data directory to dblist.
    p.add_option("-D", "--datadir",
                 dest="dblist", action="append", type="string")
    p.add_option("-p", "--pickledTransitionData",
                 dest="pickledTransitionData", type="string")
    p.add_option("-M", "--mirroringMode",
                 dest="mirroringMode", type="string")
    p.add_option("-V", "--gp-version",
                 dest="gpversion", metavar="GP_VERSION",
                 help="expected software version")

    p.set_defaults(verbose=False, filters=[], slice=(None, None))
    return p
def createParser():
    """
    Creates the command line options parser object for gpverify.

    Options are grouped into: request type (--full/--file/--directorytree),
    request options, reporting options, and misc. Interface unchanged.
    """
    description = "Initiates primary/mirror verification."
    help = []

    parser = OptParser(option_class=OptChecker,
                       description=" ".join(description.split()),
                       version="%prog version $Revision: #1 $")
    parser.setHelp(help)
    addStandardLoggingAndHelpOptions(parser, True)

    addTo = OptionGroup(parser, "Request Type")
    parser.add_option_group(addTo)
    addTo.add_option("--full", dest="full", action="store_true",
                     help="Perform a full verification pass. Use --token option to "
                          "give the verification pass an identifier.")
    addTo.add_option("--file", dest="verify_file", metavar="<file>",
                     help="Based on file type, perform either a physical or logical verification of <file>. "
                          "Use --token option to give the verification request an identifier.")
    addTo.add_option("--directorytree", dest="verify_dir", metavar="<verify_dir>",
                     help="Perform a full verification pass on the specified directory. "
                          "Use --token option to assign the verification pass an identifier.")

    addTo = OptionGroup(parser, "Request Options")
    parser.add_option_group(addTo)
    addTo.add_option("--token", dest="token", metavar="<token>",
                     help="A token to use for the request. "
                          "This identifier will be used in the logs and can be used to identify "
                          "a verification pass to the --abort, --suspend, --resume and --results "
                          "options.")
    addTo.add_option("-c", "--content", dest="content", metavar="<content_id>",
                     help="Send verification request only to the primary segment with the given <content_id>.")
    addTo.add_option("--abort", dest="abort", action="store_true",
                     help="Abort a verification request that is in progress. "
                          "Can use --token option to abort a specific verification request.")
    # BUG FIX: the two help fragments concatenated without a space
    # ("...in progress.Can use..."); added the missing separator.
    addTo.add_option("--suspend", dest="suspend", action="store_true",
                     help="Suspend a verification request that is in progress. "
                          "Can use --token option to suspend a specific verification request.")
    addTo.add_option("--resume", dest="resume", action="store_true",
                     help="Resume a suspended verification request. Can use the "
                          "--token option to resume a specific verification request.")
    addTo.add_option("--fileignore", dest="ignore_file", metavar="<ignore_file>",
                     help="Ignore any filenames matching <ignore_file>. Multiple "
                          "files can be specified using a comma separated list.")
    addTo.add_option("--dirignore", dest="ignore_dir", metavar="<ignore_dir>",
                     help="Ignore any directories matching <ignore_dir>. Multiple "
                          "directories can be specified using a comma separated list.")

    addTo = OptionGroup(parser, "Reporting Options")
    parser.add_option_group(addTo)
    # BUG FIX: help fragments concatenated as "Can usethe --token"; added the
    # missing space.
    addTo.add_option("--results", dest="results", action="store_true",
                     help="Display verification results. Can use "
                          "the --token option to view results of a specific verification request.")
    addTo.add_option("--resultslevel", dest="results_level", action="store",
                     metavar="<detail_level>", type=int,
                     help="Level of detail to show for results. Valid levels are from 1 to 10.")
    addTo.add_option("--clean", dest="clean", action="store_true",
                     help="Clean up verification artifacts and the gp_verification_history table.")

    addTo = OptionGroup(parser, "Misc. Options")
    parser.add_option_group(addTo)
    addTo.add_option("-B", "--parallel", action="store", default=64, type=int,
                     help="Number of worker threads used to send verification requests.")

    parser.set_defaults()
    return parser
def createParser():
    """
    Build the option parser for the fault-injection utility (internal-use only).

    Collects the target segments, master connection info, client polling mode,
    and the fault description to send to the segments. Interface unchanged.
    """
    description = ("""
    This utility is NOT SUPPORTED and is for internal-use only.

    Used to inject faults into the file replication code.
    """)
    help = ["""
    Return codes:
      0 - Fault injected
      non-zero: Error or invalid options
    """]

    parser = OptParser(option_class=OptChecker,
                       description=' '.join(description.split()),
                       version='%prog version $Revision$')
    parser.setHelp(help)
    addStandardLoggingAndHelpOptions(parser, False)

    # these options are used to determine the target segments
    addTo = OptionGroup(parser, 'Target Segment Options: ')
    parser.add_option_group(addTo)
    addTo.add_option('-r', '--role', dest="targetRole", type='string', metavar="<role>",
                     help="Role of segments to target: primary, mirror, or primary_mirror")
    addTo.add_option("-s", "--seg_dbid", dest="targetDbId", type="string", metavar="<dbid>",
                     help="The segment dbid on which fault should be set and triggered.")
    addTo.add_option("-H", "--host", dest="targetHost", type="string", metavar="<host>",
                     help="The hostname on which fault should be set and triggered; pass ALL to target all hosts")

    addTo = OptionGroup(parser, 'Master Connection Options')
    parser.add_option_group(addTo)
    addMasterDirectoryOptionForSingleClusterProgram(addTo)
    addTo.add_option("-p", "--master_port", dest="masterPort", type="int", default=None,
                     metavar="<masterPort>",
                     help="DEPRECATED, use MASTER_DATA_DIRECTORY environment variable or -d option. "
                          "The port number of the master database on localhost, "
                          "used to fetch the segment configuration.")

    addTo = OptionGroup(parser, 'Client Polling Options: ')
    parser.add_option_group(addTo)
    addTo.add_option('-m', '--mode', dest="syncMode", type='string', default="async",
                     metavar="<syncMode>",
                     help="Synchronization mode : sync (client waits for fault to occur)"
                          " or async (client only sets fault request on server)")

    # these options are used to build the message for the segments
    addTo = OptionGroup(parser, 'Fault Options: ')
    parser.add_option_group(addTo)
    # NB: This list needs to be kept in sync with:
    #  - FaultInjectorTypeEnumToString
    #  - FaultInjectorType_e
    # NOTE(review): "data_curruption" looks misspelled but may mirror the
    # server-side token name -- confirm against FaultInjectorTypeEnumToString
    # before changing it.
    addTo.add_option('-y', '--type', dest="type", type='string', metavar="<type>",
                     help="fault type: sleep (insert sleep), fault (report fault to postmaster and fts prober), "
                          "fatal (inject FATAL error), panic (inject PANIC error), error (inject ERROR), "
                          "infinite_loop, data_curruption (corrupt data in memory and persistent media), "
                          "suspend (suspend execution), resume (resume execution that was suspended), "
                          "skip (inject skip i.e. skip checkpoint), "
                          "memory_full (all memory is consumed when injected), "
                          "reset (remove fault injection), status (report fault injection status), "
                          "segv (inject a SEGV), "
                          "interrupt (inject an Interrupt), "
                          "finish_pending (set QueryFinishPending to true), "
                          "checkpoint_and_panic (inject a panic following checkpoint) ")
    # BUG FIX: default was the string "10"; optparse does not coerce defaults,
    # so sleepTimeSeconds defaulted to a str, not an int. Also added the
    # missing space between "%default." and "Min Max Range".
    addTo.add_option("-z", "--sleep_time_s", dest="sleepTimeSeconds", type="int", default=10,
                     metavar="<sleepTime>",
                     help="For 'sleep' faults, the amount of time for the sleep. Defaults to %default. "
                          "Min Max Range is [0, 7200 sec] ")
    addTo.add_option('-f', '--fault_name', dest="faultName", type='string', metavar="<name>",
                     help="See src/include/utils/faultinjector_lists.h for list of fault names")
    addTo.add_option("-c", "--ddl_statement", dest="ddlStatement", type="string",
                     metavar="ddlStatement",
                     help="The DDL statement on which fault should be set and triggered "
                          "(i.e. create_database, drop_database, create_table, drop_table)")
    addTo.add_option("-D", "--database_name", dest="databaseName", type="string",
                     metavar="databaseName",
                     help="The database name on which fault should be set and triggered.")
    addTo.add_option("-t", "--table_name", dest="tableName", type="string", metavar="tableName",
                     help="The table name on which fault should be set and triggered.")
    addTo.add_option("-o", "--occurrence", dest="numOccurrences", type="int", default=1,
                     metavar="numOccurrences",
                     help="The number of occurrence of the DDL statement with the database name "
                          "and the table name before fault is triggered. Defaults to %default. Max is 1000. "
                          "Fault is triggered always if set to '0'. ")

    parser.set_defaults()
    return parser
def createParser():
    """
    Build the option parser for the fault-injection utility (internal-use only).

    This variant embeds the full catalogue of fault names in the -f help text.
    Collects the target segments, master connection info, client polling mode,
    and the fault description to send to the segments. Interface unchanged.
    """
    description = ("""
    This utility is NOT SUPPORTED and is for internal-use only.

    Used to inject faults into the file replication code.
    """)
    help = ["""
    Return codes:
      0 - Fault injected
      non-zero: Error or invalid options
    """]

    parser = OptParser(option_class=OptChecker,
                       description=' '.join(description.split()),
                       version='%prog version $Revision$')
    parser.setHelp(help)
    addStandardLoggingAndHelpOptions(parser, False)

    # these options are used to determine the target segments
    addTo = OptionGroup(parser, 'Target Segment Options: ')
    parser.add_option_group(addTo)
    addTo.add_option('-r', '--role', dest="targetRole", type='string', metavar="<role>",
                     help="Role of segments to target: primary, mirror, or primary_mirror")
    addTo.add_option("-s", "--seg_dbid", dest="targetDbId", type="string", metavar="<dbid>",
                     help="The segment dbid on which fault should be set and triggered.")
    addTo.add_option("-H", "--host", dest="targetHost", type="string", metavar="<host>",
                     help="The hostname on which fault should be set and triggered; pass ALL to target all hosts")

    addTo = OptionGroup(parser, 'Master Connection Options')
    parser.add_option_group(addTo)
    addMasterDirectoryOptionForSingleClusterProgram(addTo)
    addTo.add_option("-p", "--master_port", dest="masterPort", type="int", default=None,
                     metavar="<masterPort>",
                     help="DEPRECATED, use MASTER_DATA_DIRECTORY environment variable or -d option. "
                          "The port number of the master database on localhost, "
                          "used to fetch the segment configuration.")

    addTo = OptionGroup(parser, 'Client Polling Options: ')
    parser.add_option_group(addTo)
    addTo.add_option('-m', '--mode', dest="syncMode", type='string', default="async",
                     metavar="<syncMode>",
                     help="Synchronization mode : sync (client waits for fault to occur)"
                          " or async (client only sets fault request on server)")

    # these options are used to build the message for the segments
    addTo = OptionGroup(parser, 'Fault Options: ')
    parser.add_option_group(addTo)
    # NB: This list needs to be kept in sync with:
    #  - FaultInjectorTypeEnumToString
    #  - FaultInjectorType_e
    # NOTE(review): "data_curruption" and "filerep_consumer_verificaton" look
    # misspelled but likely mirror the server-side token names -- confirm
    # against FaultInjectorTypeEnumToString before changing them.
    addTo.add_option('-y', '--type', dest="type", type='string', metavar="<type>",
                     help="fault type: sleep (insert sleep), fault (report fault to postmaster and fts prober), "
                          "fatal (inject FATAL error), panic (inject PANIC error), error (inject ERROR), "
                          "infinite_loop, data_curruption (corrupt data in memory and persistent media), "
                          "suspend (suspend execution), resume (resume execution that was suspended), "
                          "skip (inject skip i.e. skip checkpoint), "
                          "memory_full (all memory is consumed when injected), "
                          "reset (remove fault injection), status (report fault injection status), "
                          "segv (inject a SEGV), "
                          "interrupt (inject an Interrupt), "
                          "finish_pending (set QueryFinishPending to true), "
                          "checkpoint_and_panic (inject a panic following checkpoint) ")
    # BUG FIX: default was the string "10"; optparse does not coerce defaults,
    # so sleepTimeSeconds defaulted to a str, not an int. Also added the
    # missing space between "%default." and "Min Max Range".
    addTo.add_option("-z", "--sleep_time_s", dest="sleepTimeSeconds", type="int", default=10,
                     metavar="<sleepTime>",
                     help="For 'sleep' faults, the amount of time for the sleep. Defaults to %default. "
                          "Min Max Range is [0, 7200 sec] ")
    # BUG FIX (at the end of this list): "...backend initialization)" and
    # "all (..." were concatenated with no separator; added ", ".
    addTo.add_option('-f', '--fault_name', dest="faultName", type='string', metavar="<name>",
                     help="fault name: "
                          "postmaster (inject fault when new connection is accepted in postmaster), "
                          "pg_control (inject fault when global/pg_control file is written), "
                          "pg_xlog (inject fault when files in pg_xlog directory are written), "
                          "start_prepare (inject fault during start prepare transaction), "
                          "filerep_consumer (inject fault before data are processed, i.e. if mirror "
                          "then before file operation is issued to file system, if primary "
                          "then before mirror file operation is acknowledged to backend processes), "
                          "filerep_consumer_verificaton (inject fault before ack verification data are processed on primary), "
                          "filerep_change_tracking_compacting (inject fault when compacting change tracking log files), "
                          "filerep_sender (inject fault before data are sent to network), "
                          "filerep_receiver (inject fault after data are received from network), "
                          "filerep_flush (inject fault before fsync is issued to file system), "
                          "filerep_resync (inject fault while in resync when first relation is ready to be resynchronized), "
                          "filerep_resync_in_progress (inject fault while resync is in progress), "
                          "filerep_resync_worker (inject fault after write to mirror), "
                          "filerep_resync_worker_read (inject fault before read required for resync), "
                          "filerep_transition_to_resync (inject fault during transition to InResync before mirror re-create), "
                          "filerep_transition_to_resync_mark_recreate (inject fault during transition to InResync before marking re-created), "
                          "filerep_transition_to_resync_mark_completed (inject fault during transition to InResync before marking completed), "
                          "filerep_transition_to_sync_begin (inject fault before transition to InSync begin), "
                          "filerep_transition_to_sync (inject fault during transition to InSync), "
                          "filerep_transition_to_sync_before_checkpoint (inject fault during transition to InSync before checkpoint is created), "
                          "filerep_transition_to_sync_mark_completed (inject fault during transition to InSync before marking completed), "
                          "filerep_transition_to_change_tracking (inject fault during transition to InChangeTracking), "
                          "fileRep_is_operation_completed (inject fault in FileRep Is Operation completed function just for ResyncWorker Threads), "
                          "filerep_immediate_shutdown_request (inject fault just before sending the shutdown SIGQUIT to child processes), "
                          "checkpoint (inject fault before checkpoint is taken), "
                          "change_tracking_compacting_report (report if compacting is in progress), "
                          "change_tracking_disable (inject fault before fsync to Change Tracking log files), "
                          "transaction_abort_after_distributed_prepared (abort prepared transaction), "
                          "transaction_commit_pass1_from_create_pending_to_created, "
                          "transaction_commit_pass1_from_drop_in_memory_to_drop_pending, "
                          "transaction_commit_pass1_from_aborting_create_needed_to_aborting_create, "
                          "transaction_abort_pass1_from_create_pending_to_aborting_create, "
                          "transaction_abort_pass1_from_aborting_create_needed_to_aborting_create, "
                          "transaction_commit_pass2_from_drop_in_memory_to_drop_pending, "
                          "transaction_commit_pass2_from_aborting_create_needed_to_aborting_create, "
                          "transaction_abort_pass2_from_create_pending_to_aborting_create, "
                          "transaction_abort_pass2_from_aborting_create_needed_to_aborting_create, "
                          "finish_prepared_transaction_commit_pass1_from_create_pending_to_created, "
                          "finish_prepared_transaction_commit_pass2_from_create_pending_to_created, "
                          "finish_prepared_transaction_abort_pass1_from_create_pending_to_aborting_create, "
                          "finish_prepared_transaction_abort_pass2_from_create_pending_to_aborting_create, "
                          "finish_prepared_transaction_commit_pass1_from_drop_in_memory_to_drop_pending, "
                          "finish_prepared_transaction_commit_pass2_from_drop_in_memory_to_drop_pending, "
                          "finish_prepared_transaction_commit_pass1_aborting_create_needed, "
                          "finish_prepared_transaction_commit_pass2_aborting_create_needed, "
                          "finish_prepared_transaction_abort_pass1_aborting_create_needed, "
                          "finish_prepared_transaction_abort_pass2_aborting_create_needed, "
                          "twophase_transaction_commit_prepared (inject fault before transaction commit is inserted in xlog), "
                          "twophase_transaction_abort_prepared (inject fault before transaction abort is inserted in xlog), "
                          "dtm_broadcast_prepare (inject fault after prepare broadcast), "
                          "dtm_broadcast_commit_prepared (inject fault after commit broadcast), "
                          "dtm_broadcast_abort_prepared (inject fault after abort broadcast), "
                          "dtm_xlog_distributed_commit (inject fault after distributed commit was inserted in xlog), "
                          "fault_before_pending_delete_relation_entry (inject fault before putting pending delete relation entry, "
                          "fault_before_pending_delete_database_entry (inject fault before putting pending delete database entry, "
                          "fault_before_pending_delete_tablespace_entry (inject fault before putting pending delete tablespace entry, "
                          "fault_before_pending_delete_filespace_entry (inject fault before putting pending delete filespace entry, "
                          "dtm_init (inject fault before initializing dtm), "
                          "end_prepare_two_phase_sleep (inject sleep after two phase file creation), "
                          "segment_transition_request (inject fault after segment receives state transition request), "
                          "segment_probe_response (inject fault after segment is probed by FTS), "
                          "local_tm_record_transaction_commit (inject fault for local transactions after transaction commit is recorded and flushed in xlog ), "
                          "malloc_failure (inject fault to simulate memory allocation failure), "
                          "transaction_abort_failure (inject fault to simulate transaction abort failure), "
                          "workfile_creation_failure (inject fault to simulate workfile creation failure), "
                          "update_committed_eof_in_persistent_table (inject fault before committed EOF is updated in gp_persistent_relation_node for Append Only segment files), "
                          "exec_simple_query_end_command (inject fault before EndCommand in exec_simple_query), "
                          "multi_exec_hash_large_vmem (allocate large vmem using palloc inside MultiExecHash to attempt to exceed vmem limit), "
                          "execsort_before_sorting (inject fault in nodeSort after receiving all tuples and before sorting), "
                          "execsort_mksort_mergeruns (inject fault in MKSort during the mergeruns phase), "
                          "execshare_input_next (inject fault after shared input scan retrieved a tuple), "
                          "base_backup_post_create_checkpoint (inject fault after requesting checkpoint as part of basebackup), "
                          "compaction_before_segmentfile_drop (inject fault after compaction, but before the drop of the segment file), "
                          "compaction_before_cleanup_phase (inject fault after compaction and drop, but before the cleanup phase), "
                          "appendonly_insert (inject fault before an append-only insert), "
                          "appendonly_delete (inject fault before an append-only delete), "
                          "appendonly_update (inject fault before an append-only update), "
                          "reindex_db (inject fault while reindex db is in progress), "
                          "reindex_relation (inject fault while reindex relation is in progress), "
                          "fault_during_exec_dynamic_table_scan (inject fault during scanning of a partition), "
                          "fault_in_background_writer_main (inject fault in BackgroundWriterMain), "
                          "cdb_copy_start_after_dispatch (inject fault in cdbCopyStart after dispatch), "
                          "repair_frag_end (inject fault at the end of repair_frag), "
                          "vacuum_full_before_truncate (inject fault before truncate in vacuum full), "
                          "vacuum_full_after_truncate (inject fault after truncate in vacuum full), "
                          "vacuum_relation_end_of_first_round (inject fault at the end of first round of vacuumRelation loop), "
                          "rebuild_pt_db (inject fault while rebuilding persistent tables (for each db)), "
                          "procarray_add (inject fault while adding PGPROC to procarray), "
                          "exec_hashjoin_new_batch (inject fault before switching to a new batch in Hash Join), "
                          "fts_wait_for_shutdown (pause FTS before committing changes), "
                          "runaway_cleanup (inject fault before starting the cleanup for a runaway query), "
                          "opt_task_allocate_string_buffer (inject fault while allocating string buffer), "
                          "opt_relcache_translator_catalog_access (inject fault while translating relcache entries), "
                          "send_qe_details_init_backend (inject fault before sending QE details during backend initialization), "
                          "all (affects all faults injected, used for 'status' and 'reset'), ")
    addTo.add_option("-c", "--ddl_statement", dest="ddlStatement", type="string",
                     metavar="ddlStatement",
                     help="The DDL statement on which fault should be set and triggered "
                          "(i.e. create_database, drop_database, create_table, drop_table)")
    addTo.add_option("-D", "--database_name", dest="databaseName", type="string",
                     metavar="databaseName",
                     help="The database name on which fault should be set and triggered.")
    addTo.add_option("-t", "--table_name", dest="tableName", type="string", metavar="tableName",
                     help="The table name on which fault should be set and triggered.")
    addTo.add_option("-o", "--occurrence", dest="numOccurrences", type="int", default=1,
                     metavar="numOccurrences",
                     help="The number of occurrence of the DDL statement with the database name "
                          "and the table name before fault is triggered. Defaults to %default. Max is 1000. "
                          "Fault is triggered always if set to '0'. ")

    parser.set_defaults()
    return parser
def createParser():
    """
    Creates the command line options parser object for gpverify.

    Options are grouped into: request type (--full/--file/--directorytree),
    request options, reporting options, and misc. Interface unchanged.
    """
    description = "Initiates primary/mirror verification."
    help = []

    parser = OptParser(option_class=OptChecker,
                       description=' '.join(description.split()),
                       version='%prog version $Revision: #1 $')
    parser.setHelp(help)
    addStandardLoggingAndHelpOptions(parser, True)

    addTo = OptionGroup(parser, "Request Type")
    parser.add_option_group(addTo)
    addTo.add_option('--full', dest='full', action='store_true',
                     help='Perform a full verification pass. Use --token option to '
                          'give the verification pass an identifier.')
    addTo.add_option('--file', dest='verify_file', metavar='<file>',
                     help='Based on file type, perform either a physical or logical verification of <file>. '
                          'Use --token option to give the verification request an identifier.')
    addTo.add_option('--directorytree', dest='verify_dir', metavar='<verify_dir>',
                     help='Perform a full verification pass on the specified directory. '
                          'Use --token option to assign the verification pass an identifier.')

    addTo = OptionGroup(parser, "Request Options")
    parser.add_option_group(addTo)
    addTo.add_option('--token', dest='token', metavar='<token>',
                     help='A token to use for the request. '
                          'This identifier will be used in the logs and can be used to identify '
                          'a verification pass to the --abort, --suspend, --resume and --results '
                          'options.')
    addTo.add_option('-c', '--content', dest='content', metavar='<content_id>',
                     help='Send verification request only to the primary segment with the given <content_id>.')
    addTo.add_option('--abort', dest='abort', action='store_true',
                     help='Abort a verification request that is in progress. '
                          'Can use --token option to abort a specific verification request.')
    # BUG FIX: the two help fragments concatenated without a space
    # ("...in progress.Can use..."); added the missing separator.
    addTo.add_option('--suspend', dest='suspend', action='store_true',
                     help='Suspend a verification request that is in progress. '
                          'Can use --token option to suspend a specific verification request.')
    addTo.add_option('--resume', dest='resume', action='store_true',
                     help='Resume a suspended verification request. Can use the '
                          '--token option to resume a specific verification request.')
    addTo.add_option('--fileignore', dest='ignore_file', metavar='<ignore_file>',
                     help='Ignore any filenames matching <ignore_file>. Multiple '
                          'files can be specified using a comma separated list.')
    addTo.add_option('--dirignore', dest='ignore_dir', metavar='<ignore_dir>',
                     help='Ignore any directories matching <ignore_dir>. Multiple '
                          'directories can be specified using a comma separated list.')

    addTo = OptionGroup(parser, "Reporting Options")
    parser.add_option_group(addTo)
    # BUG FIX: help fragments concatenated as "Can usethe --token"; added the
    # missing space.
    addTo.add_option('--results', dest='results', action='store_true',
                     help='Display verification results. Can use '
                          'the --token option to view results of a specific verification request.')
    addTo.add_option('--resultslevel', dest='results_level', action='store',
                     metavar='<detail_level>', type=int,
                     help='Level of detail to show for results. Valid levels are from 1 to 10.')
    addTo.add_option('--clean', dest='clean', action='store_true',
                     help='Clean up verification artifacts and the gp_verification_history table.')

    addTo = OptionGroup(parser, "Misc. Options")
    parser.add_option_group(addTo)
    addTo.add_option('-B', '--parallel', action='store', default=64, type=int,
                     help='Number of worker threads used to send verification requests.')

    parser.set_defaults()
    return parser
def create_parser():
    """
    Build the option parser for the persistent-tables backup/restore tool.

    Returns an OptParser exposing the master-connection options plus the
    backup/restore option group. Option names, dests and types are unchanged.
    """
    parser = OptParser(option_class=OptChecker,
                       version='%prog version $Revision: #1 $',
                       # BUG FIX: description read "backp" (typo) before.
                       description='Persistent tables backup and restore')
    addStandardLoggingAndHelpOptions(parser, includeNonInteractiveOption=True)

    addTo = OptionGroup(parser, 'Connection opts')
    parser.add_option_group(addTo)
    addMasterDirectoryOptionForSingleClusterProgram(addTo)

    addTo = OptionGroup(parser, 'Persistent tables backup and restore options')
    # BUG FIX: this group was never passed to parser.add_option_group(), so
    # (with optparse semantics) none of the options below were recognized on
    # the command line or shown in --help.
    parser.add_option_group(addTo)
    addTo.add_option('--backup', metavar="<pickled dbid info>", type="string",
                     help="A list of dbid info where backups need to be done in pickled format")
    addTo.add_option('--restore', metavar="<pickled dbid info>", type="string",
                     help="A list of dbid info where restore needs to be done in pickled format")
    addTo.add_option('--validate-backups', metavar="<pickled dbid info>", type="string",
                     help="A list of dbid info where validation needs to be done in pickled format")
    addTo.add_option('--validate-backup-dir', metavar="<pickled dbid info>", type="string",
                     help="A list of dbid info where validation needs to be done in pickled format")
    addTo.add_option('--timestamp', metavar="<timestamp of backup>", type="string",
                     help="A timestamp for the backup that needs to be validated")
    addTo.add_option('--batch-size', metavar="<batch size for the worker pool>", type="int",
                     help="Batch size for parallelism in worker pool")
    addTo.add_option('--backup-dir', metavar="<backup directory>", type="string",
                     help="Backup directory for persistent tables and transaction logs")
    addTo.add_option('--perdbpt', metavar="<per database pt filename>", type="string",
                     help="Filenames for per database persistent files")
    addTo.add_option('--globalpt', metavar="<global pt filenames>", type="string",
                     help="Filenames for global persistent files")
    addTo.add_option('--validate-source-file-only', action='store_true', default=False,
                     help="validate that required source files existed for backup and restore")

    parser.setHelp(["""
    This tool is used to backup persistent table files.
    """])
    return parser
def createParser():
    """
    Creates the command line options parser object for gpverify.

    Options are grouped into: request type (--full/--file/--directorytree),
    request options, reporting options, and misc. Interface unchanged.
    """
    description = "Initiates primary/mirror verification."
    help = []

    parser = OptParser(option_class=OptChecker,
                       description=' '.join(description.split()),
                       version='%prog version $Revision: #1 $')
    parser.setHelp(help)
    addStandardLoggingAndHelpOptions(parser, True)

    addTo = OptionGroup(parser, "Request Type")
    parser.add_option_group(addTo)
    addTo.add_option('--full', dest='full', action='store_true',
                     help='Perform a full verification pass. Use --token option to '
                          'give the verification pass an identifier.')
    addTo.add_option('--file', dest='verify_file', metavar='<file>',
                     help='Based on file type, perform either a physical or logical verification of <file>. '
                          'Use --token option to give the verification request an identifier.')
    addTo.add_option('--directorytree', dest='verify_dir', metavar='<verify_dir>',
                     help='Perform a full verification pass on the specified directory. '
                          'Use --token option to assign the verification pass an identifier.')

    addTo = OptionGroup(parser, "Request Options")
    parser.add_option_group(addTo)
    addTo.add_option('--token', dest='token', metavar='<token>',
                     help='A token to use for the request. '
                          'This identifier will be used in the logs and can be used to identify '
                          'a verification pass to the --abort, --suspend, --resume and --results '
                          'options.')
    addTo.add_option('-c', '--content', dest='content', metavar='<content_id>',
                     help='Send verification request only to the primary segment with the given <content_id>.')
    addTo.add_option('--abort', dest='abort', action='store_true',
                     help='Abort a verification request that is in progress. '
                          'Can use --token option to abort a specific verification request.')
    # BUG FIX: the two help fragments concatenated without a space
    # ("...in progress.Can use..."); added the missing separator.
    addTo.add_option('--suspend', dest='suspend', action='store_true',
                     help='Suspend a verification request that is in progress. '
                          'Can use --token option to suspend a specific verification request.')
    addTo.add_option('--resume', dest='resume', action='store_true',
                     help='Resume a suspended verification request. Can use the '
                          '--token option to resume a specific verification request.')
    addTo.add_option('--fileignore', dest='ignore_file', metavar='<ignore_file>',
                     help='Ignore any filenames matching <ignore_file>. Multiple '
                          'files can be specified using a comma separated list.')
    addTo.add_option('--dirignore', dest='ignore_dir', metavar='<ignore_dir>',
                     help='Ignore any directories matching <ignore_dir>. Multiple '
                          'directories can be specified using a comma separated list.')

    addTo = OptionGroup(parser, "Reporting Options")
    parser.add_option_group(addTo)
    # BUG FIX: help fragments concatenated as "Can usethe --token"; added the
    # missing space.
    addTo.add_option('--results', dest='results', action='store_true',
                     help='Display verification results. Can use '
                          'the --token option to view results of a specific verification request.')
    addTo.add_option('--resultslevel', dest='results_level', action='store',
                     metavar='<detail_level>', type=int,
                     help='Level of detail to show for results. Valid levels are from 1 to 10.')
    addTo.add_option('--clean', dest='clean', action='store_true',
                     help='Clean up verification artifacts and the gp_verification_history table.')

    addTo = OptionGroup(parser, "Misc. Options")
    parser.add_option_group(addTo)
    addTo.add_option('-B', '--parallel', action='store', default=64, type=int,
                     help='Number of worker threads used to send verification requests.')

    parser.set_defaults()
    return parser
def createParser():
    """
    Creates the command line options parser object for the add-mirrors utility.

    Returns the configured OptParser; no options are parsed here.
    """
    description = ("Add mirrors to a system")
    help = [""]

    parser = OptParser(option_class=OptChecker,
                       description=' '.join(description.split()),
                       version='%prog version $Revision$')
    parser.setHelp(help)
    addStandardLoggingAndHelpOptions(parser, True)

    addTo = OptionGroup(parser, "Connection Options")
    parser.add_option_group(addTo)
    addCoordinatorDirectoryOptionForSingleClusterProgram(addTo)

    addTo = OptionGroup(parser, "Mirroring Options")
    parser.add_option_group(addTo)
    addTo.add_option("-i", None, type="string",
                     dest="mirrorConfigFile",
                     metavar="<configFile>",
                     help="Mirroring configuration file")
    addTo.add_option("-o", None,
                     dest="outputSampleConfigFile",
                     metavar="<configFile>", type="string",
                     help="Sample configuration file name to output; "
                          "this file can be passed to a subsequent call using -i option")
    addTo.add_option("-m", None, type="string",
                     dest="mirrorDataDirConfigFile",
                     metavar="<dataDirConfigFile>",
                     help="Mirroring data directory configuration file")
    addTo.add_option('-s', default=False, action='store_true',
                     dest="spreadMirroring",
                     help="use spread mirroring for placing mirrors on hosts")
    addTo.add_option("-p", None, type="int", default=1000,
                     dest="mirrorOffset",
                     metavar="<mirrorOffset>",
                     help="Mirror port offset. The mirror port offset will be used multiple times "
                          "to derive three sets of ports [default: %default]")
    addTo.add_option("-B", "--batch-size", type="int",
                     default=gp.DEFAULT_COORDINATOR_NUM_WORKERS,
                     dest="batch_size",
                     metavar="<batch_size>",
                     help='Max number of hosts to operate on in parallel. Valid values are 1-%d'
                          % gp.MAX_COORDINATOR_NUM_WORKERS)
    addTo.add_option("-b", "--segment-batch-size", type="int",
                     default=gp.DEFAULT_SEGHOST_NUM_WORKERS,
                     dest="segment_batch_size",
                     metavar="<segment_batch_size>",
                     help='Max number of segments per host to operate on in parallel. Valid values are: 1-%d'
                          % gp.MAX_SEGHOST_NUM_WORKERS)
    # NOTE(fix): the first positional argument used to be '' (empty string);
    # optparse rejects option strings shorter than two characters with
    # OptionError, so the long-only form is registered on its own.
    addTo.add_option('--hba-hostnames', action='store_true', dest='hba_hostnames',
                     help='use hostnames instead of CIDR in pg_hba.conf')

    parser.set_defaults()
    return parser
def createParser():
    """
    Creates the command line options parser object for the fault-injection
    utility (internal-use only). Returns the configured OptParser; no
    options are parsed here.
    """
    description = ("""
    This utility is NOT SUPPORTED and is for internal-use only.

    Used to inject faults into the file replication code.
    """)
    help = ["""
    Return codes:
      0 - Fault injected
      non-zero: Error or invalid options
    """]

    parser = OptParser(option_class=OptChecker,
                       description=' '.join(description.split()),
                       version='%prog version $Revision$')
    parser.setHelp(help)
    addStandardLoggingAndHelpOptions(parser, False)

    # these options are used to determine the target segments
    addTo = OptionGroup(parser, 'Target Segment Options: ')
    parser.add_option_group(addTo)
    addTo.add_option('-r', '--role', dest="targetRole", type='string', metavar="<role>",
                     help="Role of segments to target: master, standby, primary")
    addTo.add_option("-s", "--registration_order", dest="targetRegistrationOrder", type="string",
                     metavar="<registration_order>",
                     help="The segment registration_order on which fault should be set and triggered.")
    addTo.add_option("-H", "--host", dest="targetHost", type="string",
                     metavar="<host>",
                     help="The hostname on which fault should be set and triggered; pass ALL to target all hosts")

    addTo = OptionGroup(parser, 'Master Connection Options')
    parser.add_option_group(addTo)
    addMasterDirectoryOptionForSingleClusterProgram(addTo)
    addTo.add_option("-p", "--master_port", dest="masterPort", type="int", default=None,
                     metavar="<masterPort>",
                     help="DEPRECATED, use MASTER_DATA_DIRECTORY environment variable or -d option. " \
                          "The port number of the master database on localhost, " \
                          "used to fetch the segment configuration.")

    addTo = OptionGroup(parser, 'Client Polling Options: ')
    parser.add_option_group(addTo)
    addTo.add_option('-m', '--mode', dest="syncMode", type='string', default="async",
                     metavar="<syncMode>",
                     help="Synchronization mode : sync (client waits for fault to occur)" \
                          " or async (client only sets fault request on server)")

    # these options are used to build the message for the segments
    addTo = OptionGroup(parser, 'Fault Options: ')
    parser.add_option_group(addTo)
    addTo.add_option('-y', '--type', dest="type", type='string', metavar="<type>",
                     help="fault type: sleep (insert sleep), fault (report fault to postmaster and fts prober), " \
                          "fatal (inject FATAL error), panic (inject PANIC error), error (inject ERROR), " \
                          "infinite_loop, data_curruption (corrupt data in memory and persistent media), " \
                          "suspend (suspend execution), resume (resume execution that was suspended), " \
                          "skip (inject skip i.e. skip checkpoint), " \
                          "memory_full (all memory is consumed when injected), " \
                          "reset (remove fault injection), status (report fault injection status), " \
                          "panic_suppress (inject suppressed PANIC in critical section), " \
                          "segv (inject a SEGV), " \
                          "interrupt (inject an Interrupt) ")
    # NOTE(fix): default was the string "10"; optparse does not type-convert
    # defaults, so the option value would have been a str when -z was omitted.
    addTo.add_option("-z", "--sleep_time_s", dest="sleepTimeSeconds", type="int", default=10,
                     metavar="<sleepTime>",
                     help="For 'sleep' faults, the amount of time for the sleep. Defaults to %default. " \
                          "Min Max Range is [0, 7200 sec] ")
    # NOTE(fix): three entries below previously lacked a ", " separator
    # between adjacent string fragments, fusing names in the rendered help
    # (workfile_cleanup_set / analyze_subxact_error / runaway_cleanup).
    addTo.add_option('-f', '--fault_name', dest="faultName", type='string', metavar="<name>",
                     help="fault name: " \
                          "postmaster (inject fault when new connection is accepted in postmaster), " \
                          "pg_control (inject fault when global/pg_control file is written), " \
                          "pg_xlog (inject fault when files in pg_xlog directory are written), " \
                          "start_prepare (inject fault during start prepare transaction), " \
                          "filerep_consumer (inject fault before data are processed, i.e. if mirror " \
                          "then before file operation is issued to file system, if primary " \
                          "then before mirror file operation is acknowledged to backend processes), " \
                          "filerep_consumer_verificaton (inject fault before ack verification data are processed on primary), " \
                          "filerep_change_tracking_compacting (inject fault when compacting change tracking log files), " \
                          "filerep_sender (inject fault before data are sent to network), " \
                          "filerep_receiver (inject fault after data are received from network), " \
                          "filerep_flush (inject fault before fsync is issued to file system), " \
                          "filerep_resync (inject fault while in resync when first relation is ready to be resynchronized), " \
                          "filerep_resync_in_progress (inject fault while resync is in progress), " \
                          "filerep_resync_worker (inject fault after write to mirror), " \
                          "filerep_resync_worker_read (inject fault before read required for resync), " \
                          "filerep_transition_to_resync (inject fault during transition to InResync before mirror re-create), " \
                          "filerep_transition_to_resync_mark_recreate (inject fault during transition to InResync before marking re-created), " \
                          "filerep_transition_to_resync_mark_completed (inject fault during transition to InResync before marking completed), " \
                          "filerep_transition_to_sync_begin (inject fault before transition to InSync begin), " \
                          "filerep_transition_to_sync (inject fault during transition to InSync), " \
                          "filerep_transition_to_sync_before_checkpoint (inject fault during transition to InSync before checkpoint is created), " \
                          "filerep_transition_to_sync_mark_completed (inject fault during transition to InSync before marking completed), " \
                          "filerep_transition_to_change_tracking (inject fault during transition to InChangeTracking), " \
                          "checkpoint (inject fault before checkpoint is taken), " \
                          "change_tracking_compacting_report (report if compacting is in progress), " \
                          "change_tracking_disable (inject fault before fsync to Change Tracking log files), " \
                          "transaction_abort_after_distributed_prepared (abort prepared transaction), " \
                          "transaction_commit_pass1_from_create_pending_to_created, " \
                          "transaction_commit_pass1_from_drop_in_memory_to_drop_pending, " \
                          "transaction_commit_pass1_from_aborting_create_needed_to_aborting_create, " \
                          "transaction_abort_pass1_from_create_pending_to_aborting_create, " \
                          "transaction_abort_pass1_from_aborting_create_needed_to_aborting_create, " \
                          "transaction_commit_pass2_from_drop_in_memory_to_drop_pending, " \
                          "transaction_commit_pass2_from_aborting_create_needed_to_aborting_create, " \
                          "transaction_abort_pass2_from_create_pending_to_aborting_create, " \
                          "transaction_abort_pass2_from_aborting_create_needed_to_aborting_create, " \
                          "finish_prepared_transaction_commit_pass1_from_create_pending_to_created, " \
                          "finish_prepared_transaction_commit_pass2_from_create_pending_to_created, " \
                          "finish_prepared_transaction_abort_pass1_from_create_pending_to_aborting_create, " \
                          "finish_prepared_transaction_abort_pass2_from_create_pending_to_aborting_create, " \
                          "finish_prepared_transaction_commit_pass1_from_drop_in_memory_to_drop_pending, " \
                          "finish_prepared_transaction_commit_pass2_from_drop_in_memory_to_drop_pending, " \
                          "finish_prepared_transaction_commit_pass1_aborting_create_needed, " \
                          "finish_prepared_transaction_commit_pass2_aborting_create_needed, " \
                          "finish_prepared_transaction_abort_pass1_aborting_create_needed, " \
                          "finish_prepared_transaction_abort_pass2_aborting_create_needed, " \
                          "twophase_transaction_commit_prepared (inject fault before transaction commit is inserted in xlog), " \
                          "twophase_transaction_abort_prepared (inject fault before transaction abort is inserted in xlog), " \
                          "dtm_broadcast_prepare (inject fault after prepare broadcast), " \
                          "dtm_broadcast_commit_prepared (inject fault after commit broadcast), " \
                          "dtm_broadcast_abort_prepared (inject fault after abort broadcast), " \
                          "dtm_xlog_distributed_commit (inject fault after distributed commit was inserted in xlog), " \
                          "fault_before_pending_delete_relation_entry (inject fault before putting pending delete relation entry, " \
                          "fault_before_pending_delete_database_entry (inject fault before putting pending delete database entry, " \
                          "fault_before_pending_delete_tablespace_entry (inject fault before putting pending delete tablespace entry, " \
                          "fault_before_pending_delete_filespace_entry (inject fault before putting pending delete filespace entry, " \
                          "dtm_init (inject fault before initializing dtm), " \
                          "end_prepare_two_phase_sleep (inject sleep after two phase file creation), " \
                          "segment_transition_request (inject fault after segment receives state transition request), " \
                          "segment_probe_response (inject fault after segment is probed by FTS), " \
                          "sync_persistent_table (inject fault to sync persistent table to disk), " \
                          "xlog_insert (inject fault to skip insert record into xlog), " \
                          "local_tm_record_transaction_commit (inject fault for local transactions after transaction commit is recorded and flushed in xlog ), " \
                          "malloc_failure (inject fault to simulate memory allocation failure), " \
                          "transaction_abort_failure (inject fault to simulate transaction abort failure), " \
                          "update_committed_eof_in_persistent_table (inject fault before committed EOF is updated in gp_persistent_relation_node for Append Only segment files), " \
                          "fault_during_exec_dynamic_table_scan (inject fault during scanning of a partition), " \
                          "internal_flush_error (inject an error during internal_flush), " \
                          "exec_simple_query_end_command (inject fault before EndCommand in exec_simple_query), " \
                          "multi_exec_hash_large_vmem (allocate large vmem using palloc inside MultiExecHash to attempt to exceed vmem limit), " \
                          "execsort_before_sorting (inject fault in nodeSort after receiving all tuples and before sorting), " \
                          "workfile_cleanup_set (inject fault in workfile manager cleanup set), " \
                          "execsort_mksort_mergeruns (inject fault in MKSort during the mergeruns phase), " \
                          "cdb_copy_start_after_dispatch (inject fault in cdbCopyStart after dispatch), " \
                          "fault_in_background_writer_main (inject fault in BackgroundWriterMain), " \
                          "exec_hashjoin_new_batch (inject fault before switching to a new batch in Hash Join), " \
                          "analyze_subxact_error (inject an error during analyze), " \
                          "opt_task_allocate_string_buffer (inject fault while allocating string buffer), " \
                          "runaway_cleanup (inject fault before starting the cleanup for a runaway query), " \
                          "all (affects all faults injected, used for 'status' and 'reset'), ")
    addTo.add_option("-c", "--ddl_statement", dest="ddlStatement", type="string",
                     metavar="ddlStatement",
                     help="The DDL statement on which fault should be set and triggered " \
                          "(i.e. create_database, drop_database, create_table, drop_table)")
    addTo.add_option("-D", "--database_name", dest="databaseName", type="string",
                     metavar="databaseName",
                     help="The database name on which fault should be set and triggered.")
    addTo.add_option("-t", "--table_name", dest="tableName", type="string",
                     metavar="tableName",
                     help="The table name on which fault should be set and triggered.")
    addTo.add_option("-o", "--occurrence", dest="numOccurrences", type="int", default=1,
                     metavar="numOccurrences",
                     help="The number of occurrence of the DDL statement with the database name " \
                          "and the table name before fault is triggered. Defaults to %default. Max is 1000. " \
                          "Fault is triggered always if set to '0'. ")

    parser.set_defaults()
    return parser
def createParser():
    """
    Creates the command line options parser object for the fault-injection
    utility (internal-use only). Returns the configured OptParser; no
    options are parsed here.
    """
    description = ("""
    This utility is NOT SUPPORTED and is for internal-use only.

    Used to inject faults into the file replication code.
    """)
    help = ["""
    Return codes:
      0 - Fault injected
      non-zero: Error or invalid options
    """]

    parser = OptParser(option_class=OptChecker,
                       description=' '.join(description.split()),
                       version='%prog version $Revision$')
    parser.setHelp(help)
    addStandardLoggingAndHelpOptions(parser, False)

    # these options are used to determine the target segments
    addTo = OptionGroup(parser, 'Target Segment Options: ')
    parser.add_option_group(addTo)
    addTo.add_option('-r', '--role', dest="targetRole", type='string', metavar="<role>",
                     help="Role of segments to target: primary, mirror, or primary_mirror")
    addTo.add_option("-s", "--seg_dbid", dest="targetDbId", type="string",
                     metavar="<dbid>",
                     help="The segment dbid on which fault should be set and triggered.")
    addTo.add_option("-H", "--host", dest="targetHost", type="string",
                     metavar="<host>",
                     help="The hostname on which fault should be set and triggered; pass ALL to target all hosts")

    addTo = OptionGroup(parser, 'Master Connection Options')
    parser.add_option_group(addTo)
    addMasterDirectoryOptionForSingleClusterProgram(addTo)
    addTo.add_option("-p", "--master_port", dest="masterPort", type="int", default=None,
                     metavar="<masterPort>",
                     help="DEPRECATED, use MASTER_DATA_DIRECTORY environment variable or -d option. " \
                          "The port number of the master database on localhost, " \
                          "used to fetch the segment configuration.")

    addTo = OptionGroup(parser, 'Client Polling Options: ')
    parser.add_option_group(addTo)
    addTo.add_option('-m', '--mode', dest="syncMode", type='string', default="async",
                     metavar="<syncMode>",
                     help="Synchronization mode : sync (client waits for fault to occur)" \
                          " or async (client only sets fault request on server)")

    # these options are used to build the message for the segments
    addTo = OptionGroup(parser, 'Fault Options: ')
    parser.add_option_group(addTo)
    # NB: This list needs to be kept in sync with:
    #  - FaultInjectorTypeEnumToString
    #  - FaultInjectorType_e
    addTo.add_option('-y', '--type', dest="type", type='string', metavar="<type>",
                     help="fault type: sleep (insert sleep), fault (report fault to postmaster and fts prober), " \
                          "fatal (inject FATAL error), panic (inject PANIC error), error (inject ERROR), " \
                          "infinite_loop, data_curruption (corrupt data in memory and persistent media), " \
                          "suspend (suspend execution), resume (resume execution that was suspended), " \
                          "skip (inject skip i.e. skip checkpoint), " \
                          "memory_full (all memory is consumed when injected), " \
                          "reset (remove fault injection), status (report fault injection status), " \
                          "segv (inject a SEGV), " \
                          "interrupt (inject an Interrupt), " \
                          "finish_pending (set QueryFinishPending to true), " \
                          "checkpoint_and_panic (inject a panic following checkpoint) ")
    # NOTE(fix): default was the string "10"; optparse does not type-convert
    # defaults, so the option value would have been a str when -z was omitted.
    addTo.add_option("-z", "--sleep_time_s", dest="sleepTimeSeconds", type="int", default=10,
                     metavar="<sleepTime>",
                     help="For 'sleep' faults, the amount of time for the sleep. Defaults to %default. " \
                          "Min Max Range is [0, 7200 sec] ")
    # NOTE(fix): the cursor_qe_reader_after_snapshot entry previously lacked a
    # ", " separator, fusing it with fsync_counter in the rendered help.
    addTo.add_option('-f', '--fault_name', dest="faultName", type='string', metavar="<name>",
                     help="fault name: " \
                          "postmaster (inject fault when new connection is accepted in postmaster), " \
                          "pg_control (inject fault when global/pg_control file is written), " \
                          "pg_xlog (inject fault when files in pg_xlog directory are written), " \
                          "start_prepare (inject fault during start prepare transaction), " \
                          "filerep_consumer (inject fault before data are processed, i.e. if mirror " \
                          "then before file operation is issued to file system, if primary " \
                          "then before mirror file operation is acknowledged to backend processes), " \
                          "filerep_consumer_verificaton (inject fault before ack verification data are processed on primary), " \
                          "filerep_change_tracking_compacting (inject fault when compacting change tracking log files), " \
                          "filerep_sender (inject fault before data are sent to network), " \
                          "filerep_receiver (inject fault after data are received from network), " \
                          "filerep_flush (inject fault before fsync is issued to file system), " \
                          "filerep_resync (inject fault while in resync when first relation is ready to be resynchronized), " \
                          "filerep_resync_in_progress (inject fault while resync is in progress), " \
                          "filerep_resync_worker (inject fault after write to mirror), " \
                          "filerep_resync_worker_read (inject fault before read required for resync), " \
                          "filerep_transition_to_resync (inject fault during transition to InResync before mirror re-create), " \
                          "filerep_transition_to_resync_mark_recreate (inject fault during transition to InResync before marking re-created), " \
                          "filerep_transition_to_resync_mark_completed (inject fault during transition to InResync before marking completed), " \
                          "filerep_transition_to_sync_begin (inject fault before transition to InSync begin), " \
                          "filerep_transition_to_sync (inject fault during transition to InSync), " \
                          "filerep_transition_to_sync_before_checkpoint (inject fault during transition to InSync before checkpoint is created), " \
                          "filerep_transition_to_sync_mark_completed (inject fault during transition to InSync before marking completed), " \
                          "filerep_transition_to_change_tracking (inject fault during transition to InChangeTracking), " \
                          "fileRep_is_operation_completed (inject fault in FileRep Is Operation completed function just for ResyncWorker Threads), "\
                          "filerep_immediate_shutdown_request (inject fault just before sending the shutdown SIGQUIT to child processes), "\
                          "checkpoint (inject fault before checkpoint is taken), " \
                          "change_tracking_compacting_report (report if compacting is in progress), " \
                          "change_tracking_disable (inject fault before fsync to Change Tracking log files), " \
                          "transaction_abort_after_distributed_prepared (abort prepared transaction), " \
                          "transaction_commit_pass1_from_create_pending_to_created, " \
                          "transaction_commit_pass1_from_drop_in_memory_to_drop_pending, " \
                          "transaction_commit_pass1_from_aborting_create_needed_to_aborting_create, " \
                          "transaction_abort_pass1_from_create_pending_to_aborting_create, " \
                          "transaction_abort_pass1_from_aborting_create_needed_to_aborting_create, " \
                          "transaction_commit_pass2_from_drop_in_memory_to_drop_pending, " \
                          "transaction_commit_pass2_from_aborting_create_needed_to_aborting_create, " \
                          "transaction_abort_pass2_from_create_pending_to_aborting_create, " \
                          "transaction_abort_pass2_from_aborting_create_needed_to_aborting_create, " \
                          "finish_prepared_transaction_commit_pass1_from_create_pending_to_created, " \
                          "finish_prepared_transaction_commit_pass2_from_create_pending_to_created, " \
                          "finish_prepared_transaction_abort_pass1_from_create_pending_to_aborting_create, " \
                          "finish_prepared_transaction_abort_pass2_from_create_pending_to_aborting_create, " \
                          "finish_prepared_transaction_commit_pass1_from_drop_in_memory_to_drop_pending, " \
                          "finish_prepared_transaction_commit_pass2_from_drop_in_memory_to_drop_pending, " \
                          "finish_prepared_transaction_commit_pass1_aborting_create_needed, " \
                          "finish_prepared_transaction_commit_pass2_aborting_create_needed, " \
                          "finish_prepared_transaction_abort_pass1_aborting_create_needed, " \
                          "finish_prepared_transaction_abort_pass2_aborting_create_needed, " \
                          "twophase_transaction_commit_prepared (inject fault before transaction commit is inserted in xlog), " \
                          "twophase_transaction_abort_prepared (inject fault before transaction abort is inserted in xlog), " \
                          "dtm_broadcast_prepare (inject fault after prepare broadcast), " \
                          "dtm_broadcast_commit_prepared (inject fault after commit broadcast), " \
                          "dtm_broadcast_abort_prepared (inject fault after abort broadcast), " \
                          "dtm_xlog_distributed_commit (inject fault after distributed commit was inserted in xlog), " \
                          "fault_before_pending_delete_relation_entry (inject fault before putting pending delete relation entry, " \
                          "fault_before_pending_delete_database_entry (inject fault before putting pending delete database entry, " \
                          "fault_before_pending_delete_tablespace_entry (inject fault before putting pending delete tablespace entry, " \
                          "fault_before_pending_delete_filespace_entry (inject fault before putting pending delete filespace entry, " \
                          "dtm_init (inject fault before initializing dtm), " \
                          "end_prepare_two_phase_sleep (inject sleep after two phase file creation), " \
                          "segment_transition_request (inject fault after segment receives state transition request), " \
                          "segment_probe_response (inject fault after segment is probed by FTS), " \
                          "local_tm_record_transaction_commit (inject fault for local transactions after transaction commit is recorded and flushed in xlog ), " \
                          "malloc_failure (inject fault to simulate memory allocation failure), " \
                          "transaction_abort_failure (inject fault to simulate transaction abort failure), " \
                          "workfile_creation_failure (inject fault to simulate workfile creation failure), " \
                          "workfile_write_failure (inject fault to simulate workfile write failure), " \
                          "workfile_hashjoin_failure (inject fault before we close workfile in ExecHashJoinNewBatch), "\
                          "update_committed_eof_in_persistent_table (inject fault before committed EOF is updated in gp_persistent_relation_node for Append Only segment files), " \
                          "exec_simple_query_end_command (inject fault before EndCommand in exec_simple_query), " \
                          "multi_exec_hash_large_vmem (allocate large vmem using palloc inside MultiExecHash to attempt to exceed vmem limit), " \
                          "execsort_before_sorting (inject fault in nodeSort after receiving all tuples and before sorting), " \
                          "execsort_mksort_mergeruns (inject fault in MKSort during the mergeruns phase), " \
                          "execshare_input_next (inject fault after shared input scan retrieved a tuple), " \
                          "base_backup_post_create_checkpoint (inject fault after requesting checkpoint as part of basebackup), " \
                          "compaction_before_segmentfile_drop (inject fault after compaction, but before the drop of the segment file), " \
                          "compaction_before_cleanup_phase (inject fault after compaction and drop, but before the cleanup phase), " \
                          "appendonly_insert (inject fault before an append-only insert), " \
                          "appendonly_delete (inject fault before an append-only delete), " \
                          "appendonly_update (inject fault before an append-only update), " \
                          "reindex_db (inject fault while reindex db is in progress), "\
                          "reindex_relation (inject fault while reindex relation is in progress), "\
                          "fault_during_exec_dynamic_table_scan (inject fault during scanning of a partition), " \
                          "fault_in_background_writer_main (inject fault in BackgroundWriterMain), " \
                          "cdb_copy_start_after_dispatch (inject fault in cdbCopyStart after dispatch), " \
                          "repair_frag_end (inject fault at the end of repair_frag), " \
                          "vacuum_full_before_truncate (inject fault before truncate in vacuum full), " \
                          "vacuum_full_after_truncate (inject fault after truncate in vacuum full), " \
                          "vacuum_relation_end_of_first_round (inject fault at the end of first round of vacuumRelation loop), " \
                          "rebuild_pt_db (inject fault while rebuilding persistent tables (for each db)), " \
                          "procarray_add (inject fault while adding PGPROC to procarray), " \
                          "exec_hashjoin_new_batch (inject fault before switching to a new batch in Hash Join), " \
                          "fts_wait_for_shutdown (pause FTS before committing changes), " \
                          "runaway_cleanup (inject fault before starting the cleanup for a runaway query), " \
                          "opt_task_allocate_string_buffer (inject fault while allocating string buffer), " \
                          "opt_relcache_translator_catalog_access (inject fault while translating relcache entries), " \
                          "interconnect_stop_ack_is_lost (inject fault in interconnect to skip sending the stop ack), " \
                          "send_qe_details_init_backend (inject fault before sending QE details during backend initialization), " \
                          "process_startup_packet (inject fault when processing startup packet during backend initialization), " \
                          "quickdie (inject fault when auxiliary processes quitting), " \
                          "after_one_slice_dispatched (inject fault after one slice was dispatched when dispatching plan), " \
                          "cursor_qe_reader_after_snapshot (inject fault after QE READER has populated snashot for cursor), " \
                          "fsync_counter (inject fault to count buffers fsync'ed by checkpoint process), " \
                          "bg_buffer_sync_default_logic (inject fault to 'skip' in order to flush all buffers in BgBufferSync()), " \
                          "finish_prepared_after_record_commit_prepared (inject fault in FinishPreparedTransaction() after recording the commit prepared record), " \
                          "all (affects all faults injected, used for 'status' and 'reset'), ")
    addTo.add_option("-c", "--ddl_statement", dest="ddlStatement", type="string",
                     metavar="ddlStatement",
                     help="The DDL statement on which fault should be set and triggered " \
                          "(i.e. create_database, drop_database, create_table, drop_table)")
    addTo.add_option("-D", "--database_name", dest="databaseName", type="string",
                     metavar="databaseName",
                     help="The database name on which fault should be set and triggered.")
    addTo.add_option("-t", "--table_name", dest="tableName", type="string",
                     metavar="tableName",
                     help="The table name on which fault should be set and triggered.")
    addTo.add_option("-o", "--occurrence", dest="numOccurrences", type="int", default=1,
                     metavar="numOccurrences",
                     help="The number of occurrence of the DDL statement with the database name " \
                          "and the table name before fault is triggered. Defaults to %default. Max is 1000. " \
                          "Fault is triggered always if set to '0'. ")

    parser.set_defaults()
    return parser