コード例 #1
0
ファイル: cpu_stresstest.py プロジェクト: pdeemea/CSO-Toolkit
def parseargs():
    parser = OptParser(option_class=OptChecker)
    parser.remove_option('-h')
    parser.add_option('-h', '-?', '--help',  action='store_true')
    parser.add_option('-f', '--fromthreads', type='int')
    parser.add_option('-t', '--tothreads',   type='int')
    parser.add_option('-d', '--database',    type='string')
    (options, args) = parser.parse_args()
    if options.help:
        print """
Script generates a big CPU workload on the cluster within configured diapason of
thread number to check the elasicity of cluster CPU resources
Usage:
python cpu_stresstest.py -f fromthreads -t tothreads -d database
    -t | --fromthreads - Lower bound of thread number to start
    -t | --tothreads   - Upper bound of thread number to start
    -d | --database    - Database to run the test on
"""
        sys.exit(0)
    if not options.fromthreads:
        raise_err('You must specify the lower bound of thread number with -f parameter')
    if not options.tothreads:
        raise_err('You must specify the upper bound of thread number with -t parameter')
    if not options.database:
        raise_err('You must specify the database name with -d parameter')
    return options
コード例 #2
0
    def createParser():
        """Build the option parser for the per-host segment status query.

        Internal-use only: gathers status from segments on a single host
        via a transition message.
        """
        parser = OptParser(option_class=OptChecker,
                           description="Gets status from segments on a single host "
                                       "using a transition message.  Internal-use only.",
                           version="%prog version $Revision: #1 $")
        parser.setHelp([])

        addStandardLoggingAndHelpOptions(parser, True)

        parser.add_option("-s", None,
                          type="string",
                          dest="statusQueryRequests",
                          metavar="<statusQueryRequests>",
                          help="Status Query Message")
        parser.add_option("-D", "--dblist",
                          type="string",
                          action="append",
                          dest="dirList",
                          metavar="<dirList>",
                          help="Directory List")

        parser.set_defaults()
        return parser
コード例 #3
0
ファイル: gppkg.py プロジェクト: xiaoxiaoHe-E/gpdb
    def create_parser():
        """Build and return the command-line parser for the Greenplum
        Package Manager (gppkg)."""
        parser = OptParser(option_class=OptChecker,
                           description="Greenplum Package Manager",
                           version='%prog version $Revision: #1 $')
        parser.setHelp([])

        addStandardLoggingAndHelpOptions(parser, includeNonInteractiveOption=True)

        # Free up -q (reused below for --query) and drop the unused -l.
        parser.remove_option('-q')
        parser.remove_option('-l')

        general = OptionGroup(parser, 'General Options')
        parser.add_option_group(general)

        addMasterDirectoryOptionForSingleClusterProgram(general)

        # TODO: AK: Eventually, these options may need to be flexible enough to accept mutiple packages
        # in one invocation. If so, the structure of this parser may need to change.
        general.add_option('-i', '--install', metavar='<package>',
                           help='install the given gppkg')
        general.add_option('-u', '--update', metavar='<package>',
                           help='update the given gppkg')
        general.add_option('-r', '--remove', metavar='<name>-<version>',
                           help='remove the given gppkg')
        general.add_option('-q', '--query', action='store_true',
                           help='query the gppkg database or a particular gppkg')
        general.add_option('-b', '--build', metavar='<directory>',
                           help='build a gppkg')
        general.add_option('-c', '--clean', action='store_true',
                           help='clean the cluster of the given gppkg')
        general.add_option('--migrate', action='store_true', default=False,
                           metavar='<from_gphome> <to_gphome>',
                           help='migrate gppkgs from a separate $GPHOME')
        general.add_option('-f', '--filename', metavar='<name>',
                           help='set specific package name')

        query = OptionGroup(parser, 'Query Options')
        parser.add_option_group(query)
        query.add_option('--info', action='store_true',
                         help='print information about the gppkg including name, version, description')
        query.add_option('--list', action='store_true',
                         help='print all the files present in the gppkg')
        query.add_option('--all', action='store_true',
                         help='print all the gppkgs installed by gppkg')

        return parser
コード例 #4
0
ファイル: gppkg.py プロジェクト: BALDELab/incubator-hawq
    def create_parser():
        """Assemble the gppkg option parser: general plus query option groups."""
        parser = OptParser(option_class=OptChecker,
                           description="Greenplum Package Manager",
                           version='%prog version $Revision: #1 $')
        parser.setHelp([])

        addStandardLoggingAndHelpOptions(parser, includeNonInteractiveOption=True)

        # Reclaim -q so it can be rebound to --query; -l is unused here.
        parser.remove_option('-q')
        parser.remove_option('-l')

        grp = OptionGroup(parser, 'General Options')
        parser.add_option_group(grp)

        addMasterDirectoryOptionForSingleClusterProgram(grp)

        # TODO: AK: Eventually, these options may need to be flexible enough to accept mutiple packages
        # in one invocation. If so, the structure of this parser may need to change.
        grp.add_option('-i', '--install', metavar='<package>', help='install the given gppkg')
        grp.add_option('-u', '--update', metavar='<package>', help='update the given gppkg')
        grp.add_option('-r', '--remove', metavar='<name>-<version>', help='remove the given gppkg')
        grp.add_option('-q', '--query', action='store_true',
                       help='query the gppkg database or a particular gppkg')
        grp.add_option('-b', '--build', metavar='<directory>', help='build a gppkg')
        grp.add_option('-c', '--clean', action='store_true',
                       help='clean the cluster of the given gppkg')
        grp.add_option('--migrate', action='store_true', default=False,
                       metavar='<from_gphome> <to_gphome>',
                       help='migrate gppkgs from a separate $GPHOME')

        grp = OptionGroup(parser, 'Query Options')
        parser.add_option_group(grp)
        grp.add_option('--info', action='store_true',
                       help='print information about the gppkg including name, version, description')
        grp.add_option('--list', action='store_true',
                       help='print all the files present in the gppkg')
        grp.add_option('--all', action='store_true',
                       help='print all the gppkgs installed by gppkg')

        return parser
コード例 #5
0
ファイル: clsAddMirrors.py プロジェクト: wanziforever/gpdb
    def createParser():
        """Build the option parser for adding mirrors to a system."""

        parser = OptParser(option_class=OptChecker,
                           description='Add mirrors to a system',
                           version='%prog version $Revision$')
        parser.setHelp([""])

        addStandardLoggingAndHelpOptions(parser, True)

        connection = OptionGroup(parser, "Connection Options")
        parser.add_option_group(connection)
        addMasterDirectoryOptionForSingleClusterProgram(connection)

        mirroring = OptionGroup(parser, "Mirroring Options")
        parser.add_option_group(mirroring)
        mirroring.add_option("-i", None, type="string", dest="mirrorConfigFile",
                             metavar="<configFile>",
                             help="Mirroring configuration file")
        mirroring.add_option("-o", None, type="string", dest="outputSampleConfigFile",
                             metavar="<configFile>",
                             help="Sample configuration file name to output; this file can be passed to a subsequent call using -i option")
        mirroring.add_option("-m", None, type="string", dest="mirrorDataDirConfigFile",
                             metavar="<dataDirConfigFile>",
                             help="Mirroring data directory configuration file")
        mirroring.add_option('-s', action='store_true', default=False,
                             dest="spreadMirroring",
                             help="use spread mirroring for placing mirrors on hosts")
        mirroring.add_option("-p", None, type="int", default=1000, dest="mirrorOffset",
                             metavar="<mirrorOffset>",
                             help="Mirror port offset.  The mirror port offset will be used multiple times to derive three sets of ports [default: %default]")
        mirroring.add_option("-B", None, type="int", default=16, dest="parallelDegree",
                             metavar="<parallelDegree>",
                             help="Max # of workers to use for building recovery segments.  [default: %default]")
        mirroring.add_option('', '--hba-hostnames', action='store_true',
                             dest='hba_hostnames',
                             help='use hostnames instead of CIDR in pg_hba.conf')

        parser.set_defaults()
        return parser
コード例 #6
0
    def createParser():
        """Create the option parser for the segment-directory cleanup tool."""

        help_text = ["""
          To be used internally only.
        """]

        parser = OptParser(option_class=OptChecker,
                           description='Clean segment directories.',
                           version='%prog version $Revision: #1 $')
        parser.setHelp(help_text)

        addStandardLoggingAndHelpOptions(parser, True)

        group = OptionGroup(parser, "Clean Segment Options")
        parser.add_option_group(group)
        group.add_option('-p', None,
                         type='string', default=None,
                         dest="pickledArguments",
                         metavar="<pickledArguments>",
                         help="The arguments passed from the original script")

        parser.set_defaults()
        return parser
コード例 #7
0
ファイル: kill.py プロジェクト: zsmj513/gpdb
    def create_parser():
        """Create the command line parser object for gpkill"""

        parser = OptParser(option_class=OptChecker,
                           description='Check or Terminate a Greenplum Database process.',
                           version='%prog version $Revision: #1 $')
        parser.setHelp([])

        addStandardLoggingAndHelpOptions(parser, True)

        # The standard -l and -a options do not apply to gpkill.
        parser.remove_option('-l')
        parser.remove_option('-a')

        checks = OptionGroup(parser, 'Check Options')
        parser.add_option_group(checks)
        checks.add_option('--check',
                          action='store_true',
                          metavar='pid',
                          help='Only returns status 0 if pid may be killed without gpkill, status 1 otherwise.')

        return parser
コード例 #8
0
    def createParser():
        """Construct the option parser used when adding mirrors to a system."""

        parser = OptParser(option_class=OptChecker,
                           description='Add mirrors to a system',
                           version='%prog version $Revision$')
        parser.setHelp([""])

        addStandardLoggingAndHelpOptions(parser, True)

        conn_group = OptionGroup(parser, "Connection Options")
        parser.add_option_group(conn_group)
        addMasterDirectoryOptionForSingleClusterProgram(conn_group)

        mirror_group = OptionGroup(parser, "Mirroring Options")
        parser.add_option_group(mirror_group)
        mirror_group.add_option("-i", None, type="string", dest="mirrorConfigFile",
                                metavar="<configFile>",
                                help="Mirroring configuration file")
        mirror_group.add_option("-o", None, type="string", dest="outputSampleConfigFile",
                                metavar="<configFile>",
                                help="Sample configuration file name to output; this file can be passed to a subsequent call using -i option")
        mirror_group.add_option("-m", None, type="string", dest="mirrorDataDirConfigFile",
                                metavar="<dataDirConfigFile>",
                                help="Mirroring data directory configuration file")
        mirror_group.add_option('-s', action='store_true', default=False,
                                dest="spreadMirroring",
                                help="use spread mirroring for placing mirrors on hosts")
        mirror_group.add_option("-p", None, type="int", default=1000, dest="mirrorOffset",
                                metavar="<mirrorOffset>",
                                help="Mirror port offset.  The mirror port offset will be used multiple times to derive three sets of ports [default: %default]")
        mirror_group.add_option("-B", None, type="int", default=16, dest="parallelDegree",
                                metavar="<parallelDegree>",
                                help="Max # of workers to use for building recovery segments.  [default: %default]")

        parser.set_defaults()
        return parser
コード例 #9
0
ファイル: gpcleansegmentdir.py プロジェクト: adam8157/gpdb
    def createParser():
        """Build the parser for the internal segment-directory cleanup utility."""

        usage_help = ["""
          To be used internally only.
        """]

        parser = OptParser(option_class=OptChecker,
                           description='Clean segment directories.',
                           version='%prog version $Revision: #1 $')
        parser.setHelp(usage_help)

        addStandardLoggingAndHelpOptions(parser, True)

        opts = OptionGroup(parser, "Clean Segment Options")
        parser.add_option_group(opts)
        opts.add_option('-p', None,
                        type='string', default=None,
                        dest="pickledArguments",
                        metavar="<pickledArguments>",
                        help="The arguments passed from the original script")

        parser.set_defaults()
        return parser
コード例 #10
0
def create_parser():
    """Build the option parser for the persistent-table backup/restore tool.

    Returns an OptParser with a connection-options group and a group of
    backup/restore/validation options.
    """
    parser = OptParser(option_class=OptChecker,
                       version='%prog version $Revision: #1 $',
                       description='Persistent tables backup and restore')

    addStandardLoggingAndHelpOptions(parser, includeNonInteractiveOption=True)

    addTo = OptionGroup(parser, 'Connection opts')
    parser.add_option_group(addTo)
    addMasterDirectoryOptionForSingleClusterProgram(addTo)

    addTo = OptionGroup(parser, 'Persistent tables backup and restore options')
    # BUG FIX: this group was never attached to the parser, so the options
    # below were silently omitted from the --help output (optparse still
    # parses group options, but only attached groups are documented).
    parser.add_option_group(addTo)
    addTo.add_option('--backup', metavar="<pickled dbid info>", type="string",
                     help="A list of dbid info where backups need to be done in pickled format")
    addTo.add_option('--restore', metavar="<pickled dbid info>", type="string",
                     help="A list of dbid info where restore needs to be done in pickled format")
    addTo.add_option('--validate-backups', metavar="<pickled dbid info>", type="string",
                     help="A list of dbid info where validation needs to be done in pickled format")
    addTo.add_option('--validate-backup-dir', metavar="<pickled dbid info>", type="string",
                     help="A list of dbid info where validation needs to be done in pickled format")
    addTo.add_option('--timestamp', metavar="<timestamp of backup>", type="string",
                     help="A timestamp for the backup that needs to be validated")
    addTo.add_option('--batch-size', metavar="<batch size for the worker pool>", type="int",
                     help="Batch size for parallelism in worker pool")
    addTo.add_option('--backup-dir', metavar="<backup directory>", type="string",
                     help="Backup directory for persistent tables and transaction logs")
    addTo.add_option('--perdbpt', metavar="<per database pt filename>", type="string",
                     help="Filenames for per database persistent files")
    addTo.add_option('--globalpt', metavar="<global pt filenames>", type="string",
                     help="Filenames for global persistent files")
    addTo.add_option('--validate-source-file-only', action='store_true', default=False,
                     help="validate that required source files existed for backup and restore")

    parser.setHelp([
    """
    This tool is used to backup persistent table files.
    """
    ])

    return parser
コード例 #11
0
    def createParser():
        """Return the parser for the single-host segment status query tool.

        Internal-use only; status is gathered via a transition message.
        """
        parser = OptParser(
            option_class=OptChecker,
            description="Gets status from segments on a single host "
                        "using a transition message.  Internal-use only.",
            version='%prog version $Revision: #1 $')
        parser.setHelp([])

        addStandardLoggingAndHelpOptions(parser, True)

        parser.add_option("-s", None, type="string",
                          dest="statusQueryRequests",
                          metavar="<statusQueryRequests>",
                          help="Status Query Message")
        parser.add_option("-D", "--dblist", type="string", action="append",
                          dest="dirList",
                          metavar="<dirList>",
                          help="Directory List")

        parser.set_defaults()
        return parser
コード例 #12
0
ファイル: seg_update_pg_hba.py プロジェクト: water32/gpdb
def parseargs():
    """Parse command-line options for updating a segment's pg_hba.conf.

    Returns the result of validate_args() on the parsed options.
    """
    parser = OptParser(option_class=OptChecker,
                       description=' '.join(description.split()))

    parser.setHelp([])

    parser.add_option('-d', '--data-dir',
                      dest='datadir',
                      metavar='<data dir of the segment>',
                      help='Data dir of the segment to update pg_hba.conf')
    parser.add_option('-e', '--entries',
                      dest='entries',
                      metavar='<entries to be added>',
                      help='entries to be added to pg_hba.conf')
    options, _unused_args = parser.parse_args()
    return validate_args(options)
コード例 #13
0
def parseargs():
    global allocation_rate, memory_split, cpu_split
    parser = OptParser(option_class=OptChecker)
    parser.remove_option('-h')    
    parser.add_option('-h', '-?', '--help', action='store_true')
    parser.add_option('-f', '--force',      action='store_true')
    parser.add_option('-s', '--memsplit',   type='float')
    parser.add_option('-a', '--allocrate',  type='float')
    parser.add_option('-c', '--cpusplit',   type='float')
    (options, args) = parser.parse_args()
    if options.help:
        print """
Script configures memory and CPU allocation for GPText and GPDB. GPDB should be running when
the script is started. The script should work on master server. Local GPDB connection under
gpadmin to template1 database should be passwordless.
Usage:
python gptext_tune_settings.py [-s memory_split] [-a allocation_rate] [-c cpu_split] [-f | --force]
    memory_split    - [0.1 .. 0.9] - split of the memory between GPText and GPDB. Greater value - more memory for GPText
    allocation_rate - [0.1 .. 0.9] - part of the system memory available to GPText and GPDB
    cpu_split       - [0.3 .. 2.0] - part of the CPU dedicated to GPDB. Over utilization is allowed
    force           - do not ask for confirmation of changing the memory settings
"""
        sys.exit(0)        
    if options.allocrate:
        if not (options.allocrate >= 0.1 and options.allocrate <= 0.9):
            logger.error('Correct values for --allocrate are [0.1 .. 0.9]')
            sys.exit(3)
        allocation_rate = options.allocrate
    if options.memsplit:
        if not (options.memsplit >= 0.1 and options.memsplit <= 0.9):
            logger.error('Correct values for --memsplit are [0.1 .. 0.9]')
            sys.exit(3)
        memory_split = options.memsplit
    if options.cpusplit:
        if not (options.cpusplit >= 0.3 and options.cpusplit <= 2.0):
            logger.error('Correct values for --cpusplit are [0.3 .. 2.0]')
            sys.exit(3)
        cpu_split = options.cpusplit
    return options
コード例 #14
0
ファイル: gppkg.py プロジェクト: shubh8689/gpdb
    def create_parser():
        """Return the gppkg command-line parser with its two option groups."""
        parser = OptParser(option_class=OptChecker,
                           description="Greenplum Package Manager",
                           version="%prog version $Revision: #1 $")
        parser.setHelp([])

        addStandardLoggingAndHelpOptions(parser, includeNonInteractiveOption=True)

        # Remove the stock -q/-l so -q can be reassigned to --query below.
        parser.remove_option("-q")
        parser.remove_option("-l")

        general = OptionGroup(parser, "General Options")
        parser.add_option_group(general)

        addMasterDirectoryOptionForSingleClusterProgram(general)

        # TODO: AK: Eventually, these options may need to be flexible enough to accept mutiple packages
        # in one invocation. If so, the structure of this parser may need to change.
        general.add_option("-i", "--install", metavar="<package>",
                           help="install the given gppkg")
        general.add_option("-u", "--update", metavar="<package>",
                           help="update the given gppkg")
        general.add_option("-r", "--remove", metavar="<name>-<version>",
                           help="remove the given gppkg")
        general.add_option("-q", "--query", action="store_true",
                           help="query the gppkg database or a particular gppkg")
        general.add_option("-b", "--build", metavar="<directory>",
                           help="build a gppkg")
        general.add_option("-c", "--clean", action="store_true",
                           help="clean the cluster of the given gppkg")
        general.add_option("--migrate", action="store_true", default=False,
                           metavar="<from_gphome> <to_gphome>",
                           help="migrate gppkgs from a separate $GPHOME")

        queries = OptionGroup(parser, "Query Options")
        parser.add_option_group(queries)
        queries.add_option("--info", action="store_true",
                           help="print information about the gppkg including name, version, description")
        queries.add_option("--list", action="store_true",
                           help="print all the files present in the gppkg")
        queries.add_option("--all", action="store_true",
                           help="print all the gppkgs installed by gppkg")

        return parser
コード例 #15
0
def parseargs():
    parser = OptParser(option_class=OptChecker)
    parser.remove_option('-h')
    parser.add_option('-h', '-?', '--help', action='store_true')
    parser.add_option('-d', '--dbname',   type='string')
    parser.add_option('-p', '--password', type='string')
    parser.add_option('-n', '--nthreads', type='int')
    (options, args) = parser.parse_args()
    if options.help or (not options.dbname and not options.filename):
        print """Script performs parallel analyze of all tables
Usage:
./parallel_analyze.py -n thread_number -d dbname [-p gpadmin_password]
Parameters:
    thread_number    - number of parallel threads to run
    dbname           - name of the database
    gpadmin_password - password of the gpadmin user"""
        sys.exit(0)
    if not options.dbname:
        logger.error('Failed to start utility. Please, specify database name with "-d" key')
        sys.exit(1)
    if not options.nthreads:
        logger.error('Failed to start utility. Please, specify number of threads with "-n" key')
        sys.exit(1)
    return options
コード例 #16
0
    def createParser():
        """
        Constructs and returns an option parser.

        Called by simple_main()
        """
        parser = OptParser(option_class=OptChecker,
                           version='%prog version $Revision: $')
        parser.setHelp(__help__)

        addStandardLoggingAndHelpOptions(parser, False)

        required = OptionGroup(parser, "Required Options")
        parser.add_option_group(required)
        required.add_option('-d', '--directory', type='string')
        required.add_option('-i', '--dbid', type='int')

        parser.set_defaults()
        return parser
コード例 #17
0
ファイル: gpsetdbid.py プロジェクト: ginobiliwang/gpdb
    def createParser():
        """Construct and return the option parser used by simple_main()."""
        parser = OptParser(option_class=OptChecker,
                           version="%prog version $Revision: $")
        parser.setHelp(__help__)

        addStandardLoggingAndHelpOptions(parser, False)

        required = OptionGroup(parser, "Required Options")
        parser.add_option_group(required)
        required.add_option("-d", "--directory", type="string")
        required.add_option("-i", "--dbid", type="int")

        parser.set_defaults()
        return parser
コード例 #18
0
def parseargs():
    parser = OptParser(option_class=OptChecker)
    parser.remove_option('-h')
    parser.add_option('-h', '-?', '--help', action='store_true')
    parser.add_option('-i', '--index',      type='string')
    (options, args) = parser.parse_args()
    if options.help:
        print """
Script is calling Solr commit with the expungeDeletes flag to cause
all the index segments with deletes in them to clean up their segments
from the deleted records
Usage:
python gptext_expunge_deletes.py -i index_name
    -i | --index - Name of the index to expunge
Examples:
    python gptext_expunge_deletes.py -i test.public.test_table
"""
        sys.exit(0)
    if not options.index:
        logger.error('You must specify index name with -i or --index key')
        sys.exit(3)
    return options
コード例 #19
0
ファイル: kill.py プロジェクト: PivotalBigData/gpdb
    def create_parser():
        """Create the command line parser object for gpkill"""

        parser = OptParser(option_class=OptChecker,
                           description='Check or Terminate a Greenplum Database process.',
                           version='%prog version $Revision: #1 $')
        parser.setHelp([])

        addStandardLoggingAndHelpOptions(parser, True)

        # Standard -l and -a options are not applicable to gpkill.
        parser.remove_option('-l')
        parser.remove_option('-a')

        check_group = OptionGroup(parser, 'Check Options')
        parser.add_option_group(check_group)
        check_group.add_option('--check',
                               action='store_true',
                               metavar='pid',
                               help='Only returns status 0 if pid may be killed without gpkill, status 1 otherwise.')

        return parser
コード例 #20
0
def parseargs():
    parser = OptParser(option_class=OptChecker)
    parser.remove_option("-h")
    parser.add_option("-h", "-?", "--help", action="store_true")
    parser.add_option("-d", "--dbname", type="string")
    parser.add_option("-p", "--password", type="string")
    parser.add_option("-n", "--nthreads", type="int")
    (options, args) = parser.parse_args()
    if options.help or (not options.dbname and not options.filename):
        print """Script performs serial restore of the backup files in case
of the cluster topology change.
Usage:
./parallel_analyze.py -n thread_number -d dbname [-p gpadmin_password]
Parameters:
    thread_number    - number of parallel threads to run
    dbname           - name of the database
    gpadmin_password - password of the gpadmin user"""
        sys.exit(0)
    if not options.dbname:
        logger.error('Failed to start utility. Please, specify database name with "-d" key')
        sys.exit(1)
    if not options.nthreads:
        logger.error('Failed to start utility. Please, specify number of threads with "-n" key')
        sys.exit(1)
    return options
コード例 #21
0
ファイル: ddl_migration.py プロジェクト: dpolirov/ingos_copy
    def __init__(self) :
        parser = OptParser(option_class=OptChecker)
        parser.remove_option("-h")    
        parser.add_option("-h", "-?", "--help",  action="store_true")
        parser.add_option("-f", "--file",        type="string")
        parser.add_option("-t", "--table",       type="string")
        parser.add_option("-c", "--config",      type="string")
        parser.add_option("-d", "--drop",        action="store_true")
        (parser_result, args) = parser.parse_args()
        helpstr = "Usage:" + os.linesep + "./ddl_migration.py -f path"
        if parser_result.help :
            print helpstr
            sys.exit(0)
        if not parser_result.file and not parser_result.table:
            print "Failed to start utility. Please, specify path to input file with -f key or table with -t key"
            sys.exit(1)

        configpath = parser_result.config or "./ddl_migration.conf"
        try:
            configfile = open(configpath, "rb")
            self.connection_options = json.load(configfile)
            configfile.close()
        except Exception, e :
            raise Exception("Could not read config file " + configpath + ". " +str(e))
コード例 #22
0
    def createParser():
        """
        Create parser expected by simple_main
        """

        parser = OptParser(option_class=OptChecker,
                           description=' '.join(DESCRIPTION.split()),
                           version='%prog version main build dev')
        parser.setHelp(HELP)

        #
        # Note that this mirroringmode parameter should only be either mirrorless or quiescent.
        #   If quiescent then it is implied that there is pickled transition data that will be
        #   provided (using -p) to immediately convert to a primary or a mirror.
        #
        addStandardLoggingAndHelpOptions(parser,
                                         includeNonInteractiveOption=False)

        parser.add_option("-C", "--collation", type="string",
                          help="values for lc_collate, lc_monetary, lc_numeric separated by :")
        parser.add_option("-D", "--dblist", dest="dblist",
                          action="append", type="string")
        parser.add_option("-M", "--mirroringmode", dest="mirroringMode",
                          type="string")
        parser.add_option("-p", "--pickledTransitionData",
                          dest="pickledTransitionData", type="string")
        parser.add_option("-V", "--gp-version", dest="gpversion",
                          metavar="GP_VERSION",
                          help="expected software version")
        parser.add_option("-n", "--numsegments", dest="num_cids",
                          help="number of distinct content ids in cluster")
        parser.add_option("", "--era", dest="era", help="master era")
        parser.add_option("-t", "--timeout", dest="timeout", type="int",
                          default=gp.SEGMENT_TIMEOUT_DEFAULT,
                          help="seconds to wait")
        parser.add_option('-U', '--specialMode', type='choice',
                          choices=['upgrade', 'maintenance'],
                          metavar='upgrade|maintenance',
                          action='store', default=None,
                          help='start the instance in upgrade or maintenance mode')
        parser.add_option('', '--wrapper', dest="wrapper",
                          default=None, type='string')
        parser.add_option('', '--wrapper-args', dest="wrapper_args",
                          default=None, type='string')

        return parser
コード例 #23
0
def create_parser():
    """Build the option parser for the pg_hba.conf update utility.

    Returns an OptParser with standard logging/help options plus the
    entry, data-directory and backup/restore/cleanup options.
    """
    parser = OptParser(
        option_class=OptChecker,
        description='update the pg_hba.conf on all segments')

    addStandardLoggingAndHelpOptions(parser, includeNonInteractiveOption=True)

    # Inputs: the entries to add and where the data directories live.
    parser.add_option('-p', '--pg-hba-info',
                      dest='pg_hba_info', metavar='<pg_hba entries>',
                      help='Entries that get added to pg_hba.conf file')
    parser.add_option('-d', '--data-dirs',
                      dest='data_dirs', metavar='<list of data dirs>',
                      help='A list of all data directories present on this host')

    # Simple boolean actions on the pg_hba.conf backup file.
    for short_flag, long_flag, help_text in (
            ('-b', '--backup', 'Backup the pg_hba.conf file'),
            ('-r', '--restore', 'Restore the pg_hba.conf file'),
            ('-D', '--delete', 'Cleanup the pg_hba.conf backup file')):
        parser.add_option(short_flag, long_flag, action='store_true',
                          help=help_text)

    return parser
コード例 #24
0
    def createParser():
        """Build the option parser for the fault-injection utility.

        Internal-use only.  Configures four option groups: target-segment
        selection, master connection, client polling mode, and the fault
        description itself.  Returns the configured OptParser.
        """
        description = ("""
        This utility is NOT SUPPORTED and is for internal-use only.

        Used to inject faults into the file replication code.
        """)

        help = ["""

        Return codes:
          0 - Fault injected
          non-zero: Error or invalid options
        """]

        parser = OptParser(option_class=OptChecker,
                    description='  '.join(description.split()),
                    version='%prog version $Revision$')
        parser.setHelp(help)

        addStandardLoggingAndHelpOptions(parser, False)

        # these options are used to determine the target segments
        addTo = OptionGroup(parser, 'Target Segment Options: ')
        parser.add_option_group(addTo)
        addTo.add_option('-r', '--role', dest="targetRole", type='string', metavar="<role>",
                         help="Role of segments to target: master, standby, primary")
        addTo.add_option("-s", "--registration_order", dest="targetRegistrationOrder", type="string", metavar="<registration_order>",
                         help="The segment registration_order on which fault should be set and triggered.")
        addTo.add_option("-H", "--host", dest="targetHost", type="string", metavar="<host>",
                         help="The hostname on which fault should be set and triggered; pass ALL to target all hosts")

        # options describing how to reach the master database
        addTo = OptionGroup(parser, 'Master Connection Options')
        parser.add_option_group(addTo)

        addMasterDirectoryOptionForSingleClusterProgram(addTo)
        addTo.add_option("-p", "--master_port", dest="masterPort", type="int", default=None,
                         metavar="<masterPort>",
                         help="DEPRECATED, use MASTER_DATA_DIRECTORY environment variable or -d option.  " \
                         "The port number of the master database on localhost, " \
                         "used to fetch the segment configuration.")

        # whether the client blocks until the fault fires, or returns immediately
        addTo = OptionGroup(parser, 'Client Polling Options: ')
        parser.add_option_group(addTo)
        addTo.add_option('-m', '--mode', dest="syncMode", type='string', default="async",
                         metavar="<syncMode>",
                         help="Synchronization mode : sync (client waits for fault to occur)" \
                         " or async (client only sets fault request on server)")

        # these options are used to build the message for the segments
        addTo = OptionGroup(parser, 'Fault Options: ')
        parser.add_option_group(addTo)
        addTo.add_option('-y', '--type', dest="type", type='string', metavar="<type>",
                         help="fault type: sleep (insert sleep), fault (report fault to postmaster and fts prober), " \
                  "fatal (inject FATAL error), panic (inject PANIC error), error (inject ERROR), " \
                  "infinite_loop, data_curruption (corrupt data in memory and persistent media), " \
                  "suspend (suspend execution), resume (resume execution that was suspended), " \
                  "skip (inject skip i.e. skip checkpoint), " \
                  "memory_full (all memory is consumed when injected), " \
                  "reset (remove fault injection), status (report fault injection status), " \
                  "panic_suppress (inject suppressed PANIC in critical section), " \
                  "segv (inject a SEGV), " \
                  "interrupt (inject an Interrupt) ")
        addTo.add_option("-z", "--sleep_time_s", dest="sleepTimeSeconds", type="int", default="10" ,
                            metavar="<sleepTime>",
                            help="For 'sleep' faults, the amount of time for the sleep.  Defaults to %default." \
                 "Min Max Range is [0, 7200 sec] ")
        addTo.add_option('-f', '--fault_name', dest="faultName", type='string', metavar="<name>",
                         help="fault name: " \
                  "postmaster (inject fault when new connection is accepted in postmaster), " \
                  "pg_control (inject fault when global/pg_control file is written), " \
                  "pg_xlog (inject fault when files in pg_xlog directory are written), " \
                  "start_prepare (inject fault during start prepare transaction), " \
                  "filerep_consumer (inject fault before data are processed, i.e. if mirror " \
                  "then before file operation is issued to file system, if primary " \
                  "then before mirror file operation is acknowledged to backend processes), " \
                  "filerep_consumer_verificaton (inject fault before ack verification data are processed on primary), " \
                  "filerep_change_tracking_compacting (inject fault when compacting change tracking log files), " \
                  "filerep_sender (inject fault before data are sent to network), " \
                  "filerep_receiver (inject fault after data are received from network), " \
                  "filerep_flush (inject fault before fsync is issued to file system), " \
                  "filerep_resync (inject fault while in resync when first relation is ready to be resynchronized), " \
                  "filerep_resync_in_progress (inject fault while resync is in progress), " \
                  "filerep_resync_worker (inject fault after write to mirror), " \
                  "filerep_resync_worker_read (inject fault before read required for resync), " \
                  "filerep_transition_to_resync (inject fault during transition to InResync before mirror re-create), " \
                  "filerep_transition_to_resync_mark_recreate (inject fault during transition to InResync before marking re-created), " \
                  "filerep_transition_to_resync_mark_completed (inject fault during transition to InResync before marking completed), " \
                  "filerep_transition_to_sync_begin (inject fault before transition to InSync begin), " \
                  "filerep_transition_to_sync (inject fault during transition to InSync), " \
                  "filerep_transition_to_sync_before_checkpoint (inject fault during transition to InSync before checkpoint is created), " \
                  "filerep_transition_to_sync_mark_completed (inject fault during transition to InSync before marking completed), " \
                  "filerep_transition_to_change_tracking (inject fault during transition to InChangeTracking), " \
                  "checkpoint (inject fault before checkpoint is taken), " \
                  "change_tracking_compacting_report (report if compacting is in progress), " \
                  "change_tracking_disable (inject fault before fsync to Change Tracking log files), " \
                  "transaction_abort_after_distributed_prepared (abort prepared transaction), " \
                  "transaction_commit_pass1_from_create_pending_to_created, " \
                  "transaction_commit_pass1_from_drop_in_memory_to_drop_pending, " \
                  "transaction_commit_pass1_from_aborting_create_needed_to_aborting_create, " \
                  "transaction_abort_pass1_from_create_pending_to_aborting_create, " \
                  "transaction_abort_pass1_from_aborting_create_needed_to_aborting_create, " \
                  "transaction_commit_pass2_from_drop_in_memory_to_drop_pending, " \
                  "transaction_commit_pass2_from_aborting_create_needed_to_aborting_create, " \
                  "transaction_abort_pass2_from_create_pending_to_aborting_create, " \
                  "transaction_abort_pass2_from_aborting_create_needed_to_aborting_create, " \
                  "finish_prepared_transaction_commit_pass1_from_create_pending_to_created, " \
                  "finish_prepared_transaction_commit_pass2_from_create_pending_to_created, " \
                  "finish_prepared_transaction_abort_pass1_from_create_pending_to_aborting_create, " \
                  "finish_prepared_transaction_abort_pass2_from_create_pending_to_aborting_create, " \
                  "finish_prepared_transaction_commit_pass1_from_drop_in_memory_to_drop_pending, " \
                  "finish_prepared_transaction_commit_pass2_from_drop_in_memory_to_drop_pending, " \
                  "finish_prepared_transaction_commit_pass1_aborting_create_needed, " \
                  "finish_prepared_transaction_commit_pass2_aborting_create_needed, " \
                  "finish_prepared_transaction_abort_pass1_aborting_create_needed, " \
                  "finish_prepared_transaction_abort_pass2_aborting_create_needed, " \
                  "twophase_transaction_commit_prepared (inject fault before transaction commit is inserted in xlog), " \
                  "twophase_transaction_abort_prepared (inject fault before transaction abort is inserted in xlog), " \
                  "dtm_broadcast_prepare (inject fault after prepare broadcast), " \
                  "dtm_broadcast_commit_prepared (inject fault after commit broadcast), " \
                  "dtm_broadcast_abort_prepared (inject fault after abort broadcast), " \
                  "dtm_xlog_distributed_commit (inject fault after distributed commit was inserted in xlog), " \
                  "fault_before_pending_delete_relation_entry (inject fault before putting pending delete relation entry, " \
                  "fault_before_pending_delete_database_entry (inject fault before putting pending delete database entry, " \
                  "fault_before_pending_delete_tablespace_entry (inject fault before putting pending delete tablespace entry, " \
                  "fault_before_pending_delete_filespace_entry (inject fault before putting pending delete filespace entry, " \
                  "dtm_init (inject fault before initializing dtm), " \
                  "end_prepare_two_phase_sleep (inject sleep after two phase file creation), " \
                  "segment_transition_request (inject fault after segment receives state transition request), " \
                  "segment_probe_response (inject fault after segment is probed by FTS), " \
                  "sync_persistent_table (inject fault to sync persistent table to disk), " \
                  "xlog_insert (inject fault to skip insert record into xlog), " \
                  "local_tm_record_transaction_commit (inject fault for local transactions after transaction commit is recorded and flushed in xlog ), " \
                  "malloc_failure (inject fault to simulate memory allocation failure), " \
                  "transaction_abort_failure (inject fault to simulate transaction abort failure), " \
                  "update_committed_eof_in_persistent_table (inject fault before committed EOF is updated in gp_persistent_relation_node for Append Only segment files), " \
                  "fault_during_exec_dynamic_table_scan (inject fault during scanning of a partition), " \
                  "internal_flush_error (inject an error during internal_flush), " \
                  "exec_simple_query_end_command (inject fault before EndCommand in exec_simple_query), " \
                  "multi_exec_hash_large_vmem (allocate large vmem using palloc inside MultiExecHash to attempt to exceed vmem limit), " \
                  "execsort_before_sorting (inject fault in nodeSort after receiving all tuples and before sorting), " \
                  "workfile_cleanup_set (inject fault in workfile manager cleanup set)" \
                  "execsort_mksort_mergeruns (inject fault in MKSort during the mergeruns phase), " \
                  "cdb_copy_start_after_dispatch (inject fault in cdbCopyStart after dispatch), " \
                  "fault_in_background_writer_main (inject fault in BackgroundWriterMain), " \
                  "exec_hashjoin_new_batch (inject fault before switching to a new batch in Hash Join), " \
                  "analyze_subxact_error (inject an error during analyze)," \
                  "opt_task_allocate_string_buffer (inject fault while allocating string buffer), " \
                  "runaway_cleanup (inject fault before starting the cleanup for a runaway query)" \
                  "all (affects all faults injected, used for 'status' and 'reset'), ")
        addTo.add_option("-c", "--ddl_statement", dest="ddlStatement", type="string",
                         metavar="ddlStatement",
                         help="The DDL statement on which fault should be set and triggered " \
                         "(i.e. create_database, drop_database, create_table, drop_table)")
        addTo.add_option("-D", "--database_name", dest="databaseName", type="string",
                         metavar="databaseName",
                         help="The database name on which fault should be set and triggered.")
        addTo.add_option("-t", "--table_name", dest="tableName", type="string",
                         metavar="tableName",
                         help="The table name on which fault should be set and triggered.")
        addTo.add_option("-o", "--occurrence", dest="numOccurrences", type="int", default=1,
                         metavar="numOccurrences",
                         help="The number of occurrence of the DDL statement with the database name " \
                         "and the table name before fault is triggered.  Defaults to %default. Max is 1000. " \
             "Fault is triggered always if set to '0'. ")
        parser.set_defaults()
        return parser
コード例 #25
0
ファイル: gprestore_filter.py プロジェクト: AnLingm/gpdb
def get_change_schema_name(change_schema_file):
    """Read the target schema name from *change_schema_file*.

    Only the newline character is stripped from the file contents, since
    '\\n' is one of the few characters that cannot be part of a schema or
    table name; all other whitespace is preserved.

    Raises Exception when the file does not exist.
    """
    if not os.path.exists(change_schema_file):
        raise Exception('change schema file path %s does not exist' % change_schema_file)
    with open(change_schema_file) as handle:
        contents = handle.read()
    return contents.strip('\n')

if __name__ == "__main__":
    # Command-line entry point: parse the filter options and validate the
    # mutually-exclusive combinations before building the restore sets.
    parser = OptParser(option_class=OptChecker)
    # Replace the stock help option so '-?' is accepted alongside -h/--help.
    parser.remove_option('-h')
    parser.add_option('-h', '-?', '--help', action='store_true')
    parser.add_option('-t', '--tablefile', type='string', default=None)
    parser.add_option('-m', '--master_only', action='store_true')
    parser.add_option('-c', '--change-schema-file', type='string', default=None)
    parser.add_option('-s', '--schema-level-file', type='string', default=None)
    (options, args) = parser.parse_args()
    # At least one filter source is mandatory; -s and -c cannot be combined.
    if not (options.tablefile or options.schema_level_file):
        raise Exception('-t table file name or -s schema level file name must be specified')
    elif options.schema_level_file and options.change_schema_file:
        raise Exception('-s schema level file option can not be specified with -c change schema file option')

    schemas, tables = None, None
    if options.tablefile:
        (schemas, tables) = get_table_schema_set(options.tablefile)
コード例 #26
0
def parseargs():
    parser = OptParser(option_class=OptChecker)

    parser.setHelp(_help)

    parser.remove_option('-h')
    parser.add_option('-h',
                      '-?',
                      '--help',
                      action='help',
                      help='show this help message and exit')

    parser.add_option('--entry', type='string')
    parser.add_option('--value', type='string')
    parser.add_option('--removeonly', action='store_true')
    parser.set_defaults(removeonly=False)

    # Parse the command line arguments
    (options, args) = parser.parse_args()

    # sanity check
    if not options.entry:
        print "--entry is required"
        sys.exit(1)

    if (not options.value) and (not options.removeonly):
        print "Select either --value or --removeonly"
        sys.exit(1)

    return options
コード例 #27
0
def parseargs(args):
    global logger

    pguser = os.environ.get("PGUSER") or unix.getUserName()
    pghost = os.environ.get("PGHOST") or unix.getLocalHostname()
    pgport = os.environ.get("PGPORT") or 5432

    parser = OptParser(option_class=OptChecker)
    parser.remove_option('-h')
    parser.add_option('-?', '--help', '-h', action='store_true', default=False)
    parser.add_option('-n', '--host', default=pghost)
    parser.add_option('-p', '--port', default=pgport)
    parser.add_option('-u', '--username', default=pguser)
    parser.add_option('-w', '--password', default=False, action='store_true')
    parser.add_option('-v', '--verbose', default=False, action='store_true')
    parser.add_option('-q', '--quiet', default=True, action='store_true')

    (options, args) = parser.parse_args()

    if options.help:
        print __doc__
        sys.exit(1)
    try:
        options.port = int(options.port)
    except:
        logger.error("Invalid PORT: '%s'" % options.port)
        sys.exit(1)

    if options.verbose:
        gplog.enable_verbose_logging()
    elif options.quiet:
        gplog.quiet_stdout_logging()

    return options
コード例 #28
0
ファイル: clsRecoverSegment.py プロジェクト: pf-qiu/gpdb
    def createParser():
        """Construct the option parser for recovering a failed segment."""
        description = ("Recover a failed segment")
        help = [""]

        parser = OptParser(option_class=OptChecker,
                           version='%prog version $Revision$',
                           description=' '.join(description.split()))
        parser.setHelp(help)

        addStandardLoggingAndHelpOptions(parser, True)

        # Where to find the master instance.
        connection_group = OptionGroup(parser, "Connection Options")
        parser.add_option_group(connection_group)
        addMasterDirectoryOptionForSingleClusterProgram(connection_group)

        # How the recovery plan is supplied, or emitted as a sample.
        source_group = OptionGroup(parser, "Recovery Source Options")
        parser.add_option_group(source_group)
        source_group.add_option("-i", None,
                                dest="recoveryConfigFile", type="string",
                                metavar="<configFile>",
                                help="Recovery configuration file")
        source_group.add_option("-o", None,
                                dest="outputSampleConfigFile", type="string",
                                metavar="<configFile>",
                                help="Sample configuration file name to output; "
                                     "this file can be passed to a subsequent call using -i option")

        # Optional replacement hardware to recover onto.
        dest_group = OptionGroup(parser, "Recovery Destination Options")
        parser.add_option_group(dest_group)
        dest_group.add_option("-p", None,
                              dest="newRecoverHosts", type="string",
                              metavar="<targetHosts>",
                              help="Spare new hosts to which to recover segments")

        # Knobs controlling how recovery itself runs.
        recovery_group = OptionGroup(parser, "Recovery Options")
        parser.add_option_group(recovery_group)
        recovery_group.add_option('-F', None,
                                  dest="forceFullResynchronization",
                                  action='store_true', default=False,
                                  metavar="<forceFullResynchronization>",
                                  help="Force full segment resynchronization")
        recovery_group.add_option("-B", None,
                                  dest="parallelDegree", type="int", default=16,
                                  metavar="<parallelDegree>",
                                  help="Max # of workers to use for building recovery segments.  [default: %default]")
        recovery_group.add_option("-r", None,
                                  dest='rebalanceSegments',
                                  action='store_true', default=False,
                                  help='Rebalance synchronized segments.')

        parser.set_defaults()
        return parser
コード例 #29
0
ファイル: initstandby.py プロジェクト: adam8157/gpdb
def create_parser():
    """Return an OptParser for the host-level pg_hba.conf update tool."""
    parser = OptParser(option_class=OptChecker,
                       description='update the pg_hba.conf on all segments')

    addStandardLoggingAndHelpOptions(parser, includeNonInteractiveOption=True)

    add = parser.add_option
    # Inputs describing what to write and where.
    add('-p', '--pg-hba-info', dest='pg_hba_info',
        metavar='<pg_hba entries>',
        help='Entries that get added to pg_hba.conf file')
    add('-d', '--data-dirs', dest='data_dirs',
        metavar='<list of data dirs>',
        help='A list of all data directories present on this host')
    # Boolean actions on the pg_hba.conf backup file.
    add('-b', '--backup', action='store_true',
        help='Backup the pg_hba.conf file')
    add('-r', '--restore', action='store_true',
        help='Restore the pg_hba.conf file')
    add('-D', '--delete', action='store_true',
        help='Cleanup the pg_hba.conf backup file')

    return parser
コード例 #30
0
ファイル: gpaddconfig.py プロジェクト: PengJi/gpdb-comments
def parseargs():
    parser = OptParser(option_class=OptChecker)

    parser.setHelp(_help)

    parser.remove_option('-h')
    parser.add_option('-h', '-?', '--help', action='help', help='show this help message and exit')

    parser.add_option('--entry', type='string')
    parser.add_option('--value', type='string')
    parser.add_option('--removeonly', action='store_true')
    parser.set_defaults(removeonly=False)

    # Parse the command line arguments
    (options, args) = parser.parse_args()

    # sanity check
    if not options.entry:
        print "--entry is required"
        sys.exit(1)

    if (not options.value) and (not options.removeonly):
        print "Select either --value or --removeonly"
        sys.exit(1)

    return options
コード例 #31
0
def parseargs():
    """Parse and validate options for generating a segment placement map.

    Prints usage and exits on --help; calls raise_err() on any invalid
    option combination.  Returns the parsed options object.
    """
    # NOTE(review): these globals are declared but never assigned inside
    # this function -- confirm whether the declaration is still needed.
    global allocation_rate, memory_split, cpu_split
    parser = OptParser(option_class=OptChecker)
    # Replace the stock help option so '-?' is accepted as well.
    parser.remove_option('-h')
    parser.add_option('-h', '-?', '--help',  action='store_true')
    parser.add_option('-s', '--seghosts',    type='int')
    parser.add_option('-d', '--segdbs',      type='int')
    parser.add_option('-p', '--primarydirs', type='string')
    parser.add_option('-m', '--mirrordirs',  type='string')
    parser.add_option('-o', '--outfile',     type='string')
    parser.add_option('-i', '--initsystem',  action='store_true')
    parser.add_option('-e', '--expansion',   action='store_true')
    parser.add_option('-a', '--added',       type='int')
    parser.add_option('-C', '--maxcontent',  type='int')
    parser.add_option('-D', '--maxdbid',     type='int')
    (options, args) = parser.parse_args()
    if options.help:
        print """
Script generates the segment placement map for Greenplum initialization.
By default the primary segment directories are /data1/primary and /data2/primary
and the mirror directories are /data1/mirror and /data2/mirror. Output file is
by default located in the working directory and is called gpinitsystem_map_<timestamp>
Usage:
python generate_segment_map.py -s number_of_segment_hosts -d number_of_dbs_per_host
                              [-o outfile]
                              [-p directories_for_primaries -m directories_for_mirrors]
                              [-i | --initsystem]
                              [-e -a number_of_hosts_added --maxcontent max_content
                                    --maxdbid max_dbid]
    -s | --seghosts    - Number of segment hosts in the system
    -d | --segdbs      - Number of segment databases per host
    -o | --outfile     - Output file
    -p | --primarydirs - Colon-separated list of primary segment directories
    -m | --mirrordirs  - Colon-separated list of mirror segment directories
    -i | --initsystem  - Generate map file for system initialization
    -e | --expansion   - Generate map file for system expansion
    -a | --added       - Number of segment hosts added during expansion
    -C | --maxcontent  - Maximal number of content in existing GPDB
    -D | --maxdbid     - Maximal number of dbid in existing GPDB
Examples:
    1. Initialize system with 16 segment servers and 4 segments per host:
    python generate_segment_map.py -s 16 -d 4 -i > gpinitsystem_map
    2. Prepare expansion map to add 8 segment servers to existing system with
        16 segment servers and 4 segment databases per host:
    python generate_segment_map.py -s 16 -d 4 -e -a 8 --maxcontent 100 --maxdbid 100 > gpexpand_map
"""
        sys.exit(0)
    # Sanity checks: host/db counts are mandatory, directory options and
    # init/expansion modes must be supplied consistently.
    if not options.seghosts or not options.segdbs:
        raise_err('You must specify both number of segment hosts (-s) and number of segment databases on each host (-d)')
    if (options.primarydirs and not options.mirrordirs) or (not options.primarydirs and options.mirrordirs):
        raise_err('You must either specify both folders for primaries and mirrors or use defaults for both')
    if (not options.initsystem and not options.expansion) or (options.initsystem and options.expansion):
        raise_err('You should either specify init system mode ( -i ) or expansion mode ( -e )')
    if options.expansion and not options.added:
        raise_err('In expansion mode you must specify number of segment servers added')
    return options
コード例 #32
0
def parseargs():
    parser = OptParser(option_class=OptChecker)
    parser.remove_option('-h')
    parser.add_option('-h', '-?', '--help', action='store_true')
    parser.add_option('-d', '--database',   type='string')
    parser.add_option('-u', '--username',   type='string')
    parser.add_option('-p', '--password',   type='string')
    parser.add_option('-l', '--logfile',    type='string')
    parser.add_option('-n', '--nrows',      type='int')
    
    (options, args) = parser.parse_args()
    if options.help:
        print """
Script executes baseline performance test on the database
Usage:
python performance_baseline.py -d database_name
                               [-u username -p password]
                               [-l logfile]
                               [-n number_of_rows]
    -d | --database   - name of the database to run the test
    -u | --username   - name of the user to be used for testing (default is $PGUSER)
    -p | --password   - password of the user used for testing   (default is $PGPASSWORD)
    -l | --logfile    - performance test output file (default is stdout)
    -n | --nrows      - number of rows generated in test table (default is 5000)
"""
        sys.exit(0)
    if not options.nrows:
        options.nrows = 5000
    if options.nrows < 5000:
        raise_err('Number of rows should be 5000 or more')
    if not options.database:
        raise_err('You must specify database name (-d)')
    if (options.password and not options.username) or (not options.password and options.username):
        raise_err('You should either specify both username and password or not specify them both')
    return options
コード例 #33
0
ファイル: gprepairmirrorseg.py プロジェクト: yanchaomars/gpdb
def parseargs():
    """Parse command-line options for the mirror-repair utility.

    Returns (options, args); exits via parser.exit() when unexpected
    positional arguments are present.
    """
    parser = OptParser(option_class=OptChecker,
                       description=' '.join(DESCRIPTION.split()),
                       version='%prog version $Revision: #12 $')
    parser.setHelp(_help)
    parser.set_usage('%prog ' + _usage)

    # Re-register the help option manually so '-?' is accepted too.
    parser.remove_option('-h')

    parser.add_option('-f', '--file', default='',
                      help='the name of a file containing the re-sync file list.')
    parser.add_option('-v', '--verbose', default=False, action='store_true',
                      help='debug output.')
    parser.add_option('-h', '-?', '--help', default=False, action='help',
                      help='show this help message and exit.')
    parser.add_option('--usage', action="briefhelp")
    parser.add_option('-d', '--master_data_directory',
                      dest="masterDataDirectory", type='string',
                      metavar="<master data directory>",
                      # NOTE: default is computed now, as the parser is built
                      default=get_masterdatadir(),
                      help="Optional. The master host data directory. If not specified, the value set for $MASTER_DATA_DIRECTORY will be used.")
    parser.add_option('-a', dest='confirm', default=True, action='store_false',
                      help='don\'t ask to confirm repairs')

    # Parse the command line arguments.
    (options, args) = parser.parse_args()

    # Positional arguments are not supported.
    if len(args) > 0:
        logger.error('Unknown argument %s' % args[0])
        parser.exit()

    return options, args
コード例 #34
0
ファイル: clsInjectFault.py プロジェクト: zwt1024/gpdb
    def createParser():
        """Build the option parser for the fault-injection utility.

        Internal-use only.  Configures four option groups: target-segment
        selection, master connection, client polling mode, and the fault
        description itself.  Returns the configured OptParser.
        """
        description = ("""
        This utility is NOT SUPPORTED and is for internal-use only.

        Used to inject faults into the file replication code.
        """)

        help = [
            """

        Return codes:
          0 - Fault injected
          non-zero: Error or invalid options
        """
        ]

        parser = OptParser(option_class=OptChecker,
                           description='  '.join(description.split()),
                           version='%prog version $Revision$')
        parser.setHelp(help)

        addStandardLoggingAndHelpOptions(parser, False)

        # these options are used to determine the target segments
        addTo = OptionGroup(parser, 'Target Segment Options: ')
        parser.add_option_group(addTo)
        addTo.add_option(
            '-r',
            '--role',
            dest="targetRole",
            type='string',
            metavar="<role>",
            help=
            "Role of segments to target: primary, mirror, or primary_mirror")
        addTo.add_option(
            "-s",
            "--seg_dbid",
            dest="targetDbId",
            type="string",
            metavar="<dbid>",
            help="The segment  dbid on which fault should be set and triggered."
        )
        addTo.add_option(
            "-H",
            "--host",
            dest="targetHost",
            type="string",
            metavar="<host>",
            help=
            "The hostname on which fault should be set and triggered; pass ALL to target all hosts"
        )

        # options describing how to reach the master database
        addTo = OptionGroup(parser, 'Master Connection Options')
        parser.add_option_group(addTo)

        addMasterDirectoryOptionForSingleClusterProgram(addTo)
        addTo.add_option("-p", "--master_port", dest="masterPort",  type="int", default=None,
                         metavar="<masterPort>",
                         help="DEPRECATED, use MASTER_DATA_DIRECTORY environment variable or -d option.  " \
                         "The port number of the master database on localhost, " \
                         "used to fetch the segment configuration.")

        # sync vs. async client behaviour while the fault is set
        addTo = OptionGroup(parser, 'Client Polling Options: ')
        parser.add_option_group(addTo)
        addTo.add_option('-m', '--mode', dest="syncMode", type='string', default="async",
                         metavar="<syncMode>",
                         help="Synchronization mode : sync (client waits for fault to occur)" \
                         " or async (client only sets fault request on server)")

        # these options are used to build the message for the segments
        addTo = OptionGroup(parser, 'Fault Options: ')
        parser.add_option_group(addTo)
        # NB: This list needs to be kept in sync with:
        # - FaultInjectorTypeEnumToString
        # - FaultInjectorType_e
        addTo.add_option('-y','--type', dest="type", type='string', metavar="<type>",
                         help="fault type: sleep (insert sleep), fault (report fault to postmaster and fts prober), " \
         "fatal (inject FATAL error), panic (inject PANIC error), error (inject ERROR), " \
         "infinite_loop, data_curruption (corrupt data in memory and persistent media), " \
         "suspend (suspend execution), resume (resume execution that was suspended), " \
         "skip (inject skip i.e. skip checkpoint), " \
         "memory_full (all memory is consumed when injected), " \
         "reset (remove fault injection), status (report fault injection status), " \
         "segv (inject a SEGV), " \
         "interrupt (inject an Interrupt), " \
         "finish_pending (set QueryFinishPending to true), " \
         "checkpoint_and_panic (inject a panic following checkpoint) ")
        addTo.add_option("-z", "--sleep_time_s", dest="sleepTimeSeconds", type="int", default="10" ,
                            metavar="<sleepTime>",
                            help="For 'sleep' faults, the amount of time for the sleep.  Defaults to %default." \
     "Min Max Range is [0, 7200 sec] ")
        addTo.add_option(
            '-f',
            '--fault_name',
            dest="faultName",
            type='string',
            metavar="<name>",
            help=
            "See src/include/utils/faultinjector_lists.h for list of fault names"
        )
        addTo.add_option("-c", "--ddl_statement", dest="ddlStatement", type="string",
                         metavar="ddlStatement",
                         help="The DDL statement on which fault should be set and triggered " \
                         "(i.e. create_database, drop_database, create_table, drop_table)")
        addTo.add_option(
            "-D",
            "--database_name",
            dest="databaseName",
            type="string",
            metavar="databaseName",
            help="The database name on which fault should be set and triggered."
        )
        addTo.add_option(
            "-t",
            "--table_name",
            dest="tableName",
            type="string",
            metavar="tableName",
            help="The table name on which fault should be set and triggered.")
        addTo.add_option("-o", "--occurrence", dest="numOccurrences", type="int", default=1,
                         metavar="numOccurrences",
                         help="The number of occurrence of the DDL statement with the database name " \
                         "and the table name before fault is triggered.  Defaults to %default. Max is 1000. " \
    "Fault is triggered always if set to '0'. ")
        parser.set_defaults()
        return parser
コード例 #35
0
def get_schema_level_restore_list(schema_level_restore_file=None):
    """
    Read the schema-level restore file and return its lines as a list.

    Note: white space in schema and table name is supported now, don't do strip on them

    :param schema_level_restore_file: path to a text file with one schema entry per line
    :return: list of schema entries, one per line of the file
    :raises Exception: if the path is None or does not exist
    """
    # Guard None explicitly: os.path.exists(None) raises TypeError, which would
    # bypass the intended, readable error message below.
    if schema_level_restore_file is None or not os.path.exists(schema_level_restore_file):
        raise Exception("schema level restore file path %s does not exist" % schema_level_restore_file)
    with open(schema_level_restore_file) as fr:
        # splitlines (rather than split('\n')) avoids a trailing empty entry
        # and preserves embedded whitespace in each name.
        return fr.read().splitlines()


if __name__ == "__main__":
    # Command-line entry point: parses the file-selection options used by the
    # restore filtering helpers defined above.
    parser = OptParser(option_class=OptChecker)
    parser.remove_option("-h")
    # Re-add -h as a plain flag so the script controls help handling itself.
    parser.add_option("-h", "-?", "--help", action="store_true")
    parser.add_option("-t", "--tablefile", type="string", default=None)
    parser.add_option("-c", "--change-schema-file", type="string", default=None)
    parser.add_option("-s", "--schema-level-file", type="string", default=None)
    (options, args) = parser.parse_args()
    # At least one of -t / -s is required; -s and -c are mutually exclusive.
    if not (options.tablefile or options.schema_level_file):
        raise Exception("-t table file name or -s schema level file name must be specified")
    elif options.schema_level_file and options.change_schema_file:
        raise Exception("-s schema level file option can not be specified with -c change schema file option")

    schemas, tables = None, None
    if options.tablefile:
        # NOTE(review): get_table_schema_set is defined elsewhere in the original
        # file; this snippet is truncated here by the scrape.
        (schemas, tables) = get_table_schema_set(options.tablefile)
コード例 #36
0
ファイル: gpsegstop.py プロジェクト: zyclove/gpdb
    def createParser():
        """Build and return the option parser for the segment-stop utility."""
        parser = OptParser(option_class=OptChecker,
                           description=' '.join(description.split()),
                           version='%prog version $Revision: #12 $')
        parser.setHelp([])
        addStandardLoggingAndHelpOptions(parser, includeNonInteractiveOption=False)

        # Data directories to stop (may be given multiple times).
        parser.add_option("-D", "--db", dest="dblist", action="append", type="string")
        # Sanity check against the installed software version.
        parser.add_option("-V", "--gp-version", dest="gpversion", metavar="GP_VERSION",
                          help="expected software version")
        parser.add_option("-m", "--mode", dest="mode", metavar="<MODE>",
                          help="how to shutdown. modes are smart,fast, or immediate")
        parser.add_option("-t", "--timeout", dest="timeout", type="int",
                          default=SEGMENT_STOP_TIMEOUT_DEFAULT,
                          help="seconds to wait")
        return parser
コード例 #37
0
def parseargs():
    parser = OptParser(option_class=OptChecker)
    parser.remove_option('-h')
    parser.add_option('-h', '-?', '--help', action='store_true')
    parser.add_option('-d', '--dbname', type='string')
    parser.add_option('-p', '--password', type='string')
    parser.add_option('-n', '--nthreads', type='int')
    parser.add_option('-u', '--user', type='string')
    parser.add_option('-l', '--location', type='string')
    parser.add_option('-m', '--stat_mem', type='string')
    parser.add_option('-f', '--filename', type='string')
    (options, args) = parser.parse_args()
    if options.help:
        print """Script performs serial restore of the backup files in case
of the cluster itopology change.
Usage:
./parallel_analyze.py -n thread_number -d dbname [-p gpadmin_password]
Parameters:
    thread_number    - number of parallel threads to run
    dbname           - name of the database
    gpadmin_password - password of the gpadmin user"""
        sys.exit(0)
    if not options.dbname:
        logger.error(
            'Failed to start utility. Please, specify database name with "-d" key'
        )
        sys.exit(1)
    if not options.nthreads:
        logger.error(
            'Failed to start utility. Please, specify number of threads with "-n" key'
        )
        sys.exit(1)
    if not options.stat_mem:
        logger.error(
            'Failed to start utility. Please, specify statement_mem parameter  with "-m" key'
        )
        sys.exit(1)
    if not options.filename:
        logger.error(
            'Failed to start utility. Please, specify filename parameter (e.g. initial, compare)  with "-f" key'
        )
        sys.exit(1)
    if not options.location:
        logger.error(
            'Failed to start utility. Please, specify result folder parameter  with "-l" key'
        )
        sys.exit(1)
    return options
コード例 #38
0
def parseargs():
    parser = OptParser(option_class=OptChecker)
    parser.remove_option('-h')
    parser.add_option('-h', '-?', '--help', action='store_true')
    parser.add_option('-d', '--dbname',   type='string')
    parser.add_option('-p', '--password', type='string')
    parser.add_option('-n', '--nthreads', type='int')
    parser.add_option('-u', '--user', type='string')
    parser.add_option('-l', '--location', type='string')
    parser.add_option('-m', '--stat_mem', type='string')
    parser.add_option('-f', '--filename', type='string')	
    (options, args) = parser.parse_args()
    if options.help:
        print """Script performs serial restore of the backup files in case
of the cluster itopology change.
Usage:
./parallel_analyze.py -n thread_number -d dbname [-p gpadmin_password]
Parameters:
    thread_number    - number of parallel threads to run
    dbname           - name of the database
    gpadmin_password - password of the gpadmin user"""
        sys.exit(0)
    if not options.dbname:
        logger.error('Failed to start utility. Please, specify database name with "-d" key')
        sys.exit(1)
    if not options.nthreads:
        logger.error('Failed to start utility. Please, specify number of threads with "-n" key')
        sys.exit(1)
    if not options.stat_mem:
        logger.error('Failed to start utility. Please, specify statement_mem parameter  with "-m" key')
        sys.exit(1)
    if not options.filename:
        logger.error('Failed to start utility. Please, specify filename parameter (e.g. initial, compare)  with "-f" key')
        sys.exit(1)
    if not options.location:
        logger.error('Failed to start utility. Please, specify result folder parameter  with "-l" key')
        sys.exit(1)  
    return options
コード例 #39
0
ファイル: recovery_base.py プロジェクト: petersky/gpdb
    def parseargs(self):
        """Parse and validate command-line arguments, then set up logging and
        deserialize the per-segment recovery configuration onto self."""
        parser = OptParser(option_class=OptChecker,
                           description=' '.join(self.description.split()),
                           version='%prog version $Revision: $')
        parser.set_usage('%prog is a utility script used by gprecoverseg, and gpaddmirrors and is not intended to be run separately.')
        parser.remove_option('-h')

        #TODO we may not need the verbose flag
        parser.add_option('-v', '--verbose', action='store_true',
                          default=False, help='debug output.')
        parser.add_option('-c', '--confinfo', type='string')
        parser.add_option('-b', '--batch-size', type='int',
                          default=DEFAULT_SEGHOST_NUM_WORKERS,
                          metavar='<batch_size>')
        parser.add_option('-f', '--force-overwrite', dest='forceoverwrite',
                          action='store_true', default=False)
        parser.add_option('-l', '--log-dir', dest="logfileDirectory", type="string")
        parser.add_option('', '--era', dest="era", help="coordinator era")

        # Parse the command line arguments
        self.options, _ = parser.parse_args()

        # Both of these are mandatory for the callers of this script.
        if not self.options.confinfo:
            raise Exception('Missing --confinfo argument.')
        if not self.options.logfileDirectory:
            raise Exception('Missing --log-dir argument.')

        # Logging must be configured before any warnings below can be emitted.
        self.logger = gplog.setup_tool_logging(os.path.split(self.file_name)[-1],
                                               unix.getLocalHostname(),
                                               unix.getUserName(),
                                               logdir=self.options.logfileDirectory)

        if self.options.batch_size <= 0:
            self.logger.warn('batch_size was less than zero.  Setting to 1.')
            self.options.batch_size = 1

        if self.options.verbose:
            gplog.enable_verbose_logging()

        self.seg_recovery_info_list = recoveryinfo.deserialize_list(self.options.confinfo)
        if len(self.seg_recovery_info_list) == 0:
            raise Exception('No segment configuration values found in --confinfo argument')
コード例 #40
0
ファイル: runquery.py プロジェクト: macroyuyang/card
        retCode = exitStatus >> 8

        if retCode:
            return 1

    return 0


###### main()
if __name__ == '__main__':

    # Require GPHOME to be set; exit code 2 signals a misconfigured environment.
    gphome = os.environ.get('GPHOME')
    if not gphome:
        sys.exit(2)

    parser = OptParser(option_class=OptChecker)
    parser.remove_option('-h')
    # Re-add -h as a plain flag so the script controls help handling itself.
    parser.add_option('-h', '-?', '--help', action='store_true')
    parser.add_option('-s', '--spaceusage', action='store_true')
    parser.add_option('-u', '--user', type='string')
    parser.add_option('-d', '--dblist',  type='string')

    (options, args) = parser.parse_args()

    if options.help:
        # The module docstring doubles as the usage text.
        print __doc__
        sys.exit(1)

    # NOTE(review): snippet is truncated by the scrape just after this point.
    cmd=""
    # Get defaults from healthmon config
    if options.spaceusage:
コード例 #41
0
def parseargs():
    parser = OptParser(option_class=OptChecker)
    parser.remove_option('-h')
    parser.add_option('-h', '-?', '--help', action='store_true')
    parser.add_option('-d', '--dbname', type='string')
    parser.add_option('-p', '--password', type='string')
    parser.add_option('-n', '--nthreads', type='int')
    (options, args) = parser.parse_args()
    if options.help or (not options.dbname and not options.filename):
        print """Script performs parallel analyze of all tables
Usage:
./parallel_analyze.py -n thread_number -d dbname [-p gpadmin_password]
Parameters:
    thread_number    - number of parallel threads to run
    dbname           - name of the database
    gpadmin_password - password of the gpadmin user"""
        sys.exit(0)
    if not options.dbname:
        logger.error(
            'Failed to start utility. Please, specify database name with "-d" key'
        )
        sys.exit(1)
    if not options.nthreads:
        logger.error(
            'Failed to start utility. Please, specify number of threads with "-n" key'
        )
        sys.exit(1)
    return options
コード例 #42
0
    def createParser():
        """Construct the option parser for the segment-recovery utility."""

        description = ("Recover a failed segment")
        help = [""]

        parser = OptParser(option_class=OptChecker,
                           description=' '.join(description.split()),
                           version='%prog version $Revision$')
        parser.setHelp(help)
        addStandardLoggingAndHelpOptions(parser, True)

        # Connection options.
        group = OptionGroup(parser, "Connection Options")
        parser.add_option_group(group)
        addMasterDirectoryOptionForSingleClusterProgram(group)

        # Where recovery configuration comes from.
        group = OptionGroup(parser, "Recovery Source Options")
        parser.add_option_group(group)
        group.add_option("-i", None, type="string",
                         dest="recoveryConfigFile", metavar="<configFile>",
                         help="Recovery configuration file")
        group.add_option("-o", None, type="string",
                         dest="outputSampleConfigFile", metavar="<configFile>",
                         help="Sample configuration file name to output; "
                              "this file can be passed to a subsequent call using -i option")

        # Where segments may be recovered to.
        group = OptionGroup(parser, "Recovery Destination Options")
        parser.add_option_group(group)
        group.add_option("-p", None, type="string",
                         dest="newRecoverHosts", metavar="<targetHosts>",
                         help="Spare new hosts to which to recover segments")

        # How recovery is performed.
        group = OptionGroup(parser, "Recovery Options")
        parser.add_option_group(group)
        group.add_option('-F', None, default=False, action='store_true',
                         dest="forceFullResynchronization",
                         metavar="<forceFullResynchronization>",
                         help="Force full segment resynchronization")
        group.add_option("-B", None, type="int", default=16,
                         dest="parallelDegree", metavar="<parallelDegree>",
                         help="Max # of workers to use for building recovery segments.  [default: %default]")
        group.add_option("-r", None, default=False, action='store_true',
                         dest='rebalanceSegments',
                         help='Rebalance synchronized segments.')

        parser.set_defaults()
        return parser
コード例 #43
0
    def createParser():
        """
        Creates the command line options parser object for gpverify.
        """

        description = ("Initiates primary/mirror verification.")
        help = []

        parser = OptParser(option_class=OptChecker,
                           description=' '.join(description.split()),
                           version='%prog version $Revision: #1 $')
        parser.setHelp(help)

        addStandardLoggingAndHelpOptions(parser, True)

        # What kind of verification to run.
        addTo = OptionGroup(parser, "Request Type")
        parser.add_option_group(addTo)
        addTo.add_option('--full', dest='full', action='store_true',
                         help='Perform a full verification pass.  Use --token option to ' \
                         'give the verification pass an identifier.')
        addTo.add_option('--file', dest='verify_file', metavar='<file>',
                         help='Based on file type, perform either a physical or logical verification of <file>.  ' \
                         'Use --token option to give the verification request an identifier.')
        addTo.add_option('--directorytree', dest='verify_dir',
                         metavar='<verify_dir>',
                         help='Perform a full verification pass on the specified directory.  ' \
                         'Use --token option to assign the verification pass an identifier.' )

        # Options that scope or control an individual request.
        addTo = OptionGroup(parser, "Request Options")
        parser.add_option_group(addTo)
        addTo.add_option('--token', dest='token', metavar='<token>',
                         help='A token to use for the request.  ' \
                         'This identifier will be used in the logs and can be used to identify ' \
                         'a verification pass to the --abort, --suspend, --resume and --results ' \
                         'options.')
        addTo.add_option('-c', '--content', dest='content',
                         metavar='<content_id>',
                         help='Send verification request only to the primary segment with the given <content_id>.')
        addTo.add_option('--abort', dest='abort', action='store_true',
                         help='Abort a verification request that is in progress.  ' \
                         'Can use --token option to abort a specific verification request.')
        # BUG FIX: added the missing space between the two concatenated literals
        # (previously rendered as "in progress.Can use").
        addTo.add_option('--suspend', dest='suspend', action='store_true',
                         help='Suspend a verification request that is in progress.  ' \
                         'Can use --token option to suspend a specific verification request.')
        addTo.add_option('--resume', dest='resume', action='store_true',
                         help='Resume a suspended verification request.  Can use the ' \
                         '--token option to resume a specific verification request.')
        addTo.add_option('--fileignore', dest='ignore_file', metavar='<ignore_file>',
                         help='Ignore any filenames matching <ignore_file>.  Multiple ' \
                         'files can be specified using a comma separated list.')
        addTo.add_option('--dirignore', dest='ignore_dir', metavar='<ignore_dir>',
                         help='Ignore any directories matching <ignore_dir>.  Multiple ' \
                         'directories can be specified using a comma separated list.')

        addTo = OptionGroup(parser, "Reporting Options")
        parser.add_option_group(addTo)
        # BUG FIX: added the missing space between "use" and "the" (previously
        # rendered as "Can usethe --token option").
        addTo.add_option('--results', dest='results', action='store_true',
                         help='Display verification results.  Can use ' \
                         'the --token option to view results of a specific verification request.')
        addTo.add_option('--resultslevel', dest='results_level', action='store',
                         metavar='<detail_level>', type=int,
                         help='Level of detail to show for results. Valid levels are from 1 to 10.')
        addTo.add_option('--clean', dest='clean', action='store_true',
                         help='Clean up verification artifacts and the gp_verification_history table.')

        addTo = OptionGroup(parser, "Misc. Options")
        parser.add_option_group(addTo)
        addTo.add_option('-B', '--parallel', action='store', default=64, type=int,
                         help='Number of worker threads used to send verification requests.')

        parser.set_defaults()
        return parser
コード例 #44
0
ファイル: clsInjectFault.py プロジェクト: 50wu/gpdb
    def createParser():
        """Build the optparse parser for this internal fault-injection utility."""
        description = ("""
        This utility is NOT SUPPORTED and is for internal-use only.

        Used to inject faults into the file replication code.
        """)

        help = ["""

        Return codes:
          0 - Fault injected
          non-zero: Error or invalid options
        """]

        parser = OptParser(option_class=OptChecker,
                    description='  '.join(description.split()),
                    version='%prog version $Revision$')
        parser.setHelp(help)

        addStandardLoggingAndHelpOptions(parser, False)

        # these options are used to determine the target segments
        addTo = OptionGroup(parser, 'Target Segment Options: ')
        parser.add_option_group(addTo)
        addTo.add_option('-r', '--role', dest="targetRole", type='string', metavar="<role>",
                         help="Role of segments to target: primary, mirror, or primary_mirror")
        addTo.add_option("-s", "--seg_dbid", dest="targetDbId", type="string", metavar="<dbid>",
                         help="The segment  dbid on which fault should be set and triggered.")
        addTo.add_option("-H", "--host", dest="targetHost", type="string", metavar="<host>",
                         help="The hostname on which fault should be set and triggered; pass ALL to target all hosts")

        addTo = OptionGroup(parser, 'Master Connection Options')
        parser.add_option_group(addTo)

        addMasterDirectoryOptionForSingleClusterProgram(addTo)
        addTo.add_option("-p", "--master_port", dest="masterPort",  type="int", default=None,
                         metavar="<masterPort>",
                         help="DEPRECATED, use MASTER_DATA_DIRECTORY environment variable or -d option.  " \
                         "The port number of the master database on localhost, " \
                         "used to fetch the segment configuration.")

        addTo = OptionGroup(parser, 'Client Polling Options: ')
        parser.add_option_group(addTo)
        addTo.add_option('-m', '--mode', dest="syncMode", type='string', default="async",
                         metavar="<syncMode>",
                         help="Synchronization mode : sync (client waits for fault to occur)" \
                         " or async (client only sets fault request on server)")

        # these options are used to build the message for the segments
        addTo = OptionGroup(parser, 'Fault Options: ')
        parser.add_option_group(addTo)
        # NB: This list needs to be kept in sync with:
        # - FaultInjectorTypeEnumToString
        # - FaultInjectorType_e
        addTo.add_option('-y','--type', dest="type", type='string', metavar="<type>",
                         help="fault type: sleep (insert sleep), fault (report fault to postmaster and fts prober), " \
			      "fatal (inject FATAL error), panic (inject PANIC error), error (inject ERROR), " \
			      "infinite_loop, data_curruption (corrupt data in memory and persistent media), " \
			      "suspend (suspend execution), resume (resume execution that was suspended), " \
			      "skip (inject skip i.e. skip checkpoint), " \
			      "memory_full (all memory is consumed when injected), " \
			      "reset (remove fault injection), status (report fault injection status), " \
			      "segv (inject a SEGV), " \
			      "interrupt (inject an Interrupt), " \
			      "finish_pending (set QueryFinishPending to true), " \
			      "checkpoint_and_panic (inject a panic following checkpoint) ")
        addTo.add_option("-z", "--sleep_time_s", dest="sleepTimeSeconds", type="int", default="10" ,
                            metavar="<sleepTime>",
                            help="For 'sleep' faults, the amount of time for the sleep.  Defaults to %default." \
				 "Min Max Range is [0, 7200 sec] ")
        addTo.add_option('-f','--fault_name', dest="faultName", type='string', metavar="<name>",
                         help="fault name: " \
			      "postmaster (inject fault when new connection is accepted in postmaster), " \
			      "pg_control (inject fault when global/pg_control file is written), " \
			      "pg_xlog (inject fault when files in pg_xlog directory are written), " \
			      "start_prepare (inject fault during start prepare transaction), " \
			      "filerep_consumer (inject fault before data are processed, i.e. if mirror " \
			      "then before file operation is issued to file system, if primary " \
			      "then before mirror file operation is acknowledged to backend processes), " \
			      "filerep_consumer_verificaton (inject fault before ack verification data are processed on primary), " \
			      "filerep_change_tracking_compacting (inject fault when compacting change tracking log files), " \
			      "filerep_sender (inject fault before data are sent to network), " \
			      "filerep_receiver (inject fault after data are received from network), " \
			      "filerep_flush (inject fault before fsync is issued to file system), " \
			      "filerep_resync (inject fault while in resync when first relation is ready to be resynchronized), " \
			      "filerep_resync_in_progress (inject fault while resync is in progress), " \
			      "filerep_resync_worker (inject fault after write to mirror), " \
			      "filerep_resync_worker_read (inject fault before read required for resync), " \
			      "filerep_transition_to_resync (inject fault during transition to InResync before mirror re-create), " \
			      "filerep_transition_to_resync_mark_recreate (inject fault during transition to InResync before marking re-created), " \
			      "filerep_transition_to_resync_mark_completed (inject fault during transition to InResync before marking completed), " \
			      "filerep_transition_to_sync_begin (inject fault before transition to InSync begin), " \
			      "filerep_transition_to_sync (inject fault during transition to InSync), " \
			      "filerep_transition_to_sync_before_checkpoint (inject fault during transition to InSync before checkpoint is created), " \
			      "filerep_transition_to_sync_mark_completed (inject fault during transition to InSync before marking completed), " \
			      "filerep_transition_to_change_tracking (inject fault during transition to InChangeTracking), " \
                              "fileRep_is_operation_completed (inject fault in FileRep Is Operation completed function just for ResyncWorker Threads), "\
                              "filerep_immediate_shutdown_request (inject fault just before sending the shutdown SIGQUIT to child processes), "\
			      "checkpoint (inject fault before checkpoint is taken), " \
			      "change_tracking_compacting_report (report if compacting is in progress), " \
			      "change_tracking_disable (inject fault before fsync to Change Tracking log files), " \
			      "transaction_abort_after_distributed_prepared (abort prepared transaction), " \
			      "transaction_commit_pass1_from_create_pending_to_created, " \
			      "transaction_commit_pass1_from_drop_in_memory_to_drop_pending, " \
			      "transaction_commit_pass1_from_aborting_create_needed_to_aborting_create, " \
			      "transaction_abort_pass1_from_create_pending_to_aborting_create, " \
			      "transaction_abort_pass1_from_aborting_create_needed_to_aborting_create, " \
			      "transaction_commit_pass2_from_drop_in_memory_to_drop_pending, " \
			      "transaction_commit_pass2_from_aborting_create_needed_to_aborting_create, " \
			      "transaction_abort_pass2_from_create_pending_to_aborting_create, " \
			      "transaction_abort_pass2_from_aborting_create_needed_to_aborting_create, " \
			      "finish_prepared_transaction_commit_pass1_from_create_pending_to_created, " \
			      "finish_prepared_transaction_commit_pass2_from_create_pending_to_created, " \
			      "finish_prepared_transaction_abort_pass1_from_create_pending_to_aborting_create, " \
			      "finish_prepared_transaction_abort_pass2_from_create_pending_to_aborting_create, " \
			      "finish_prepared_transaction_commit_pass1_from_drop_in_memory_to_drop_pending, " \
			      "finish_prepared_transaction_commit_pass2_from_drop_in_memory_to_drop_pending, " \
			      "finish_prepared_transaction_commit_pass1_aborting_create_needed, " \
			      "finish_prepared_transaction_commit_pass2_aborting_create_needed, " \
			      "finish_prepared_transaction_abort_pass1_aborting_create_needed, " \
			      "finish_prepared_transaction_abort_pass2_aborting_create_needed, " \
			      "twophase_transaction_commit_prepared (inject fault before transaction commit is inserted in xlog), " \
			      "twophase_transaction_abort_prepared (inject fault before transaction abort is inserted in xlog), " \
			      "dtm_broadcast_prepare (inject fault after prepare broadcast), " \
			      "dtm_broadcast_commit_prepared (inject fault after commit broadcast), " \
			      "dtm_broadcast_abort_prepared (inject fault after abort broadcast), " \
			      "dtm_xlog_distributed_commit (inject fault after distributed commit was inserted in xlog), " \
			      "fault_before_pending_delete_relation_entry (inject fault before putting pending delete relation entry, " \
			      "fault_before_pending_delete_database_entry (inject fault before putting pending delete database entry, " \
			      "fault_before_pending_delete_tablespace_entry (inject fault before putting pending delete tablespace entry, " \
			      "fault_before_pending_delete_filespace_entry (inject fault before putting pending delete filespace entry, " \
			      "dtm_init (inject fault before initializing dtm), " \
                              "end_prepare_two_phase_sleep (inject sleep after two phase file creation), " \
                              "segment_transition_request (inject fault after segment receives state transition request), " \
                              "segment_probe_response (inject fault after segment is probed by FTS), " \
			      "local_tm_record_transaction_commit (inject fault for local transactions after transaction commit is recorded and flushed in xlog ), " \
			      "malloc_failure (inject fault to simulate memory allocation failure), " \
			      "transaction_abort_failure (inject fault to simulate transaction abort failure), " \
			      "workfile_creation_failure (inject fault to simulate workfile creation failure), " \
			      "workfile_write_failure (inject fault to simulate workfile write failure), " \
                  "workfile_hashjoin_failure (inject fault before we close workfile in ExecHashJoinNewBatch), "\
			      "update_committed_eof_in_persistent_table (inject fault before committed EOF is updated in gp_persistent_relation_node for Append Only segment files), " \
			      "exec_simple_query_end_command (inject fault before EndCommand in exec_simple_query), " \
			      "multi_exec_hash_large_vmem (allocate large vmem using palloc inside MultiExecHash to attempt to exceed vmem limit), " \
			      "execsort_before_sorting (inject fault in nodeSort after receiving all tuples and before sorting), " \
			      "execsort_mksort_mergeruns (inject fault in MKSort during the mergeruns phase), " \
			      "execshare_input_next (inject fault after shared input scan retrieved a tuple), " \
			      "base_backup_post_create_checkpoint (inject fault after requesting checkpoint as part of basebackup), " \
			      "compaction_before_segmentfile_drop (inject fault after compaction, but before the drop of the segment file), "  \
			      "compaction_before_cleanup_phase (inject fault after compaction and drop, but before the cleanup phase), " \
			      "appendonly_insert (inject fault before an append-only insert), " \
			      "appendonly_delete (inject fault before an append-only delete), " \
			      "appendonly_update (inject fault before an append-only update), " \
			      "reindex_db (inject fault while reindex db is in progress), "\
			      "reindex_relation (inject fault while reindex relation is in progress), "\
			      "fault_during_exec_dynamic_table_scan (inject fault during scanning of a partition), " \
			      "fault_in_background_writer_main (inject fault in BackgroundWriterMain), " \
			      "cdb_copy_start_after_dispatch (inject fault in cdbCopyStart after dispatch), " \
			      "repair_frag_end (inject fault at the end of repair_frag), " \
			      "vacuum_full_before_truncate (inject fault before truncate in vacuum full), " \
			      "vacuum_full_after_truncate (inject fault after truncate in vacuum full), " \
			      "vacuum_relation_end_of_first_round (inject fault at the end of first round of vacuumRelation loop), " \
			      "rebuild_pt_db (inject fault while rebuilding persistent tables (for each db)), " \
			      "procarray_add (inject fault while adding PGPROC to procarray), " \
			      "exec_hashjoin_new_batch (inject fault before switching to a new batch in Hash Join), " \
			      "fts_wait_for_shutdown (pause FTS before committing changes), " \
			      "runaway_cleanup (inject fault before starting the cleanup for a runaway query), " \
                  "opt_task_allocate_string_buffer (inject fault while allocating string buffer), " \
                  "opt_relcache_translator_catalog_access (inject fault while translating relcache entries), " \
			      "interconnect_stop_ack_is_lost (inject fault in interconnect to skip sending the stop ack), " \
                  "send_qe_details_init_backend (inject fault before sending QE details during backend initialization), " \
                  "process_startup_packet (inject fault when processing startup packet during backend initialization), " \
                  "quickdie (inject fault when auxiliary processes quitting), " \
                  "after_one_slice_dispatched (inject fault after one slice was dispatched when dispatching plan), " \
                  "cursor_qe_reader_after_snapshot (inject fault after QE READER has populated snashot for cursor)" \
			      "fsync_counter (inject fault to count buffers fsync'ed by checkpoint process), " \
			      "bg_buffer_sync_default_logic (inject fault to 'skip' in order to flush all buffers in BgBufferSync()), " \
                  "finish_prepared_after_record_commit_prepared (inject fault in FinishPreparedTransaction() after recording the commit prepared record), " \
			      "all (affects all faults injected, used for 'status' and 'reset'), ") 
        # The options below scope when the fault actually fires: a DDL statement,
        # a database, a table, and an occurrence count.
        addTo.add_option("-c", "--ddl_statement", dest="ddlStatement", type="string",
                         metavar="ddlStatement",
                         help="The DDL statement on which fault should be set and triggered " \
                         "(i.e. create_database, drop_database, create_table, drop_table)")
        addTo.add_option("-D", "--database_name", dest="databaseName", type="string",
                         metavar="databaseName",
                         help="The database name on which fault should be set and triggered.")
        addTo.add_option("-t", "--table_name", dest="tableName", type="string",
                         metavar="tableName",
                         help="The table name on which fault should be set and triggered.")
        addTo.add_option("-o", "--occurrence", dest="numOccurrences", type="int", default=1,
                         metavar="numOccurrences",
                         help="The number of occurrence of the DDL statement with the database name " \
                         "and the table name before fault is triggered.  Defaults to %default. Max is 1000. " \
			 "Fault is triggered always if set to '0'. ")
        parser.set_defaults()
        return parser
コード例 #45
0
ファイル: gpsegtoprimaryormirror.py プロジェクト: 50wu/gpdb
    def createParser():
        """
        Constructs and returns an option parser.

        Called by simple_main()
        """
        parser = OptParser(option_class=OptChecker,
                           description=' '.join(description.split()),
                           version='%prog version $Revision: #1 $')
        parser.setHelp([])
        addStandardLoggingAndHelpOptions(parser, includeNonInteractiveOption=False)

        # Locale settings for the segment.
        parser.add_option("-C", "--collation", type="string",
                          help="values for lc_collate, lc_monetary, lc_numeric separated by :")
        # Data directories to transition (may be given multiple times).
        parser.add_option("-D", "--datadir", dest="dblist", action="append", type="string")
        parser.add_option("-p", "--pickledTransitionData",
                          dest="pickledTransitionData", type="string")
        parser.add_option("-M", "--mirroringMode", dest="mirroringMode", type="string")
        parser.add_option("-V", "--gp-version", dest="gpversion", metavar="GP_VERSION",
                          help="expected software version")

        parser.set_defaults(verbose=False, filters=[], slice=(None, None))
        return parser
コード例 #46
0
def create_parser():
    """
    Build the option parser for the persistent-tables backup/restore tool.

    Returns an OptParser with the connection option group and the
    backup/restore option group attached.
    """
    parser = OptParser(option_class=OptChecker,
                       version='%prog version $Revision: #1 $',
                       description='Persistent tables backup and restore')  # fixed typo: was "backp"

    addStandardLoggingAndHelpOptions(parser, includeNonInteractiveOption=True)

    addTo = OptionGroup(parser, 'Connection opts')
    parser.add_option_group(addTo)
    addMasterDirectoryOptionForSingleClusterProgram(addTo)

    addTo = OptionGroup(parser, 'Persistent tables backup and restore options')
    # Bug fix: this group was created but never attached to the parser, so
    # optparse would not recognize any of the options below at parse time.
    parser.add_option_group(addTo)
    addTo.add_option(
        '--backup',
        metavar="<pickled dbid info>",
        type="string",
        help="A list of dbid info where backups need to be done in pickled format")
    addTo.add_option(
        '--restore',
        metavar="<pickled dbid info>",
        type="string",
        help="A list of dbid info where restore needs to be done in pickled format")
    addTo.add_option(
        '--validate-backups',
        metavar="<pickled dbid info>",
        type="string",
        help="A list of dbid info where validation needs to be done in pickled format")
    # NOTE(review): help text below duplicates --validate-backups; presumably it
    # should describe the backup *directory* validation — confirm with callers.
    addTo.add_option(
        '--validate-backup-dir',
        metavar="<pickled dbid info>",
        type="string",
        help="A list of dbid info where validation needs to be done in pickled format")
    addTo.add_option(
        '--timestamp',
        metavar="<timestamp of backup>",
        type="string",
        help="A timestamp for the backup that needs to be validated")
    addTo.add_option('--batch-size',
                     metavar="<batch size for the worker pool>",
                     type="int",
                     help="Batch size for parallelism in worker pool")
    addTo.add_option(
        '--backup-dir',
        metavar="<backup directory>",
        type="string",
        help="Backup directory for persistent tables and transaction logs")
    addTo.add_option('--perdbpt',
                     metavar="<per database pt filename>",
                     type="string",
                     help="Filenames for per database persistent files")
    addTo.add_option('--globalpt',
                     metavar="<global pt filenames>",
                     type="string",
                     help="Filenames for global persistent files")
    addTo.add_option(
        '--validate-source-file-only',
        action='store_true',
        default=False,
        help="validate that required source files existed for backup and restore")

    parser.setHelp(
        ["""
    This tool is used to backup persistent table files.
    """])

    return parser
コード例 #47
0
ファイル: gpsegstop.py プロジェクト: AnLingm/gpdb
    def createParser():
        """Assemble the option parser for the segment-stop utility."""
        opt_parser = OptParser(option_class=OptChecker,
                               description=' '.join(description.split()),
                               version='%prog version $Revision: #12 $')
        opt_parser.setHelp([])

        addStandardLoggingAndHelpOptions(opt_parser, includeNonInteractiveOption=False)

        # Repeatable -D accumulates target data directories into a list.
        opt_parser.add_option("-D", "--db", dest="dblist", action="append", type="string")
        opt_parser.add_option("-V", "--gp-version", dest="gpversion", metavar="GP_VERSION",
                              help="expected software version")
        opt_parser.add_option("-m", "--mode", dest="mode", metavar="<MODE>",
                              help="how to shutdown. modes are smart,fast, or immediate")
        opt_parser.add_option("-t", "--timeout", dest="timeout", type="int",
                              default=SEGMENT_STOP_TIMEOUT_DEFAULT,
                              help="seconds to wait")
        return opt_parser
コード例 #48
0
            parts = t.split('.')
            if len(parts) != 2:
                raise Exception("Bad table in filter list")
            schema = parts[0].strip()
            table = parts[1].strip()
            dump_tables.add((schema, table))
            dump_schemas.add(schema)

    return (dump_schemas, dump_tables)


def extract_schema(line):
    """Return the schema name from a search_path line, or None if no comma follows it."""
    remainder = line[len_search_path_expr:]
    schema, sep, _ = remainder.partition(",")
    if not sep:
        # No comma found after the search_path expression.
        return None
    return schema.strip('"')


if __name__ == "__main__":
    # Build a minimal option parser; the stock -h is removed so that -h,
    # -? and --help can all map to the same boolean flag.
    parser = OptParser(option_class=OptChecker)
    parser.remove_option('-h')
    parser.add_option('-h', '-?', '--help', action='store_true')
    parser.add_option('-t', '--tablefile', type='string', default=None)
    opts, _args = parser.parse_args()
    if not opts.tablefile:
        raise Exception('-t table file name has to be specified')
    schemas, tables = get_table_schema_set(opts.tablefile)
    process_schema(schemas, tables, sys.stdin, sys.stdout)
コード例 #49
0
    """
    Only strip the '\n' as it is one of the non-supported chars to be part
    of the schema or table name 
    """
    if not os.path.exists(change_schema_file):
        raise Exception('change schema file path %s does not exist' %
                        change_schema_file)
    change_schema_name = None
    with open(change_schema_file) as fr:
        line = fr.read()
        change_schema_name = line.strip('\n')
    return change_schema_name


if __name__ == "__main__":
    # Command-line entry point.  The stock -h is removed so that -h, -? and
    # --help can all be mapped to the same boolean flag.
    parser = OptParser(option_class=OptChecker)
    parser.remove_option('-h')
    parser.add_option('-h', '-?', '--help', action='store_true')
    # File listing the tables to operate on (one per line).
    parser.add_option('-t', '--tablefile', type='string', default=None)
    parser.add_option('-m', '--master_only', action='store_true')
    # File naming the schema to rewrite tables into (see change-schema logic).
    parser.add_option('-c',
                      '--change-schema-file',
                      type='string',
                      default=None)
    parser.add_option('-s', '--schema-level-file', type='string', default=None)
    (options, args) = parser.parse_args()
    # At least one of the two input-list options is mandatory.
    if not (options.tablefile or options.schema_level_file):
        raise Exception(
            '-t table file name or -s schema level file name must be specified'
        )
    # NOTE(review): this snippet is truncated here by the excerpt — the body
    # of the elif below is missing from this view.
    elif options.schema_level_file and options.change_schema_file:
コード例 #50
0
ファイル: gpsegstart.py プロジェクト: adam8157/gpdb
    def createParser():
        """
        Create the option parser expected by simple_main().
        """
        p = OptParser(option_class=OptChecker,
                      description=' '.join(DESCRIPTION.split()),
                      version='%prog version main build dev')
        p.setHelp(HELP)

        # Note that this mirroringmode parameter should only be either
        # mirrorless or quiescent.  If quiescent then it is implied that there
        # is pickled transition data that will be provided (using -p) to
        # immediately convert to a primary or a mirror.
        addStandardLoggingAndHelpOptions(p, includeNonInteractiveOption=False)

        p.add_option("-D", "--dblist", dest="dblist", action="append", type="string")
        p.add_option("-M", "--mirroringmode", dest="mirroringMode", type="string")
        p.add_option("-p", "--pickledTransitionData", dest="pickledTransitionData", type="string")
        p.add_option("-V", "--gp-version", dest="gpversion", metavar="GP_VERSION",
                     help="expected software version")
        p.add_option("-n", "--numsegments", dest="num_cids",
                     help="number of distinct content ids in cluster")
        p.add_option("", "--era", dest="era", help="master era")
        p.add_option("-t", "--timeout", dest="timeout", type="int",
                     default=gp.SEGMENT_TIMEOUT_DEFAULT,
                     help="seconds to wait")
        p.add_option('-U', '--specialMode', type='choice',
                     choices=['upgrade', 'maintenance'],
                     metavar='upgrade|maintenance', action='store', default=None,
                     help='start the instance in upgrade or maintenance mode')
        p.add_option('', '--wrapper', dest="wrapper", default=None, type='string')
        p.add_option('', '--wrapper-args', dest="wrapper_args", default=None, type='string')
        p.add_option('', '--master-checksum-version', dest="master_checksum_version",
                     default=None, type='string', action="store")
        p.add_option('-B', '--parallel', type="int", dest="parallel",
                     default=gp.DEFAULT_GPSTART_NUM_WORKERS,
                     help='maximum size of a threadpool to start segments')

        return p
コード例 #51
0
ファイル: clsRecoverSegment.py プロジェクト: petersky/gpdb
    def createParser():
        """
        Build the option parser for the segment-recovery utility.

        Option groups: connection, recovery source, recovery destination and
        general recovery behavior.  Returns the configured parser.
        """
        description = ("Recover a failed segment")
        # Renamed from 'help' so the builtin is not shadowed.
        help_text = [""]

        parser = OptParser(option_class=OptChecker,
                           description=' '.join(description.split()),
                           version='%prog version $Revision$')
        parser.setHelp(help_text)

        loggingGroup = addStandardLoggingAndHelpOptions(parser, True)
        loggingGroup.add_option("-s", None, default=None, action='store_false',
                                dest='showProgressInplace',
                                help='Show pg_basebackup/pg_rewind progress sequentially instead of inplace')
        loggingGroup.add_option("--no-progress",
                                dest="showProgress", default=True, action="store_false",
                                help="Suppress pg_basebackup/pg_rewind progress output")

        addTo = OptionGroup(parser, "Connection Options")
        parser.add_option_group(addTo)
        addCoordinatorDirectoryOptionForSingleClusterProgram(addTo)

        addTo = OptionGroup(parser, "Recovery Source Options")
        parser.add_option_group(addTo)
        addTo.add_option("-i", None, type="string",
                         dest="recoveryConfigFile",
                         metavar="<configFile>",
                         help="Recovery configuration file")
        addTo.add_option("-o", None,
                         dest="outputSampleConfigFile",
                         metavar="<configFile>", type="string",
                         help="Sample configuration file name to output; "
                              "this file can be passed to a subsequent call using -i option")

        addTo = OptionGroup(parser, "Recovery Destination Options")
        parser.add_option_group(addTo)
        addTo.add_option("-p", None, type="string",
                         dest="newRecoverHosts",
                         metavar="<targetHosts>",
                         help="Spare new hosts to which to recover segments")

        addTo = OptionGroup(parser, "Recovery Options")
        parser.add_option_group(addTo)
        addTo.add_option('-F', None, default=False, action='store_true',
                         dest="forceFullResynchronization",
                         metavar="<forceFullResynchronization>",
                         help="Force full segment resynchronization")
        addTo.add_option("-B", None, type="int", default=gp.DEFAULT_COORDINATOR_NUM_WORKERS,
                         dest="parallelDegree",
                         metavar="<parallelDegree>",
                         help="Max number of hosts to operate on in parallel. Valid values are: 1-%d"
                              % gp.MAX_COORDINATOR_NUM_WORKERS)
        addTo.add_option("-b", None, type="int", default=gp.DEFAULT_SEGHOST_NUM_WORKERS,
                         dest="parallelPerHost",
                         metavar="<parallelPerHost>",
                         help="Max number of segments per host to operate on in parallel. Valid values are: 1-%d"
                              % gp.MAX_SEGHOST_NUM_WORKERS)

        addTo.add_option("-r", None, default=False, action='store_true',
                         dest='rebalanceSegments', help='Rebalance synchronized segments.')
        addTo.add_option('', '--hba-hostnames', action='store_true', dest='hba_hostnames',
                         help='use hostnames instead of CIDR in pg_hba.conf')

        parser.set_defaults()
        return parser
コード例 #52
0
def parseargs():
    parser = OptParser(option_class=OptChecker)
    parser.remove_option('-h')
    parser.add_option('-h', '-?', '--help',          action='store_true')
    parser.add_option('-d', '--dbname',              type='string')
    parser.add_option('-u', '--user',                type='string')
    parser.add_option('-p', '--password',            type='string')
    parser.add_option('-n', '--nthreads',            type='int')
    parser.add_option('-s', '--stat_mem',            type='string')
    parser.add_option('-f', '--tablefile',           type='string')
    parser.add_option('-t', '--distkeyfile',         type='string')
    parser.add_option('-m', '--metadatatablesuffix', type='string')
    (options, args) = parser.parse_args()
    if options.help:
        print """Script performs analysis of table row number and number of
unique values of table distribution key
Usage:
./data_consistency_check.py -d dbname [-n thread_number] [-u user_name] [-p password]
                                      [-s statement_mem] [-f tablefile] [-t distkeyfile]
                                      [-m metadatatablesuffix]
Parameters:
    -d | --dbname    - name of the database to process
    -n | --nthreads  - number of parallel threads to run
    -u | --user      - user to connect to the database
    -p | --password  - password to connect to the database
    -s | --statement_mem    - the value of statement_mem to use
    -f | --tablefile        - file with the list of tables to process
    -t | --distkeyfile      - file with the tables which should be analyzed with
                              counting distinct values of distribution key
    -m | --metadatatablesuffix
                            - suffix for the table to store script metadata in
Metadata objects created are:
    public.__zz_pivotal_{suffix}   - view with the final information on row counts
    public.__zz_pivotal_{suffix}_l - list of tables to process
    public.__zz_pivotal_{suffix}_p - current progress of table row count calculation
After the run has finished for the second time, join two metadata tables by
the "tablename" field like this:
select  m1.tablename as table_first,
        m2.tablename as table_second,
        m1.rowcount  as rows_before,
        m2.rowcount  as rows_after,
        m1.distkeycount as dist_keys_before,
        m2.distkeycount as dist_keys_after
    from {metadatatable1} as m1
        full outer join {metadatatable2} as m2
        on m1.tablename = m2.tablename
    where m1.tablename is null
        or m2.tablename is null
        or m1.rowcount is distinct from m2.rowcount
        or m1.distkeycount is distinct from m2.distkeycount
"""
        sys.exit(0)
    if not options.dbname:
        logger.error('Failed to start utility. Please, specify database name with "-d" key')
        sys.exit(1)
    if not options.nthreads:
        logger.info('Number of threads is not specified. Using 1 by default')
        options.nthreads = 1
    if not options.stat_mem:
        logger.info('Statement memory is not specified. Using 125MB by default')
        options.stat_mem = '125MB'
    if not options.metadatatablesuffix:
        logger.info('Metadata table suffix is not specified. Using "table_list" by default')
        options.metadatatablesuffix = 'table_list'
    else:
        if not re.match('^[0-9a-z_]*$', options.metadatatablesuffix):
            logger.error ('Metadata suffix must contain only lowercase letters, numbers 0-9 and underscore sign')
            sys.exit(1)
    if not options.tablefile:
        logger.info('No tablefile specified. Will process all the tables in database by default')
    if not options.distkeyfile:
        logger.info('No distribution key table file specified. Will omit distribution key analysis')
    return options
コード例 #53
0
    def createParser():
        """
        Build the option parser for the add-mirrors utility.

        Option groups: connection and mirroring.  Returns the configured
        parser.
        """
        description = ("Add mirrors to a system")
        # Renamed from 'help' so the builtin is not shadowed.
        help_text = [""]

        parser = OptParser(option_class=OptChecker,
                           description=' '.join(description.split()),
                           version='%prog version $Revision$')
        parser.setHelp(help_text)

        addStandardLoggingAndHelpOptions(parser, True)

        addTo = OptionGroup(parser, "Connection Options")
        parser.add_option_group(addTo)
        addCoordinatorDirectoryOptionForSingleClusterProgram(addTo)

        addTo = OptionGroup(parser, "Mirroring Options")
        parser.add_option_group(addTo)
        addTo.add_option("-i",
                         None,
                         type="string",
                         dest="mirrorConfigFile",
                         metavar="<configFile>",
                         help="Mirroring configuration file")

        addTo.add_option("-o",
                         None,
                         dest="outputSampleConfigFile",
                         metavar="<configFile>",
                         type="string",
                         help="Sample configuration file name to output; "
                         "this file can be passed to a subsequent call using -i option")

        addTo.add_option("-m",
                         None,
                         type="string",
                         dest="mirrorDataDirConfigFile",
                         metavar="<dataDirConfigFile>",
                         help="Mirroring data directory configuration file")

        addTo.add_option('-s',
                         default=False,
                         action='store_true',
                         dest="spreadMirroring",
                         help="use spread mirroring for placing mirrors on hosts")

        addTo.add_option("-p",
                         None,
                         type="int",
                         default=1000,
                         dest="mirrorOffset",
                         metavar="<mirrorOffset>",
                         help="Mirror port offset.  The mirror port offset will be used multiple times "
                         "to derive three sets of ports [default: %default]")

        addTo.add_option("-B",
                         "--batch-size",
                         type="int",
                         default=gp.DEFAULT_COORDINATOR_NUM_WORKERS,
                         dest="batch_size",
                         metavar="<batch_size>",
                         help='Max number of hosts to operate on in parallel. Valid values are 1-%d'
                         % gp.MAX_COORDINATOR_NUM_WORKERS)
        addTo.add_option("-b",
                         "--segment-batch-size",
                         type="int",
                         default=gp.DEFAULT_SEGHOST_NUM_WORKERS,
                         dest="segment_batch_size",
                         metavar="<segment_batch_size>",
                         help='Max number of segments per host to operate on in parallel. Valid values are: 1-%d'
                         % gp.MAX_SEGHOST_NUM_WORKERS)

        addTo.add_option('',
                         '--hba-hostnames',
                         action='store_true',
                         dest='hba_hostnames',
                         help='use hostnames instead of CIDR in pg_hba.conf')

        parser.set_defaults()
        return parser
コード例 #54
0
    def createParser():
        """
        Build the option parser for the fault-injection utility.

        Options fall into four groups: target segment selection, master
        connection, client polling mode, and the fault description itself.
        Returns the configured parser.
        """
        description = ("""
        This utility is NOT SUPPORTED and is for internal-use only.

        Used to inject faults into the file replication code.
        """)

        # Renamed from 'help' so the builtin is not shadowed.
        help_text = [
            """

        Return codes:
          0 - Fault injected
          non-zero: Error or invalid options
        """
        ]

        parser = OptParser(option_class=OptChecker,
                           description='  '.join(description.split()),
                           version='%prog version $Revision$')
        parser.setHelp(help_text)

        addStandardLoggingAndHelpOptions(parser, False)

        # These options are used to determine the target segments.
        addTo = OptionGroup(parser, 'Target Segment Options: ')
        parser.add_option_group(addTo)
        addTo.add_option(
            '-r', '--role',
            dest="targetRole",
            type='string',
            metavar="<role>",
            help="Role of segments to target: primary, mirror, or primary_mirror")
        addTo.add_option(
            "-s", "--seg_dbid",
            dest="targetDbId",
            type="string",
            metavar="<dbid>",
            help="The segment  dbid on which fault should be set and triggered.")
        addTo.add_option(
            "-H", "--host",
            dest="targetHost",
            type="string",
            metavar="<host>",
            help="The hostname on which fault should be set and triggered; pass ALL to target all hosts")

        addTo = OptionGroup(parser, 'Master Connection Options')
        parser.add_option_group(addTo)

        addMasterDirectoryOptionForSingleClusterProgram(addTo)
        addTo.add_option("-p", "--master_port", dest="masterPort", type="int", default=None,
                         metavar="<masterPort>",
                         help="DEPRECATED, use MASTER_DATA_DIRECTORY environment variable or -d option.  " \
                         "The port number of the master database on localhost, " \
                         "used to fetch the segment configuration.")

        addTo = OptionGroup(parser, 'Client Polling Options: ')
        parser.add_option_group(addTo)
        addTo.add_option('-m', '--mode', dest="syncMode", type='string', default="async",
                         metavar="<syncMode>",
                         help="Synchronization mode : sync (client waits for fault to occur)" \
                         " or async (client only sets fault request on server)")

        # These options are used to build the message for the segments.
        addTo = OptionGroup(parser, 'Fault Options: ')
        parser.add_option_group(addTo)
        # NB: This list needs to be kept in sync with:
        # - FaultInjectorTypeEnumToString
        # - FaultInjectorType_e
        addTo.add_option('-y', '--type', dest="type", type='string', metavar="<type>",
                         help="fault type: sleep (insert sleep), fault (report fault to postmaster and fts prober), " \
                         "fatal (inject FATAL error), panic (inject PANIC error), error (inject ERROR), " \
                         "infinite_loop, data_curruption (corrupt data in memory and persistent media), " \
                         "suspend (suspend execution), resume (resume execution that was suspended), " \
                         "skip (inject skip i.e. skip checkpoint), " \
                         "memory_full (all memory is consumed when injected), " \
                         "reset (remove fault injection), status (report fault injection status), " \
                         "segv (inject a SEGV), " \
                         "interrupt (inject an Interrupt), " \
                         "finish_pending (set QueryFinishPending to true), " \
                         "checkpoint_and_panic (inject a panic following checkpoint) ")
        # Bug fix: default was the string "10"; optparse does not run defaults
        # through the 'int' type converter, so callers saw a str default.
        addTo.add_option("-z", "--sleep_time_s", dest="sleepTimeSeconds", type="int", default=10,
                         metavar="<sleepTime>",
                         help="For 'sleep' faults, the amount of time for the sleep.  Defaults to %default." \
                         "Min Max Range is [0, 7200 sec] ")
        addTo.add_option('-f', '--fault_name', dest="faultName", type='string', metavar="<name>",
                         help="fault name: " \
                         "postmaster (inject fault when new connection is accepted in postmaster), " \
                         "pg_control (inject fault when global/pg_control file is written), " \
                         "pg_xlog (inject fault when files in pg_xlog directory are written), " \
                         "start_prepare (inject fault during start prepare transaction), " \
                         "filerep_consumer (inject fault before data are processed, i.e. if mirror " \
                         "then before file operation is issued to file system, if primary " \
                         "then before mirror file operation is acknowledged to backend processes), " \
                         "filerep_consumer_verificaton (inject fault before ack verification data are processed on primary), " \
                         "filerep_change_tracking_compacting (inject fault when compacting change tracking log files), " \
                         "filerep_sender (inject fault before data are sent to network), " \
                         "filerep_receiver (inject fault after data are received from network), " \
                         "filerep_flush (inject fault before fsync is issued to file system), " \
                         "filerep_resync (inject fault while in resync when first relation is ready to be resynchronized), " \
                         "filerep_resync_in_progress (inject fault while resync is in progress), " \
                         "filerep_resync_worker (inject fault after write to mirror), " \
                         "filerep_resync_worker_read (inject fault before read required for resync), " \
                         "filerep_transition_to_resync (inject fault during transition to InResync before mirror re-create), " \
                         "filerep_transition_to_resync_mark_recreate (inject fault during transition to InResync before marking re-created), " \
                         "filerep_transition_to_resync_mark_completed (inject fault during transition to InResync before marking completed), " \
                         "filerep_transition_to_sync_begin (inject fault before transition to InSync begin), " \
                         "filerep_transition_to_sync (inject fault during transition to InSync), " \
                         "filerep_transition_to_sync_before_checkpoint (inject fault during transition to InSync before checkpoint is created), " \
                         "filerep_transition_to_sync_mark_completed (inject fault during transition to InSync before marking completed), " \
                         "filerep_transition_to_change_tracking (inject fault during transition to InChangeTracking), " \
                         "fileRep_is_operation_completed (inject fault in FileRep Is Operation completed function just for ResyncWorker Threads), "\
                         "filerep_immediate_shutdown_request (inject fault just before sending the shutdown SIGQUIT to child processes), "\
                         "checkpoint (inject fault before checkpoint is taken), " \
                         "change_tracking_compacting_report (report if compacting is in progress), " \
                         "change_tracking_disable (inject fault before fsync to Change Tracking log files), " \
                         "transaction_abort_after_distributed_prepared (abort prepared transaction), " \
                         "transaction_commit_pass1_from_create_pending_to_created, " \
                         "transaction_commit_pass1_from_drop_in_memory_to_drop_pending, " \
                         "transaction_commit_pass1_from_aborting_create_needed_to_aborting_create, " \
                         "transaction_abort_pass1_from_create_pending_to_aborting_create, " \
                         "transaction_abort_pass1_from_aborting_create_needed_to_aborting_create, " \
                         "transaction_commit_pass2_from_drop_in_memory_to_drop_pending, " \
                         "transaction_commit_pass2_from_aborting_create_needed_to_aborting_create, " \
                         "transaction_abort_pass2_from_create_pending_to_aborting_create, " \
                         "transaction_abort_pass2_from_aborting_create_needed_to_aborting_create, " \
                         "finish_prepared_transaction_commit_pass1_from_create_pending_to_created, " \
                         "finish_prepared_transaction_commit_pass2_from_create_pending_to_created, " \
                         "finish_prepared_transaction_abort_pass1_from_create_pending_to_aborting_create, " \
                         "finish_prepared_transaction_abort_pass2_from_create_pending_to_aborting_create, " \
                         "finish_prepared_transaction_commit_pass1_from_drop_in_memory_to_drop_pending, " \
                         "finish_prepared_transaction_commit_pass2_from_drop_in_memory_to_drop_pending, " \
                         "finish_prepared_transaction_commit_pass1_aborting_create_needed, " \
                         "finish_prepared_transaction_commit_pass2_aborting_create_needed, " \
                         "finish_prepared_transaction_abort_pass1_aborting_create_needed, " \
                         "finish_prepared_transaction_abort_pass2_aborting_create_needed, " \
                         "twophase_transaction_commit_prepared (inject fault before transaction commit is inserted in xlog), " \
                         "twophase_transaction_abort_prepared (inject fault before transaction abort is inserted in xlog), " \
                         "dtm_broadcast_prepare (inject fault after prepare broadcast), " \
                         "dtm_broadcast_commit_prepared (inject fault after commit broadcast), " \
                         "dtm_broadcast_abort_prepared (inject fault after abort broadcast), " \
                         "dtm_xlog_distributed_commit (inject fault after distributed commit was inserted in xlog), " \
                         "fault_before_pending_delete_relation_entry (inject fault before putting pending delete relation entry, " \
                         "fault_before_pending_delete_database_entry (inject fault before putting pending delete database entry, " \
                         "fault_before_pending_delete_tablespace_entry (inject fault before putting pending delete tablespace entry, " \
                         "fault_before_pending_delete_filespace_entry (inject fault before putting pending delete filespace entry, " \
                         "dtm_init (inject fault before initializing dtm), " \
                         "end_prepare_two_phase_sleep (inject sleep after two phase file creation), " \
                         "segment_transition_request (inject fault after segment receives state transition request), " \
                         "segment_probe_response (inject fault after segment is probed by FTS), " \
                         "local_tm_record_transaction_commit (inject fault for local transactions after transaction commit is recorded and flushed in xlog ), " \
                         "malloc_failure (inject fault to simulate memory allocation failure), " \
                         "transaction_abort_failure (inject fault to simulate transaction abort failure), " \
                         "workfile_creation_failure (inject fault to simulate workfile creation failure), " \
                         "update_committed_eof_in_persistent_table (inject fault before committed EOF is updated in gp_persistent_relation_node for Append Only segment files), " \
                         "exec_simple_query_end_command (inject fault before EndCommand in exec_simple_query), " \
                         "multi_exec_hash_large_vmem (allocate large vmem using palloc inside MultiExecHash to attempt to exceed vmem limit), " \
                         "execsort_before_sorting (inject fault in nodeSort after receiving all tuples and before sorting), " \
                         "execsort_mksort_mergeruns (inject fault in MKSort during the mergeruns phase), " \
                         "execshare_input_next (inject fault after shared input scan retrieved a tuple), " \
                         "base_backup_post_create_checkpoint (inject fault after requesting checkpoint as part of basebackup), " \
                         "compaction_before_segmentfile_drop (inject fault after compaction, but before the drop of the segment file), "  \
                         "compaction_before_cleanup_phase (inject fault after compaction and drop, but before the cleanup phase), " \
                         "appendonly_insert (inject fault before an append-only insert), " \
                         "appendonly_delete (inject fault before an append-only delete), " \
                         "appendonly_update (inject fault before an append-only update), " \
                         "reindex_db (inject fault while reindex db is in progress), "\
                         "reindex_relation (inject fault while reindex relation is in progress), "\
                         "fault_during_exec_dynamic_table_scan (inject fault during scanning of a partition), " \
                         "fault_in_background_writer_main (inject fault in BackgroundWriterMain), " \
                         "cdb_copy_start_after_dispatch (inject fault in cdbCopyStart after dispatch), " \
                         "repair_frag_end (inject fault at the end of repair_frag), " \
                         "vacuum_full_before_truncate (inject fault before truncate in vacuum full), " \
                         "vacuum_full_after_truncate (inject fault after truncate in vacuum full), " \
                         "vacuum_relation_end_of_first_round (inject fault at the end of first round of vacuumRelation loop), " \
                         "rebuild_pt_db (inject fault while rebuilding persistent tables (for each db)), " \
                         "procarray_add (inject fault while adding PGPROC to procarray), " \
                         "exec_hashjoin_new_batch (inject fault before switching to a new batch in Hash Join), " \
                         "fts_wait_for_shutdown (pause FTS before committing changes), " \
                         "runaway_cleanup (inject fault before starting the cleanup for a runaway query), " \
                         "opt_task_allocate_string_buffer (inject fault while allocating string buffer), " \
                         "opt_relcache_translator_catalog_access (inject fault while translating relcache entries), " \
                         "send_qe_details_init_backend (inject fault before sending QE details during backend initialization)" \
                         "all (affects all faults injected, used for 'status' and 'reset'), ")
        addTo.add_option("-c", "--ddl_statement", dest="ddlStatement", type="string",
                         metavar="ddlStatement",
                         help="The DDL statement on which fault should be set and triggered " \
                         "(i.e. create_database, drop_database, create_table, drop_table)")
        addTo.add_option("-D", "--database_name", dest="databaseName", type="string",
                         metavar="databaseName",
                         help="The database name on which fault should be set and triggered.")
        addTo.add_option("-t", "--table_name", dest="tableName", type="string",
                         metavar="tableName",
                         help="The table name on which fault should be set and triggered.")
        addTo.add_option("-o", "--occurrence", dest="numOccurrences", type="int", default=1,
                         metavar="numOccurrences",
                         help="The number of occurrence of the DDL statement with the database name " \
                         "and the table name before fault is triggered.  Defaults to %default. Max is 1000. " \
                         "Fault is triggered always if set to '0'. ")
        parser.set_defaults()
        return parser
コード例 #55
0
def parse_command_line():
    """
    Build the option parser for the Greenplum Performance Monitor web
    server control script, parse sys.argv, and enforce flag exclusivity.

    Side effects:
        Prints version info and exits(0) when --version is given.
        Prints help and exits when more than one action flag is supplied.

    Returns:
        (options, args) tuple as produced by optparse.
    """
    parser = OptParser(option_class=OptChecker,
                       description=' '.join(_description.split()))
    parser.setHelp(_help)
    parser.set_usage('%prog ' + _usage)
    parser.remove_option('-h')

    parser.add_option('--start', action='store_true',
                      help='Start the Greenplum Performance Monitor web server.')
    parser.add_option('--stop', action='store_true',
                      help='Stop the Greenplum Performance Monitor web server.')
    parser.add_option('--restart', action='store_true',
                      help='Restart the Greenplum Performance Monitor web server.')
    # BUGFIX: help text previously read "Gerrnplum".
    parser.add_option('--status', action='store_true',
                      help='Display the status of the Greenplum Performance Monitor web server.')
    parser.add_option('--setup', action='store_true',
                      help='Setup the Greenplum Performance Monitor web server.')
    parser.add_option('--version', action='store_true',
                      help='Display version information')
    parser.add_option('--upgrade', action='store_true',
                      help='Upgrade a previous installation of the Greenplum Performance Monitors web UI')

    parser.set_defaults(verbose=False, filters=[], slice=(None, None))

    # Parse the command line arguments
    (options, args) = parser.parse_args()

    if options.version:
        version()
        sys.exit(0)

    # The action flags are mutually exclusive.
    # BUGFIX: --restart was previously omitted from this count, so
    # combinations like "--restart --stop" were silently accepted.
    action_flags = (options.start, options.stop, options.restart,
                    options.setup, options.upgrade, options.status)
    if sum(1 for flag in action_flags if flag) > 1:
        parser.print_help()
        parser.exit()

    return options, args
コード例 #56
0
    def createParser():
        """
        Build and return the optparse parser for this utility.

        Called by simple_main().
        """
        parser = OptParser(option_class=OptChecker,
                           description=' '.join(description.split()),
                           version='%prog version $Revision: #1 $')
        parser.setHelp([])

        # Standard -l/-q/-v/-h handling; this tool is never interactive.
        addStandardLoggingAndHelpOptions(parser,
                                         includeNonInteractiveOption=False)

        parser.add_option("-C", "--collation", type="string",
                          help="values for lc_collate, lc_monetary, lc_numeric separated by :")
        parser.add_option("-D", "--datadir", dest="dblist",
                          action="append", type="string")
        parser.add_option("-p", "--pickledTransitionData",
                          dest="pickledTransitionData", type="string")
        parser.add_option("-M", "--mirroringMode",
                          dest="mirroringMode", type="string")
        parser.add_option("-V", "--gp-version", dest="gpversion",
                          metavar="GP_VERSION", help="expected software version")

        parser.set_defaults(verbose=False, filters=[], slice=(None, None))

        return parser
コード例 #57
0
def parseargs():
    """Build the option parser, parse the command line, and return the
    options after validation by validate_args()."""
    parser = OptParser(option_class=OptChecker)
    parser.setHelp(_help)

    # Replace the default help option so that -? also triggers help.
    parser.remove_option('-h')
    parser.add_option('-h', '-?', '--help', action='help',
                      help='show this help message and exit')

    # All remaining options are plain string-valued flags; declare them
    # in a table to keep flag and help text side by side.  Order matters
    # for the generated --help output, so it matches declaration order.
    string_options = (
        ('--file', 'Required: The absolute path of postgresql.conf'),
        ('--add-parameter', 'The configuration parameter to add. --value is required.'),
        ('--value', 'The configuration value to add when using --add-parameter.'),
        ('--get-parameter', 'The configuration parameter value to return.'),
        ('--remove-parameter', 'The configuration parameter value to disable.'),
    )
    for flag, help_text in string_options:
        parser.add_option(flag, type='string', help=help_text)

    options, _ = parser.parse_args()
    return validate_args(options)
コード例 #58
0
ファイル: recovery_base.py プロジェクト: lisakowen/gpdb
    def parseargs(self):
        """Create the option parser for this recovery helper and parse
        sys.argv.

        Returns the parsed options object; positional arguments are
        ignored.
        """
        parser = OptParser(option_class=OptChecker,
                           description=' '.join(self.description.split()),
                           version='%prog version $Revision: $')
        parser.set_usage('%prog is a utility script used by gprecoverseg, '
                         'and gpaddmirrors and is not intended to be run separately.')
        parser.remove_option('-h')

        parser.add_option('-v', '--verbose', action='store_true',
                          default=False, help='debug output.')
        parser.add_option('-c', '--confinfo', type='string')
        parser.add_option('-b', '--batch-size', type='int',
                          metavar='<batch_size>',
                          default=DEFAULT_SEGHOST_NUM_WORKERS)
        parser.add_option('-f', '--force-overwrite', dest='forceoverwrite',
                          action='store_true', default=False)
        parser.add_option('-l', '--log-dir', dest="logfileDirectory",
                          type="string")

        options, _unused_args = parser.parse_args()
        return options
コード例 #59
0
def parseargs():
    """Parse the command line for this utility.

    Rejects any positional arguments (logs an error and exits) and
    returns the (options, args) pair on success.
    """
    parser = OptParser(option_class=OptChecker,
                       description=' '.join(DESCRIPTION.split()),
                       version='%prog version $Revision: #12 $')
    parser.setHelp(_help)
    parser.set_usage('%prog ' + _usage)
    parser.remove_option('-h')

    parser.add_option('-f', '--file', default='',
                      help='the name of a file containing the re-sync file list.')
    parser.add_option('-v', '--verbose', action='store_true', default=False,
                      help='debug output.')
    # Custom help option so -? works alongside -h/--help.
    parser.add_option('-h', '-?', '--help', action='help', default=False,
                      help='show this help message and exit.')
    parser.add_option('--usage', action="briefhelp")
    # NOTE: the default is resolved once, when the parser is built.
    parser.add_option('-d', '--master_data_directory', type='string',
                      dest="masterDataDirectory",
                      metavar="<master data directory>",
                      default=get_masterdatadir(),
                      help="Optional. The master host data directory. If not specified, the value set for $MASTER_DATA_DIRECTORY will be used.")
    parser.add_option('-a', dest='confirm', action='store_false',
                      default=True, help='don\'t ask to confirm repairs')

    # Parse the command line arguments
    (options, args) = parser.parse_args()

    if args:
        logger.error('Unknown argument %s' % args[0])
        parser.exit()

    return options, args