Example #1
def parseargs():
    parser = OptParser(option_class=OptChecker)

    parser.setHelp(_help)

    parser.remove_option('-h')
    parser.add_option('-h',
                      '-?',
                      '--help',
                      action='help',
                      help='show this help message and exit')

    parser.add_option('--file',
                      type='string',
                      help='Required: The absolute path of postgresql.conf')
    parser.add_option(
        '--add-parameter',
        type='string',
        help='The configuration parameter to add. --value is required.')
    parser.add_option(
        '--value',
        type='string',
        help='The configuration value to add when using --add-parameter.')
    parser.add_option('--get-parameter',
                      type='string',
                      help='The configuration parameter value to return.')
    parser.add_option('--remove-parameter',
                      type='string',
                      help='The configuration parameter value to disable.')

    (options, args) = parser.parse_args()
    return validate_args(options)
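
OptParser and OptChecker are not standard-library classes; they appear to come from Greenplum's gppylib and wrap optparse.OptionParser. For readers without gpdb on hand, here is a minimal sketch of the same pattern using plain optparse; validate_args below is a hypothetical stand-in for the helper referenced above.

import sys
from optparse import OptionParser

def validate_args(options):
    # hypothetical stand-in: the real validate_args is not shown above
    if not options.file:
        sys.stderr.write('--file is required\n')
        sys.exit(1)
    return options

def parseargs():
    # suppress the built-in -h so it can be re-registered together with -? and --help
    parser = OptionParser(add_help_option=False)
    parser.add_option('-h', '-?', '--help', action='help',
                      help='show this help message and exit')
    parser.add_option('--file', type='string',
                      help='Required: The absolute path of postgresql.conf')
    (options, args) = parser.parse_args()
    return validate_args(options)

if __name__ == '__main__':
    parseargs()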
Example #2
def parseargs(args):
    global logger

    pguser = os.environ.get("PGUSER") or unix.getUserName()
    pghost = os.environ.get("PGHOST") or unix.getLocalHostname()
    pgport = os.environ.get("PGPORT") or 5432

    parser = OptParser(option_class=OptChecker)
    parser.remove_option('-h')
    parser.add_option('-?', '--help', '-h', action='store_true', default=False)
    parser.add_option('-n', '--host', default=pghost)
    parser.add_option('-p', '--port', default=pgport)
    parser.add_option('-u', '--username', default=pguser)
    parser.add_option('-w', '--password', default=False, action='store_true')
    parser.add_option('-v', '--verbose', default=False, action='store_true')
    parser.add_option('-q', '--quiet', default=True, action='store_true')

    (options, args) = parser.parse_args()

    if options.help:
        print __doc__
        sys.exit(1)
    try:
        options.port = int(options.port)
    except:
        logger.error("Invalid PORT: '%s'" % options.port)
        sys.exit(1)

    if options.verbose:
        gplog.enable_verbose_logging()
    elif options.quiet:
        gplog.quiet_stdout_logging()

    return options
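
The notable parts of this example are the environment-backed defaults (PGUSER/PGHOST/PGPORT) and the post-parse port validation, which is needed because an environment value arrives as a string. A standalone sketch of that pattern, with getpass and socket standing in for gppylib's unix helpers:

import os
import sys
import socket
import getpass
from optparse import OptionParser

parser = OptionParser()
parser.add_option('-n', '--host',
                  default=os.environ.get('PGHOST') or socket.gethostname())
parser.add_option('-p', '--port',
                  default=os.environ.get('PGPORT') or 5432)
parser.add_option('-u', '--username',
                  default=os.environ.get('PGUSER') or getpass.getuser())
(options, args) = parser.parse_args()

try:
    options.port = int(options.port)   # PGPORT comes in as a string
except (TypeError, ValueError):
    sys.stderr.write("Invalid PORT: '%s'\n" % options.port)
    sys.exit(1)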
Example #3
File: kill.py Project: zsmj513/gpdb
    def create_parser():
        """Create the command line parser object for gpkill"""

        help = []
        parser = OptParser(
            option_class=OptChecker,
            description='Check or Terminate a Greenplum Database process.',
            version='%prog version $Revision: #1 $')
        parser.setHelp(help)

        addStandardLoggingAndHelpOptions(parser, True)

        parser.remove_option('-l')
        parser.remove_option('-a')

        addTo = OptionGroup(parser, 'Check Options')
        parser.add_option_group(addTo)
        addTo.add_option(
            '--check',
            metavar='pid',
            help=
            'Only returns status 0 if pid may be killed without gpkill, status 1 otherwise.',
            action='store_true')

        return parser
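
OptionGroup is plain optparse; grouping only affects --help output, where 'Check Options' appears as its own titled section. A minimal sketch of the pattern:

from optparse import OptionParser, OptionGroup

parser = OptionParser(description='Check or terminate a process.')
check_opts = OptionGroup(parser, 'Check Options')
parser.add_option_group(check_opts)
check_opts.add_option('--check', action='store_true', default=False,
                      help='only check whether the process could be killed')
(options, args) = parser.parse_args()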
Example #4
    def createParser():
        parser = OptParser(option_class=OptChecker,
                           description=' '.join(description.split()),
                           version='%prog version $Revision: #12 $')
        parser.setHelp([])

        addStandardLoggingAndHelpOptions(parser,
                                         includeNonInteractiveOption=False)

        parser.add_option("-D",
                          "--db",
                          dest="dblist",
                          action="append",
                          type="string")
        parser.add_option("-V",
                          "--gp-version",
                          dest="gpversion",
                          metavar="GP_VERSION",
                          help="expected software version")
        parser.add_option(
            "-m",
            "--mode",
            dest="mode",
            metavar="<MODE>",
            help="how to shutdown. modes are smart,fast, or immediate")
        parser.add_option("-t",
                          "--timeout",
                          dest="timeout",
                          type="int",
                          default=SEGMENT_STOP_TIMEOUT_DEFAULT,
                          help="seconds to wait")
        return parser
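
Two stock optparse behaviours do the work here: action="append" collects every repeated -D/--db into a list, and type="int" converts and validates -t at parse time. A small self-contained check (the 120-second default is illustrative; SEGMENT_STOP_TIMEOUT_DEFAULT is not shown above):

from optparse import OptionParser

parser = OptionParser()
parser.add_option('-D', '--db', dest='dblist', action='append', type='string')
parser.add_option('-t', '--timeout', dest='timeout', type='int', default=120)

options, _ = parser.parse_args(['-D', 'db1', '-D', 'db2', '-t', '60'])
assert options.dblist == ['db1', 'db2']
assert options.timeout == 60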
Example #5
    def createParser():
        """
        Create parser expected by simple_main
        """

        parser = OptParser(option_class=OptChecker,
                           description=' '.join(DESCRIPTION.split()),
                           version='%prog version main build dev')
        parser.setHelp(HELP)

        #
        # Note that this mirroringmode parameter should only be either mirrorless or quiescent.
        #   If quiescent then it is implied that there is pickled transition data that will be
        #   provided (using -p) to immediately convert to a primary or a mirror.
        #
        addStandardLoggingAndHelpOptions(parser, includeNonInteractiveOption=False)

        parser.add_option("-C", "--collation", type="string",
                            help="values for lc_collate, lc_monetary, lc_numeric separated by :")
        parser.add_option("-D", "--dblist", dest="dblist", action="append", type="string")
        parser.add_option("-M", "--mirroringmode", dest="mirroringMode", type="string")
        parser.add_option("-p", "--pickledTransitionData", dest="pickledTransitionData", type="string")
        parser.add_option("-V", "--gp-version", dest="gpversion", metavar="GP_VERSION", help="expected software version")
        parser.add_option("-n", "--numsegments", dest="num_cids", help="number of distinct content ids in cluster")
        parser.add_option("", "--era", dest="era", help="master era")
        parser.add_option("-t", "--timeout", dest="timeout", type="int", default=gp.SEGMENT_TIMEOUT_DEFAULT,
                          help="seconds to wait")
        parser.add_option('-U', '--specialMode', type='choice', choices=['upgrade', 'maintenance'],
                           metavar='upgrade|maintenance', action='store', default=None,
                           help='start the instance in upgrade or maintenance mode')
        parser.add_option('', '--wrapper', dest="wrapper", default=None, type='string')
        parser.add_option('', '--wrapper-args', dest="wrapper_args", default=None, type='string')
        
        return parser
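
The -U/--specialMode option relies on type='choice', so optparse itself rejects anything outside the choices list before the program runs. In isolation:

from optparse import OptionParser

parser = OptionParser()
parser.add_option('-U', '--specialMode', type='choice',
                  choices=['upgrade', 'maintenance'],
                  metavar='upgrade|maintenance', action='store', default=None,
                  help='start the instance in upgrade or maintenance mode')

options, _ = parser.parse_args(['-U', 'upgrade'])
assert options.specialMode == 'upgrade'
# parser.parse_args(['-U', 'bogus']) would print an "invalid choice" error and exit.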
Example #6
    def create_parser():
        parser = OptParser(option_class=OptChecker,
            description="Greenplum Package Manager",
            version='%prog version $Revision: #1 $')
        parser.setHelp([])

        addStandardLoggingAndHelpOptions(parser, includeNonInteractiveOption=True)

        parser.remove_option('-q')
        parser.remove_option('-l')

        add_to = OptionGroup(parser, 'General Options')
        parser.add_option_group(add_to)

        addMasterDirectoryOptionForSingleClusterProgram(add_to)

        # TODO: AK: Eventually, these options may need to be flexible enough to accept mutiple packages
        # in one invocation. If so, the structure of this parser may need to change.
        add_to.add_option('-i', '--install', help='install the given gppkg', metavar='<package>')
        add_to.add_option('-u', '--update', help='update the given gppkg', metavar='<package>')
        add_to.add_option('-r', '--remove', help='remove the given gppkg', metavar='<name>-<version>')
        add_to.add_option('-q', '--query', help='query the gppkg database or a particular gppkg', action='store_true')
        add_to.add_option('-b', '--build', help='build a gppkg', metavar='<directory>')
        add_to.add_option('-c', '--clean', help='clean the cluster of the given gppkg', action='store_true')
        add_to.add_option('--migrate', help='migrate gppkgs from a separate $GPHOME', metavar='<from_gphome> <to_gphome>', action='store_true', default=False)
        add_to.add_option('-f', '--filename', help='set specific package name', metavar='<name>')

        add_to = OptionGroup(parser, 'Query Options')
        parser.add_option_group(add_to)
        add_to.add_option('--info', action='store_true', help='print information about the gppkg including name, version, description')
        add_to.add_option('--list', action='store_true', help='print all the files present in the gppkg')
        add_to.add_option('--all', action='store_true', help='print all the gppkgs installed by gppkg')

        return parser
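
One detail worth noting above: the stock -q (quiet) switch is removed and the short flag is reused for --query in the 'General Options' group. remove_option() drops the whole option that owns that string, which frees the switch, as this sketch shows:

from optparse import OptionParser

parser = OptionParser()
parser.add_option('-q', '--quiet', action='store_true')   # stock flag
parser.remove_option('-q')                                 # removes -q and --quiet
parser.add_option('-q', '--query', action='store_true')    # -q now means "query"

options, _ = parser.parse_args(['-q'])
assert options.query is True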
Example #7
    def createParser():
        parser = OptParser(
            option_class=OptChecker,
            description="Gets status from segments on a single host "
            "using a transition message.  Internal-use only.",
            version='%prog version $Revision: #1 $')
        parser.setHelp([])

        addStandardLoggingAndHelpOptions(parser, True)

        addTo = parser
        addTo.add_option("-s",
                         None,
                         type="string",
                         dest="statusQueryRequests",
                         metavar="<statusQueryRequests>",
                         help="Status Query Message")
        addTo.add_option("-D",
                         "--dblist",
                         type="string",
                         action="append",
                         dest="dirList",
                         metavar="<dirList>",
                         help="Directory List")

        parser.set_defaults()
        return parser
Example #8
def create_parser():
    parser = OptParser(option_class=OptChecker,
                       description='update the pg_hba.conf on all segments')

    addStandardLoggingAndHelpOptions(parser, includeNonInteractiveOption=True)
    parser.add_option('-p',
                      '--pg-hba-info',
                      dest='pg_hba_info',
                      metavar='<pg_hba entries>',
                      help='Entries that get added to pg_hba.conf file')
    parser.add_option(
        '-d',
        '--data-dirs',
        dest='data_dirs',
        metavar='<list of data dirs>',
        help='A list of all data directories present on this host')
    parser.add_option('-b',
                      '--backup',
                      action='store_true',
                      help='Backup the pg_hba.conf file')
    parser.add_option('-r',
                      '--restore',
                      action='store_true',
                      help='Restore the pg_hba.conf file')
    parser.add_option('-D',
                      '--delete',
                      action='store_true',
                      help='Cleanup the pg_hba.conf backup file')

    return parser
Example #9
    def createParser():

        description = ("""
        Clean segment directories.
        """)

        help = ["""
          To be used internally only.
        """]

        parser = OptParser(option_class=OptChecker,
                           description=' '.join(description.split()),
                           version='%prog version $Revision: #1 $')
        parser.setHelp(help)

        addStandardLoggingAndHelpOptions(parser, True)

        addTo = OptionGroup(parser, "Clean Segment Options")
        parser.add_option_group(addTo)
        addTo.add_option('-p',
                         None,
                         dest="pickledArguments",
                         type='string',
                         default=None,
                         metavar="<pickledArguments>",
                         help="The arguments passed from the original script")

        parser.set_defaults()
        return parser
Example #10
def parseargs():
    parser = OptParser(option_class=OptChecker)

    parser.setHelp(_help)

    parser.remove_option('-h')
    parser.add_option('-h',
                      '-?',
                      '--help',
                      action='help',
                      help='show this help message and exit')

    parser.add_option('--entry', type='string')
    parser.add_option('--value', type='string')
    parser.add_option('--removeonly', action='store_true')
    parser.set_defaults(removeonly=False)

    # Parse the command line arguments
    (options, args) = parser.parse_args()

    # sanity check
    if not options.entry:
        print "--entry is required"
        sys.exit(1)

    if (not options.value) and (not options.removeonly):
        print "Select either --value or --removeonly"
        sys.exit(1)

    return options
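
The sanity checks after parse_args() can also be written with parser.error(), which prints the usage message to stderr and exits with status 2 (the original prints and exits with status 1). A sketch in that style:

from optparse import OptionParser

parser = OptionParser()
parser.add_option('--entry', type='string')
parser.add_option('--value', type='string')
parser.add_option('--removeonly', action='store_true', default=False)

(options, args) = parser.parse_args()

if not options.entry:
    parser.error('--entry is required')
if not options.value and not options.removeonly:
    parser.error('select either --value or --removeonly')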
Example #11
def parseargs():
    parser = OptParser(option_class=OptChecker)
    parser.remove_option('-h')
    parser.add_option('-h', '-?', '--help', action='store_true')
    parser.add_option('-d', '--dbname', type='string')
    parser.add_option('-p', '--password', type='string')
    parser.add_option('-n', '--nthreads', type='int')
    (options, args) = parser.parse_args()
    if options.help or not options.dbname:
        print """Script performs parallel analyze of all tables
Usage:
./parallel_analyze.py -n thread_number -d dbname [-p gpadmin_password]
Parameters:
    thread_number    - number of parallel threads to run
    dbname           - name of the database
    gpadmin_password - password of the gpadmin user"""
        sys.exit(0)
    if not options.dbname:
        logger.error(
            'Failed to start utility. Please, specify database name with "-d" key'
        )
        sys.exit(1)
    if not options.nthreads:
        logger.error(
            'Failed to start utility. Please, specify number of threads with "-n" key'
        )
        sys.exit(1)
    return options
Example #12
    def parseargs(self):
        parser = OptParser(option_class=OptChecker,
                           description=' '.join(self.description.split()),
                           version='%prog version $Revision: $')
        parser.set_usage(
            '%prog is a utility script used by gprecoverseg, and gpaddmirrors and is not intended to be run separately.'
        )
        parser.remove_option('-h')

        parser.add_option('-v',
                          '--verbose',
                          action='store_true',
                          help='debug output.',
                          default=False)
        parser.add_option('-c', '--confinfo', type='string')
        parser.add_option('-b',
                          '--batch-size',
                          type='int',
                          default=DEFAULT_SEGHOST_NUM_WORKERS,
                          metavar='<batch_size>')
        parser.add_option('-f',
                          '--force-overwrite',
                          dest='forceoverwrite',
                          action='store_true',
                          default=False)
        parser.add_option('-l',
                          '--log-dir',
                          dest="logfileDirectory",
                          type="string")

        # Parse the command line arguments
        options, _ = parser.parse_args()
        return options
Example #13
    def createParser():

        description = ("Recover a failed segment")
        help = [""]

        parser = OptParser(option_class=OptChecker,
                           description=' '.join(description.split()),
                           version='%prog version $Revision$')
        parser.setHelp(help)

        loggingGroup = addStandardLoggingAndHelpOptions(parser, True)
        loggingGroup.add_option("-s", None, default=None, action='store_false',
                                dest='showProgressInplace',
                                help='Show pg_basebackup/pg_rewind progress sequentially instead of inplace')
        loggingGroup.add_option("--no-progress",
                                dest="showProgress", default=True, action="store_false",
                                help="Suppress pg_basebackup/pg_rewind progress output")

        addTo = OptionGroup(parser, "Connection Options")
        parser.add_option_group(addTo)
        addCoordinatorDirectoryOptionForSingleClusterProgram(addTo)

        addTo = OptionGroup(parser, "Recovery Source Options")
        parser.add_option_group(addTo)
        addTo.add_option("-i", None, type="string",
                         dest="recoveryConfigFile",
                         metavar="<configFile>",
                         help="Recovery configuration file")
        addTo.add_option("-o", None,
                         dest="outputSampleConfigFile",
                         metavar="<configFile>", type="string",
                         help="Sample configuration file name to output; "
                              "this file can be passed to a subsequent call using -i option")

        addTo = OptionGroup(parser, "Recovery Destination Options")
        parser.add_option_group(addTo)
        addTo.add_option("-p", None, type="string",
                         dest="newRecoverHosts",
                         metavar="<targetHosts>",
                         help="Spare new hosts to which to recover segments")

        addTo = OptionGroup(parser, "Recovery Options")
        parser.add_option_group(addTo)
        addTo.add_option('-F', None, default=False, action='store_true',
                         dest="forceFullResynchronization",
                         metavar="<forceFullResynchronization>",
                         help="Force full segment resynchronization")
        addTo.add_option("-B", None, type="int", default=16,
                         dest="parallelDegree",
                         metavar="<parallelDegree>",
                         help="Max # of workers to use for building recovery segments.  [default: %default]")
        addTo.add_option("-r", None, default=False, action='store_true',
                         dest='rebalanceSegments', help='Rebalance synchronized segments.')
        addTo.add_option('', '--hba-hostnames', action='store_true', dest='hba_hostnames',
                         help='use hostnames instead of CIDR in pg_hba.conf')

        parser.set_defaults()
        return parser
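
The --no-progress flag uses the default=True, action="store_false" pattern, so the option's presence switches progress reporting off rather than on. In isolation:

from optparse import OptionParser

parser = OptionParser()
parser.add_option('--no-progress', dest='showProgress',
                  default=True, action='store_false',
                  help='Suppress pg_basebackup/pg_rewind progress output')

options, _ = parser.parse_args(['--no-progress'])
assert options.showProgress is False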
Example #14
    def createParser():

        description = ("Add mirrors to a system")
        help = [""]

        parser = OptParser(option_class=OptChecker,
                           description=' '.join(description.split()),
                           version='%prog version $Revision$')
        parser.setHelp(help)

        addStandardLoggingAndHelpOptions(parser, True)

        addTo = OptionGroup(parser, "Connection Options")
        parser.add_option_group(addTo)
        addMasterDirectoryOptionForSingleClusterProgram(addTo)

        addTo = OptionGroup(parser, "Mirroring Options")
        parser.add_option_group(addTo)
        addTo.add_option("-i", None, type="string",
                         dest="mirrorConfigFile",
                         metavar="<configFile>",
                         help="Mirroring configuration file")

        addTo.add_option("-o", None,
                         dest="outputSampleConfigFile",
                         metavar="<configFile>", type="string",
                         help="Sample configuration file name to output; "
                              "this file can be passed to a subsequent call using -i option")

        addTo.add_option("-m", None, type="string",
                         dest="mirrorDataDirConfigFile",
                         metavar="<dataDirConfigFile>",
                         help="Mirroring data directory configuration file")

        addTo.add_option('-s', default=False, action='store_true',
                         dest="spreadMirroring",
                         help="use spread mirroring for placing mirrors on hosts")

        addTo.add_option("-p", None, type="int", default=1000,
                         dest="mirrorOffset",
                         metavar="<mirrorOffset>",
                         help="Mirror port offset.  The mirror port offset will be used multiple times "
                              "to derive three sets of ports [default: %default]")

        addTo.add_option("-B", None, type="int", default=16,
                         dest="parallelDegree",
                         metavar="<parallelDegree>",
                         help="Max # of workers to use for building recovery segments.  [default: %default]")

        addTo.add_option('', '--hba-hostnames', action='store_true', dest='hba_hostnames',
                          help='use hostnames instead of CIDR in pg_hba.conf')

        parser.set_defaults()
        return parser
Example #15
    def createParser():

        description = ("Recover a failed segment")
        help = [""]

        parser = OptParser(option_class=OptChecker,
                           description=' '.join(description.split()),
                           version='%prog version $Revision$')
        parser.setHelp(help)

        addStandardLoggingAndHelpOptions(parser, True)

        addTo = OptionGroup(parser, "Connection Options")
        parser.add_option_group(addTo)
        addMasterDirectoryOptionForSingleClusterProgram(addTo)

        addTo = OptionGroup(parser, "Recovery Source Options")
        parser.add_option_group(addTo)
        addTo.add_option("-i", None, type="string",
                         dest="recoveryConfigFile",
                         metavar="<configFile>",
                         help="Recovery configuration file")
        addTo.add_option("-o", None,
                         dest="outputSampleConfigFile",
                         metavar="<configFile>", type="string",
                         help="Sample configuration file name to output; "
                              "this file can be passed to a subsequent call using -i option")

        addTo = OptionGroup(parser, "Recovery Destination Options")
        parser.add_option_group(addTo)
        addTo.add_option("-p", None, type="string",
                         dest="newRecoverHosts",
                         metavar="<targetHosts>",
                         help="Spare new hosts to which to recover segments")

        addTo = OptionGroup(parser, "Recovery Options")
        parser.add_option_group(addTo)
        addTo.add_option('-F', None, default=False, action='store_true',
                         dest="forceFullResynchronization",
                         metavar="<forceFullResynchronization>",
                         help="Force full segment resynchronization")
        addTo.add_option("-B", None, type="int", default=16,
                         dest="parallelDegree",
                         metavar="<parallelDegree>",
                         help="Max # of workers to use for building recovery segments.  [default: %default]")
        addTo.add_option("-r", None, default=False, action='store_true',
                         dest='rebalanceSegments', help='Rebalance synchronized segments.')

        parser.set_defaults()
        return parser
Example #16
def parse_command_line():
    parser = OptParser(option_class=OptChecker,
                description=' '.join(_description.split()))
    parser.setHelp(_help)
    parser.set_usage('%prog ' + _usage)
    parser.remove_option('-h')
    
    parser.add_option('--start', action='store_true',
                        help='Start the Greenplum Performance Monitor web server.')
    parser.add_option('--stop', action='store_true',
                      help='Stop the Greenplum Performance Monitor web server.')
    parser.add_option('--restart', action='store_true',
                      help='Restart the Greenplum Performance Monitor web server.')                        
    parser.add_option('--status', action='store_true',
                      help='Display the status of the Greenplum Performance Monitor web server.')
    parser.add_option('--setup', action='store_true',
                      help='Setup the Greenplum Performance Monitor web server.')
    parser.add_option('--version', action='store_true',
                       help='Display version information')
    parser.add_option('--upgrade', action='store_true',
                      help='Upgrade a previous installation of the Greenplum Performance Monitor web UI')
        
    parser.set_defaults(verbose=False,filters=[], slice=(None, None))
    
    # Parse the command line arguments
    (options, args) = parser.parse_args()

    if options.version:
        version()
        sys.exit(0)
    
    # check for too many options
    opt_count = 0
    if options.start:
        opt_count+=1
    if options.stop:
        opt_count+=1
    if options.setup:
        opt_count+=1
    if options.upgrade:
        opt_count+=1
    if options.status:
        opt_count+=1

    if opt_count > 1:
        parser.print_help()
        parser.exit()
    
    return options, args
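
The opt_count block enforces that the mode flags are mutually exclusive. A compact sketch of the same idea (the flag set here is illustrative and not identical to the one above):

from optparse import OptionParser

parser = OptionParser()
for flag in ('--start', '--stop', '--restart', '--status', '--setup'):
    parser.add_option(flag, action='store_true', default=False)

(options, args) = parser.parse_args()

chosen = [f for f in ('start', 'stop', 'restart', 'status', 'setup')
          if getattr(options, f)]
if len(chosen) > 1:
    parser.print_help()
    parser.exit()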
Example #17
def parseargs():
    parser = OptParser(option_class=OptChecker,
                       description=' '.join(DESCRIPTION.split()),
                       version='%prog version $Revision: #12 $')
    parser.setHelp(_help)
    parser.set_usage('%prog ' + _usage)
    parser.remove_option('-h')

    parser.add_option(
        '-f',
        '--file',
        default='',
        help='the name of a file containing the re-sync file list.')
    parser.add_option('-v',
                      '--verbose',
                      action='store_true',
                      help='debug output.',
                      default=False)
    parser.add_option('-h',
                      '-?',
                      '--help',
                      action='help',
                      help='show this help message and exit.',
                      default=False)
    parser.add_option('--usage', action="briefhelp")
    parser.add_option(
        '-d',
        '--master_data_directory',
        type='string',
        dest="masterDataDirectory",
        metavar="<master data directory>",
        help=
        "Optional. The master host data directory. If not specified, the value set for $MASTER_DATA_DIRECTORY will be used.",
        default=get_masterdatadir())
    parser.add_option('-a',
                      help='don\'t ask to confirm repairs',
                      dest='confirm',
                      default=True,
                      action='store_false')
    """
     Parse the command line arguments
    """
    (options, args) = parser.parse_args()

    if len(args) > 0:
        logger.error('Unknown argument %s' % args[0])
        parser.exit()

    return options, args
Example #18
def parseargs():
    parser = OptParser(option_class=OptChecker)
    parser.remove_option('-h')
    parser.add_option('-h', '-?', '--help', action='store_true')
    parser.add_option('-d', '--dbname', type='string')
    parser.add_option('-p', '--password', type='string')
    parser.add_option('-n', '--nthreads', type='int')
    parser.add_option('-u', '--user', type='string')
    parser.add_option('-l', '--location', type='string')
    parser.add_option('-m', '--stat_mem', type='string')
    parser.add_option('-f', '--filename', type='string')
    (options, args) = parser.parse_args()
    if options.help:
        print """Script performs serial restore of the backup files in case
of the cluster itopology change.
Usage:
./parallel_analyze.py -n thread_number -d dbname [-p gpadmin_password]
Parameters:
    thread_number    - number of parallel threads to run
    dbname           - name of the database
    gpadmin_password - password of the gpadmin user"""
        sys.exit(0)
    if not options.dbname:
        logger.error(
            'Failed to start utility. Please, specify database name with "-d" key'
        )
        sys.exit(1)
    if not options.nthreads:
        logger.error(
            'Failed to start utility. Please, specify number of threads with "-n" key'
        )
        sys.exit(1)
    if not options.stat_mem:
        logger.error(
            'Failed to start utility. Please, specify statement_mem parameter  with "-m" key'
        )
        sys.exit(1)
    if not options.filename:
        logger.error(
            'Failed to start utility. Please, specify filename parameter (e.g. initial, compare)  with "-f" key'
        )
        sys.exit(1)
    if not options.location:
        logger.error(
            'Failed to start utility. Please, specify result folder parameter  with "-l" key'
        )
        sys.exit(1)
    return options
Example #19
def parseargs():
    parser = OptParser(option_class=OptChecker,
                       description=' '.join(description.split()))

    parser.setHelp([])

    parser.add_option('-d',
                      '--data-dir',
                      dest='datadir',
                      metavar='<data dir of the segment>',
                      help='Data dir of the segment to update pg_hba.conf')
    parser.add_option("-e",
                      "--entries",
                      dest="entries",
                      metavar="<entries to be added>",
                      help="entries to be added to pg_hba.conf")
    options, args = parser.parse_args()
    return validate_args(options)
Example #20
    def createParser():
        """
        Constructs and returns an option parser.

        Called by simple_main()
        """
        parser = OptParser(option_class=OptChecker,
                           version='%prog version $Revision: $')
        parser.setHelp(__help__)

        addStandardLoggingAndHelpOptions(parser, False)

        opts = OptionGroup(parser, "Required Options")
        opts.add_option('-d', '--directory', type='string')
        opts.add_option('-i', '--dbid', type='int')
        parser.add_option_group(opts)

        parser.set_defaults()
        return parser
Example #21
    def createParser():
        """
        Constructs and returns an option parser.

        Called by simple_main()
        """
        parser = OptParser(option_class=OptChecker,
                           description=' '.join(description.split()),
                           version='%prog version $Revision: #1 $')
        parser.setHelp([])

        addStandardLoggingAndHelpOptions(parser,
                                         includeNonInteractiveOption=False)

        parser.add_option(
            "-C",
            "--collation",
            type="string",
            help="values for lc_collate, lc_monetary, lc_numeric separated by :"
        )
        parser.add_option("-D",
                          "--datadir",
                          dest="dblist",
                          action="append",
                          type="string")
        parser.add_option("-p",
                          "--pickledTransitionData",
                          dest="pickledTransitionData",
                          type="string")
        parser.add_option("-M",
                          "--mirroringMode",
                          dest="mirroringMode",
                          type="string")
        parser.add_option("-V",
                          "--gp-version",
                          dest="gpversion",
                          metavar="GP_VERSION",
                          help="expected software version")

        parser.set_defaults(verbose=False, filters=[], slice=(None, None))

        return parser
Example #22
    def createParser():
        description = ("""
        This utility is NOT SUPPORTED and is for internal-use only.

        Used to inject faults into the file replication code.
        """)

        help = [
            """

        Return codes:
          0 - Fault injected
          non-zero: Error or invalid options
        """
        ]

        parser = OptParser(option_class=OptChecker,
                           description='  '.join(description.split()),
                           version='%prog version $Revision$')
        parser.setHelp(help)

        addStandardLoggingAndHelpOptions(parser, False)

        # these options are used to determine the target segments
        addTo = OptionGroup(parser, 'Target Segment Options: ')
        parser.add_option_group(addTo)
        addTo.add_option(
            '-r',
            '--role',
            dest="targetRole",
            type='string',
            metavar="<role>",
            help=
            "Role of segments to target: primary, mirror, or primary_mirror")
        addTo.add_option(
            "-s",
            "--seg_dbid",
            dest="targetDbId",
            type="string",
            metavar="<dbid>",
            help="The segment  dbid on which fault should be set and triggered."
        )
        addTo.add_option(
            "-H",
            "--host",
            dest="targetHost",
            type="string",
            metavar="<host>",
            help=
            "The hostname on which fault should be set and triggered; pass ALL to target all hosts"
        )

        addTo = OptionGroup(parser, 'Master Connection Options')
        parser.add_option_group(addTo)

        addMasterDirectoryOptionForSingleClusterProgram(addTo)
        addTo.add_option("-p", "--master_port", dest="masterPort",  type="int", default=None,
                         metavar="<masterPort>",
                         help="DEPRECATED, use MASTER_DATA_DIRECTORY environment variable or -d option.  " \
                         "The port number of the master database on localhost, " \
                         "used to fetch the segment configuration.")

        addTo = OptionGroup(parser, 'Client Polling Options: ')
        parser.add_option_group(addTo)
        addTo.add_option('-m', '--mode', dest="syncMode", type='string', default="async",
                         metavar="<syncMode>",
                         help="Synchronization mode : sync (client waits for fault to occur)" \
                         " or async (client only sets fault request on server)")

        # these options are used to build the message for the segments
        addTo = OptionGroup(parser, 'Fault Options: ')
        parser.add_option_group(addTo)
        # NB: This list needs to be kept in sync with:
        # - FaultInjectorTypeEnumToString
        # - FaultInjectorType_e
        addTo.add_option('-y','--type', dest="type", type='string', metavar="<type>",
                         help="fault type: sleep (insert sleep), fault (report fault to postmaster and fts prober), " \
         "fatal (inject FATAL error), panic (inject PANIC error), error (inject ERROR), " \
         "infinite_loop, data_curruption (corrupt data in memory and persistent media), " \
         "suspend (suspend execution), resume (resume execution that was suspended), " \
         "skip (inject skip i.e. skip checkpoint), " \
         "memory_full (all memory is consumed when injected), " \
         "reset (remove fault injection), status (report fault injection status), " \
         "segv (inject a SEGV), " \
         "interrupt (inject an Interrupt), " \
         "finish_pending (set QueryFinishPending to true), " \
         "checkpoint_and_panic (inject a panic following checkpoint) ")
        addTo.add_option("-z", "--sleep_time_s", dest="sleepTimeSeconds", type="int", default="10" ,
                            metavar="<sleepTime>",
                            help="For 'sleep' faults, the amount of time for the sleep.  Defaults to %default." \
     "Min Max Range is [0, 7200 sec] ")
        addTo.add_option('-f','--fault_name', dest="faultName", type='string', metavar="<name>",
                         help="fault name: " \
         "postmaster (inject fault when new connection is accepted in postmaster), " \
         "pg_control (inject fault when global/pg_control file is written), " \
         "pg_xlog (inject fault when files in pg_xlog directory are written), " \
         "start_prepare (inject fault during start prepare transaction), " \
         "filerep_consumer (inject fault before data are processed, i.e. if mirror " \
         "then before file operation is issued to file system, if primary " \
         "then before mirror file operation is acknowledged to backend processes), " \
         "filerep_consumer_verificaton (inject fault before ack verification data are processed on primary), " \
         "filerep_change_tracking_compacting (inject fault when compacting change tracking log files), " \
         "filerep_sender (inject fault before data are sent to network), " \
         "filerep_receiver (inject fault after data are received from network), " \
         "filerep_flush (inject fault before fsync is issued to file system), " \
         "filerep_resync (inject fault while in resync when first relation is ready to be resynchronized), " \
         "filerep_resync_in_progress (inject fault while resync is in progress), " \
         "filerep_resync_worker (inject fault after write to mirror), " \
         "filerep_resync_worker_read (inject fault before read required for resync), " \
         "filerep_transition_to_resync (inject fault during transition to InResync before mirror re-create), " \
         "filerep_transition_to_resync_mark_recreate (inject fault during transition to InResync before marking re-created), " \
         "filerep_transition_to_resync_mark_completed (inject fault during transition to InResync before marking completed), " \
         "filerep_transition_to_sync_begin (inject fault before transition to InSync begin), " \
         "filerep_transition_to_sync (inject fault during transition to InSync), " \
         "filerep_transition_to_sync_before_checkpoint (inject fault during transition to InSync before checkpoint is created), " \
         "filerep_transition_to_sync_mark_completed (inject fault during transition to InSync before marking completed), " \
         "filerep_transition_to_change_tracking (inject fault during transition to InChangeTracking), " \
                              "fileRep_is_operation_completed (inject fault in FileRep Is Operation completed function just for ResyncWorker Threads), "\
                              "filerep_immediate_shutdown_request (inject fault just before sending the shutdown SIGQUIT to child processes), "\
         "checkpoint (inject fault before checkpoint is taken), " \
         "change_tracking_compacting_report (report if compacting is in progress), " \
         "change_tracking_disable (inject fault before fsync to Change Tracking log files), " \
         "transaction_abort_after_distributed_prepared (abort prepared transaction), " \
         "transaction_commit_pass1_from_create_pending_to_created, " \
         "transaction_commit_pass1_from_drop_in_memory_to_drop_pending, " \
         "transaction_commit_pass1_from_aborting_create_needed_to_aborting_create, " \
         "transaction_abort_pass1_from_create_pending_to_aborting_create, " \
         "transaction_abort_pass1_from_aborting_create_needed_to_aborting_create, " \
         "transaction_commit_pass2_from_drop_in_memory_to_drop_pending, " \
         "transaction_commit_pass2_from_aborting_create_needed_to_aborting_create, " \
         "transaction_abort_pass2_from_create_pending_to_aborting_create, " \
         "transaction_abort_pass2_from_aborting_create_needed_to_aborting_create, " \
         "finish_prepared_transaction_commit_pass1_from_create_pending_to_created, " \
         "finish_prepared_transaction_commit_pass2_from_create_pending_to_created, " \
         "finish_prepared_transaction_abort_pass1_from_create_pending_to_aborting_create, " \
         "finish_prepared_transaction_abort_pass2_from_create_pending_to_aborting_create, " \
         "finish_prepared_transaction_commit_pass1_from_drop_in_memory_to_drop_pending, " \
         "finish_prepared_transaction_commit_pass2_from_drop_in_memory_to_drop_pending, " \
         "finish_prepared_transaction_commit_pass1_aborting_create_needed, " \
         "finish_prepared_transaction_commit_pass2_aborting_create_needed, " \
         "finish_prepared_transaction_abort_pass1_aborting_create_needed, " \
         "finish_prepared_transaction_abort_pass2_aborting_create_needed, " \
         "twophase_transaction_commit_prepared (inject fault before transaction commit is inserted in xlog), " \
         "twophase_transaction_abort_prepared (inject fault before transaction abort is inserted in xlog), " \
         "dtm_broadcast_prepare (inject fault after prepare broadcast), " \
         "dtm_broadcast_commit_prepared (inject fault after commit broadcast), " \
         "dtm_broadcast_abort_prepared (inject fault after abort broadcast), " \
         "dtm_xlog_distributed_commit (inject fault after distributed commit was inserted in xlog), " \
         "fault_before_pending_delete_relation_entry (inject fault before putting pending delete relation entry, " \
         "fault_before_pending_delete_database_entry (inject fault before putting pending delete database entry, " \
         "fault_before_pending_delete_tablespace_entry (inject fault before putting pending delete tablespace entry, " \
         "fault_before_pending_delete_filespace_entry (inject fault before putting pending delete filespace entry, " \
         "dtm_init (inject fault before initializing dtm), " \
                              "end_prepare_two_phase_sleep (inject sleep after two phase file creation), " \
                              "segment_transition_request (inject fault after segment receives state transition request), " \
                              "segment_probe_response (inject fault after segment is probed by FTS), " \
         "local_tm_record_transaction_commit (inject fault for local transactions after transaction commit is recorded and flushed in xlog ), " \
         "malloc_failure (inject fault to simulate memory allocation failure), " \
         "transaction_abort_failure (inject fault to simulate transaction abort failure), " \
         "workfile_creation_failure (inject fault to simulate workfile creation failure), " \
         "update_committed_eof_in_persistent_table (inject fault before committed EOF is updated in gp_persistent_relation_node for Append Only segment files), " \
         "exec_simple_query_end_command (inject fault before EndCommand in exec_simple_query), " \
         "multi_exec_hash_large_vmem (allocate large vmem using palloc inside MultiExecHash to attempt to exceed vmem limit), " \
         "execsort_before_sorting (inject fault in nodeSort after receiving all tuples and before sorting), " \
         "execsort_mksort_mergeruns (inject fault in MKSort during the mergeruns phase), " \
         "execshare_input_next (inject fault after shared input scan retrieved a tuple), " \
         "base_backup_post_create_checkpoint (inject fault after requesting checkpoint as part of basebackup), " \
         "compaction_before_segmentfile_drop (inject fault after compaction, but before the drop of the segment file), "  \
         "compaction_before_cleanup_phase (inject fault after compaction and drop, but before the cleanup phase), " \
         "appendonly_insert (inject fault before an append-only insert), " \
         "appendonly_delete (inject fault before an append-only delete), " \
         "appendonly_update (inject fault before an append-only update), " \
         "reindex_db (inject fault while reindex db is in progress), "\
         "reindex_relation (inject fault while reindex relation is in progress), "\
         "fault_during_exec_dynamic_table_scan (inject fault during scanning of a partition), " \
         "fault_in_background_writer_main (inject fault in BackgroundWriterMain), " \
         "cdb_copy_start_after_dispatch (inject fault in cdbCopyStart after dispatch), " \
         "repair_frag_end (inject fault at the end of repair_frag), " \
         "vacuum_full_before_truncate (inject fault before truncate in vacuum full), " \
         "vacuum_full_after_truncate (inject fault after truncate in vacuum full), " \
         "vacuum_relation_end_of_first_round (inject fault at the end of first round of vacuumRelation loop), " \
         "rebuild_pt_db (inject fault while rebuilding persistent tables (for each db)), " \
         "procarray_add (inject fault while adding PGPROC to procarray), " \
         "exec_hashjoin_new_batch (inject fault before switching to a new batch in Hash Join), " \
         "fts_wait_for_shutdown (pause FTS before committing changes), " \
         "runaway_cleanup (inject fault before starting the cleanup for a runaway query), " \
                  "opt_task_allocate_string_buffer (inject fault while allocating string buffer), " \
                  "opt_relcache_translator_catalog_access (inject fault while translating relcache entries), " \
                  "send_qe_details_init_backend (inject fault before sending QE details during backend initialization)" \
         "all (affects all faults injected, used for 'status' and 'reset'), ")
        addTo.add_option("-c", "--ddl_statement", dest="ddlStatement", type="string",
                         metavar="ddlStatement",
                         help="The DDL statement on which fault should be set and triggered " \
                         "(i.e. create_database, drop_database, create_table, drop_table)")
        addTo.add_option(
            "-D",
            "--database_name",
            dest="databaseName",
            type="string",
            metavar="databaseName",
            help="The database name on which fault should be set and triggered."
        )
        addTo.add_option(
            "-t",
            "--table_name",
            dest="tableName",
            type="string",
            metavar="tableName",
            help="The table name on which fault should be set and triggered.")
        addTo.add_option("-o", "--occurrence", dest="numOccurrences", type="int", default=1,
                         metavar="numOccurrences",
                         help="The number of occurrence of the DDL statement with the database name " \
                         "and the table name before fault is triggered.  Defaults to %default. Max is 1000. " \
    "Fault is triggered always if set to '0'. ")
        parser.set_defaults()
        return parser
Example #23
    def createParser():
        description = ("""
        This utility is NOT SUPPORTED and is for internal-use only.

        Used to inject faults into the file replication code.
        """)

        help = [
            """

        Return codes:
          0 - Fault injected
          non-zero: Error or invalid options
        """
        ]

        parser = OptParser(option_class=OptChecker,
                           description='  '.join(description.split()),
                           version='%prog version $Revision$')
        parser.setHelp(help)

        addStandardLoggingAndHelpOptions(parser, False)

        # these options are used to determine the target segments
        addTo = OptionGroup(parser, 'Target Segment Options: ')
        parser.add_option_group(addTo)
        addTo.add_option(
            '-r',
            '--role',
            dest="targetRole",
            type='string',
            metavar="<role>",
            help=
            "Role of segments to target: primary, mirror, or primary_mirror")
        addTo.add_option(
            "-s",
            "--seg_dbid",
            dest="targetDbId",
            type="string",
            metavar="<dbid>",
            help="The segment  dbid on which fault should be set and triggered."
        )
        addTo.add_option(
            "-H",
            "--host",
            dest="targetHost",
            type="string",
            metavar="<host>",
            help=
            "The hostname on which fault should be set and triggered; pass ALL to target all hosts"
        )

        addTo = OptionGroup(parser, 'Master Connection Options')
        parser.add_option_group(addTo)

        addMasterDirectoryOptionForSingleClusterProgram(addTo)
        addTo.add_option("-p", "--master_port", dest="masterPort",  type="int", default=None,
                         metavar="<masterPort>",
                         help="DEPRECATED, use MASTER_DATA_DIRECTORY environment variable or -d option.  " \
                         "The port number of the master database on localhost, " \
                         "used to fetch the segment configuration.")

        addTo = OptionGroup(parser, 'Client Polling Options: ')
        parser.add_option_group(addTo)
        addTo.add_option('-m', '--mode', dest="syncMode", type='string', default="async",
                         metavar="<syncMode>",
                         help="Synchronization mode : sync (client waits for fault to occur)" \
                         " or async (client only sets fault request on server)")

        # these options are used to build the message for the segments
        addTo = OptionGroup(parser, 'Fault Options: ')
        parser.add_option_group(addTo)
        # NB: This list needs to be kept in sync with:
        # - FaultInjectorTypeEnumToString
        # - FaultInjectorType_e
        addTo.add_option('-y','--type', dest="type", type='string', metavar="<type>",
                         help="fault type: sleep (insert sleep), fault (report fault to postmaster and fts prober), " \
         "fatal (inject FATAL error), panic (inject PANIC error), error (inject ERROR), " \
         "infinite_loop, data_curruption (corrupt data in memory and persistent media), " \
         "suspend (suspend execution), resume (resume execution that was suspended), " \
         "skip (inject skip i.e. skip checkpoint), " \
         "memory_full (all memory is consumed when injected), " \
         "reset (remove fault injection), status (report fault injection status), " \
         "segv (inject a SEGV), " \
         "interrupt (inject an Interrupt), " \
         "finish_pending (set QueryFinishPending to true), " \
         "checkpoint_and_panic (inject a panic following checkpoint) ")
        addTo.add_option("-z", "--sleep_time_s", dest="sleepTimeSeconds", type="int", default="10" ,
                            metavar="<sleepTime>",
                            help="For 'sleep' faults, the amount of time for the sleep.  Defaults to %default." \
     "Min Max Range is [0, 7200 sec] ")
        addTo.add_option(
            '-f',
            '--fault_name',
            dest="faultName",
            type='string',
            metavar="<name>",
            help=
            "See src/include/utils/faultinjector_lists.h for list of fault names"
        )
        addTo.add_option("-c", "--ddl_statement", dest="ddlStatement", type="string",
                         metavar="ddlStatement",
                         help="The DDL statement on which fault should be set and triggered " \
                         "(i.e. create_database, drop_database, create_table, drop_table)")
        addTo.add_option(
            "-D",
            "--database_name",
            dest="databaseName",
            type="string",
            metavar="databaseName",
            help="The database name on which fault should be set and triggered."
        )
        addTo.add_option(
            "-t",
            "--table_name",
            dest="tableName",
            type="string",
            metavar="tableName",
            help="The table name on which fault should be set and triggered.")
        addTo.add_option("-o", "--occurrence", dest="numOccurrences", type="int", default=1,
                         metavar="numOccurrences",
                         help="The number of occurrence of the DDL statement with the database name " \
                         "and the table name before fault is triggered.  Defaults to %default. Max is 1000. " \
    "Fault is triggered always if set to '0'. ")
        parser.set_defaults()
        return parser
Example #24
def create_parser():
    parser = OptParser(option_class=OptChecker,
                       version='%prog version $Revision: #1 $',
                       description='Persistent tables backup and restore')

    addStandardLoggingAndHelpOptions(parser, includeNonInteractiveOption=True)

    addTo = OptionGroup(parser, 'Connection opts')
    parser.add_option_group(addTo)
    addMasterDirectoryOptionForSingleClusterProgram(addTo)

    addTo = OptionGroup(parser, 'Persistent tables backup and restore options')
    addTo.add_option(
        '--backup',
        metavar="<pickled dbid info>",
        type="string",
        help=
        "A list of dbid info where backups need to be done in pickled format")
    addTo.add_option(
        '--restore',
        metavar="<pickled dbid info>",
        type="string",
        help=
        "A list of dbid info where restore needs to be done in pickled format")
    addTo.add_option(
        '--validate-backups',
        metavar="<pickled dbid info>",
        type="string",
        help=
        "A list of dbid info where validation needs to be done in pickled format"
    )
    addTo.add_option(
        '--validate-backup-dir',
        metavar="<pickled dbid info>",
        type="string",
        help=
        "A list of dbid info where validation needs to be done in pickled format"
    )
    addTo.add_option(
        '--timestamp',
        metavar="<timestamp of backup>",
        type="string",
        help="A timestamp for the backup that needs to be validated")
    addTo.add_option('--batch-size',
                     metavar="<batch size for the worker pool>",
                     type="int",
                     help="Batch size for parallelism in worker pool")
    addTo.add_option(
        '--backup-dir',
        metavar="<backup directory>",
        type="string",
        help="Backup directory for persistent tables and transaction logs")
    addTo.add_option('--perdbpt',
                     metavar="<per database pt filename>",
                     type="string",
                     help="Filenames for per database persistent files")
    addTo.add_option('--globalpt',
                     metavar="<global pt filenames>",
                     type="string",
                     help="Filenames for global persistent files")
    addTo.add_option(
        '--validate-source-file-only',
        action='store_true',
        default=False,
        help=
        "validate that required source files existed for backup and restore")

    parser.setHelp(
        ["""
    This tool is used to backup persistent table files.
    """])

    return parser
Example #25
    def parseargs(self):
        parser = OptParser(option_class=OptChecker,
                           description=' '.join(self.description.split()),
                           version='%prog version $Revision: $')
        parser.set_usage(
            '%prog is a utility script used by gprecoverseg, and gpaddmirrors and is not intended to be run separately.'
        )
        parser.remove_option('-h')

        #TODO we may not need the verbose flag
        parser.add_option('-v',
                          '--verbose',
                          action='store_true',
                          help='debug output.',
                          default=False)
        parser.add_option('-c', '--confinfo', type='string')
        parser.add_option('-b',
                          '--batch-size',
                          type='int',
                          default=DEFAULT_SEGHOST_NUM_WORKERS,
                          metavar='<batch_size>')
        parser.add_option('-f',
                          '--force-overwrite',
                          dest='forceoverwrite',
                          action='store_true',
                          default=False)
        parser.add_option('-l',
                          '--log-dir',
                          dest="logfileDirectory",
                          type="string")
        parser.add_option(
            '',
            '--era',
            dest="era",
            help="coordinator era",
        )

        # Parse the command line arguments
        self.options, _ = parser.parse_args()

        if not self.options.confinfo:
            raise Exception('Missing --confinfo argument.')
        if not self.options.logfileDirectory:
            raise Exception('Missing --log-dir argument.')

        self.logger = gplog.setup_tool_logging(
            os.path.split(self.file_name)[-1],
            unix.getLocalHostname(),
            unix.getUserName(),
            logdir=self.options.logfileDirectory)

        if self.options.batch_size <= 0:
            self.logger.warn('batch_size was less than zero.  Setting to 1.')
            self.options.batch_size = 1

        if self.options.verbose:
            gplog.enable_verbose_logging()

        self.seg_recovery_info_list = recoveryinfo.deserialize_list(
            self.options.confinfo)
        if len(self.seg_recovery_info_list) == 0:
            raise Exception(
                'No segment configuration values found in --confinfo argument')
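The --confinfo value parsed above is an opaque serialized payload built on the coordinator and decoded on the segment host with recoveryinfo.deserialize_list. The following self-contained sketch illustrates that pass-a-serialized-list-through-a-single-flag pattern, using json in place of the real gppylib serializer; the field names are made up.

import json
from optparse import OptionParser

# Coordinator side (illustrative): serialize per-segment recovery entries into one string.
entries = [{'hostname': 'sdw1', 'port': 6000, 'datadir': '/data/primary/gpseg0'}]
confinfo = json.dumps(entries)

# Segment-host side (illustrative): decode the single --confinfo argument back into a list.
parser = OptionParser()
parser.add_option('-c', '--confinfo', type='string')
options, _ = parser.parse_args(['-c', confinfo])
seg_recovery_info_list = json.loads(options.confinfo)
assert len(seg_recovery_info_list) > 0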
Example #26
    def createParser():

        description = ("Add mirrors to a system")
        help = [""]

        parser = OptParser(option_class=OptChecker,
                           description=' '.join(description.split()),
                           version='%prog version $Revision$')
        parser.setHelp(help)

        addStandardLoggingAndHelpOptions(parser, True)

        addTo = OptionGroup(parser, "Connection Options")
        parser.add_option_group(addTo)
        addCoordinatorDirectoryOptionForSingleClusterProgram(addTo)

        addTo = OptionGroup(parser, "Mirroring Options")
        parser.add_option_group(addTo)
        addTo.add_option("-i",
                         None,
                         type="string",
                         dest="mirrorConfigFile",
                         metavar="<configFile>",
                         help="Mirroring configuration file")

        addTo.add_option(
            "-o",
            None,
            dest="outputSampleConfigFile",
            metavar="<configFile>",
            type="string",
            help="Sample configuration file name to output; "
            "this file can be passed to a subsequent call using -i option")

        addTo.add_option("-m",
                         None,
                         type="string",
                         dest="mirrorDataDirConfigFile",
                         metavar="<dataDirConfigFile>",
                         help="Mirroring data directory configuration file")

        addTo.add_option(
            '-s',
            default=False,
            action='store_true',
            dest="spreadMirroring",
            help="use spread mirroring for placing mirrors on hosts")

        addTo.add_option(
            "-p",
            None,
            type="int",
            default=1000,
            dest="mirrorOffset",
            metavar="<mirrorOffset>",
            help=
            "Mirror port offset.  The mirror port offset will be used multiple times "
            "to derive three sets of ports [default: %default]")

        addTo.add_option(
            "-B",
            "--batch-size",
            type="int",
            default=gp.DEFAULT_COORDINATOR_NUM_WORKERS,
            dest="batch_size",
            metavar="<batch_size>",
            help=
            'Max number of hosts to operate on in parallel. Valid values are 1-%d'
            % gp.MAX_COORDINATOR_NUM_WORKERS)
        addTo.add_option(
            "-b",
            "--segment-batch-size",
            type="int",
            default=gp.DEFAULT_SEGHOST_NUM_WORKERS,
            dest="segment_batch_size",
            metavar="<segment_batch_size>",
            help=
            'Max number of segments per host to operate on in parallel. Valid values are: 1-%d'
            % gp.MAX_SEGHOST_NUM_WORKERS)

        addTo.add_option('',
                         '--hba-hostnames',
                         action='store_true',
                         dest='hba_hostnames',
                         help='use hostnames instead of CIDR in pg_hba.conf')

        parser.set_defaults()
        return parser
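The help strings above state that -B and -b accept values from 1 up to the corresponding maximum. A minimal, self-contained sketch of that bounds check follows; the constants and the check_batch_sizes helper are stand-ins, not gpaddmirrors' actual validation code.

from optparse import OptionParser

MAX_COORDINATOR_NUM_WORKERS = 64   # stand-in for gp.MAX_COORDINATOR_NUM_WORKERS
MAX_SEGHOST_NUM_WORKERS = 64       # stand-in for gp.MAX_SEGHOST_NUM_WORKERS

def check_batch_sizes(parser, options):
    # Enforce the 1..max ranges documented in the -B and -b help strings.
    if not 1 <= options.batch_size <= MAX_COORDINATOR_NUM_WORKERS:
        parser.error('-B must be between 1 and %d' % MAX_COORDINATOR_NUM_WORKERS)
    if not 1 <= options.segment_batch_size <= MAX_SEGHOST_NUM_WORKERS:
        parser.error('-b must be between 1 and %d' % MAX_SEGHOST_NUM_WORKERS)

parser = OptionParser()
parser.add_option('-B', '--batch-size', type='int', default=16, dest='batch_size')
parser.add_option('-b', '--segment-batch-size', type='int', default=6, dest='segment_batch_size')
options, _ = parser.parse_args([])
check_batch_sizes(parser, options)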
Example #27
    """
    Only strip the '\n' as it is one of the non-supported chars to be part
    of the schema or table name 
    """
    if not os.path.exists(change_schema_file):
        raise Exception('change schema file path %s does not exist' %
                        change_schema_file)
    change_schema_name = None
    with open(change_schema_file) as fr:
        line = fr.read()
        change_schema_name = line.strip('\n')
    return change_schema_name
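For illustration, here is a round trip of the change-schema file format the snippet above expects: a single line holding the schema name, with only the trailing newline stripped on read. The path and schema name below are placeholders.

import os
import tempfile

# Write a change-schema file the way a caller presumably would (placeholder schema name).
path = os.path.join(tempfile.gettempdir(), 'change_schema_example.txt')
with open(path, 'w') as fw:
    fw.write('target_schema\n')

# Read it back the same way the snippet above does: strip only the trailing newline.
with open(path) as fr:
    change_schema_name = fr.read().strip('\n')
assert change_schema_name == 'target_schema'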


if __name__ == "__main__":
    parser = OptParser(option_class=OptChecker)
    parser.remove_option('-h')
    parser.add_option('-h', '-?', '--help', action='store_true')
    parser.add_option('-t', '--tablefile', type='string', default=None)
    parser.add_option('-m', '--master_only', action='store_true')
    parser.add_option('-c',
                      '--change-schema-file',
                      type='string',
                      default=None)
    parser.add_option('-s', '--schema-level-file', type='string', default=None)
    (options, args) = parser.parse_args()
    if not (options.tablefile or options.schema_level_file):
        raise Exception(
            '-t table file name or -s schema level file name must be specified'
        )
    elif options.schema_level_file and options.change_schema_file:
Example #28
    def createParser():
        """
        Creates the command line options parser object for gpverify.
        """

        description = ("Initiates primary/mirror verification.")
        help = []

        parser = OptParser(option_class=OptChecker,
                           description=' '.join(description.split()),
                           version='%prog version $Revision: #1 $')
        parser.setHelp(help)

        addStandardLoggingAndHelpOptions(parser, True)

        addTo = OptionGroup(parser, "Request Type")
        parser.add_option_group(addTo)
        addTo.add_option('--full', dest='full', action='store_true',
                         help='Perform a full verification pass.  Use --token option to ' \
                         'give the verification pass an identifier.')
        addTo.add_option('--file', dest='verify_file', metavar='<file>',
                         help='Based on file type, perform either a physical or logical verification of <file>.  ' \
                         'Use --token option to give the verification request an identifier.')
        addTo.add_option('--directorytree', dest='verify_dir',
                         metavar='<verify_dir>',
                         help='Perform a full verification pass on the specified directory.  ' \
                         'Use --token option to assign the verification pass an identifier.' )

        addTo = OptionGroup(parser, "Request Options")
        parser.add_option_group(addTo)
        addTo.add_option('--token', dest='token', metavar='<token>',
                         help='A token to use for the request.  ' \
                         'This identifier will be used in the logs and can be used to identify ' \
                         'a verification pass to the --abort, --suspend, --resume and --results ' \
                         'options.')

        addTo.add_option(
            '-c',
            '--content',
            dest='content',
            metavar='<content_id>',
            help=
            'Send verification request only to the primary segment with the given <content_id>.'
        )
        addTo.add_option('--abort', dest='abort', action='store_true',
                         help='Abort a verification request that is in progress.  ' \
                         'Can use --token option to abort a specific verification request.')
        addTo.add_option('--suspend', dest='suspend', action='store_true',
                         help='Suspend a verification request that is in progress.  ' \
                         'Can use --token option to suspend a specific verification request.')
        addTo.add_option('--resume', dest='resume', action='store_true',
                         help='Resume a suspended verification request.  Can use the ' \
                         '--token option to resume a specific verification request.')
        addTo.add_option('--fileignore', dest='ignore_file', metavar='<ignore_file>',
                         help='Ignore any filenames matching <ignore_file>.  Multiple ' \
                         'files can be specified using a comma separated list.')
        addTo.add_option('--dirignore', dest='ignore_dir', metavar='<ignore_dir>',
                         help='Ignore any directories matching <ignore_dir>.  Multiple ' \
                         'directories can be specified using a comma separated list.')

        addTo = OptionGroup(parser, "Reporting Options")
        parser.add_option_group(addTo)
        addTo.add_option('--results', dest='results', action='store_true',
                         help='Display verification results.  Can use ' \
                         'the --token option to view results of a specific verification request.')
        addTo.add_option(
            '--resultslevel',
            dest='results_level',
            action='store',
            metavar='<detail_level>',
            type=int,
            help=
            'Level of detail to show for results. Valid levels are from 1 to 10.'
        )
        addTo.add_option(
            '--clean',
            dest='clean',
            action='store_true',
            help=
            'Clean up verification artifacts and the gp_verification_history table.'
        )

        addTo = OptionGroup(parser, "Misc. Options")
        parser.add_option_group(addTo)
        addTo.add_option(
            '-B',
            '--parallel',
            action='store',
            default=64,
            type=int,
            help='Number of worker threads used to send verification requests.'
        )

        parser.set_defaults()
        return parser
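The Request Type options above (--full, --file, --directorytree) presumably select one verification mode at a time. The following self-contained sketch shows one way such a mutual-exclusion check could be written; this is an assumed validation pattern, not gpverify's actual code.

from optparse import OptionParser

parser = OptionParser()
parser.add_option('--full', action='store_true', dest='full', default=False)
parser.add_option('--file', dest='verify_file')
parser.add_option('--directorytree', dest='verify_dir')
options, _ = parser.parse_args(['--full'])

# More than one request type is assumed to be an error (assumption, see lead-in above).
requested = [opt for opt in (options.full, options.verify_file, options.verify_dir) if opt]
if len(requested) > 1:
    parser.error('Specify only one of --full, --file or --directorytree')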
def parseargs():
    parser = OptParser(option_class=OptChecker)
    parser.remove_option('-h')
    parser.add_option('-h', '-?', '--help',          action='store_true')
    parser.add_option('-d', '--dbname',              type='string')
    parser.add_option('-u', '--user',                type='string')
    parser.add_option('-p', '--password',            type='string')
    parser.add_option('-n', '--nthreads',            type='int')
    parser.add_option('-s', '--stat_mem',            type='string')
    parser.add_option('-f', '--tablefile',           type='string')
    parser.add_option('-t', '--distkeyfile',         type='string')
    parser.add_option('-m', '--metadatatablesuffix', type='string')
    (options, args) = parser.parse_args()
    if options.help:
        print """Script performs analysis of table row number and number of
unique values of table distribution key
Usage:
./data_consistency_check.py -d dbname [-n thread_number] [-u user_name] [-p password]
                                      [-s statement_mem] [-f tablefile] [-t distkeyfile]
                                      [-m metadatatablesuffix]
Parameters:
    -d | --dbname    - name of the database to process
    -n | --nthreads  - number of parallel threads to run
    -u | --user      - user to connect to the database
    -p | --password  - password to connect to the database
    -s | --stat_mem         - the value of statement_mem to use
    -f | --tablefile        - file with the list of tables to process
    -t | --distkeyfile      - file with the tables which should be analyzed with
                              counting distinct values of distribution key
    -m | --metadatatablesuffix
                            - suffix for the table to store script metadata in
Metadata objects created are:
    public.__zz_pivotal_{suffix}   - view with the final information on row counts
    public.__zz_pivotal_{suffix}_l - list of tables to process
    public.__zz_pivotal_{suffix}_p - current progress of table row count calculation
After the script has been run for the second time, join the two metadata tables
on the "tablename" field like this:
select  m1.tablename as table_first,
        m2.tablename as table_second,
        m1.rowcount  as rows_before,
        m2.rowcount  as rows_after,
        m1.distkeycount as dist_keys_before,
        m2.distkeycount as dist_keys_after
    from {metadatatable1} as m1
        full outer join {metadatatable2} as m2
        on m1.tablename = m2.tablename
    where m1.tablename is null
        or m2.tablename is null
        or m1.rowcount is distinct from m2.rowcount
        or m1.distkeycount is distinct from m2.distkeycount
"""
        sys.exit(0)
    if not options.dbname:
        logger.error('Failed to start utility. Please specify the database name with the "-d" option')
        sys.exit(1)
    if not options.nthreads:
        logger.info('Number of threads is not specified. Using 1 by default')
        options.nthreads = 1
    if not options.stat_mem:
        logger.info('Statement memory is not specified. Using 125MB by default')
        options.stat_mem = '125MB'
    if not options.metadatatablesuffix:
        logger.info('Metadata table suffix is not specified. Using "table_list" by default')
        options.metadatatablesuffix = 'table_list'
    else:
        if not re.match('^[0-9a-z_]*$', options.metadatatablesuffix):
            logger.error('Metadata suffix must contain only lowercase letters, digits 0-9 and underscores')
            sys.exit(1)
    if not options.tablefile:
        logger.info('No tablefile specified. Will process all the tables in the database by default')
    if not options.distkeyfile:
        logger.info('No distribution key table file specified. Will omit distribution key analysis')
    return options
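The metadata-table suffix accepted by -m is validated near the end of parseargs() with a simple regular expression. Below is a small standalone illustration of that rule; the sample suffixes are made up.

import re

def is_valid_suffix(suffix):
    # Same pattern as above: only lowercase letters, digits 0-9 and underscores.
    return re.match('^[0-9a-z_]*$', suffix) is not None

assert is_valid_suffix('table_list')
assert is_valid_suffix('run_2024_01')
assert not is_valid_suffix('Run-1')   # uppercase letters and '-' are rejected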