コード例 #1
0
def makeFlaskOpts(parser):
    """Build the 'Flask Options' option group for *parser*.

    Returns the OptionGroup; the caller is responsible for attaching it
    to the parser via add_option_group().
    """
    group = OptionGroup(parser, 'Flask Options')

    group.add_option(
        '--public',
        action='store_true',
        dest='public',
        default=False,
        help='By default a flask server is not accessible from any external ip address, if this is set, the Flask web server will accept connections from external addresses, can be dangerous if used in conjunction with the debug mode.')

    return group
コード例 #2
0
ファイル: options.py プロジェクト: Gamix2Tos/wharfee
def format_command_help(cmd):
    """
    Format help string for the command.
    :param cmd: string: command name
    :return: string
    """
    options = all_options(cmd)

    # Positional (non-dash) option names go on the usage line, bracketed
    # when the option is marked optional.
    usage_parts = [cmd, '[options]']
    for option in options:
        if not option.name.startswith('-'):
            usage_parts.append(
                "[{0}]".format(option.name) if option.is_optional
                else option.name)

    parser = OptParser(prog=cmd, add_help_option=False,
                       usage=' '.join(usage_parts),
                       conflict_handler='resolve')

    # Standard dash options are registered directly on the parser;
    # everything without a CLI match is collected into its own group.
    non_standard = []
    for option in options:
        if option.cli_match:
            if option.name.startswith('-'):
                parser.add_option(*option.args, **option.kwargs)
        else:
            non_standard.append(option)

    if non_standard:
        group = OptionGroup(parser, "Non-standard options")
        for option in non_standard:
            group.add_option(*option.args, **option.kwargs)
        parser.add_option_group(group)

    return parser.format_help()
コード例 #3
0
ファイル: run_options.py プロジェクト: SEGaL-Group/scm2pgsql
  def _get_partial_conversion_options_group(self):
    """Build the 'Partial conversions' option group.

    Registers --pass and --passes/-p; both delegate to
    self.callback_passes (defined elsewhere) to record the requested
    pass or pass range.

    :return: the populated OptionGroup (not yet added to the parser)
    """
    group = OptionGroup(self.parser, 'Partial conversions')
    # ManOption carries an extra man_help kwarg with roff markup used
    # for man-page generation.
    group.add_option(ManOption(
        '--pass', type='string',
        action='callback', callback=self.callback_passes,
        help='execute only specified PASS of conversion',
        man_help=(
            'Execute only pass \\fIpass\\fR of the conversion. '
            '\\fIpass\\fR can be specified by name or by number (see '
            '\\fB--help-passes\\fR).'
            ),
        metavar='PASS',
        ))
    group.add_option(ManOption(
        '--passes', '-p', type='string',
        action='callback', callback=self.callback_passes,
        help=(
            'execute passes START through END, inclusive (PASS, '
            'START, and END can be pass names or numbers)'
            ),
        man_help=(
            'Execute passes \\fIstart\\fR through \\fIend\\fR of the '
            'conversion (inclusive). \\fIstart\\fR and \\fIend\\fR can be '
            'specified by name or by number (see \\fB--help-passes\\fR). '
            'If \\fIstart\\fR or \\fIend\\fR is missing, it defaults to '
            'the first or last pass, respectively. For this to work the '
            'earlier passes must have been completed before on the '
            'same CVS repository, and the generated data files must be '
            'in the temporary directory (see \\fB--tmpdir\\fR).'
            ),
        metavar='[START]:[END]',
        ))

    return group
コード例 #4
0
ファイル: s7comm_client.py プロジェクト: agnivesh/conpot
def AddOptions(parser):
    """Register the S7 scanner option group on *parser*."""
    scanner_group = OptionGroup(parser, "S7 scanner options")
    for flag, help_text in (
            ("--src-tsap", "Try this src-tsap (list) (default: 0x100,0x200)"),
            ("--dst-tsap", "Try this dst-tsap (list) (default: 0x102,0x200,0x201)")):
        scanner_group.add_option(flag, help=help_text, type="string", metavar="LIST")
    parser.add_option_group(scanner_group)
コード例 #5
0
def bmfont2json_parser(description, epilog=None):
    """Standard set of parser options.

    Builds the shared OptionParser for the bmfont2json tool: logging
    flags plus asset-generation, asset-location and file option groups.

    :param description: text shown at the top of --help output
    :param epilog: optional trailing help text
    :return: configured OptionParser
    """
    parser = OptionParser(description=description, epilog=epilog,
                          formatter=TitledHelpFormatter())

    parser.add_option("--version", action="store_true", dest="output_version", default=False,
                      help="output version number")
    # Fixed help-text typo: "verbose outout" -> "verbose output".
    parser.add_option("-v", "--verbose", action="store_true", dest="verbose", default=False, help="verbose output")
    parser.add_option("-s", "--silent", action="store_true", dest="silent", default=False, help="silent running")
    parser.add_option("-m", "--metrics", action="store_true", dest="metrics", default=False,
                      help="output asset metrics")
    parser.add_option("--log", action="store", dest="output_log", default=None, help="write log to file")

    group = OptionGroup(parser, "Asset Generation Options")
    group.add_option("-j", "--json_indent", action="store", dest="json_indent", type="int", default=0, metavar="SIZE",
                     help="json output pretty printing indent size, defaults to 0")
    parser.add_option_group(group)

    group = OptionGroup(parser, "Asset Location Options")
    group.add_option("-p", "--prefix", action="store", dest="texture_prefix", default="textures/", metavar="URL",
                     help="texture URL to prefix to all texture references")
    group.add_option("-a", "--assets", action="store", dest="asset_root", default=".", metavar="PATH",
                     help="PATH of the asset root")
    parser.add_option_group(group)

    group = OptionGroup(parser, "File Options")
    group.add_option("-i", "--input", action="store", dest="input", default=None, metavar="FILE",
                     help="source FILE to process")
    group.add_option("-o", "--output", action="store", dest="output", default="default.json", metavar="FILE",
                     help="output FILE to write to")
    parser.add_option_group(group)

    return parser
コード例 #6
0
ファイル: util.py プロジェクト: mesosphere/aws-cfn-bootstrap
def get_proxy_options(parser):
    """Build the "Proxy" option group for *parser*.

    NOTE: the previous version passed "" as a first (short-option)
    argument to add_option; optparse silently discards empty option
    strings via filter(None, ...), so those placeholders were no-ops
    and have been removed.

    :param parser: OptionParser the group is created against
    :return: the populated OptionGroup (caller attaches it)
    """
    proxy_group = OptionGroup(parser, "Proxy", "Options for specifying proxies. Format: [scheme://][user:password@]host:port")

    proxy_group.add_option("--http-proxy", help="A (non-SSL) HTTP proxy", type="string", dest="http_proxy")
    proxy_group.add_option("--https-proxy", help="An HTTPS proxy", type="string", dest="https_proxy")

    return proxy_group
コード例 #7
0
ファイル: run_options.py プロジェクト: SEGaL-Group/scm2pgsql
 def _get_options_file_options_group(self):
   """Build the 'Configuration via options file' option group.

   Registers --options, which delegates to self.callback_options
   (defined elsewhere) to read conversion options from a file; also
   seeds the parser default options_file_found = False.

   :return: the populated OptionGroup (not yet added to the parser)
   """
   group = OptionGroup(
       self.parser, 'Configuration via options file'
       )
   self.parser.set_default('options_file_found', False)
   # ManOption carries an extra man_help kwarg with roff markup used
   # for man-page generation.
   group.add_option(ManOption(
       '--options', type='string',
       action='callback', callback=self.callback_options,
       help=(
           'read the conversion options from PATH.  This '
           'method allows more flexibility than using '
           'command-line options.  See documentation for info'
           ),
       man_help=(
           'Read the conversion options from \\fIpath\\fR instead of from '
           'the command line.  This option allows far more conversion '
           'flexibility than can be achieved using the command-line alone. '
           'See the documentation for more information.  Only the following '
           'command-line options are allowed in combination with '
           '\\fB--options\\fR: \\fB-h\\fR/\\fB--help\\fR, '
           '\\fB--help-passes\\fR, \\fB--version\\fR, '
           '\\fB-v\\fR/\\fB--verbose\\fR, \\fB-q\\fR/\\fB--quiet\\fR, '
           '\\fB-p\\fR/\\fB--pass\\fR/\\fB--passes\\fR, \\fB--dry-run\\fR, '
           '\\fB--profile\\fR, \\fB--sort\\fR, \\fB--trunk-only\\fR, '
           '\\fB--encoding\\fR, and \\fB--fallback-encoding\\fR. '
           'Options are processed in the order specified on the command '
           'line.'
           ),
       metavar='PATH',
       ))
   return group
コード例 #8
0
ファイル: freedm.py プロジェクト: akella84/FREEDM
def generate_parser():
    """Build the option parser for the FREEDM DGI test driver.

    Covers host selection, dry-run/timeout behaviour and the network
    experiment mode options (gathered in their own group).

    Several help strings were previously built from adjacent string
    literals missing separating spaces (e.g. "commandtimeout",
    "connectedto"); those, the "Fomrat" typo and an unclosed
    parenthesis in the group description are fixed here.

    :return: configured OptionParser
    """
    parser = OptionParser()
    parser.add_option("-f", "--hostname-file", dest="hostfile",
        help="File containing Hostnames used in the test, one per line.")
    parser.add_option("-d", "--dry-run", action="store_true", dest="dryrun",
        default=False, help="Don't actually issue the DGI startup commands, "
                            "just show the procedure.")
    parser.add_option("-n", "--hostname", dest="hostnames", action="append",
        help="UUIDs to use for this experiment set.")
    parser.add_option("-t", "--time", dest="time", default=None,
        help="The option that will be provided to the unix command "
             "timeout to terminate the run. Format is of an argument to the "
             "UNIX timeout command. e.g. 10m for a 10 minute run.")
    parser.add_option("-c", "--line-client", dest="lineclient",
        help="If specified, indicates the hostname which should be connected "
             "to in order to start the line server")
    parser.add_option("-e", "--experiment", dest="experiment", action="store_true",
        help="Enable experiment mode, which can be used to observe the effects "
             "of network link reliability on the system")
    expgroup = OptionGroup(parser, "Network Experiments",
        "These options will only be used in network experiment mode (-e). In "
        "this mode, the program will run for a specified period of time with "
        "incrementing network settings (from 0 up to full reliability)")
    expgroup.add_option("-o", "--output-file", dest="outputfile", default="exp.dat",
        help="File to write the experiment table to, if not provided, "
             "defaults to exp.dat")
    expgroup.add_option("-g", "--granularity", dest="granularity", default=5, type="int",
        help="The simulator will run experiments with a step up or down in the "
             "granularity for each network connection.")
    parser.add_option_group(expgroup)
    return parser
コード例 #9
0
ファイル: StatsServer.py プロジェクト: AndresTanasijczuk/DBS
def get_command_line_options(executable_name, arguments):
    """Parse and validate command-line options for the StatServer.

    Exactly one of --xmlrpc/--npipe must be given, --out is mandatory,
    and --npipe additionally requires --input.  Validation failures
    print the help text and exit via parser.error().

    Note: *arguments* is kept for interface compatibility; optparse
    reads the options from sys.argv.

    :param executable_name: program name shown in the usage string
    :return: the parsed options object
    """
    parser = OptionParser(usage="%s options" % executable_name)
    parser.set_defaults(port=9876)
    parser.add_option("-x", "--xmlrpc", action="store_true", dest="xml", help="Start XMLRPC StatServer")
    parser.add_option("-n", "--npipe", action="store_true", dest="pipe", help="Start named pipe StatServer")
    parser.add_option("-o", "--out", type="string", dest="output", help="Output DB File")

    # Server-specific options live in their own groups.
    for title, short_flag, long_flag, dest, help_text in (
            ("Options for XMLRPC Server", "-p", "--port", "port",
             "port to run the server"),
            ("Options for named pipe Server", "-i", "--input", "pipe_name",
             "Filename of the named pipe")):
        server_group = OptionGroup(parser, title)
        server_group.add_option(short_flag, long_flag, type="string",
                                dest=dest, help=help_text)
        parser.add_option_group(server_group)

    options, _ = parser.parse_args()

    # Require exactly one server mode.
    if (options.xml and options.pipe) or (options.xml is None and options.pipe is None):
        parser.print_help()
        parser.error("You need to provide one of the following options, --xmlrpc or --npipe")

    if not options.output:
        parser.print_help()
        parser.error("You need to provide following options, --out=OutputDB.db")

    if options.pipe and not options.pipe_name:
        parser.print_help()
        parser.error("You need to provide following options, --input=named_pipe")

    return options
コード例 #10
0
    def run(self, *args):
        """Create a new site.

        Parses *args* for --empty/--demo, then builds either an empty
        site or one populated with the sample data, and finally writes
        the configuration for the target folder.
        """
        parser = OptionParser(usage=self.usage)
        site_group = OptionGroup(parser, "Site Options")
        site_group.add_option(
            "--empty", action="store_true", dest='empty', default=True,
            help="Create an empty site with only a config.")
        site_group.add_option("--demo", action="store_false", dest='empty',
                              help="Create a site filled with example data.")
        parser.add_option_group(site_group)
        options, positional = parser.parse_args(list(args))

        # A target folder is mandatory.
        if not positional:
            print("Usage: nikola init folder [options]")
            return

        target = positional[0]
        if target is None:
            print(self.usage)
            return

        if options.empty:
            self.create_empty_site(target)
            print('Created empty site at %s.' % target)
        else:
            self.copy_sample_site(target)
            print("A new site with example data has been created at %s."
                  % target)
            print("See README.txt in that folder for more information.")

        self.create_configuration(target)
コード例 #11
0
ファイル: dependencyFactory.py プロジェクト: UAEDF/vbfHbb
def parser(mp=None):
	"""Attach the dependencyFactory option group to *mp*.

	:param mp: existing OptionParser to extend; a new one is created
		when omitted.
	:return: the parser with the group attached.
	"""
	# Idiom fix: compare to None with `is`, not `==`.
	if mp is None:
		mp = OptionParser()
	mg1 = OptionGroup(mp, cyan + "dependencyFactory settings" + plain)
	mg1.add_option('--bmap', help='Calculate btagmap JetMon/QCD.', default=False, action='store_true')
	mg1.add_option('--kfac', help='Calculate k-factor.', default=False, action='store_true')
	mp.add_option_group(mg1)
	return mp
コード例 #12
0
    def register_options(cls, parser):
        """Attach the Nagios threshold options (-w/-c) to *parser*."""
        thresholds = OptionGroup(parser, 'Nagios specific options')
        for short_flag, long_flag, dest in (
                ('-w', '--warning', 'warning'),
                ('-c', '--critical', 'critical')):
            thresholds.add_option(short_flag, long_flag, dest=dest)
        parser.add_option_group(thresholds)
コード例 #13
0
    def register_options(cls, parser):
        """Attach the Cacti-specific options to *parser*."""
        cacti_group = OptionGroup(parser, 'Cacti specific options')
        cacti_group.add_option(
            '-l', '--leader', dest='leader', action="store_true",
            help="only query the cluster leader")
        parser.add_option_group(cacti_group)
コード例 #14
0
ファイル: SegAmp.py プロジェクト: psibre/artimate
def parse_options():
    """Build the SegAmp command-line parser and parse sys.argv.

    Single-file inputs default to test.amp/test.seg/test.wav; batch
    processing via directories is offered in its own group.

    :return: the (options, args) pair from optparse
    """
    parser = OptionParser()

    single_file_opts = (
        ("-a", "--ampfile", "ampsname", "AMP", "single input .amp file (default: %default)"),
        ("-s", "--segfile", "segsname", "SEG", "single input .seg file (default: %default)"),
        ("-w", "--wavfile", "wavname", "WAV", "single input .wav file (default: %default)"),
    )
    for short_flag, long_flag, dest, metavar, help_text in single_file_opts:
        parser.add_option(short_flag, long_flag, dest=dest, metavar=metavar, help=help_text)

    batch = OptionGroup(parser, "Batch processing")
    batch.add_option(
        "-i", "--input-dir", dest="indir",
        help="input directory; must contain wav/ and amps/ subdirectories with matching filesets.")
    batch.add_option(
        "-o", "--output-dir", dest="outdir",
        help="output directory (will be created if it doesn't exist)")
    parser.add_option_group(batch)

    parser.set_defaults(ampsname="test.amp", segsname="test.seg", wavname="test.wav")

    return parser.parse_args()
コード例 #15
0
    def add_options(cls, parser):
        """Register CA certificate management options on *parser*.

        Extends the base-class options with the Directory Manager
        password plus dedicated "Renew" and "Install" option groups.
        """
        super(CACertManage, cls).add_options(parser)

        parser.add_option(
            "-p", "--password", dest='password',
            help="Directory Manager password")

        # --self-signed / --external-ca are a store_true/store_false
        # pair sharing the same dest.
        renew = OptionGroup(parser, "Renew options")
        renew.add_option(
            "--self-signed", dest='self_signed', action='store_true',
            help="Sign the renewed certificate by itself")
        renew.add_option(
            "--external-ca", dest='self_signed', action='store_false',
            help="Sign the renewed certificate by external CA")
        renew.add_option(
            "--external-cert-file", dest="external_cert_files",
            action="append", metavar="FILE",
            help="File containing the IPA CA certificate and the external CA "
                 "certificate chain")
        parser.add_option_group(renew)

        install = OptionGroup(parser, "Install options")
        install.add_option(
            "-n", "--nickname", dest='nickname',
            help="Nickname for the certificate")
        install.add_option(
            "-t", "--trust-flags", dest='trust_flags', default='C,,',
            help="Trust flags for the certificate in certutil format")
        parser.add_option_group(install)
コード例 #16
0
ファイル: jardiff.py プロジェクト: jenix21/python-javatools
def jardiff_optgroup(parser):

    """ option group specific to the tests in jardiff """

    from optparse import OptionGroup

    checking = OptionGroup(parser, "JAR Checking Options")

    checking.add_option("--ignore-jar-entry", action="append", default=[])

    # Boolean ignore flags, all defaulting to off.
    for flag, help_text in (
            ("--ignore-jar-signature", "Ignore JAR signing changes"),
            ("--ignore-manifest", "Ignore changes to manifests"),
            ("--ignore-manifest-subsections",
             "Ignore changes to manifest subsections")):
        checking.add_option(flag, action="store_true", default=False,
                            help=help_text)

    checking.add_option("--ignore-manifest-key",
                        action="append", default=[],
                        help="case-insensitive manifest keys to ignore")

    return checking
コード例 #17
0
def parse_options():
    """Parse command-line options for the SAP login-information gatherer.

    Exits with a parser error when no remote host is supplied.

    :return: the parsed options object
    """
    description = ("This example script gather information provided by a SAP Netweaver Application Server during the "
                   "login process. This information includes generally hostname, instance, database name, etc.")

    epilog = "pysap %(version)s - %(url)s - %(repo)s" % {"version": pysap.__version__,
                                                         "url": pysap.__url__,
                                                         "repo": pysap.__repo__}

    parser = OptionParser(usage="Usage: %prog [options] -H <remote host>",
                          description=description, epilog=epilog)

    target = OptionGroup(parser, "Target")
    target.add_option("-d", "--remote-host", dest="remote_host", help="SAP remote host")
    target.add_option("-p", "--remote-port", dest="remote_port", type="int",
                      help="SAP remote port [%default]", default=3200)
    target.add_option("--route-string", dest="route_string",
                      help="Route string for connecting through a SAP Router")
    parser.add_option_group(target)

    misc = OptionGroup(parser, "Misc options")
    misc.add_option("-v", "--verbose", dest="verbose", action="store_true", default=False,
                    help="Verbose output [%default]")
    misc.add_option("--terminal", dest="terminal", default=None, help="Terminal name")
    parser.add_option_group(misc)

    options, _ = parser.parse_args()

    if not options.remote_host:
        parser.error("Remote host is required")

    return options
コード例 #18
0
ファイル: mssql_query.py プロジェクト: wstrucke/sysadmin
def parseArgs(req, opt):
    """Build an option parser from required/optional option specs and parse sys.argv.

    :param req: iterable of (short_flag, long_name, help, default) tuples
        for required options; every parsed value must be truthy or the
        program prints help and exits with status 3.
    :param opt: iterable of the same shape for optional options.
    :return: dict mapping each option's dest to its parsed value.
    """
    tome = {}
    usage = "usage: %prog -H hostname -U user -P password [-D database] [-q query_sql]"
    parser = OptionParser(usage=usage)

    # Declare required options.
    required = OptionGroup(parser, "Required Options")
    for arg in req:
        required.add_option(arg[0], '--' + arg[1], help=arg[2], default=arg[3])
    parser.add_option_group(required)

    # Declare optional options.
    optional = OptionGroup(parser, "Optional Options of Redundancy")
    for arg in opt:
        optional.add_option(arg[0], '--' + arg[1], help=arg[2], default=arg[3])
    parser.add_option_group(optional)

    # Parse all args in options.args, convert to iterable dictionary.
    (options, args) = parser.parse_args()
    for arg in required.option_list:
        tome[arg.dest] = getattr(options, arg.dest)
        if not tome[arg.dest]:
            # print() with a single argument is valid in both Python 2
            # and 3, unlike the former Python-2-only "print ..." syntax.
            print("All arguments listed in Required Options must have a value.")
            parser.print_help()
            sys.exit(3)
    for arg in optional.option_list:
        tome[arg.dest] = getattr(options, arg.dest)
    return tome
コード例 #19
0
ファイル: manager.py プロジェクト: troolee/jscc
    def run(self, argv):
        """Parse *argv*, configure logging, and dispatch to the selected mode.

        The chosen mode method (create/update/watch) is looked up on
        self and invoked with the project filename and force flag.
        """
        parser = OptionParser(usage="%prog [options] [mode] [project]",
                              description='The jscc is a tool that helps you compile js code using google closure compiler.')

        mode_group = OptionGroup(parser, "Mode options (only specify one)")
        for short_flag, long_flag, mode_name, help_text in (
                ('-c', '--create', 'create', 'Create a new project.'),
                ('-u', '--update', 'update', 'Update the project. This is default mode.'),
                ('-w', '--watch', 'watch', 'Monitor the project for changes and update.')):
            mode_group.add_option(short_flag, long_flag, action='store_const',
                                  dest='mode', const=mode_name, help=help_text)
        parser.add_option_group(mode_group)

        parser.add_option('--compiler', default='closure-compiler', help='Path to the google closure compiler. By default used: closure-compiler (see INSTALL for more details)')
        parser.add_option('-f', '--force', dest='force', action='store_true', help='Force recompile the project.')
        parser.add_option('-D', '--debug-mode', dest='debug_mode', action='store_true', help='Do not compile files. Just concatinate them.')
        parser.add_option('-d', '--debug', action='store_true', help='Display debug output')

        options, positional = parser.parse_args(argv)
        if len(positional) > 2:
            parser.error("incorrect number of arguments")

        self.compiler = options.compiler
        self.debug_mode = options.debug_mode or False
        force = options.force or False

        level = logging.DEBUG if options.debug else logging.INFO
        logging.basicConfig(level=level)

        mode = options.mode or 'update'
        project = positional[1] if len(positional) > 1 else DEFAULT_PROJECT_FILENAME
        getattr(self, mode)(project, force=force)
コード例 #20
0
ファイル: dx.py プロジェクト: swails/spam
def test(args):
   from optparse import OptionParser, OptionGroup
   import sys

   parser = OptionParser()
   parser.add_option('-O', '--overwrite', dest='owrite', default=False,
                     action='store_true', help='Allow overwriting files')
   group = OptionGroup(parser, 'ThreeDGrid', 'Test the ThreeDGrid Class')
   group.add_option('-d', '--dx', dest='indx', default=None,
                    help='Input DX file.', metavar='FILE')
   group.add_option('-o', '--out-dx', dest='outdx', default=None,
                    help='Output DX file. Will just be a copy of the input.',
                    metavar='FILE')
   parser.add_option_group(group)
   group = OptionGroup(parser, 'Density Queries', 'Test some of the querying ' +
                       'functionality of the ThreeDGrid class')
   group.add_option('-x', '--x', dest='x', metavar='X_COORDINATE', default=None,
                    type='float', help='X-coordinate to find the density from')
   group.add_option('-y', '--y', dest='y', metavar='Y_COORDINATE', default=None,
                    type='float', help='Y-coordinate to find the density from')
   group.add_option('-z', '--z', dest='z', metavar='Z_COORDINATE', default=None,
                    type='float', help='Z-coordinate to find the density from')
   parser.add_option_group(group)

   opt, arg = parser.parse_args(args=args)

   global overwrite
   overwrite = opt.owrite

   if opt.outdx and not opt.indx:
      print("I cannot output a DX file without an input!")
      sys.exit(1)

   if opt.indx:
      test_dx = read_dx(opt.indx)
      print "%s read successfully" % opt.indx
      print "Grid Statistics:"
      print "  Origin:     (%g, %g, %g)" % (test_dx.xorigin, test_dx.yorigin,
                                        test_dx.zorigin)
      print "  Resolution:  %g [%g, %g]" % (test_dx.xres, test_dx.yres, 
                                           test_dx.zres)
      print "  Grid points: %d x %d x %d" % test_dx.shape
      print "  Grid desc: [ %s ]" % test_dx.description

      if opt.outdx:
         print "\nWriting output DX file %s" % opt.outdx
         test_dx.write_dx(opt.outdx)

   if (opt.x is None or opt.y is None or opt.z is None) and (
       opt.x is not None or opt.y is not None or opt.z is not None):
      print 'You must specify -x, -y, and -z all together!'
      sys.exit(1)

   if opt.x is not None and opt.indx is None:
      print 'You must give me an input DX file to analyze...'
      sys.exit(1)

   if opt.x is not None:
      print "The density at {%f, %f, %f} is %g" % (opt.x, opt.y, opt.z,
         test_dx.get_density_cartesian(opt.x, opt.y, opt.z))
コード例 #21
0
ファイル: doxypypy.py プロジェクト: ashuang/procman
    def optParse():
        """
        Parses command line options.

        Generally we're supporting all the command line options that doxypy.py
        supports in an analogous way to make it easy to switch back and forth.
        We additionally support a top-level namespace argument that is used
        to trim away excess path information.
        """
        parser = OptionParser(prog=basename(argv[0]))
        parser.set_usage("%prog [options] filename")

        # Mirror the doxypy.py flags.
        for flags, kwargs in (
                (("-a", "--autobrief"),
                 dict(action="store_true", dest="autobrief",
                      help="parse the docstring for @brief description and other information")),
                (("-c", "--autocode"),
                 dict(action="store_true", dest="autocode",
                      help="parse the docstring for code samples")),
                (("-n", "--ns"),
                 dict(action="store", type="string", dest="topLevelNamespace",
                      help="specify a top-level namespace that will be used to trim paths")),
                (("-t", "--tablength"),
                 dict(action="store", type="int", dest="tablength", default=4,
                      help="specify a tab length in spaces; only needed if tabs are used"))):
            parser.add_option(*flags, **kwargs)

        debug_group = OptionGroup(parser, "Debug Options")
        debug_group.add_option(
            "-d", "--debug",
            action="store_true", dest="debug",
            help="enable debug output on stderr"
        )
        parser.add_option_group(debug_group)

        ## Parse options based on our definition.
        (options, filename) = parser.parse_args()

        # Abort immediately if no input file was given.
        if not filename:
            stderr.write("No filename given." + linesep)
            sysExit(-1)

        # Turn the full path filename into a full path module location.
        fullPathNamespace = filename[0].replace(sep, '.')[:-3]
        # Use any provided top-level namespace argument to trim off excess.
        realNamespace = fullPathNamespace
        if options.topLevelNamespace:
            namespaceStart = fullPathNamespace.find(options.topLevelNamespace)
            if namespaceStart >= 0:
                realNamespace = fullPathNamespace[namespaceStart:]
        options.fullPathNamespace = realNamespace

        return options, filename[0]
コード例 #22
0
def parse_options(argv):
    '''
    option parser
    '''
    parser = OptionParser()

    # Required input count files.
    required = OptionGroup(parser, 'Input')
    required.add_option('-s', '--star', dest='fn_star', metavar='FILE', help='STAR count file')
    required.add_option('-t', '--tophat', dest='fn_tophat', metavar='FILE', help='Tophat count file')

    # Optional arguments; defaults point at files next to this script.
    optional = OptionGroup(parser, 'optional')

    base_dir = os.path.realpath(__file__).rsplit('/', 1)[0]

    optional.add_option('-o', '--out_dir', dest='out_dir',
                        help='Output direcory for all files', default=base_dir)
    optional.add_option('-g', '--genelength', dest='fn_length', metavar='FILE',
                        help='Location of file with gene length',
                        default=os.path.join(base_dir, 'data', 'geneLength.tsv'))
    optional.add_option('-a', '--anno', dest='fn_anno', metavar='FILE',
                        help='Annotation file [gtf]',
                        default=os.path.join(base_dir, 'data', 'gencode.v19.annotation.hs37d5_chr.gtf'))

    parser.add_option_group(required)
    parser.add_option_group(optional)
    (options, args) = parser.parse_args()

    return options
コード例 #23
0
ファイル: ms_listener.py プロジェクト: HPxpat/pysap
def parse_options():
    """Parse command-line options for the Message Server listener example.

    Exits with a parser error when no remote host is supplied.

    :return: the parsed options object
    """
    description = ("This example script connects with the Message Server service and listen for messages coming from "
                   "the server.")

    epilog = "pysap %(version)s - %(url)s - %(repo)s" % {"version": pysap.__version__,
                                                         "url": pysap.__url__,
                                                         "repo": pysap.__repo__}

    parser = OptionParser(usage="Usage: %prog [options] -d <remote host>",
                          description=description, epilog=epilog)

    target = OptionGroup(parser, "Target")
    target.add_option("-d", "--remote-host", dest="remote_host", help="Remote host")
    target.add_option("-p", "--remote-port", dest="remote_port", type="int",
                      help="Remote port [%default]", default=3900)
    target.add_option("--route-string", dest="route_string",
                      help="Route string for connecting through a SAP Router")
    parser.add_option_group(target)

    misc = OptionGroup(parser, "Misc options")
    misc.add_option("-c", "--client", dest="client", default="pysap's-listener",
                    help="Client name [%default]")
    misc.add_option("-v", "--verbose", dest="verbose", action="store_true", default=False,
                    help="Verbose output [%default]")
    parser.add_option_group(misc)

    options, _ = parser.parse_args()

    if not options.remote_host:
        parser.error("Remote host is required")

    return options
コード例 #24
0
    def test_help_description_groups(self):
        # Exercise help formatting when the parser has a description, an
        # option group (with its own description) and standalone options;
        # the expected text below must match optparse's output exactly.
        self.parser.set_description(
            "This is the program description for %prog.  %prog has " "an option group as well as single options."
        )

        group = OptionGroup(
            self.parser,
            "Dangerous Options",
            "Caution: use of these options is at your own risk.  " "It is believed that some of them bite.",
        )
        group.add_option("-g", action="store_true", help="Group option.")
        self.parser.add_option_group(group)

        self.assertHelpEquals(
            """\
usage: bar.py [options]

This is the program description for bar.py.  bar.py has an option group as
well as single options.

options:
  -a APPLE           throw APPLEs at basket
  -b NUM, --boo=NUM  shout "boo!" NUM times (in order to frighten away all the
                     evil spirits that cause trouble and mayhem)
  --foo=FOO          store FOO in the foo list for later fooing
  -h, --help         show this help message and exit

  Dangerous Options:
    Caution: use of these options is at your own risk.  It is believed
    that some of them bite.

    -g               Group option.
"""
        )
コード例 #25
0
ファイル: modtool_rename.py プロジェクト: Andy-Vuong/gnuradio
 def setup_parser(self):
     """Extend the base ModTool parser with the rename-specific options."""
     parser = ModTool.setup_parser(self)
     rename_opts = OptionGroup(parser, "Rename module options")
     for flags, help_text in (
             (("-o", "--old-name"), "Current name of the block to rename."),
             (("-u", "--new-name"), "New name of the block.")):
         rename_opts.add_option(*flags, type="string", default=None,
                                help=help_text)
     parser.add_option_group(rename_opts)
     return parser
コード例 #26
0
ファイル: main.py プロジェクト: pymontecarlo/pymontecarlo-cli
def _create_parser(programs):
    description = "pyMonteCarlo Command Line Interface. Runs simulation(s) " + \
                  "with different Monte Carlo programs from the same interface." + \
                  "After the simulations, the results are automatically saved " + \
                  "in the output directory."
    epilog = "For more information, see http://pymontecarlo.bitbucket.org"

    parser = OptionParser(usage="%prog [options] [OPTION_FILE...]",
                          description=description, epilog=epilog)

    # Base options
    parser.add_option("-o", '--outputdir', dest="outputdir", default=os.getcwd(), metavar="DIR",
                      help="Output directory where results from simulation(s) will be saved [default: current directory]")
    parser.add_option("-w", '--workdir', dest="workdir", default=None, metavar="DIR",
                      help="Work directory where temporary files from simulation(s) will be stored [default: temporary folder]")
    parser.add_option('-v', '--verbose', dest='verbose', default=False,
                      action='store_true', help='Debug mode')
    parser.add_option('-q', '--quiet', dest='quiet', default=False,
                      action='store_true', help='Quite mode (no progress bar is shown)')
    parser.add_option("-n", dest="nbprocesses", default=1, type="int",
                      help="Number of processes/threads to use (not applicable for all Monte Carlo programs) [default: %default]")
    parser.add_option('-s', '--skip', dest='skip', default=False,
                      action='store_true', help='Skip simulation if results already exist')

    # Program group
    group = OptionGroup(parser, "Monte Carlo programs",
                        "Note: Only supported programs are shown.")

    for alias in sorted(map(attrgetter('alias'), programs)):
        group.add_option('--%s' % alias, dest=alias, action="store_true")

    parser.add_option_group(group)

    return parser
コード例 #27
0
ファイル: build_configuration.py プロジェクト: zweiein/kaldi
 def add_opts(self, parser, module_list = None):
     """Attach one OptionGroup per selected module to *parser*.

     The 'general' and 'logging' modules are always included; any extra
     modules in *module_list* are appended.  Only keys listed in each
     module's comma-separated 'options' value are exposed, and keys
     ending in '_desc' supply the help text for their counterpart.
     """
     wanted = ['general', 'logging'] + (module_list or [])
     for module in self.dataseq:
         name = module[0]
         if name not in wanted:
             continue
         raw = self.getval(name, 'options')
         opts = raw.split(',') if raw else []
         # Only create a group when the module actually exposes options.
         group = OptionGroup(parser, name) if opts else None
         for key, val in module[1]:
             if key.endswith('_desc'):
                 continue
             if key not in opts:
                 continue
             desc = self.getval(name, key + '_desc') \
                 or 'Refer to Idlak voice build documentation'
             if val in ('True', 'False'):
                 # Boolean-looking values become store_true flags.
                 group.add_option("--" + key, dest=key, action="store_true",
                                  help=desc)
             else:
                 group.add_option("--" + key, dest=key, default=None,
                                  help=desc)
         if group:
             parser.add_option_group(group)
コード例 #28
0
ファイル: delete.py プロジェクト: nzwulfin/spaceclone
def run(parser, rhn, logger):
    """Interactively delete a cloneset from a Satellite server.

    Adds the standard satellite connection options plus a --cloneset
    option, connects to the server, shows the selected cloneset, and
    deletes it only after the user confirms with exactly "Y".
    """

    # Standard connection options (server, username, password).
    parser.add_satellite_options()

    parser.set_required(["sat_server", "sat_username", "sat_password", "cloneset"])

    group = OptionGroup(parser.parser, "Show Options")
    group.add_option("-c", "--cloneset", action="store", type="string", dest="cloneset", help="Cloneset")
    parser.add_group(group)

    (options, args) = parser.parse()

    # NOTE(review): the ``rhn`` parameter is rebound here, so the value
    # passed in by the caller is never used — confirm this is intentional.
    rhn = Satellite(options.sat_server, options.sat_username, options.sat_password)

    try:
        cloneset = rhn.cloneset_info(options.cloneset)
    except KeyError:
        # cloneset_info raises KeyError for an unknown cloneset name.
        print "Cloneset not found: %s" % options.cloneset
        sys.exit()

    print ""
    print "Selected cloneset: " + options.cloneset
    print ""
    # Display the cloneset's contents so the user sees what will be removed.
    show.run(parser, rhn, logger)

    # Anything other than an exact "Y" (after stripping trailing
    # whitespace) aborts the deletion.
    response = raw_input("Are you sure? (This can't be undone) [Y/n] ")
    if response.rstrip() == "Y":
        cloneset.delete()
    else:
        print "Aborted."
コード例 #29
0
ファイル: script_options.py プロジェクト: pearu/iocbio
def get_microscope_options_group(parser):
    """Return an OptionGroup holding microscope environment options.

    These options should only be given when the input images do not
    already carry the corresponding metadata: any value specified here
    overrides the value found in the input image files.

    :param parser: optparse.OptionParser the group belongs to
    :return: optparse.OptionGroup
    """
    group = OptionGroup(parser, 'Microscope options',
                        description='''\
Specify microscope environment options. Note that these options should \
be used only when input images do not contain information about required options. \
Be warned that options specified here will override the options values \
found in input image files, so it is recommended to keep the fields of \
microscope options empty.''')
    # Fixed typo in help text: "objectve" -> "objective".
    group.add_option("--objective-na", dest='objective_na',
                     type='float', metavar='FLOAT',
                     help='Specify the numerical aperture of microscope objective.')
    group.add_option("--excitation-wavelength", dest='excitation_wavelength',
                     type='float', metavar='FLOAT',
                     help='Specify excitation wavelength in nm.')
    group.add_option("--emission-wavelength", dest='emission_wavelength',
                     type='float', metavar='FLOAT',
                     help='Specify emission wavelength in nm.')
    group.add_option("--refractive-index", dest='refractive_index',
                     type='float', default=NO_DEFAULT, metavar='FLOAT',
                     help='Specify refractive index of immersion medium:')
    group.add_option("--microscope-type", dest='microscope_type',
                     choices=['<detect>', 'confocal', 'widefield'],
                     default='<detect>',
                     help='Specify microscope type.',
                     )
    return group
コード例 #30
0
ファイル: base.py プロジェクト: pombredanne/weboob
 def __init__(self, option_parser=None):
     """Initialize the application: logger, weboob instance, and the
     command-line parser with application-specific and logging options.

     :param option_parser: an existing OptionParser to reuse; when None,
         a new one is built from the class SYNOPSIS and version string.
     """
     self.logger = getLogger(self.APPNAME)
     self.weboob = self.create_weboob()
     # Fall back to the weboob working directory when no configuration
     # directory was set on the class.
     if self.CONFDIR is None:
         self.CONFDIR = self.weboob.workdir
     self.config = None
     self.options = None
     self.condition = None
     if option_parser is None:
         self._parser = OptionParser(self.SYNOPSIS, version=self._get_optparse_version())
     else:
         self._parser = option_parser
     if self.DESCRIPTION:
         self._parser.description = self.DESCRIPTION
     # Subclasses populate this group via add_application_options(); it
     # is attached only when at least one option was actually added.
     app_options = OptionGroup(self._parser, '%s Options' % self.APPNAME.capitalize())
     self.add_application_options(app_options)
     if len(app_options.option_list) > 0:
         self._parser.add_option_group(app_options)
     self._parser.add_option('-b', '--backends', help='what backend(s) to enable (comma separated)')
     self._parser.add_option('-e', '--exclude-backends', help='what backend(s) to exclude (comma separated)')
     self._parser.add_option('-I', '--insecure', action='store_true', help='do not validate SSL')
     # Common logging/verbosity options shared by every application.
     logging_options = OptionGroup(self._parser, 'Logging Options')
     logging_options.add_option('-d', '--debug', action='store_true', help='display debug messages')
     logging_options.add_option('-q', '--quiet', action='store_true', help='display only error messages')
     logging_options.add_option('-v', '--verbose', action='store_true', help='display info messages')
     logging_options.add_option('--logging-file', action='store', type='string', dest='logging_file', help='file to save logs')
     logging_options.add_option('-a', '--save-responses', action='store_true', help='save every response')
     self._parser.add_option_group(logging_options)
     # Hidden option used by shell-completion machinery.
     self._parser.add_option('--shell-completion', action='store_true', help=optparse.SUPPRESS_HELP)
コード例 #31
0
def cmdLineParser(argv=None):
    """
    This function parses the command line parameters and arguments
    """

    if not argv:
        argv = sys.argv

    checkSystemEncoding()

    # Reference: https://stackoverflow.com/a/4012683 (Note: previously used "...sys.getfilesystemencoding() or UNICODE_ENCODING")
    _ = getUnicode(os.path.basename(argv[0]), encoding=sys.stdin.encoding)

    usage = "%s%s [options]" % ("python " if not IS_WIN else "",
                                "\"%s\"" % _ if " " in _ else _)
    parser = OptionParser(usage=usage)

    try:
        parser.add_option("--hh",
                          dest="advancedHelp",
                          action="store_true",
                          help="Show advanced help message and exit")

        parser.add_option("--version",
                          dest="showVersion",
                          action="store_true",
                          help="Show program's version number and exit")

        parser.add_option("-v",
                          dest="verbose",
                          type="int",
                          help="Verbosity level: 0-6 (default %d)" %
                          defaults.verbose)

        # Target options
        target = OptionGroup(
            parser, "Target", "At least one of these "
            "options has to be provided to define the target(s)")

        target.add_option("-d",
                          dest="direct",
                          help="Connection string "
                          "for direct database connection")

        target.add_option(
            "-u",
            "--url",
            dest="url",
            help="Target URL (e.g. \"http://www.site.com/vuln.php?id=1\")")

        target.add_option("-l",
                          dest="logFile",
                          help="Parse target(s) from Burp "
                          "or WebScarab proxy log file")

        target.add_option(
            "-x",
            dest="sitemapUrl",
            help="Parse target(s) from remote sitemap(.xml) file")

        target.add_option("-m",
                          dest="bulkFile",
                          help="Scan multiple targets given "
                          "in a textual file ")

        target.add_option("-r",
                          dest="requestFile",
                          help="Load HTTP request from a file")

        target.add_option("-g",
                          dest="googleDork",
                          help="Process Google dork results as target URLs")

        target.add_option("-c",
                          dest="configFile",
                          help="Load options from a configuration INI file")

        # Request options
        request = OptionGroup(
            parser, "Request", "These options can be used "
            "to specify how to connect to the target URL")

        request.add_option("--method",
                           dest="method",
                           help="Force usage of given HTTP method (e.g. PUT)")

        request.add_option("--data",
                           dest="data",
                           help="Data string to be sent through POST")

        request.add_option(
            "--param-del",
            dest="paramDel",
            help="Character used for splitting parameter values")

        request.add_option("--cookie",
                           dest="cookie",
                           help="HTTP Cookie header value")

        request.add_option("--cookie-del",
                           dest="cookieDel",
                           help="Character used for splitting cookie values")

        request.add_option(
            "--load-cookies",
            dest="loadCookies",
            help="File containing cookies in Netscape/wget format")

        request.add_option("--drop-set-cookie",
                           dest="dropSetCookie",
                           action="store_true",
                           help="Ignore Set-Cookie header from response")

        request.add_option("--user-agent",
                           dest="agent",
                           help="HTTP User-Agent header value")

        request.add_option(
            "--random-agent",
            dest="randomAgent",
            action="store_true",
            help="Use randomly selected HTTP User-Agent header value")

        request.add_option("--host",
                           dest="host",
                           help="HTTP Host header value")

        request.add_option("--referer",
                           dest="referer",
                           help="HTTP Referer header value")

        request.add_option(
            "-H",
            "--header",
            dest="header",
            help="Extra header (e.g. \"X-Forwarded-For: 127.0.0.1\")")

        request.add_option(
            "--headers",
            dest="headers",
            help="Extra headers (e.g. \"Accept-Language: fr\\nETag: 123\")")

        request.add_option(
            "--auth-type",
            dest="authType",
            help="HTTP authentication type (Basic, Digest, NTLM or PKI)")

        request.add_option(
            "--auth-cred",
            dest="authCred",
            help="HTTP authentication credentials (name:password)")

        request.add_option(
            "--auth-file",
            dest="authFile",
            help="HTTP authentication PEM cert/private key file")

        request.add_option("--ignore-code",
                           dest="ignoreCode",
                           type="int",
                           help="Ignore HTTP error code (e.g. 401)")

        request.add_option("--ignore-proxy",
                           dest="ignoreProxy",
                           action="store_true",
                           help="Ignore system default proxy settings")

        request.add_option("--ignore-redirects",
                           dest="ignoreRedirects",
                           action="store_true",
                           help="Ignore redirection attempts")

        request.add_option("--ignore-timeouts",
                           dest="ignoreTimeouts",
                           action="store_true",
                           help="Ignore connection timeouts")

        request.add_option("--proxy",
                           dest="proxy",
                           help="Use a proxy to connect to the target URL")

        request.add_option(
            "--proxy-cred",
            dest="proxyCred",
            help="Proxy authentication credentials (name:password)")

        request.add_option("--proxy-file",
                           dest="proxyFile",
                           help="Load proxy list from a file")

        request.add_option("--tor",
                           dest="tor",
                           action="store_true",
                           help="Use Tor anonymity network")

        request.add_option("--tor-port",
                           dest="torPort",
                           help="Set Tor proxy port other than default")

        request.add_option(
            "--tor-type",
            dest="torType",
            help="Set Tor proxy type (HTTP, SOCKS4 or SOCKS5 (default))")

        request.add_option("--check-tor",
                           dest="checkTor",
                           action="store_true",
                           help="Check to see if Tor is used properly")

        request.add_option("--delay",
                           dest="delay",
                           type="float",
                           help="Delay in seconds between each HTTP request")

        request.add_option(
            "--timeout",
            dest="timeout",
            type="float",
            help="Seconds to wait before timeout connection (default %d)" %
            defaults.timeout)

        request.add_option(
            "--retries",
            dest="retries",
            type="int",
            help="Retries when the connection timeouts (default %d)" %
            defaults.retries)

        request.add_option("--randomize",
                           dest="rParam",
                           help="Randomly change value for given parameter(s)")

        request.add_option(
            "--safe-url",
            dest="safeUrl",
            help="URL address to visit frequently during testing")

        request.add_option("--safe-post",
                           dest="safePost",
                           help="POST data to send to a safe URL")

        request.add_option("--safe-req",
                           dest="safeReqFile",
                           help="Load safe HTTP request from a file")

        request.add_option(
            "--safe-freq",
            dest="safeFreq",
            type="int",
            help="Test requests between two visits to a given safe URL")

        request.add_option("--skip-urlencode",
                           dest="skipUrlEncode",
                           action="store_true",
                           help="Skip URL encoding of payload data")

        request.add_option("--csrf-token",
                           dest="csrfToken",
                           help="Parameter used to hold anti-CSRF token")

        request.add_option(
            "--csrf-url",
            dest="csrfUrl",
            help="URL address to visit to extract anti-CSRF token")

        request.add_option("--force-ssl",
                           dest="forceSSL",
                           action="store_true",
                           help="Force usage of SSL/HTTPS")

        request.add_option("--hpp",
                           dest="hpp",
                           action="store_true",
                           help="Use HTTP parameter pollution method")

        request.add_option(
            "--eval",
            dest="evalCode",
            help=
            "Evaluate provided Python code before the request (e.g. \"import hashlib;id2=hashlib.md5(id).hexdigest()\")"
        )

        # Optimization options
        optimization = OptionGroup(
            parser, "Optimization",
            "These options can be used to optimize the performance of sqlmap")

        optimization.add_option("-o",
                                dest="optimize",
                                action="store_true",
                                help="Turn on all optimization switches")

        optimization.add_option("--predict-output",
                                dest="predictOutput",
                                action="store_true",
                                help="Predict common queries output")

        optimization.add_option("--keep-alive",
                                dest="keepAlive",
                                action="store_true",
                                help="Use persistent HTTP(s) connections")

        optimization.add_option(
            "--null-connection",
            dest="nullConnection",
            action="store_true",
            help="Retrieve page length without actual HTTP response body")

        optimization.add_option("--threads",
                                dest="threads",
                                type="int",
                                help="Max number of concurrent HTTP(s) "
                                "requests (default %d)" % defaults.threads)

        # Injection options
        injection = OptionGroup(
            parser, "Injection",
            "These options can be used to specify which parameters to test for, provide custom injection payloads and optional tampering scripts"
        )

        injection.add_option("-p",
                             dest="testParameter",
                             help="Testable parameter(s)")

        injection.add_option("--skip",
                             dest="skip",
                             help="Skip testing for given parameter(s)")

        injection.add_option(
            "--skip-static",
            dest="skipStatic",
            action="store_true",
            help="Skip testing parameters that not appear to be dynamic")

        injection.add_option(
            "--param-exclude",
            dest="paramExclude",
            help="Regexp to exclude parameters from testing (e.g. \"ses\")")

        injection.add_option("--dbms",
                             dest="dbms",
                             help="Force back-end DBMS to this value")

        injection.add_option(
            "--dbms-cred",
            dest="dbmsCred",
            help="DBMS authentication credentials (user:password)")

        injection.add_option(
            "--os",
            dest="os",
            help="Force back-end DBMS operating system to this value")

        injection.add_option("--invalid-bignum",
                             dest="invalidBignum",
                             action="store_true",
                             help="Use big numbers for invalidating values")

        injection.add_option(
            "--invalid-logical",
            dest="invalidLogical",
            action="store_true",
            help="Use logical operations for invalidating values")

        injection.add_option("--invalid-string",
                             dest="invalidString",
                             action="store_true",
                             help="Use random strings for invalidating values")

        injection.add_option("--no-cast",
                             dest="noCast",
                             action="store_true",
                             help="Turn off payload casting mechanism")

        injection.add_option("--no-escape",
                             dest="noEscape",
                             action="store_true",
                             help="Turn off string escaping mechanism")

        injection.add_option("--prefix",
                             dest="prefix",
                             help="Injection payload prefix string")

        injection.add_option("--suffix",
                             dest="suffix",
                             help="Injection payload suffix string")

        injection.add_option(
            "--tamper",
            dest="tamper",
            help="Use given script(s) for tampering injection data")

        # Detection options
        detection = OptionGroup(
            parser, "Detection",
            "These options can be used to customize the detection phase")

        detection.add_option(
            "--level",
            dest="level",
            type="int",
            help="Level of tests to perform (1-5, default %d)" %
            defaults.level)

        detection.add_option(
            "--risk",
            dest="risk",
            type="int",
            help="Risk of tests to perform (1-3, default %d)" % defaults.risk)

        detection.add_option(
            "--string",
            dest="string",
            help="String to match when query is evaluated to True")

        detection.add_option(
            "--not-string",
            dest="notString",
            help="String to match when query is evaluated to False")

        detection.add_option(
            "--regexp",
            dest="regexp",
            help="Regexp to match when query is evaluated to True")

        detection.add_option(
            "--code",
            dest="code",
            type="int",
            help="HTTP code to match when query is evaluated to True")

        detection.add_option(
            "--text-only",
            dest="textOnly",
            action="store_true",
            help="Compare pages based only on the textual content")

        detection.add_option("--titles",
                             dest="titles",
                             action="store_true",
                             help="Compare pages based only on their titles")

        # Techniques options
        techniques = OptionGroup(
            parser, "Techniques",
            "These options can be used to tweak testing of specific SQL injection techniques"
        )

        techniques.add_option(
            "--technique",
            dest="tech",
            help="SQL injection techniques to use (default \"%s\")" %
            defaults.tech)

        techniques.add_option(
            "--time-sec",
            dest="timeSec",
            type="int",
            help="Seconds to delay the DBMS response (default %d)" %
            defaults.timeSec)

        techniques.add_option(
            "--union-cols",
            dest="uCols",
            help="Range of columns to test for UNION query SQL injection")

        techniques.add_option(
            "--union-char",
            dest="uChar",
            help="Character to use for bruteforcing number of columns")

        techniques.add_option(
            "--union-from",
            dest="uFrom",
            help="Table to use in FROM part of UNION query SQL injection")

        techniques.add_option(
            "--dns-domain",
            dest="dnsDomain",
            help="Domain name used for DNS exfiltration attack")

        techniques.add_option(
            "--second-order",
            dest="secondOrder",
            help="Resulting page URL searched for second-order response")

        # Fingerprint options
        fingerprint = OptionGroup(parser, "Fingerprint")

        fingerprint.add_option(
            "-f",
            "--fingerprint",
            dest="extensiveFp",
            action="store_true",
            help="Perform an extensive DBMS version fingerprint")

        # Enumeration options
        enumeration = OptionGroup(
            parser, "Enumeration",
            "These options can be used to enumerate the back-end database management system information, structure and data contained in the tables. Moreover you can run your own SQL statements"
        )

        enumeration.add_option("-a",
                               "--all",
                               dest="getAll",
                               action="store_true",
                               help="Retrieve everything")

        enumeration.add_option("-b",
                               "--banner",
                               dest="getBanner",
                               action="store_true",
                               help="Retrieve DBMS banner")

        enumeration.add_option("--current-user",
                               dest="getCurrentUser",
                               action="store_true",
                               help="Retrieve DBMS current user")

        enumeration.add_option("--current-db",
                               dest="getCurrentDb",
                               action="store_true",
                               help="Retrieve DBMS current database")

        enumeration.add_option("--hostname",
                               dest="getHostname",
                               action="store_true",
                               help="Retrieve DBMS server hostname")

        enumeration.add_option("--is-dba",
                               dest="isDba",
                               action="store_true",
                               help="Detect if the DBMS current user is DBA")

        enumeration.add_option("--users",
                               dest="getUsers",
                               action="store_true",
                               help="Enumerate DBMS users")

        enumeration.add_option("--passwords",
                               dest="getPasswordHashes",
                               action="store_true",
                               help="Enumerate DBMS users password hashes")

        enumeration.add_option("--privileges",
                               dest="getPrivileges",
                               action="store_true",
                               help="Enumerate DBMS users privileges")

        enumeration.add_option("--roles",
                               dest="getRoles",
                               action="store_true",
                               help="Enumerate DBMS users roles")

        enumeration.add_option("--dbs",
                               dest="getDbs",
                               action="store_true",
                               help="Enumerate DBMS databases")

        enumeration.add_option("--tables",
                               dest="getTables",
                               action="store_true",
                               help="Enumerate DBMS database tables")

        enumeration.add_option("--columns",
                               dest="getColumns",
                               action="store_true",
                               help="Enumerate DBMS database table columns")

        enumeration.add_option("--schema",
                               dest="getSchema",
                               action="store_true",
                               help="Enumerate DBMS schema")

        enumeration.add_option("--count",
                               dest="getCount",
                               action="store_true",
                               help="Retrieve number of entries for table(s)")

        enumeration.add_option("--dump",
                               dest="dumpTable",
                               action="store_true",
                               help="Dump DBMS database table entries")

        enumeration.add_option("--dump-all",
                               dest="dumpAll",
                               action="store_true",
                               help="Dump all DBMS databases tables entries")

        enumeration.add_option(
            "--search",
            dest="search",
            action="store_true",
            help="Search column(s), table(s) and/or database name(s)")

        enumeration.add_option("--comments",
                               dest="getComments",
                               action="store_true",
                               help="Retrieve DBMS comments")

        enumeration.add_option("-D",
                               dest="db",
                               help="DBMS database to enumerate")

        enumeration.add_option("-T",
                               dest="tbl",
                               help="DBMS database table(s) to enumerate")

        enumeration.add_option(
            "-C",
            dest="col",
            help="DBMS database table column(s) to enumerate")

        enumeration.add_option(
            "-X",
            dest="exclude",
            help="DBMS database identifier(s) to not enumerate")

        enumeration.add_option("-U",
                               dest="user",
                               help="DBMS user to enumerate")

        enumeration.add_option(
            "--exclude-sysdbs",
            dest="excludeSysDbs",
            action="store_true",
            help="Exclude DBMS system databases when enumerating tables")

        enumeration.add_option("--pivot-column",
                               dest="pivotColumn",
                               help="Pivot column name")

        enumeration.add_option("--where",
                               dest="dumpWhere",
                               help="Use WHERE condition while table dumping")

        enumeration.add_option("--start",
                               dest="limitStart",
                               type="int",
                               help="First dump table entry to retrieve")

        enumeration.add_option("--stop",
                               dest="limitStop",
                               type="int",
                               help="Last dump table entry to retrieve")

        enumeration.add_option(
            "--first",
            dest="firstChar",
            type="int",
            help="First query output word character to retrieve")

        enumeration.add_option(
            "--last",
            dest="lastChar",
            type="int",
            help="Last query output word character to retrieve")

        enumeration.add_option("--sql-query",
                               dest="query",
                               help="SQL statement to be executed")

        enumeration.add_option("--sql-shell",
                               dest="sqlShell",
                               action="store_true",
                               help="Prompt for an interactive SQL shell")

        enumeration.add_option(
            "--sql-file",
            dest="sqlFile",
            help="Execute SQL statements from given file(s)")

        # Brute force options
        brute = OptionGroup(
            parser, "Brute force",
            "These options can be used to run brute force checks")

        brute.add_option("--common-tables",
                         dest="commonTables",
                         action="store_true",
                         help="Check existence of common tables")

        brute.add_option("--common-columns",
                         dest="commonColumns",
                         action="store_true",
                         help="Check existence of common columns")

        # User-defined function options
        udf = OptionGroup(
            parser, "User-defined function injection",
            "These options can be used to create custom user-defined functions"
        )

        udf.add_option("--udf-inject",
                       dest="udfInject",
                       action="store_true",
                       help="Inject custom user-defined functions")

        udf.add_option("--shared-lib",
                       dest="shLib",
                       help="Local path of the shared library")

        # File system options
        filesystem = OptionGroup(
            parser, "File system access",
            "These options can be used to access the back-end database management system underlying file system"
        )

        filesystem.add_option(
            "--file-read",
            dest="rFile",
            help="Read a file from the back-end DBMS file system")

        filesystem.add_option(
            "--file-write",
            dest="wFile",
            help="Write a local file on the back-end DBMS file system")

        filesystem.add_option(
            "--file-dest",
            dest="dFile",
            help="Back-end DBMS absolute filepath to write to")

        # Takeover options
        takeover = OptionGroup(
            parser, "Operating system access",
            "These options can be used to access the back-end database management system underlying operating system"
        )

        takeover.add_option("--os-cmd",
                            dest="osCmd",
                            help="Execute an operating system command")

        takeover.add_option(
            "--os-shell",
            dest="osShell",
            action="store_true",
            help="Prompt for an interactive operating system shell")

        takeover.add_option("--os-pwn",
                            dest="osPwn",
                            action="store_true",
                            help="Prompt for an OOB shell, Meterpreter or VNC")

        takeover.add_option(
            "--os-smbrelay",
            dest="osSmb",
            action="store_true",
            help="One click prompt for an OOB shell, Meterpreter or VNC")

        takeover.add_option("--os-bof",
                            dest="osBof",
                            action="store_true",
                            help="Stored procedure buffer overflow "
                            "exploitation")

        takeover.add_option("--priv-esc",
                            dest="privEsc",
                            action="store_true",
                            help="Database process user privilege escalation")

        takeover.add_option(
            "--msf-path",
            dest="msfPath",
            help="Local path where Metasploit Framework is installed")

        takeover.add_option(
            "--tmp-path",
            dest="tmpPath",
            help="Remote absolute path of temporary files directory")

        # Windows registry options
        windows = OptionGroup(
            parser, "Windows registry access",
            "These options can be used to access the back-end database management system Windows registry"
        )

        windows.add_option("--reg-read",
                           dest="regRead",
                           action="store_true",
                           help="Read a Windows registry key value")

        windows.add_option("--reg-add",
                           dest="regAdd",
                           action="store_true",
                           help="Write a Windows registry key value data")

        windows.add_option("--reg-del",
                           dest="regDel",
                           action="store_true",
                           help="Delete a Windows registry key value")

        windows.add_option("--reg-key",
                           dest="regKey",
                           help="Windows registry key")

        windows.add_option("--reg-value",
                           dest="regVal",
                           help="Windows registry key value")

        windows.add_option("--reg-data",
                           dest="regData",
                           help="Windows registry key value data")

        windows.add_option("--reg-type",
                           dest="regType",
                           help="Windows registry key value type")

        # General options
        general = OptionGroup(
            parser, "General",
            "These options can be used to set some general working parameters")

        general.add_option("-s",
                           dest="sessionFile",
                           help="Load session from a stored (.sqlite) file")

        general.add_option("-t",
                           dest="trafficFile",
                           help="Log all HTTP traffic into a textual file")

        general.add_option(
            "--batch",
            dest="batch",
            action="store_true",
            help="Never ask for user input, use the default behavior")

        general.add_option(
            "--binary-fields",
            dest="binaryFields",
            help="Result fields having binary values (e.g. \"digest\")")

        general.add_option(
            "--check-internet",
            dest="checkInternet",
            action="store_true",
            help="Check Internet connection before assessing the target")

        general.add_option(
            "--crawl",
            dest="crawlDepth",
            type="int",
            help="Crawl the website starting from the target URL")

        general.add_option(
            "--crawl-exclude",
            dest="crawlExclude",
            help="Regexp to exclude pages from crawling (e.g. \"logout\")")

        general.add_option(
            "--csv-del",
            dest="csvDel",
            help="Delimiting character used in CSV output (default \"%s\")" %
            defaults.csvDel)

        general.add_option(
            "--charset",
            dest="charset",
            help="Blind SQL injection charset (e.g. \"0123456789abcdef\")")

        general.add_option(
            "--dump-format",
            dest="dumpFormat",
            help="Format of dumped data (CSV (default), HTML or SQLITE)")

        general.add_option(
            "--encoding",
            dest="encoding",
            help="Character encoding used for data retrieval (e.g. GBK)")

        general.add_option(
            "--eta",
            dest="eta",
            action="store_true",
            help="Display for each output the estimated time of arrival")

        general.add_option("--flush-session",
                           dest="flushSession",
                           action="store_true",
                           help="Flush session files for current target")

        general.add_option("--forms",
                           dest="forms",
                           action="store_true",
                           help="Parse and test forms on target URL")

        general.add_option("--fresh-queries",
                           dest="freshQueries",
                           action="store_true",
                           help="Ignore query results stored in session file")

        general.add_option("--har",
                           dest="harFile",
                           help="Log all HTTP traffic into a HAR file")

        general.add_option("--hex",
                           dest="hexConvert",
                           action="store_true",
                           help="Use DBMS hex function(s) for data retrieval")

        general.add_option("--output-dir",
                           dest="outputDir",
                           action="store",
                           help="Custom output directory path")

        general.add_option(
            "--parse-errors",
            dest="parseErrors",
            action="store_true",
            help="Parse and display DBMS error messages from responses")

        general.add_option("--save",
                           dest="saveConfig",
                           help="Save options to a configuration INI file")

        general.add_option(
            "--scope",
            dest="scope",
            help="Regexp to filter targets from provided proxy log")

        general.add_option(
            "--test-filter",
            dest="testFilter",
            help="Select tests by payloads and/or titles (e.g. ROW)")

        general.add_option(
            "--test-skip",
            dest="testSkip",
            help="Skip tests by payloads and/or titles (e.g. BENCHMARK)")

        general.add_option("--update",
                           dest="updateAll",
                           action="store_true",
                           help="Update sqlmap")

        # Miscellaneous options
        miscellaneous = OptionGroup(parser, "Miscellaneous")

        miscellaneous.add_option(
            "-z",
            dest="mnemonics",
            help="Use short mnemonics (e.g. \"flu,bat,ban,tec=EU\")")

        miscellaneous.add_option(
            "--alert",
            dest="alert",
            help="Run host OS command(s) when SQL injection is found")

        miscellaneous.add_option(
            "--answers",
            dest="answers",
            help="Set question answers (e.g. \"quit=N,follow=N\")")

        miscellaneous.add_option(
            "--beep",
            dest="beep",
            action="store_true",
            help="Beep on question and/or when SQL injection is found")

        miscellaneous.add_option(
            "--cleanup",
            dest="cleanup",
            action="store_true",
            help="Clean up the DBMS from sqlmap specific UDF and tables")

        miscellaneous.add_option(
            "--dependencies",
            dest="dependencies",
            action="store_true",
            help="Check for missing (non-core) sqlmap dependencies")

        miscellaneous.add_option("--disable-coloring",
                                 dest="disableColoring",
                                 action="store_true",
                                 help="Disable console output coloring")

        miscellaneous.add_option(
            "--gpage",
            dest="googlePage",
            type="int",
            help="Use Google dork results from specified page number")

        miscellaneous.add_option(
            "--identify-waf",
            dest="identifyWaf",
            action="store_true",
            help="Make a thorough testing for a WAF/IPS/IDS protection")

        miscellaneous.add_option(
            "--mobile",
            dest="mobile",
            action="store_true",
            help="Imitate smartphone through HTTP User-Agent header")

        miscellaneous.add_option(
            "--offline",
            dest="offline",
            action="store_true",
            help="Work in offline mode (only use session data)")

        miscellaneous.add_option(
            "--purge-output",
            dest="purgeOutput",
            action="store_true",
            help="Safely remove all content from output directory")

        miscellaneous.add_option(
            "--skip-waf",
            dest="skipWaf",
            action="store_true",
            help="Skip heuristic detection of WAF/IPS/IDS protection")

        miscellaneous.add_option(
            "--smart",
            dest="smart",
            action="store_true",
            help="Conduct thorough tests only if positive heuristic(s)")

        miscellaneous.add_option("--sqlmap-shell",
                                 dest="sqlmapShell",
                                 action="store_true",
                                 help="Prompt for an interactive sqlmap shell")

        miscellaneous.add_option(
            "--tmp-dir",
            dest="tmpDir",
            help="Local directory for storing temporary files")

        miscellaneous.add_option(
            "--web-root",
            dest="webRoot",
            help="Web server document root directory (e.g. \"/var/www\")")

        miscellaneous.add_option(
            "--wizard",
            dest="wizard",
            action="store_true",
            help="Simple wizard interface for beginner users")

        # Hidden and/or experimental options
        parser.add_option("--dummy",
                          dest="dummy",
                          action="store_true",
                          help=SUPPRESS_HELP)

        parser.add_option("--murphy-rate",
                          dest="murphyRate",
                          type="int",
                          help=SUPPRESS_HELP)

        parser.add_option("--disable-precon",
                          dest="disablePrecon",
                          action="store_true",
                          help=SUPPRESS_HELP)

        parser.add_option("--disable-stats",
                          dest="disableStats",
                          action="store_true",
                          help=SUPPRESS_HELP)

        parser.add_option("--profile",
                          dest="profile",
                          action="store_true",
                          help=SUPPRESS_HELP)

        parser.add_option("--force-dbms", dest="forceDbms", help=SUPPRESS_HELP)

        parser.add_option("--force-dns",
                          dest="forceDns",
                          action="store_true",
                          help=SUPPRESS_HELP)

        parser.add_option("--force-pivoting",
                          dest="forcePivoting",
                          action="store_true",
                          help=SUPPRESS_HELP)

        parser.add_option("--force-threads",
                          dest="forceThreads",
                          action="store_true",
                          help=SUPPRESS_HELP)

        parser.add_option("--smoke-test",
                          dest="smokeTest",
                          action="store_true",
                          help=SUPPRESS_HELP)

        parser.add_option("--live-test",
                          dest="liveTest",
                          action="store_true",
                          help=SUPPRESS_HELP)

        parser.add_option("--stop-fail",
                          dest="stopFail",
                          action="store_true",
                          help=SUPPRESS_HELP)

        parser.add_option("--run-case", dest="runCase", help=SUPPRESS_HELP)

        # API options
        parser.add_option("--api",
                          dest="api",
                          action="store_true",
                          help=SUPPRESS_HELP)

        parser.add_option("--taskid", dest="taskid", help=SUPPRESS_HELP)

        parser.add_option("--database", dest="database", help=SUPPRESS_HELP)

        parser.add_option_group(target)
        parser.add_option_group(request)
        parser.add_option_group(optimization)
        parser.add_option_group(injection)
        parser.add_option_group(detection)
        parser.add_option_group(techniques)
        parser.add_option_group(fingerprint)
        parser.add_option_group(enumeration)
        parser.add_option_group(brute)
        parser.add_option_group(udf)
        parser.add_option_group(filesystem)
        parser.add_option_group(takeover)
        parser.add_option_group(windows)
        parser.add_option_group(general)
        parser.add_option_group(miscellaneous)

        # Dirty hack to display longer options without breaking into two lines
        def _(self, *args):
            retVal = parser.formatter._format_option_strings(*args)
            if len(retVal) > MAX_HELP_OPTION_LENGTH:
                retVal = ("%%.%ds.." %
                          (MAX_HELP_OPTION_LENGTH -
                           parser.formatter.indent_increment)) % retVal
            return retVal

        parser.formatter._format_option_strings = parser.formatter.format_option_strings
        parser.formatter.format_option_strings = type(
            parser.formatter.format_option_strings)(_, parser, type(parser))

        # Dirty hack for making a short option '-hh'
        option = parser.get_option("--hh")
        option._short_opts = ["-hh"]
        option._long_opts = []

        # Dirty hack for inherent help message of switch '-h'
        option = parser.get_option("-h")
        option.help = option.help.capitalize().replace("this help",
                                                       "basic help")

        _ = []
        prompt = False
        advancedHelp = True
        extraHeaders = []

        # Reference: https://stackoverflow.com/a/4012683 (Note: previously used "...sys.getfilesystemencoding() or UNICODE_ENCODING")
        for arg in argv:
            _.append(getUnicode(arg, encoding=sys.stdin.encoding))

        argv = _
        checkDeprecatedOptions(argv)

        prompt = "--sqlmap-shell" in argv

        if prompt:
            parser.usage = ""
            cmdLineOptions.sqlmapShell = True

            _ = ["x", "q", "exit", "quit", "clear"]

            for option in parser.option_list:
                _.extend(option._long_opts)
                _.extend(option._short_opts)

            for group in parser.option_groups:
                for option in group.option_list:
                    _.extend(option._long_opts)
                    _.extend(option._short_opts)

            autoCompletion(AUTOCOMPLETE_TYPE.SQLMAP, commands=_)

            while True:
                command = None

                try:
                    command = raw_input("sqlmap-shell> ").strip()
                    command = getUnicode(command, encoding=sys.stdin.encoding)
                except (KeyboardInterrupt, EOFError):
                    print
                    raise SqlmapShellQuitException

                if not command:
                    continue
                elif command.lower() == "clear":
                    clearHistory()
                    dataToStdout("[i] history cleared\n")
                    saveHistory(AUTOCOMPLETE_TYPE.SQLMAP)
                elif command.lower() in ("x", "q", "exit", "quit"):
                    raise SqlmapShellQuitException
                elif command[0] != '-':
                    dataToStdout("[!] invalid option(s) provided\n")
                    dataToStdout(
                        "[i] proper example: '-u http://www.site.com/vuln.php?id=1 --banner'\n"
                    )
                else:
                    saveHistory(AUTOCOMPLETE_TYPE.SQLMAP)
                    loadHistory(AUTOCOMPLETE_TYPE.SQLMAP)
                    break

            try:
                for arg in shlex.split(command):
                    argv.append(getUnicode(arg, encoding=sys.stdin.encoding))
            except ValueError, ex:
                raise SqlmapSyntaxException(
                    "something went wrong during command line parsing ('%s')" %
                    ex.message)

        for i in xrange(len(argv)):
            if argv[i] == "-hh":
                argv[i] = "-h"
            elif len(argv[i]) > 1 and all(
                    ord(_) in xrange(0x2018, 0x2020)
                    for _ in ((argv[i].split('=', 1)[-1].strip() or ' ')[0],
                              argv[i][-1])):
                dataToStdout(
                    "[!] copy-pasting illegal (non-console) quote characters from Internet is, well, illegal (%s)\n"
                    % argv[i])
                raise SystemExit
            elif len(argv[i]) > 1 and u"\uff0c" in argv[i].split('=', 1)[-1]:
                dataToStdout(
                    "[!] copy-pasting illegal (non-console) comma characters from Internet is, well, illegal (%s)\n"
                    % argv[i])
                raise SystemExit
            elif re.search(r"\A-\w=.+", argv[i]):
                dataToStdout(
                    "[!] potentially miswritten (illegal '=') short option detected ('%s')\n"
                    % argv[i])
                raise SystemExit
            elif argv[i] == "-H":
                if i + 1 < len(argv):
                    extraHeaders.append(argv[i + 1])
            elif re.match(r"\A\d+!\Z", argv[i]) and argv[max(
                    0, i - 1)] == "--threads" or re.match(
                        r"\A--threads.+\d+!\Z", argv[i]):
                argv[i] = argv[i][:-1]
                conf.skipThreadCheck = True
            elif argv[i] == "--version":
                print VERSION_STRING.split('/')[-1]
                raise SystemExit
            elif argv[i] in ("-h", "--help"):
                advancedHelp = False
                for group in parser.option_groups[:]:
                    found = False
                    for option in group.option_list:
                        if option.dest not in BASIC_HELP_ITEMS:
                            option.help = SUPPRESS_HELP
                        else:
                            found = True
                    if not found:
                        parser.option_groups.remove(group)

        for verbosity in (_ for _ in argv if re.search(r"\A\-v+\Z", _)):
            try:
                if argv.index(verbosity) == len(argv) - 1 or not argv[
                        argv.index(verbosity) + 1].isdigit():
                    conf.verbose = verbosity.count('v') + 1
                    del argv[argv.index(verbosity)]
            except (IndexError, ValueError):
                pass

        try:
            (args, _) = parser.parse_args(argv)
        except UnicodeEncodeError, ex:
            dataToStdout("\n[!] %s\n" % ex.object.encode("unicode-escape"))
            raise SystemExit
コード例 #32
0
    def add_options(self, parser):
        """Attach the global command-line options shared by every command.

        Builds a "Global Options" group on *parser* containing the
        logging, profiling, pidfile, settings-override and pdb switches,
        then registers the group with the parser.

        :param parser: ``optparse.OptionParser`` instance to extend.
        """
        global_opts = OptionGroup(parser, "Global Options")

        # One (option strings, keyword arguments) entry per switch; the
        # loop below feeds them to add_option() unchanged.
        option_specs = [
            (("--logfile",),
             dict(metavar="FILE",
                  help="log file. if omitted stderr will be used")),
            (("-L", "--loglevel"),
             dict(metavar="LEVEL",
                  default=None,
                  # default shown in help comes from the live settings
                  help="log level (default: %s)" %
                  self.settings['LOG_LEVEL'])),
            (("--nolog",),
             dict(action="store_true",
                  help="disable logging completely")),
            (("--profile",),
             dict(metavar="FILE",
                  default=None,
                  help="write python cProfile stats to FILE")),
            (("--pidfile",),
             dict(metavar="FILE",
                  help="write process ID to FILE")),
            (("-s", "--set"),
             dict(action="append",
                  default=[],
                  metavar="NAME=VALUE",
                  help="set/override setting (may be repeated)")),
            (("--pdb",),
             dict(action="store_true",
                  help="enable pdb on failure")),
        ]

        for flags, kwargs in option_specs:
            global_opts.add_option(*flags, **kwargs)

        parser.add_option_group(global_opts)
コード例 #33
0
def main():
    # bootstrap the logger with defaults
    logger.configure()

    try:
        cpus = multiprocessing.cpu_count()
    except NotImplementedError:
        cpus = 1

    #avail_rendermodes = c_overviewer.get_render_modes()
    avail_north_dirs = [
        'lower-left', 'upper-left', 'upper-right', 'lower-right', 'auto'
    ]

    # Parse for basic options
    parser = OptionParser(usage=helptext, add_help_option=False)
    parser.add_option("-h",
                      "--help",
                      dest="help",
                      action="store_true",
                      help="show this help message and exit")
    parser.add_option("-c",
                      "--config",
                      dest="config",
                      action="store",
                      help="Specify the config file to use.")
    parser.add_option(
        "-p",
        "--processes",
        dest="procs",
        action="store",
        type="int",
        help=
        "The number of local worker processes to spawn. Defaults to the number of CPU cores your computer has"
    )

    # Options that only apply to the config-less render usage
    parser.add_option(
        "--rendermodes",
        dest="rendermodes",
        action="store",
        help=
        "If you're not using a config file, specify which rendermodes to render with this option. This is a comma-separated list."
    )

    # Useful one-time render modifiers:
    parser.add_option("--forcerender",
                      dest="forcerender",
                      action="store_true",
                      help="Force re-rendering the entire map.")
    parser.add_option("--check-tiles",
                      dest="checktiles",
                      action="store_true",
                      help="Check each tile on disk and re-render old tiles")
    parser.add_option(
        "--no-tile-checks",
        dest="notilechecks",
        action="store_true",
        help=
        "Only render tiles that come from chunks that have changed since the last render (the default)"
    )

    # Useful one-time debugging options:
    parser.add_option(
        "--check-terrain",
        dest="check_terrain",
        action="store_true",
        help=
        "Tries to locate the texture files. Useful for debugging texture problems."
    )
    parser.add_option("-V",
                      "--version",
                      dest="version",
                      help="Displays version information and then exits",
                      action="store_true")
    parser.add_option(
        "--update-web-assets",
        dest='update_web_assets',
        action="store_true",
        help=
        "Update web assets. Will *not* render tiles or update overviewerConfig.js"
    )

    # Log level options:
    parser.add_option(
        "-q",
        "--quiet",
        dest="quiet",
        action="count",
        default=0,
        help="Print less output. You can specify this option multiple times.")
    parser.add_option(
        "-v",
        "--verbose",
        dest="verbose",
        action="count",
        default=0,
        help="Print more output. You can specify this option multiple times.")
    parser.add_option(
        "--simple-output",
        dest="simple",
        action="store_true",
        default=False,
        help="Use a simple output format, with no colors or progress bars")

    # create a group for "plugin exes" (the concept of a plugin exe is only loosly defined at this point)
    exegroup = OptionGroup(
        parser, "Other Scripts",
        "These scripts may accept different arguments than the ones listed above"
    )
    exegroup.add_option("--genpoi",
                        dest="genpoi",
                        action="store_true",
                        help="Runs the genPOI script")
    exegroup.add_option("--skip-scan",
                        dest="skipscan",
                        action="store_true",
                        help="When running GenPOI, don't scan for entities")

    parser.add_option_group(exegroup)

    options, args = parser.parse_args()

    # first thing to do is check for stuff in the exegroup:
    if options.genpoi:
        # remove the "--genpoi" option from sys.argv before running genPI
        sys.argv.remove("--genpoi")
        #sys.path.append(".")
        g = __import__("overviewer_core.aux_files", {}, {}, ["genPOI"])
        g.genPOI.main()
        return 0
    if options.help:
        parser.print_help()
        return 0

    # re-configure the logger now that we've processed the command line options
    logger.configure(logging.INFO + 10 * options.quiet - 10 * options.verbose,
                     verbose=options.verbose > 0,
                     simple=options.simple)

    ##########################################################################
    # This section of main() runs in response to any one-time options we have,
    # such as -V for version reporting
    if options.version:
        print("Minecraft Overviewer %s" % util.findGitVersion()),
        print("(%s)" % util.findGitHash()[:7])
        try:
            import overviewer_core.overviewer_version as overviewer_version
            print("built on %s" % overviewer_version.BUILD_DATE)
            if options.verbose > 0:
                print("Build machine: %s %s" %
                      (overviewer_version.BUILD_PLATFORM,
                       overviewer_version.BUILD_OS))
        except ImportError:
            print("(build info not found)")
        return 0

    # if --check-terrain was specified, but we have NO config file, then we cannot
    # operate on a custom texture path.  we do terrain checking with a custom texture
    # pack later on, after we've parsed the config file
    if options.check_terrain and not options.config:
        import hashlib
        from overviewer_core.textures import Textures
        tex = Textures()

        logging.info("Looking for a few common texture files...")
        try:
            f = tex.find_file(
                "assets/minecraft/textures/blocks/sandstone_top.png",
                verbose=True)
            f = tex.find_file("assets/minecraft/textures/blocks/grass_top.png",
                              verbose=True)
            f = tex.find_file(
                "assets/minecraft/textures/blocks/diamond_ore.png",
                verbose=True)
            f = tex.find_file(
                "assets/minecraft/textures/blocks/planks_acacia.png",
                verbose=True)
        except IOError:
            logging.error("Could not find any texture files.")
            return 1

        return 0

    # if no arguments are provided, print out a helpful message
    if len(args) == 0 and not options.config:
        # first provide an appropriate error for bare-console users
        # that don't provide any options
        if util.is_bare_console():
            print("\n")
            print(
                "The Overviewer is a console program.  Please open a Windows command prompt"
            )
            print(
                "first and run Overviewer from there.   Further documentation is available at"
            )
            print("http://docs.overviewer.org/\n")
            print("\n")
            print(
                "For a quick-start guide on Windows, visit the following URL:\n"
            )
            print(
                "http://docs.overviewer.org/en/latest/win_tut/windowsguide/\n")

        else:
            # more helpful message for users who know what they're doing
            logging.error(
                "You must either specify --config or give me a world directory and output directory"
            )
            parser.print_help()
            list_worlds()
        return 1

    ##########################################################################
    # This section does some sanity checking on the command line options passed
    # in. It checks to see if --config was given that no worldname/destdir were
    # given, and vice versa
    if options.config and args:
        print()
        print(
            "If you specify --config, you need to specify the world to render as well as"
        )
        print("the destination in the config file, not on the command line.")
        print("Put something like this in your config file:")
        print("worlds['myworld'] = %r" % args[0])
        print("outputdir = %r" %
              (args[1] if len(args) > 1 else "/path/to/output"))
        print()
        logging.error(
            "Cannot specify both --config AND a world + output directory on the command line."
        )
        parser.print_help()
        return 1

    if not options.config and len(args) < 2:
        logging.error(
            "You must specify both the world directory and an output directory"
        )
        parser.print_help()
        return 1
    if not options.config and len(args) > 2:
        # it's possible the user has a space in one of their paths but didn't
        # properly escape it attempt to detect this case
        for start in range(len(args)):
            if not os.path.exists(args[start]):
                for end in range(start + 1, len(args) + 1):
                    if os.path.exists(" ".join(args[start:end])):
                        logging.warning(
                            "It looks like you meant to specify \"%s\" as your world dir or your output\n\
dir but you forgot to put quotes around the directory, since it contains spaces."
                            % " ".join(args[start:end]))
                        return 1
        logging.error("Too many command line arguments")
        parser.print_help()
        return 1

    #########################################################################
    # These two halfs of this if statement unify config-file mode and
    # command-line mode.
    mw_parser = configParser.MultiWorldParser()

    if not options.config:
        # No config file mode.
        worldpath, destdir = map(os.path.expanduser, args)
        logging.debug("Using %r as the world directory", worldpath)
        logging.debug("Using %r as the output directory", destdir)

        mw_parser.set_config_item("worlds", {'world': worldpath})
        mw_parser.set_config_item("outputdir", destdir)

        rendermodes = ['lighting']
        if options.rendermodes:
            rendermodes = options.rendermodes.replace("-", "_").split(",")

        # Now for some good defaults
        renders = util.OrderedDict()
        for rm in rendermodes:
            renders["world-" + rm] = {
                "world": "world",
                "title": "Overviewer Render (%s)" % rm,
                "rendermode": rm,
            }
        mw_parser.set_config_item("renders", renders)

    else:
        if options.rendermodes:
            logging.error(
                "You cannot specify --rendermodes if you give a config file. Configure your rendermodes in the config file instead"
            )
            parser.print_help()
            return 1

        # Parse the config file
        try:
            mw_parser.parse(os.path.expanduser(options.config))
        except configParser.MissingConfigException as e:
            # this isn't a "bug", so don't print scary traceback
            logging.error(str(e))
            util.nice_exit(1)

    # Add in the command options here, perhaps overriding values specified in
    # the config
    if options.procs:
        mw_parser.set_config_item("processes", options.procs)

    # Now parse and return the validated config
    try:
        config = mw_parser.get_validated_config()
    except Exception as ex:
        if options.verbose:
            logging.exception(
                "An error was encountered with your configuration. See the info below."
            )
        else:  # no need to print scary traceback! just
            logging.error("An error was encountered with your configuration.")
            logging.error(str(ex))
        return 1

    if options.check_terrain:  # we are already in the "if configfile" branch
        logging.info("Looking for a few common texture files...")
        for render_name, render in config['renders'].iteritems():
            logging.info("Looking at render %r", render_name)

            # find or create the textures object
            texopts = util.dict_subset(render, ["texturepath"])

            tex = textures.Textures(**texopts)
            f = tex.find_file(
                "assets/minecraft/textures/blocks/sandstone_top.png",
                verbose=True)
            f = tex.find_file("assets/minecraft/textures/blocks/grass_top.png",
                              verbose=True)
            f = tex.find_file(
                "assets/minecraft/textures/blocks/diamond_ore.png",
                verbose=True)
            f = tex.find_file(
                "assets/minecraft/textures/blocks/planks_oak.png",
                verbose=True)
        return 0

    ############################################################
    # Final validation steps and creation of the destination directory
    logging.info("Welcome to Minecraft Overviewer!")
    logging.debug("Current log level: {0}".format(logging.getLogger().level))

    # Override some render configdict options depending on one-time command line
    # modifiers
    if (bool(options.forcerender) + bool(options.checktiles) +
            bool(options.notilechecks)) > 1:
        logging.error(
            "You cannot specify more than one of --forcerender, " +
            "--check-tiles, and --no-tile-checks. These options conflict.")
        parser.print_help()
        return 1
    if options.forcerender:
        logging.info("Forcerender mode activated. ALL tiles will be rendered")
        for render in config['renders'].itervalues():
            render['renderchecks'] = 2
    elif options.checktiles:
        logging.info("Checking all tiles for updates manually.")
        for render in config['renders'].itervalues():
            render['renderchecks'] = 1
    elif options.notilechecks:
        logging.info("Disabling all tile mtime checks. Only rendering tiles " +
                     "that need updating since last render")
        for render in config['renders'].itervalues():
            render['renderchecks'] = 0

    if not config['renders']:
        logging.error(
            "You must specify at least one render in your config file. See the docs if you're having trouble"
        )
        return 1

    #####################
    # Do a few last minute things to each render dictionary here
    for rname, render in config['renders'].iteritems():
        # Convert render['world'] to the world path, and store the original
        # in render['worldname_orig']
        try:
            worldpath = config['worlds'][render['world']]
        except KeyError:
            logging.error(
                "Render %s's world is '%s', but I could not find a corresponding entry in the worlds dictionary.",
                rname, render['world'])
            return 1
        render['worldname_orig'] = render['world']
        render['world'] = worldpath

        # If 'forcerender' is set, change renderchecks to 2
        if render.get('forcerender', False):
            render['renderchecks'] = 2

        # check if overlays are set, if so, make sure that those renders exist
        if render.get('overlay', []) != []:
            for x in render.get('overlay'):
                if x != rname:
                    try:
                        renderLink = config['renders'][x]
                    except KeyError:
                        logging.error(
                            "Render %s's overlay is '%s', but I could not find a corresponding entry in the renders dictionary.",
                            rname, x)
                        return 1
                else:
                    logging.error("Render %s's overlay contains itself.",
                                  rname)
                    return 1

    destdir = config['outputdir']
    if not destdir:
        logging.error(
            "You must specify the output directory in your config file.")
        logging.error("e.g. outputdir = '/path/to/outputdir'")
        return 1
    if not os.path.exists(destdir):
        try:
            os.mkdir(destdir)
        except OSError:
            logging.exception("Could not create the output directory.")
            return 1

    ########################################################################
    # Now we start the actual processing, now that all the configuration has
    # been gathered and validated
    # create our asset manager... ASSMAN
    assetMrg = assetmanager.AssetManager(destdir,
                                         config.get('customwebassets', None))

    # If we've been asked to update web assets, do that and then exit
    if options.update_web_assets:
        assetMrg.output_noconfig()
        logging.info("Web assets have been updated")
        return 0

    # The changelist support.
    changelists = {}
    for render in config['renders'].itervalues():
        if 'changelist' in render:
            path = render['changelist']
            if path not in changelists:
                out = open(path, "w")
                logging.debug("Opening changelist %s (%s)", out, out.fileno())
                changelists[path] = out
            else:
                out = changelists[path]
            render['changelist'] = out.fileno()

    tilesets = []

    # saves us from creating the same World object over and over again
    worldcache = {}
    # same for textures
    texcache = {}

    # Set up the cache objects to use
    caches = []
    caches.append(cache.LRUCache(size=100))
    if config.get("memcached_host", False):
        caches.append(cache.Memcached(config['memcached_host']))
    # TODO: optionally more caching layers here

    renders = config['renders']
    for render_name, render in renders.iteritems():
        logging.debug("Found the following render thing: %r", render)

        # find or create the world object
        try:
            w = worldcache[render['world']]
        except KeyError:
            w = world.World(render['world'])
            worldcache[render['world']] = w

        # find or create the textures object
        texopts = util.dict_subset(
            render, ["texturepath", "bgcolor", "northdirection"])
        texopts_key = tuple(texopts.items())
        if texopts_key not in texcache:
            tex = textures.Textures(**texopts)
            logging.debug("Starting to generate textures")
            tex.generate()
            logging.debug("Finished generating textures")
            texcache[texopts_key] = tex
        else:
            tex = texcache[texopts_key]

        try:
            logging.debug("Asking for regionset %r" % render['dimension'][1])
            rset = w.get_regionset(render['dimension'][1])
        except IndexError:
            logging.error(
                "Sorry, I can't find anything to render!  Are you sure there are .mca files in the world directory?"
            )
            return 1
        if rset == None:  # indicates no such dimension was found:
            logging.error(
                "Sorry, you requested dimension '%s' for %s, but I couldn't find it",
                render['dimension'][0], render_name)
            return 1

        #################
        # Apply any regionset transformations here

        # Insert a layer of caching above the real regionset. Any world
        # tranformations will pull from this cache, but their results will not
        # be cached by this layer. This uses a common pool of caches; each
        # regionset cache pulls from the same underlying cache object.
        rset = world.CachedRegionSet(rset, caches)

        # If a crop is requested, wrap the regionset here
        if "crop" in render:
            rset = world.CroppedRegionSet(rset, *render['crop'])

        # If this is to be a rotated regionset, wrap it in a RotatedRegionSet
        # object
        if (render['northdirection'] > 0):
            rset = world.RotatedRegionSet(rset, render['northdirection'])
        logging.debug("Using RegionSet %r", rset)

        ###############################
        # Do the final prep and create the TileSet object

        # create our TileSet from this RegionSet
        tileset_dir = os.path.abspath(os.path.join(destdir, render_name))

        # only pass to the TileSet the options it really cares about
        render[
            'name'] = render_name  # perhaps a hack. This is stored here for the asset manager
        tileSetOpts = util.dict_subset(render, [
            "name", "imgformat", "renderchecks", "rerenderprob", "bgcolor",
            "defaultzoom", "imgquality", "optimizeimg", "rendermode",
            "worldname_orig", "title", "dimension", "changelist", "showspawn",
            "overlay", "base", "poititle", "maxzoom", "showlocationmarker"
        ])
        tileSetOpts.update({"spawn": w.find_true_spawn()
                            })  # TODO find a better way to do this
        tset = tileset.TileSet(w, rset, assetMrg, tex, tileSetOpts,
                               tileset_dir)
        tilesets.append(tset)

    # Do tileset preprocessing here, before we start dispatching jobs
    for ts in tilesets:
        ts.do_preprocessing()

    # Output initial static data and configuration
    assetMrg.initialize(tilesets)

    # multiprocessing dispatcher
    if config['processes'] == 1:
        dispatch = dispatcher.Dispatcher()
    else:
        dispatch = dispatcher.MultiprocessingDispatcher(
            local_procs=config['processes'])
    dispatch.render_all(tilesets, config['observer'])
    dispatch.close()

    assetMrg.finalize(tilesets)

    for out in changelists.itervalues():
        logging.debug("Closing %s (%s)", out, out.fileno())
        out.close()

    if config['processes'] == 1:
        logging.debug("Final cache stats:")
        for c in caches:
            logging.debug("\t%s: %s hits, %s misses", c.__class__.__name__,
                          c.hits, c.misses)

    return 0
コード例 #34
0
def main():
    """Command-line entry point.

    Reads Gibbs free energy changes from an optional positional
    <delta-G-file>; when no file is given, computes them from the data
    supplied via the -r/-c/-d/-s/-t options.  Then parses the scenario
    file named by -p.  Exits the process on invalid input.

    Fixes: replaced remaining Python 2-only syntax (bare ``print``
    statements and ``except IOError, name``) with Python 3-compatible
    forms; the rest of the function already used function-call print.
    """

    # Parse command line
    usage = ("Usage: %prog <delta-G-file> [options]\n\n"
             "Gibbs free energy changes are taken from the <delta-g-file> if "
             "given.\nElse, they are computed from the data specified via the "
             "-r/-c/-d/-s/-t set of\noptions.")
    version = "%prog\n" + COPYRIGHT_VERSION_STRING
    parser = OptionParser(usage=usage, version=version)

    # Options that are always relevant.
    standardOptGroup = OptionGroup(parser, "Standard parameters")
    standardOptGroup.add_option("-p", "--parameters", dest="paramFile",
                                help="check the given scenario FILE",
                                metavar="FILE")
    standardOptGroup.add_option("-o", "--output", dest="outputFile",
                                help="write modified scenario file to FILE "
                                "(must not be the same as scenario file)",
                                metavar="FILE")
    standardOptGroup.add_option("-e", "--epsilon", dest="epsilon",
                                help="set THRESHOLD for recognizing Gibbs free "
                                "energy change as clearly positive (default 0)",
                                metavar="THRESHOLD")
    parser.add_option_group(standardOptGroup)

    # Options only needed when delta-G values must be computed.
    computeOptGroup = OptionGroup(
        parser, "Parameters for computation of "
        "Gibbs free energies", "These are only needed"
        " if delta-G values are not read from file.")
    computeOptGroup.add_option("-r", "--reactions", dest="reactionFile",
                               help="use reactions from reaction FILE",
                               metavar="FILE")
    computeOptGroup.add_option("-c", "--concentrations",
                               dest="concentrationFile",
                               help="use "
                               "concentrations from FILE",
                               metavar="FILE")
    computeOptGroup.add_option("-d", "--thermodynamics", dest="thermodynFile",
                               help="use thermodynamic data from FILE",
                               metavar="FILE")
    computeOptGroup.add_option("-s", "--synonyms", dest="synonymFile",
                               help="use metabolite synonyms from FILE",
                               metavar="FILE")
    computeOptGroup.add_option("-t", "--temperature", dest="temperature",
                               help="set temperature to VALUE (in degrees "
                               "Celsius)",
                               metavar="VALUE")
    parser.add_option_group(computeOptGroup)
    parser.set_defaults(epsilon='0.')

    options, args = parser.parse_args()
    # NOTE(review): check_required is not a stock OptionParser method --
    # presumably added by a project-specific subclass; confirm.
    parser.check_required("-p")
    parser.check_required("-o")
    # NOTE(review): -t is required even when a delta-G file is given,
    # although temperature is only used in the computation branch below;
    # kept as-is to preserve existing behavior.
    parser.check_required("-t")

    try:
        epsilon = float(options.epsilon)
    except ValueError:
        print("Error: Invalid floating point value for epsilon (%s)" %
              options.epsilon)
        exit()
    if (os.path.exists(options.outputFile)
            and os.path.samefile(options.outputFile, options.paramFile)):
        print("Error: Input and output scenario files are the same (%s)" %
              options.paramFile)
        exit()

    if epsilon < 0.:
        # Fixed: was a Python 2 print statement.
        print("Warning: epsilon < 0. Using default value of 0 instead.")
        epsilon = 0.

    if len(args) > 0:
        gibbsR = readReaEnthalpiesFromFile(args[0])
    else:
        print(
            "\nInfo: No file with Gibbs free energies given. Launching "
            "computation of values.\n")
        parser.check_required("-r")
        parser.check_required("-c")
        parser.check_required("-d")
        parser.check_required("-s")
        parser.check_required("-t")
        try:
            temperature = float(options.temperature)
        except ValueError:
            print("Error: Invalid floating point value for temperature (%s)" %
                  options.temperature)
            exit()

        # Compute Gibbs free energies from the given data
        gibbsR = getReaEnthalpies(options.concentrationFile,
                                  options.thermodynFile, options.synonymFile,
                                  options.reactionFile, temperature)

    # Parse scenario file
    pparser = ParamParser()
    try:
        # Parse file
        maxmin, obj_name, solver, numiter, lb, ub =\
            pparser.parse(options.paramFile)
    except IOError as strerror:
        # Fixed: was Python 2 "except IOError, strerror" plus a bare
        # print statement.
        print("An error occurred while trying to read file %s:" %
              os.path.basename(options.paramFile))
        print(strerror)
        exit()
コード例 #35
0
"""


_ = os.path.normpath(sys.argv[0])

usage = "python %prog [option(s)]"

parser = OptionParser(usage=usage)

# General options
general = OptionGroup(parser, Style.BRIGHT + "General" + Style.RESET_ALL,
                      "These options relate to general matters. ")

general.add_option("-v",
                   action="store",
                   type="int",
                   dest="verbose",
                   help="Verbosity level (0-4, Default: 0).")

general.add_option("--install",
                   action="store_true",
                   dest="install",
                   default=False,
                   help="Install 'commix' to your system.")

general.add_option("--version",
                   action="store_true",
                   dest="version",
                   help="Show version number and exit.")

general.add_option("--update",
コード例 #36
0
ファイル: minimization.py プロジェクト: swails/JmsScripts
import amber_simulations as am_sim
from optparse import OptionParser, OptionGroup
import os, sys


class MinimizationError(Exception):
    """Raised when a minimization run cannot be set up or fails."""


# Command-line interface: expects a prmtop and an inpcrd as positional args.
parser = OptionParser(usage='%prog [options] <prmtop> <inpcrd>')

# How long the minimization runs.
timing_group = OptionGroup(
    parser, 'Timing Options',
    'These options control how long the simulation is run')
timing_group.add_option(
    '--maxcyc', dest='maxcyc', type='int', default=1000,
    help='Number of minimization steps to run. Default 1000.')
parser.add_option_group(timing_group)

# Implicit-solvent settings for non-periodic systems.
solvent_group = OptionGroup(
    parser, 'Implicit Solvent Options',
    'If the system does not have periodic boundaries, '
    'these options control the implicit solvation model used')
solvent_group.add_option(
    '--igb', dest='igb', type='int', default=5,
    help='GB model to run for non-periodic systems. Must be '
    '1, 2, 5, 7, or 8. Default 5')
parser.add_option_group(solvent_group)
コード例 #37
0
ファイル: poclbm.py プロジェクト: wesavetheworld/poclbm-zcash
                  action='store_true',
                  help="search for and use stratum proxies in subnet")
# Device selection lives on the top-level parser rather than in a group.
parser.add_option(
    '-d', '--device', dest='device', default=[],
    help='comma separated device IDs, by default will use all (for OpenCL - only GPU devices)')

# Options that tune the mining loop itself.
group = OptionGroup(parser, "Miner Options")
group.add_option(
    '-r', '--rate', dest='rate', type='float', default=1,
    help='hash rate display interval in seconds, default=1 (60 with --verbose)')
group.add_option(
    '-e', '--estimate', dest='estimate', type='int', default=900,
    help='estimated rate time window in seconds, default 900 (15 minutes)')
group.add_option(
    '-t',
    '--tolerance',
    dest='tolerance',
    default=2,
コード例 #38
0
def parse_cli_options(argv):
    """Parse command-line options for the VM backup/restore script.

    :param argv: argument list to parse (e.g. ``sys.argv[1:]``).
        Bug fix: this parameter was previously ignored -- the parser
        always fell back to ``sys.argv``; it is now passed through.
    :returns: the populated options object (positional args discarded).

    Prints the program version and exits with status 0 when
    -V/--version is given.
    """
    parser = OptionParser(description=u"VMs Backup & Restore",
                          usage=u"%prog [--help] [options]")
    option_group_CONF = OptionGroup(parser, u"Configuration")
    # NOTE(review): -c/--config and -a/--action use action="store_true"
    # although their help text implies they take a value (a file path /
    # a mode name); left unchanged to preserve the existing CLI
    # contract -- confirm intent before switching to action="store".
    option_group_CONF.add_option("-c",
                                 "--config",
                                 dest="CONF_CFG_PATH",
                                 help=u"Use specific configuration file",
                                 action="store_true")
    option_group_CONF.add_option("-a",
                                 "--action",
                                 dest="CONF_ACTION_MODE",
                                 help=u"Script mode : Backup or Restore",
                                 action="store_true")
    option_group_CONF.add_option("-d",
                                 "--debug",
                                 dest="CONF_DEBUG_RUN",
                                 help=u"Display script debug actions",
                                 action="store_true")
    option_group_CONF.add_option(
        "-D",
        "--dry-run",
        dest="CONF_DRY_RUN",
        help=u"Doesn't do anything, just display what it would have done",
        action="store_true")
    option_group_CONF.add_option("-v",
                                 "--verbose",
                                 dest="CONF_VERBOSE_RUN",
                                 help=u"Increase log verbosity",
                                 action="store_true")
    option_group_CONF.add_option("-V",
                                 "--version",
                                 dest="version",
                                 help=u"Display " + PROG + " script version",
                                 action="store_true",
                                 default=False)
    parser.add_option_group(option_group_CONF)

    # Bug fix: hand the caller-supplied argument list to the parser
    # instead of silently re-reading sys.argv.
    (option, args) = parser.parse_args(argv)

    if option.version:
        print(PROG + "\t version " + VERSION)
        sys.exit(0)
    return option
コード例 #39
0
def getopts():
    # Parse command-line arguments and publish the results through the
    # module-level globals listed below (Python 2 code: note the bare
    # print statements).
    #
    # Two invocation styles are supported:
    #   * legacy positional:  script https://host user pass vendor [verbose]
    #     (detected when sys.argv[1] starts with "https://")
    #   * option style:       -H/-U/-P plus the optional flags declared here
    #
    # Exits via sys.exit(-1) after printing help when arguments are
    # missing or malformed.
    global hosturl, cimport, user, password, vendor, verbose, perfdata, urlise_country, timeout, ignore_list, get_power, get_volts, get_current, get_temp, get_fan, get_lcd
    usage = "usage: %prog -H hostname -U username -P password [-C port -V system -v -p -I XX]\n" \
      "example: %prog -H my-shiny-new-vmware-server -U root -P fakepassword -C 5989 -V auto -I uk\n\n" \
      "or, verbosely:\n\n" \
      "usage: %prog --host=hostname --user=username --pass=password [--cimport=port --vendor=system --verbose --perfdata --html=XX]\n"

    # "version" here is a module-level global, not defined in this function.
    parser = OptionParser(usage=usage, version="%prog " + version)
    group1 = OptionGroup(parser, 'Mandatory parameters')
    group2 = OptionGroup(parser, 'Optional parameters')

    group1.add_option("-H",
                      "--host",
                      dest="host",
                      help="report on HOST",
                      metavar="HOST")
    group1.add_option("-U",
                      "--user",
                      dest="user",
                      help="user to connect as",
                      metavar="USER")
    group1.add_option("-P", "--pass", dest="password", \
        help="password, if password matches file:<path>, first line of given file will be used as password", metavar="PASS")

    group2.add_option("-C",
                      "--cimport",
                      dest="cimport",
                      help="CIM port (default 5989)",
                      metavar="CIMPORT")
    group2.add_option("-V", "--vendor", dest="vendor", help="Vendor code: auto, dell, hp, ibm, intel, or unknown (default)", \
        metavar="VENDOR", type='choice', choices=['auto','dell','hp','ibm','intel','unknown'],default="unknown")
    group2.add_option("-v", "--verbose", action="store_true", dest="verbose", default=False, \
        help="print status messages to stdout (default is to be quiet)")
    group2.add_option("-p", "--perfdata", action="store_true", dest="perfdata", default=False, \
        help="collect performance data for pnp4nagios (default is not to)")
    group2.add_option("-I", "--html", dest="urlise_country", default="", \
        help="generate html links for country XX (default is not to)", metavar="XX")
    group2.add_option("-t", "--timeout", action="store", type="int", dest="timeout", default=0, \
        help="timeout in seconds - no effect on Windows (default = no timeout)")
    group2.add_option("-i", "--ignore", action="store", type="string", dest="ignore", default="", \
        help="comma-separated list of elements to ignore")
    # The --no-* flags default to True and store False, i.e. each metric
    # is collected unless explicitly disabled.
    group2.add_option("--no-power", action="store_false", dest="get_power", default=True, \
        help="don't collect power performance data")
    group2.add_option("--no-volts", action="store_false", dest="get_volts", default=True, \
        help="don't collect voltage performance data")
    group2.add_option("--no-current", action="store_false", dest="get_current", default=True, \
        help="don't collect current performance data")
    group2.add_option("--no-temp", action="store_false", dest="get_temp", default=True, \
        help="don't collect temperature performance data")
    group2.add_option("--no-fan", action="store_false", dest="get_fan", default=True, \
        help="don't collect fan performance data")
    group2.add_option("--no-lcd", action="store_false", dest="get_lcd", default=True, \
        help="don't collect lcd/front display status")

    parser.add_option_group(group1)
    parser.add_option_group(group2)

    # check input arguments
    if len(sys.argv) < 2:
        print "no parameters specified\n"
        parser.print_help()
        sys.exit(-1)
    # if first argument starts with 'https://' we have old-style parameters, so handle in old way
    if re.match("https://", sys.argv[1]):
        # Legacy positional form: exactly host-url, user, password,
        # vendor, plus an optional literal "verbose" fifth argument.
        # check input arguments
        if len(sys.argv) < 5:
            print "too few parameters\n"
            parser.print_help()
            sys.exit(-1)
        if len(sys.argv) > 5:
            if sys.argv[5] == "verbose":
                verbose = True
        hosturl = sys.argv[1]
        user = sys.argv[2]
        password = sys.argv[3]
        vendor = sys.argv[4]
    else:
        # we're dealing with new-style parameters, so go get them!
        (options, args) = parser.parse_args()

        # Making sure all mandatory options appeared.
        mandatories = ['host', 'user', 'password']
        for m in mandatories:
            if not options.__dict__[m]:
                print "mandatory parameter '--" + m + "' is missing\n"
                parser.print_help()
                sys.exit(-1)

        hostname = options.host.lower()
        # if user has put "https://" in front of hostname out of habit, do the right thing
        # hosturl will end up as https://hostname
        if re.match('^https://', hostname):
            hosturl = hostname
        else:
            hosturl = 'https://' + hostname

        # Copy parsed values into the module globals consumed elsewhere.
        user = options.user
        password = options.password
        cimport = options.cimport
        vendor = options.vendor.lower()
        verbose = options.verbose
        perfdata = options.perfdata
        urlise_country = options.urlise_country.lower()
        timeout = options.timeout
        ignore_list = options.ignore.split(',')
        get_power = options.get_power
        get_volts = options.get_volts
        get_current = options.get_current
        get_temp = options.get_temp
        get_fan = options.get_fan
        get_lcd = options.get_lcd

    # if user or password starts with 'file:', use the first string in file as user, second as password
    # NOTE(review): if user has the file: prefix, BOTH user and password
    # are replaced from that file (any -P value is overridden), and a
    # one-token first line would raise IndexError -- confirm intended.
    if (re.match('^file:', user) or re.match('^file:', password)):
        if re.match('^file:', user):
            filextract = re.sub('^file:', '', user)
            filename = open(filextract, 'r')
            filetext = filename.readline().split()
            user = filetext[0]
            password = filetext[1]
            filename.close()
        elif re.match('^file:', password):
            filextract = re.sub('^file:', '', password)
            filename = open(filextract, 'r')
            filetext = filename.readline().split()
            password = filetext[0]
            filename.close()
コード例 #40
0
def parseArgs():
    """Parse command-line arguments for the Flux WiFi LED bulb utility.

    Returns:
        (options, args): the parsed optparse options object and the list
        of positional bulb IP addresses.

    Exits directly (sys.exit) for the informational flags (--examples,
    --timerhelp, --listpresets, --listcolors) and via parser.error()
    on invalid option combinations.
    """
    parser = OptionParser()

    parser.description = "A utility to control Flux WiFi LED Bulbs. "

    # Option groups exist purely to structure the --help output.
    power_group = OptionGroup(parser, 'Power options (mutually exclusive)')
    mode_group = OptionGroup(parser, 'Mode options (mutually exclusive)')
    info_group = OptionGroup(parser, 'Program help and information option')
    other_group = OptionGroup(parser, 'Other options')

    parser.add_option_group(info_group)
    info_group.add_option("-e", "--examples",
                      action="store_true", dest="showexamples", default=False,
                      help="Show usage examples")
    info_group.add_option("--timerhelp",
                      action="store_true", dest="timerhelp", default=False,
                      help="Show detailed help for setting timers")
    info_group.add_option("-l", "--listpresets",
                      action="store_true", dest="listpresets", default=False,
                      help="List preset codes")
    info_group.add_option("--listcolors",
                      action="store_true", dest="listcolors", default=False,
                      help="List color names")

    parser.add_option("-s", "--scan",
                      action="store_true", dest="scan", default=False,
                      help="Search for bulbs on local network")
    parser.add_option("-S", "--scanresults",
                      action="store_true", dest="scanresults", default=False,
                      help="Operate on scan results instead of arg list")
    power_group.add_option("-1", "--on",
                      action="store_true", dest="on", default=False,
                      help="Turn on specified bulb(s)")
    power_group.add_option("-0", "--off",
                      action="store_true", dest="off", default=False,
                      help="Turn off specified bulb(s)")
    parser.add_option_group(power_group)

    mode_group.add_option("-c", "--color", dest="color", default=None,
                  help="Set single color mode.  Can be either color name, web hex, or comma-separated RGB triple",
                  metavar='COLOR')
    mode_group.add_option("-w", "--warmwhite", dest="ww", default=None,
                  help="Set warm white mode (LEVEL is percent)",
                  metavar='LEVEL', type="int")
    mode_group.add_option("-p", "--preset", dest="preset", default=None,
                  help="Set preset pattern mode (SPEED is percent)",
                  metavar='CODE SPEED', type="int", nargs=2)
    mode_group.add_option("-C", "--custom", dest="custom", metavar='TYPE SPEED COLORLIST',
                            default=None, nargs=3,
                            help="Set custom pattern mode. " +
                              "TYPE should be jump, gradual, or strobe. SPEED is percent. " +
                              "COLORLIST is a should be a space-separated list of color names, web hex values, or comma-separated RGB triples")
    parser.add_option_group(mode_group)

    parser.add_option("-i", "--info",
                      action="store_true", dest="info", default=False,
                      help="Info about bulb(s) state")
    parser.add_option("--getclock",
                      action="store_true", dest="getclock", default=False,
                      help="Get clock")
    parser.add_option("--setclock",
                      action="store_true", dest="setclock", default=False,
                      help="Set clock to same as current time on this computer")
    parser.add_option("-t", "--timers",
                      action="store_true", dest="showtimers", default=False,
                      help="Show timers")
    parser.add_option("-T", "--settimer", dest="settimer", metavar='NUM MODE SETTINGS',
                            default=None, nargs=3,
                            help="Set timer. " +
                              "NUM: number of the timer (1-6). " +
                              "MODE: inactive, poweroff, default, color, preset, or warmwhite. " +
                              "SETTINGS: a string of settings including time, repeatdays or date, " +
                              "and other mode specific settings.   Use --timerhelp for more details.")


    other_group.add_option("-v", "--volatile",
                      action="store_true", dest="volatile", default=False,
                      help="Don't persist mode setting with hard power cycle (RGB and WW modes only).")
    parser.add_option_group(other_group)

    # BUGFIX: the usage string previously had an unbalanced bracket.
    parser.usage = "usage: %prog [-sS10cwpCiltThe] [addr1 [addr2 [addr3] ...]]."
    (options, args) = parser.parse_args()

    # Informational flags print and exit immediately.
    if options.showexamples:
        showUsageExamples()
        sys.exit(0)

    if options.timerhelp:
        showTimerHelp()
        sys.exit(0)

    if options.listpresets:
        for c in range(PresetPattern.seven_color_cross_fade, PresetPattern.seven_color_jumping+1):
            print("{:2} {}".format(c, PresetPattern.valtostr(c)))
        sys.exit(0)

    global webcolors_available
    if options.listcolors:
        if webcolors_available:
            for c in utils.get_color_names_list():
                print("{}, ".format(c))
            print("")
        else:
            print("webcolors package doesn't seem to be installed. No color names available")
        sys.exit(0)

    # Pre-process the raw timer settings string into a structured timer.
    if options.settimer:
        new_timer = processSetTimerArgs(parser, options.settimer)
        options.new_timer = new_timer
    else:
        options.new_timer = None

    # Count the mode options that were supplied.  --warmwhite is an integer
    # percentage, so it must be compared against None: a level of 0 is a
    # valid, explicitly-requested mode (previously `if options.ww:` silently
    # ignored it).
    mode_count = 0
    if options.color:           mode_count += 1
    if options.ww is not None:  mode_count += 1
    if options.preset:          mode_count += 1
    if options.custom:          mode_count += 1
    if mode_count > 1:
        parser.error("options --color, --warmwhite, --preset, and --custom are mutually exclusive")

    if options.on and options.off:
        parser.error("options --on and --off are mutually exclusive")

    if options.custom:
        options.custom = processCustomArgs(parser, options.custom)

    if options.color:
        options.color = utils.color_object_to_tuple(options.color)
        if options.color is None:
            parser.error("bad color specification")

    if options.preset:
        if not PresetPattern.valid(options.preset[0]):
            parser.error("Preset code is not in range")

    # asking for timer info, implicitly gets the state
    if options.showtimers:
        options.info = True

    # Require at least one operation unless we are only scanning.
    op_count = mode_count
    if options.on:   op_count += 1
    if options.off:  op_count += 1
    if options.info: op_count += 1
    if options.getclock: op_count += 1
    if options.setclock: op_count += 1
    if options.listpresets: op_count += 1
    if options.settimer: op_count += 1

    if (not options.scan or options.scanresults) and (op_count == 0):
        parser.error("An operation must be specified")

    # if we're not scanning, IP addresses must be specified as positional args
    if  not options.scan and not options.scanresults and not options.listpresets:
        if len(args) == 0:
            parser.error("You must specify at least one IP address as an argument, or use scan results")


    return (options, args)
コード例 #41
0
def build_opts(transformers=None):
    """Create the guessit command-line option parser.

    Args:
        transformers: optional iterable of transformer objects; each one
            gets a chance to register extra options via its
            ``register_options`` hook.

    Returns:
        Tuple of (parser, naming_opts, output_opts, information_opts,
        webservice_opts, other_opts) so callers can extend any group.
    """
    opts = OptionParser(usage='usage: %prog [options] file1 [file2...]')

    # --- Naming ----------------------------------------------------------
    naming_opts = OptionGroup(opts, "Naming")
    opts.add_option_group(naming_opts)
    naming_opts.add_option(
        '-t', '--type', dest='type', default=None,
        help='The suggested file type: movie, episode. If undefined, type will be guessed.')
    naming_opts.add_option(
        '-n', '--name-only', dest='name_only', action='store_true', default=False,
        help='Parse files as name only. Disable folder parsing, extension parsing, and file content analysis.')
    naming_opts.add_option(
        '-c', '--split-camel', dest='split_camel', action='store_true', default=False,
        help='Split camel case part of filename.')
    naming_opts.add_option(
        '', '--disabled-transformers', type='string', action='callback',
        callback=options_list_callback, dest='disabled_transformers', default=None,
        help='List of transformers to disable. Separate transformers names with ";"')

    # --- Output ----------------------------------------------------------
    output_opts = OptionGroup(opts, "Output")
    opts.add_option_group(output_opts)
    output_opts.add_option(
        '-v', '--verbose', action='store_true', dest='verbose', default=False,
        help='Display debug output')
    output_opts.add_option(
        '-P', '--show-property', dest='show_property', default=None,
        help='Display the value of a single property (title, series, videoCodec, year, type ...)')
    output_opts.add_option(
        '-u', '--unidentified', dest='unidentified', action='store_true', default=False,
        help='Display the unidentified parts.')
    output_opts.add_option(
        '-a', '--advanced', dest='advanced', action='store_true', default=False,
        help='Display advanced information for filename guesses, as json output')
    output_opts.add_option(
        '-y', '--yaml', dest='yaml', action='store_true', default=False,
        help='Display information for filename guesses as yaml output (like unit-test)')
    output_opts.add_option(
        '-f', '--input-file', dest='input_file', default=False,
        help='Read filenames from an input file.')
    output_opts.add_option(
        '-d', '--demo', action='store_true', dest='demo', default=False,
        help='Run a few builtin tests instead of analyzing a file')

    # --- Information -----------------------------------------------------
    information_opts = OptionGroup(opts, "Information")
    opts.add_option_group(information_opts)
    information_opts.add_option(
        '-p', '--properties', dest='properties', action='store_true', default=False,
        help='Display properties that can be guessed.')
    information_opts.add_option(
        '-V', '--values', dest='values', action='store_true', default=False,
        help='Display property values that can be guessed.')
    information_opts.add_option(
        '-s', '--transformers', dest='transformers', action='store_true', default=False,
        help='Display transformers that can be used.')
    information_opts.add_option(
        '', '--version', dest='version', action='store_true', default=False,
        help='Display the guessit version.')

    # --- guessit.io web service ------------------------------------------
    webservice_opts = OptionGroup(opts, "guessit.io")
    opts.add_option_group(webservice_opts)
    webservice_opts.add_option(
        '-b', '--bug', action='store_true', dest='submit_bug', default=False,
        help='Submit a wrong detection to the guessit.io service')

    # --- Other features ---------------------------------------------------
    other_opts = OptionGroup(opts, "Other features")
    opts.add_option_group(other_opts)
    other_opts.add_option(
        '-i', '--info', dest='info', default='filename',
        help="The desired information type: filename, video, hash_mpc or a hash from python's "
             "hashlib module, such as hash_md5, hash_sha1, ...; or a list of any of "
             "them, comma-separated")

    # Let each transformer plug its own options into the relevant groups.
    if transformers:
        for transformer in transformers:
            transformer.register_options(opts, naming_opts, output_opts,
                                         information_opts, webservice_opts,
                                         other_opts)

    return opts, naming_opts, output_opts, information_opts, webservice_opts, other_opts
コード例 #42
0
def module_Options(parser):
    """Attach the fuzzing-mode option group to *parser* and return it.

    Args:
        parser: an optparse.OptionParser instance to extend.

    Returns:
        The same parser, with a 'Fuzzing Mode' OptionGroup added.
    """
    from optparse import OptionGroup

    fuzz = OptionGroup(parser, 'Fuzzing Mode',
                       'use this options to set fuzzing parameters')

    fuzz.add_option('-l', '--fuzz-fuzzer-list',
                    default=False, action='store_true', dest='list_fuzzers',
                    help='Display a list of available fuzzers')
    fuzz.add_option('', '--fuzz-fuzzer',
                    dest='fuzzer', default='InviteCommonFuzzer',
                    help='Set fuzzer. Default is InviteCommonFuzzer. Use -l to see a list of all available fuzzers')
    fuzz.add_option('', '--fuzz-crash',
                    default=False, action='store_true', dest='crash_detect',
                    help='Enables crash detection')
    fuzz.add_option('', '--fuzz-crash-method',
                    dest='crash_method', default='OPTIONS',
                    help='Set crash method. By default uses OPTIONS message and stores response.')
    fuzz.add_option('', '--fuzz-crash-no-stop',
                    default=False, action='store_true', dest='no_stop_at_crash',
                    help='If selected prevents the app to be stoped when a crash is detected.')
    fuzz.add_option('', '--fuzz-max',
                    dest='fuzz_max_msgs', default=99999, type="int",
                    help='Sets the maximum number of messages to be sent by fuzzing mode. Default is max available in fuzzer.')
    fuzz.add_option('', '--fuzz-to-file',
                    dest='file_name', default=None,
                    help='Print the output to a file with the given name.')
    fuzz.add_option('', '--fuzz-audit',
                    dest='audit_file_name', default=None,
                    help='Enables fuzzing audit. All messages sent (fuzzing) will be saved into the given file name.')

    parser.add_option_group(fuzz)
    return parser
コード例 #43
0
def parse_args():
    """
    Parse command-line arguments.

    Returns:
        options: the parsed optparse options object.

    Numeric options now declare an explicit optparse type, so values
    supplied on the command line arrive as numbers instead of strings
    (previously e.g. --srate 600 produced the string '600' while the
    default was the int 500).  Sample-count parameters are ints; the
    FOOOF frequency/width bounds are floats so fractional limits such
    as --minwidth 0.5 are accepted.
    """

    # main options
    parser = OptionParser()
    parser.add_option('--subid',
                      dest='subid',
                      help="Subject ID ex: --subid NDARAA075AMK",
                      default=None)
    parser.add_option('--task',
                      dest='task',
                      help="Task ex: --task RestingState",
                      default=None)

    # additional options probably best left as the default
    extopts = OptionGroup(parser, "Additional options")
    extopts.add_option("--rootdir",
                       dest='rootdir',
                       help="Root directory where data is stored ex: --rootdir /Users/mvlombardo/Dropbox/HBN",
                       default="/Users/mvlombardo/Dropbox/HBN")
    extopts.add_option("--srate",
                       dest='srate', type="int",
                       help="Sampling rate in Hz ex: --srate 500",
                       default=500)
    extopts.add_option("--winsize",
                       dest='winsize', type="int",
                       help="Window size in number of samples ex: --winsize 2048",
                       default=2048)
    extopts.add_option("--winstep",
                       dest='winstep', type="int",
                       help="Window step size in number of samples ex: --winstep 100",
                       default=100)
    extopts.add_option("--minfreq",
                       dest='minfreq', type="float",
                       help="Minimum frequency for FOOOF ex: --minfreq 1",
                       default=1.0)
    extopts.add_option("--maxfreq",
                       dest='maxfreq', type="float",
                       help="Maximum frequency for FOOOF ex: --maxfreq 50",
                       default=50.0)
    extopts.add_option("--minwidth",
                       dest='minwidth', type="float",
                       help="Minimum frequency width limit for FOOOF ex: --minwidth 1",
                       default=1.0)
    extopts.add_option("--maxwidth",
                       dest='maxwidth', type="float",
                       help="Maximum frequency width limit for FOOOF ex: --maxwidth 12",
                       default=12.0)
    extopts.add_option("--verbose",
                       action="store_true",
                       dest='verbose',
                       help="Set verbosity ex: --verbose",
                       default=False)
    parser.add_option_group(extopts)

    (options, args) = parser.parse_args()
    return (options)
コード例 #44
0
ファイル: JTSTEST.py プロジェクト: winterjr/jaustoolset
def main():
    """Main entry point: generate, build, and run the JTS unit tests.

    Command-line options select which languages (C++, Java, C#) are
    tested and which phases (generation, build, run) are skipped.
    Returns 0 on completion.
    """
    # These module-level flags/paths are read by the per-language test
    # runners invoked further down.
    global base_dir
    global do_generation
    global do_build
    global do_run
    base_dir = os.getcwd()

    parser = OptionParser(usage="Usage: %prog [options]\n\nRun JTSTEST.py --help for info about the options."\
        "\n\nNote this script must be run from directory GUI/test/atf/ to succeed!")

    langs_group = OptionGroup(parser, "Language Testing Options", "These options allow code generation in" \
        " specific languages to be tested.  If no language testing option is selected, all languages are" \
        " are tested.  Multiple languages can be specified, for example by providing both" \
        "'--test_cpp' and '--test_java'.")

    langs_group.add_option("--test_cpp", dest="test_cpp", action="store_true", default=False,\
        help="Generate, build, and/or run C++ unit tests.")
    langs_group.add_option("--test_java", dest="test_java", action="store_true", default=False,\
        help="Generate, build, and/or run Java unit tests.")
    langs_group.add_option("--test_csharp", dest="test_csharp", action="store_true", default=False,\
        help="Generate, build, and/or run C# unit tests.")
    parser.add_option_group(langs_group)

    parser.add_option("--skip_gen", dest="skip_gen", action="store_true", default=False,\
        help="Skips the code-generation step to save time or maintain alterations to generated code.")
    parser.add_option("--skip_build", dest="skip_build", action="store_true", default=False,\
        help="Skips the test code build step, for faster code-generation testing.")
    parser.add_option("--skip_run", dest="skip_run", action="store_true", default=False,\
        help="Skips the test run step, for testing build issues more quickly.")

    (options, discarded) = parser.parse_args()

    # determine which test steps we should perform
    if options.skip_gen:
        do_generation = False

    if options.skip_build:
        do_build = False

    if options.skip_run:
        do_run = False

    # determine which language to use
    # if any of the --test_X options are invoked, test only in the language(s) invoked.
    # if none are invoked, test all languages.
    num_language_opts = 0
    run_cpp_tests = run_java_tests = run_csharp_tests = False

    if options.test_cpp:
        run_cpp_tests = True
        num_language_opts += 1

    if options.test_java:
        run_java_tests = True
        num_language_opts += 1

    if options.test_csharp:
        run_csharp_tests = True
        num_language_opts += 1

    if num_language_opts <= 0:
        run_cpp_tests = run_java_tests = run_csharp_tests = True

    # Test tables below are (test name, second value passed to the runner;
    # None where no second value is needed).
    array_tests = [
        ('Array1', None),
        ('Array2', None),
        ('Array3', None),
        ('Array4', None),
        ('Array5', None),
        ('Array6', None),
        ('Array7', None),
        ('Array8', None),
        ('Array9', None),
        ('Array10', None),
    ]
    bitfield_tests = [('BitField1', None)]

    body_tests = [
        ('Body1', None),
        ('Body2', None),
        ('Body3', None),
        ('Body4', None),
        ('Body5', None),
        ('Body6', None),
        ('Body7', None),
        ('Body8', None),
        ('Body9', None),
    ]

    # the FixedField1 test is identical to Body1 test, therefore it was removed
    fixedfield_tests = [('FixedField2', None), ('FixedField3', None)]

    header_tests = [
        ('Header1', None),
        ('Header2', None),
        ('Header3', None),
        ('Header4', None),
        ('Header5', None),
        ('Header6', None),
    ]

    list_tests = [
        ('List1', None),
        ('List2', None),
        ('List3', None),
        ('List4', None),
    ]

    record_tests = [
        ('Record10', None),
        ('Record11', None),
        ('Record12', None),
        ('Record15', None),
        ('Record16', None),
    ]

    sequence_tests = [
        ('Sequence1', None),
        ('Sequence2', None),
        ('Sequence3', None),
    ]

    variant_tests = [
        ('Variant1', None),
        ('Variant2', None),
        ('Variant3', None),
        ('Variant4', None),
    ]

    varlength_tests = [
        ('VariableLengthStuff1', None),
    ]

    optionality_tests = [
        ('Optional1', None),
    ]

    # These tables carry a service-module string as the second value.
    simpleset_tests = [('SimpleSet',
                        'src.urn_DeVivo_jaus_services_SimpleDef_1_0'),
                       ('DefaultTransitionSet',
                        'src.urn_DeVivo_jaus_services_DefaultTransDef_1_0')]
    loopback_tests = [
        ('Loopback1', 'src.urn_DeVivo_jaus_services_LoopbackDef_1_0'),
        ('Loopback2', 'src.urn_DeVivo_jaus_services_LoopbackDef_1_0')
    ]
    references_tests = [('References1', None)]
    inheritance_tests = [('Inheritence1', 'src')]

    nestedset_tests = [
        ('NestedSet', 'src.urn_DeVivo_jaus_services_NestedDef_1_0'),
        ('NestedSet2', 'src.urn_DeVivo_jaus_services_NestedDef_1_0'),
    ]

    # One (language name, runner function, build log, test log) tuple per
    # selected language; the log files are opened here and closed at the end.
    per_language_elements = []

    if run_cpp_tests:
        cpp_build_output_file = open(CPP_BLD_OUTPUT_PATH, 'w')
        cpp_test_output_file = open(CPP_TST_OUTPUT_PATH, 'w')
        per_language_elements.append(
            ("C++", run_cpp_test, cpp_build_output_file, cpp_test_output_file))

    if run_java_tests:
        java_build_output_file = open(JAVA_BLD_OUTPUT_PATH, 'w')
        java_test_output_file = open(JAVA_TST_OUTPUT_PATH, 'w')
        per_language_elements.append(
            ("Java", run_java_test, java_build_output_file,
             java_test_output_file))

    if run_csharp_tests:
        csharp_build_output_file = open(CSHARP_BLD_OUTPUT_PATH, 'w')
        csharp_test_output_file = open(CSHARP_TST_OUTPUT_PATH, 'w')
        per_language_elements.append(
            ("C#", run_csharp_test, csharp_build_output_file,
             csharp_test_output_file))

    # Run every test set for every selected language.
    for (language, test_runner, build_output_file,
         test_output_file) in per_language_elements:
        run_test_set_with_runner(test_runner, array_tests, language + " Array Tests", \
            build_output_file, test_output_file)
        run_test_set_with_runner(test_runner, bitfield_tests, language + " BitField Tests", \
            build_output_file, test_output_file)
        run_test_set_with_runner(test_runner, body_tests, language + " Body Tests", \
            build_output_file, test_output_file)
        run_test_set_with_runner(test_runner, fixedfield_tests, language + " FixedField Tests", \
            build_output_file, test_output_file)
        run_test_set_with_runner(test_runner, header_tests, language + " Header Tests", \
            build_output_file, test_output_file)
        run_test_set_with_runner(test_runner, list_tests, language + " List Tests", \
            build_output_file, test_output_file)
        run_test_set_with_runner(test_runner, record_tests, language + " Record Tests", \
            build_output_file, test_output_file)
        run_test_set_with_runner(test_runner, sequence_tests, language + " Sequence Tests", \
            build_output_file, test_output_file)
        run_test_set_with_runner(test_runner, variant_tests, language + " Variant Tests", \
            build_output_file, test_output_file)
        run_test_set_with_runner(test_runner, optionality_tests, language + " Optionality Tests", \
            build_output_file, test_output_file)

        run_test_set_with_runner(test_runner, simpleset_tests, language + " Simple Set Tests", \
            build_output_file, test_output_file)
        run_test_set_with_runner(test_runner, nestedset_tests, language + " Nested Set Tests", \
            build_output_file, test_output_file)
        run_test_set_with_runner(test_runner, loopback_tests, language + " Loopback Tests", \
            build_output_file, test_output_file)
        run_test_set_with_runner(test_runner, references_tests, language + " References Tests", \
            build_output_file, test_output_file)
        run_test_set_with_runner(test_runner, inheritance_tests, language + " Inheritance Tests", \
            build_output_file, test_output_file)

# TODO: 'core1' tests, 'variable length stuff' test - doesn't exist.

    # Close the per-language log files opened above.
    if run_cpp_tests:
        cpp_build_output_file.close()
        cpp_test_output_file.close()

    if run_java_tests:
        java_build_output_file.close()
        java_test_output_file.close()

    if run_csharp_tests:
        csharp_build_output_file.close()
        csharp_test_output_file.close()

    return 0
コード例 #45
0
def parse_options():
    """Parse command-line options for the Message Server monitor example.

    Returns:
        options: the parsed optparse options object.

    Calls parser.error() (which exits) if neither a remote host nor a
    route string was supplied, or if the requested domain is unknown.
    """

    description = "This script is an example implementation of SAP's Message Server Monitor program (msmon). It " \
                  "allows the monitoring of a Message Server service and allows sending different commands and " \
                  "opcodes. Includes some commands not available on the msmon program. Some commands requires the " \
                  "server running in monitor mode, the most requires access to the Message Server internal port."

    epilog = "pysap %(version)s - %(url)s - %(repo)s" % {"version": pysap.__version__,
                                                         "url": pysap.__url__,
                                                         "repo": pysap.__repo__}

    usage = "Usage: %prog [options] -d <remote host>"

    parser = OptionParser(usage=usage, description=description, epilog=epilog)

    # Connection target options.
    target = OptionGroup(parser, "Target")
    target.add_option("-d", "--remote-host", dest="remote_host",
                      help="Remote host")
    target.add_option("-p", "--remote-port", dest="remote_port", type="int", default=3900,
                      help="Remote port [%default]")
    target.add_option("--route-string", dest="route_string",
                      help="Route string for connecting through a SAP Router")
    target.add_option("--domain", dest="domain", default="ABAP",
                      help="Domain to connect to (ABAP, J2EE or JSTARTUP) [%default]")
    parser.add_option_group(target)

    # Logging / scripting options.
    misc = OptionGroup(parser, "Misc options")
    misc.add_option("-c", "--client", dest="client", default="pysap's-monitor",
                    help="Client name [%default]")
    misc.add_option("-v", "--verbose", dest="verbose", action="store_true", default=False,
                    help="Verbose output [%default]")
    misc.add_option("--log-file", dest="logfile", metavar="FILE",
                    help="Log file")
    misc.add_option("--console-log", dest="consolelog", metavar="FILE",
                    help="Console log file")
    misc.add_option("--script", dest="script", metavar="FILE",
                    help="Script file to run")
    parser.add_option_group(misc)

    (options, _) = parser.parse_args()

    # Either a direct host or a SAP Router route string is mandatory.
    if not (options.remote_host or options.route_string):
        parser.error("Remote host or route string is required")
    # Membership test directly on the dict (no need for .keys()).
    if options.domain not in ms_domain_values_inv:
        parser.error("Invalid domain specified")

    return options
コード例 #46
0
def main():
    """ Generate a configuration. """
    parser = OptionParser(version="%%prog %s" % VERSION)
    
    parser.add_options(cone_common.COMMON_OPTIONS)
    
    parser.add_option("-c", "--configuration",\
                        dest="configuration",\
                        help="defines the name of the configuration for the action",\
                        metavar="CONFIG")

    parser.add_option("-p", "--project",\
                       dest="project",\
                       help="defines the location of current project. Default is the current working directory.",\
                       default=".",\
                       metavar="STORAGE")
    
    gen_group = OptionGroup(parser, 'Generate options',
                    'The generate function will create target files from a specific configuration.'\
                    'The generate will always work with read-only mode of the project, so no changes are saved to project')
  
    gen_group.add_option("-o", "--output",\
                   dest="output",\
                   help="defines the target folder where the files are is generated or copied",\
                   metavar="FOLDER",\
                   default="output")

    gen_group.add_option("-i", "--impl",\
                   dest="impls",\
                   action="append",
                   help=\
"""Define a Python regular expression filter for actual ImplML plugin(s) that needs to be executed. The whole path to ImplML filename is used in the regexp matching.
The impl operation can be used several times in a single command.
                                                                        
Example1 --impl crml => matches for any ImplML file that has a CrML string in the path.
Example2 --impl makeml$ => matches for ImplML file that has ends with MakeML string.
""",
                   metavar="IMPLS",\
                   default=None)

    gen_group.add_option("--impl-tag",\
                   dest="tags",\
                   type="string",
                   action="append",
                   help="define a tag for the implementations that are included to the output. "\
                        "A tag is name value pair and has the following format: name:value, e.g. target:rofs3."\
                        "Example --impl-tag=target:uda --impl-tag=target:content, which would include impls include both tags.",
                   metavar="TAG",\
                   default=None)

    gen_group.add_option("--impl-tag-policy",\
                   dest="tags_policy",\
                   type="string",
                   action="append",
                   help="Policy for implementation tags. May have one of the following values: --impl-tag-policy=AND, --impl-tag-policy=OR. "\
                   "Default is OR.",
                   metavar="TAGS_POLICY",\
                   default=None)
    
    gen_group.add_option("-s", "--set",\
                   dest="overrides",\
                   action="append",
                   type="string",
                   help="Override a ConfML reference in the execution."\
                        "The set operation can be used several times in a single command."\
                        "Example -s foo.bar=10 -s foo.fea='test'.",
                   metavar="SET",\
                   default=None)

    gen_group.add_option("--add",\
                   dest="added",\
                   action="append",
                   type="string",
                   help="Add a given configuration to the given configuration as last element."\
                        "The add operation can be used several times in a single command."\
                        "Example --add foo/root.confml --add bar/root-confml.",
                   metavar="CONF",\
                   default=None)

    gen_group.add_option("-r", "--report",\
                   dest="report",\
                   action="store",
                   type="string",
                   help="Generates a report about settings that are properly generated."\
                        "Example -r report.html.",
                   metavar="FILE",\
                   default=None)

    gen_group.add_option("--report-option",\
                   action="append",
                   help="Specifies the report verbose options, that defines "\
                        "what data is included to the report. The option can be "\
                        "used multiple times."\
                        "choises=[default|all]"\
                        "Example --report-option=all",
                   metavar="OPTION",\
                   default=[])

    gen_group.add_option("-t", "--template",\
                   dest="template",\
                   action="store",
                   type="string",
                   help="Template used in report generation."\
                        "Example -t report_template.html.",
                   metavar="FILE",\
                   default=None)
    
    gen_group.add_option("--report-data-output",\
                   type="string",
                   help="Specifies a file where intermediary report data is generated.",
                   metavar="FILE",\
                   default=None)

    gen_group.add_option("-n", "--dryrun",\
                   dest="dryrun",\
                   action="store_true",
                   help="Executes generation without generation output.",
                   default=False)

    gen_group.add_option("--add-setting-file",\
                   dest="settings",\
                   action="append",
                   type="string",
                   help="Generate specific settings in ini format."\
                        "Example -o my_generate_settings.cfg.",
                   metavar="FILE",\
                   default=None)
    gen_group.add_option("--dump-autodata",\
                   dest="dump_autodata",\
                   action="store",
                   type="string",
                   metavar="FILE",
                   help="Specifies a confml file for storing autodata.confml permanently.",
                   default=None)
    gen_group.add_option("-w", "--what",\
                   dest="what",\
                   action="store",
                   type="string",
                   metavar="FILE",
                   help="List output files to a txt file",
                   default=None)
    
    lf_group = OptionGroup(parser, 'Layer filtering options',
                    'Layer filtering options define configuration layers to be used for filtering '\
                    'the implementations that are used to generate output. Filtering by a layer means that '\
                    'only implementations that generate their output based on settings changed on that layer '\
                    'are included in the generation.')
    
    lf_group.add_option("-l", "--layer",\
                   dest="layers",\
                   type="int",
                   action="append",
                   help="Define a layer by giving its index in the root configuration. "\
                        "0 is first, 1 the second, -1 the last, -2 the second to last and so on. "\
                        "The layer operation can be used several times in a single command. "\
                        "Example -l -1 --layer=-2, which would append a layers -1 and -2 to the layers => layers = -1,-2",
                   metavar="LAYER",\
                   default=None)
    
    lf_group.add_option("--layer-regex",
                   dest="layer_regexes",
                   action="append",
                   help="Define a regular expression for including layers into the generation process, "\
                        "e.g. --layer-regex layer[0-9]/root.confml. The pattern is matched against the layer root "\
                        "path, which could be e.g. 'assets/layer1/root.confml'.",
                   metavar="REGEX",)
    
    lf_group.add_option("--layer-wildcard",
                   dest="layer_wildcards",
                   action="append",
                   help="Define a wildcard for including layers into the generation process, e.g "\
                        "--layer-wildcard layer*",
                   metavar="WILDCARD",)
    
    lf_group.add_option("--all-layers",
                   dest="all_layers",
                   action="store_true",
                   help="Include all layers in generation. This switch overrides all other layer "\
                        "configurations (iMaker API and using the --layer, --layer-regex and --layer-wildcard parameters)",
                   default=False)
    
    
    start_time = time.time()
    
    parser.add_option_group(gen_group)
    parser.add_option_group(lf_group)
    (options, _) = parser.parse_args()

    settinglist = [os.path.join(ROOT_PATH,'conesub_generate.cfg')]
    if options.settings:
        for setting_file in options.settings:
            settinglist.append(os.path.normpath(os.path.join(ROOT_PATH, setting_file)))            
    gset = cone_common.get_settings(settinglist)
    
    cone_common.handle_common_options(options, settings=gset)
          
    current = api.Project(api.Storage.open(options.project,"r"))
    active_root = current.get_storage().get_active_configuration()
    if not options.configuration:
        if active_root == "":
            parser.error("configuration must be given")
        else:
            logging.getLogger('cone').info('No configuration given! Using active root configuration %s' % active_root)
            options.configuration = active_root
    try:
        config  = current.get_configuration(options.configuration)
    except exceptions.NotFound:
        parser.error("No such configuration: %s" % options.configuration)
    reffilters = None
    implfilters = None
    impltags = None
    
    # Include possible additional configurations
    if options.added:
        for configname in options.added:
            logging.getLogger('cone').info('Adding configuration %s' % configname) 
            config.include_configuration(utils.resourceref.norm(configname))
    
    # Get implementation filters from configuration
    try:
        implfilters = (config.get_default_view().get_feature('imakerapi.cone_impls').get_value() or '').split(',')
    except exceptions.NotFound:
        implfilters = []
    
    # Get filters from command line if they exist => cmd overrides configuration
    if options.impls:
        implfilters = options.impls
    if options.tags and len(options.tags) > 0:
        impltags = {}
        for tag in options.tags:
            (name,value) = tag.split(':',2)
            existingvalue = impltags.get(name,[])
            existingvalue.append(value)
            impltags[name] = existingvalue
        logging.getLogger('cone').info('Tag filter %s' % impltags)
    else:
        impltags = None
    
    tags_policy = 'OR'
    if options.tags_policy:
        tags_policy = options.tags_policy[0]
    
    
    layerdefs = _get_included_layers(config, options, parser)
    filter_by_refs = _filter_by_refs(config, options, parser)
    
    if layerdefs:
        logging.getLogger('cone').info('Included layers:\n%s' % '\n'.join(layerdefs))
    else:
        logging.getLogger('cone').info('Including all layers')
    
    dview = config.get_default_view()
    # Add data references if included layers are defined
    if len(layerdefs) > 0:
        # get the data references from given layers
        logging.getLogger('cone').info('Getting layer specific data reference from %s' % layerdefs)
        reffilters = []
        for layer_path in utils.distinct_array(layerdefs):
            logging.getLogger('cone').info('Searching layer %s' % layer_path)
            layer = config.get_configuration(layer_path)
            refs = _get_new_refs(reffilters, layer.list_leaf_datas())
            # reduce the refs of sequences to single reference of the sequence feature
            layerrefs = set() 
            for fea in dview.get_features(refs):
                layerrefs.add(fea.fqr)
                if fea.is_sequence():
                    layerrefs.add(fea.get_sequence_parent().fqr)
            
            refs = sorted(list(layerrefs))
            #logging.getLogger('cone').info("Refs from layer '%s'\n%s" % (layer.get_path(), '\n'.join(refs)))
            reffilters += refs
          
    # Make sure that the output folder exists
    if not os.path.exists(options.output):
        os.makedirs(options.output)
        
    impls = plugin.filtered_impl_set(config,implfilters)
    impls.output = options.output
    
    log.info("Parsed %s implementation(s)" % len(impls))
    
    logging.getLogger('cone').info("Supported implementation file extensions: %r" % plugin.get_supported_file_extensions())
    
#    logging.getLogger('cone').debug('Loaded implementations:')
#    for impl in impls:
#        msg = "File '%s', impl. type '%s', class '%s', phase '%s'" % \
#              (impl.ref, impl.IMPL_TYPE_ID, type(impl).__name__, impl.invocation_phase())
#        logging.getLogger('cone').debug(msg)
    
    
    # Create temporary variables
    temp_feature_refs = impls.create_temp_features(config)
    
    if reffilters is not None:
        reffilters.extend(temp_feature_refs)
        logging.getLogger('cone').info('Refs from temporary variables:\n%s' % '\n'.join(temp_feature_refs))
    
    # Set overrides only after temp variables are created, so that
    # they can also be modified from the command line
    if options.overrides:
        # Make sure that the last layer is the autodata layer
        plugin.get_autoconfig(config)
        for override in options.overrides:
            (ref,value) = override.split('=',1)
            config.get_default_view().get_feature(ref).set_value(value)
    
    
    # ---------------
    # Generate output
    # ---------------
    
    context = plugin.GenerationContext(configuration = config,
                                       tags = impltags or {},
                                       tags_policy = tags_policy,
                                       output = options.output,
                                       impl_set = impls,
                                       temp_features = temp_feature_refs,
                                       filter_by_refs = filter_by_refs)
    context.changed_refs = reffilters
    context.output = options.output

    impls.output = options.output
    for phase in impls.INVOCATION_PHASES:
        log.info("Generating phase '%s'" % phase)
        context.phase = phase
        impls.generate(context)
        impls.post_generate(context)
     
    if options.what:
        log.info("Write output files to '%s'" % options.what)
        output_files = []
        for op in context.get_output():
            # Only append once
            if op.type == 'file' and output_files.count(op.abspath) < 1:
                output_files.append(op.abspath)       
        try:
            mkpath(os.path.dirname(os.path.abspath(options.what)))
            what_fh = open(os.path.abspath(options.what), 'w')
            try:
                [what_fh.write('%s\n' % ofile) for ofile in output_files]
                print "Wrote output file list to '%s'" % options.what
            finally:
                what_fh.close()
        except Exception:
            log.info("Could not create directory for '%s'" % options.what)
    
    print "Generated %s to %s!" % (options.configuration, impls.output)
    
    # Store temporary rule execution outputs to a new configuration
    if options.dump_autodata:
        # Make sure autodata layer is the one we're dealing with     
        plugin.get_autoconfig(config)
        lastconfig = config.get_last_configuration()
        lastconfig.set_name(utils.resourceref.to_objref(utils.resourceref.get_filename(utils.resourceref.norm(options.dump_autodata))))
        data = persistentconfml.dumps(lastconfig)
        try:
            mkpath(utils.resourceref.get_path(utils.resourceref.norm(options.dump_autodata)))
            fh = open(options.dump_autodata, 'w')
            try:        fh.write(data)
            finally:    fh.close()
            print 'Saved autodata to %s' % options.dump_autodata
        except DistutilsFileError:
            log.info('Unable to dump autodata')
        
    
    # ---------------
    # Generate report
    # ---------------

    # If reporting is enabled collect data for report
    if options.report != None or options.report_data_output != None:
        logging.getLogger('cone').info('Collecting data for report.')
        
        rep_data = generation_report.ReportData() 
        rep_data.context = context
        rep_data.context.log_file = os.path.abspath(options.log_file)
        rep_data.context.log = _read_log(options.log_file)
        rep_data.project_dir = options.project
        logging.getLogger('cone').info('Collecting data found rep_data  %s' % rep_data)
        
        duration = str("%.3f" % (time.time() - start_time) )
        rep_data.set_duration( duration )
        rep_data.options = options
        
        # Save intermediary report data file if necessary
        if options.report_data_output != None:
            logging.getLogger('cone').info('Dumping report data to %s' % options.report_data_output)
            print "Dumping report data to '%s'" % options.report_data_output
            generation_report.save_report_data(rep_data, options.report_data_output)
        
        # Generate the report if necessary
        if options.report != None:
            generation_report.generate_report([rep_data], options.report, options.template, [ROOT_PATH], options.report_option)
            print_summary(rep_data)
    
    if current: current.close()
コード例 #47
0
    parser = WerrorHtmlParser(opt)
    errors = parser.parse_url(opt.url)

    out = WerrorGenerator(opt)
    out.generate(errors)
    pass


if __name__ == '__main__':
    # Directory containing this script; used as the default output location
    # and as the base for the default werror.h path.
    _cur_dir = os.path.abspath(os.path.dirname(__file__))
    opt_parser = OptionParser(usage="usage: %prog [options]",
                              version="%prog 0.3")
    opt_group = OptionGroup(opt_parser, "Main options")
    # Source document with the Win32 error codes; help text notes it may
    # also be a local file path instead of a remote URL.
    opt_group.add_option(
        "--url",
        dest="url",
        default=_wspp_werror_url,
        help="url for w32 error codes html - may be local file")
    opt_group.add_option("--out",
                         dest="out_dir",
                         default=_cur_dir,
                         help="output dir for generated files")
    opt_group.add_option("--werror",
                         dest="werror_file",
                         default=os.path.join(_cur_dir, 'werror.h'),
                         help="path to werror.h file")
    # Diagnostic switch: report error entries the HTML parser skipped.
    opt_group.add_option("--print_skipped",
                         action="store_true",
                         dest="print_skipped",
                         default=False,
                         help="print errors skipped during HTML parsing")
コード例 #48
0
    print STATUS_OK
    print "metric count uint64", s['count']
    print "metric deleted uint32", s['deleted']


if __name__ == "__main__":
    parser = OptionParser()

    parser.add_option("-H",
                      "--host",
                      action="store",
                      type="string",
                      dest="host",
                      default="http://localhost:9200")

    mg = OptionGroup(parser, "Possible Metric Groups")
    mg.add_option("--cluster-health",
                  action="callback",
                  callback=cluster_health)
    mg.add_option("--stats-store", action="callback", callback=stats_store)
    mg.add_option("--stats-indexing",
                  action="callback",
                  callback=stats_indexing)
    mg.add_option("--stats-get", action="callback", callback=stats_get)
    mg.add_option("--stats-search", action="callback", callback=stats_search)
    mg.add_option("--stats-docs", action="callback", callback=stats_docs)

    parser.add_option_group(mg)
    (options, args) = parser.parse_args()
コード例 #49
0
class MRJob(MRJobLauncher):
    """The base class for all MapReduce jobs. See :py:meth:`__init__`
    for details."""

    # inline can be the default because we have the class object in the same
    # process as the launcher
    _DEFAULT_RUNNER = 'inline'

    def __init__(self, args=None):
        """Entry point for running your job from other Python code.

        You can pass in command-line arguments, and the job will act the same
        way it would if it were run from the command line. For example, to
        run your job on EMR::

            mr_job = MRYourJob(args=['-r', 'emr'])
            with mr_job.make_runner() as runner:
                ...

        Passing in ``None`` is the same as passing in ``[]`` (if you want
        to parse args from ``sys.argv``, call :py:meth:`MRJob.run`).

        For a full list of command-line arguments, run:
        ``python -m mrjob.job --help``

        :param args: optional list of command-line argument strings.
        """
        # Delegate option parsing and setup to MRJobLauncher.
        # NOTE(review): mr_job_script() presumably resolves the path of the
        # subclass's script file -- confirm in MRJobLauncher.
        super(MRJob, self).__init__(self.mr_job_script(), args)

    @classmethod
    def _usage(cls):
        """Usage string shown in command-line help for this job class."""
        return "usage: %prog [options] [input files]"

    ### Defining one-step jobs ###

    def mapper(self, key, value):
        """Re-define this to define the mapper for a one-step job.

        Yields zero or more tuples of ``(out_key, out_value)``.

        :param key: A value parsed from input.
        :param value: A value parsed from input.

        If you don't re-define this, your job will have a mapper that simply
        yields ``(key, value)`` as-is.

        By default (if you don't mess with :ref:`job-protocols`):
         - ``key`` will be ``None``
         - ``value`` will be the raw input line, with newline stripped.
         - ``out_key`` and ``out_value`` must be JSON-encodable: numeric,
           unicode, boolean, ``None``, list, or dict whose keys are unicodes.
        """
        # Intentionally abstract: steps() only wires a mapper into the job
        # when a subclass re-defines this method.
        raise NotImplementedError

    def reducer(self, key, values):
        """Re-define this to define the reducer for a one-step job.

        Yields one or more tuples of ``(out_key, out_value)``

        :param key: A key which was yielded by the mapper
        :param values: A generator which yields all values yielded by the
                       mapper which correspond to ``key``.

        By default (if you don't mess with :ref:`job-protocols`):
         - ``out_key`` and ``out_value`` must be JSON-encodable.
         - ``key`` and each of ``values`` will have been decoded from JSON
           (so tuples will become lists).
        """
        # Intentionally abstract: steps() only wires a reducer into the job
        # when a subclass re-defines this method.
        raise NotImplementedError

    def combiner(self, key, values):
        """Re-define this to define the combiner for a one-step job.

        Yields one or more tuples of ``(out_key, out_value)``

        :param key: A key which was yielded by the mapper
        :param values: A generator which yields all values yielded by one
                       mapper task/node which correspond to ``key``.

        By default (if you don't mess with :ref:`job-protocols`):
         - ``out_key`` and ``out_value`` must be JSON-encodable.
         - ``key`` and each of ``values`` will have been decoded from JSON
           (so tuples will become lists).
        """
        # Intentionally abstract: steps() only wires a combiner into the job
        # when a subclass re-defines this method.
        raise NotImplementedError

    def mapper_init(self):
        """Re-define this to define an action to run before the mapper
        processes any input.

        One use for this function is to initialize mapper-specific helper
        structures.

        Yields one or more tuples of ``(out_key, out_value)``.

        By default, ``out_key`` and ``out_value`` must be JSON-encodable;
        re-define :py:attr:`INTERNAL_PROTOCOL` to change this.
        """
        # Intentionally abstract: steps() only includes this hook when a
        # subclass re-defines it.
        raise NotImplementedError

    def mapper_final(self):
        """Re-define this to define an action to run after the mapper reaches
        the end of input.

        One way to use this is to store a total in an instance variable, and
        output it after reading all input data. See :py:mod:`mrjob.examples`
        for an example.

        Yields one or more tuples of ``(out_key, out_value)``.

        By default, ``out_key`` and ``out_value`` must be JSON-encodable;
        re-define :py:attr:`INTERNAL_PROTOCOL` to change this.
        """
        # Intentionally abstract: steps() only includes this hook when a
        # subclass re-defines it.
        raise NotImplementedError

    def mapper_cmd(self):
        """Re-define this to define the mapper for a one-step job **as a shell
        command.** If you define your mapper this way, the command will be
        passed unchanged to Hadoop Streaming, with some minor exceptions. For
        important specifics, see :ref:`cmd-steps`.

        Basic example::

            def mapper_cmd(self):
                return 'cat'
        """
        # Intentionally abstract: steps() calls this to obtain the command
        # string only when a subclass re-defines it.
        raise NotImplementedError

    def mapper_pre_filter(self):
        """Re-define this to specify a shell command to filter the mapper's
        input before it gets to your job's mapper in a one-step job. For
        important specifics, see :ref:`cmd-filters`.

        Basic example::

            def mapper_pre_filter(self):
                return 'grep "ponies"'
        """
        # Intentionally abstract: only used when a subclass re-defines it.
        raise NotImplementedError

    def reducer_init(self):
        """Re-define this to define an action to run before the reducer
        processes any input.

        One use for this function is to initialize reducer-specific helper
        structures.

        Yields one or more tuples of ``(out_key, out_value)``.

        By default, ``out_key`` and ``out_value`` must be JSON-encodable;
        re-define :py:attr:`INTERNAL_PROTOCOL` to change this.
        """
        # Intentionally abstract: steps() only includes this hook when a
        # subclass re-defines it.
        raise NotImplementedError

    def reducer_final(self):
        """Re-define this to define an action to run after the reducer reaches
        the end of input.

        Yields one or more tuples of ``(out_key, out_value)``.

        By default, ``out_key`` and ``out_value`` must be JSON-encodable;
        re-define :py:attr:`INTERNAL_PROTOCOL` to change this.
        """
        # Intentionally abstract: steps() only includes this hook when a
        # subclass re-defines it.
        raise NotImplementedError

    def reducer_cmd(self):
        """Re-define this to define the reducer for a one-step job **as a shell
        command.** If you define your reducer this way, the command will be
        passed unchanged to Hadoop Streaming, with some minor exceptions. For
        specifics, see :ref:`cmd-steps`.

        Basic example::

            def reducer_cmd(self):
                return 'cat'
        """
        # Intentionally abstract: steps() calls this to obtain the command
        # string only when a subclass re-defines it.
        raise NotImplementedError

    def reducer_pre_filter(self):
        """Re-define this to specify a shell command to filter the reducer's
        input before it gets to your job's reducer in a one-step job. For
        important specifics, see :ref:`cmd-filters`.

        Basic example::

            def reducer_pre_filter(self):
                return 'grep "ponies"'
        """
        # Intentionally abstract: only used when a subclass re-defines it.
        raise NotImplementedError

    def combiner_init(self):
        """Re-define this to define an action to run before the combiner
        processes any input.

        One use for this function is to initialize combiner-specific helper
        structures.

        Yields one or more tuples of ``(out_key, out_value)``.

        By default, ``out_key`` and ``out_value`` must be JSON-encodable;
        re-define :py:attr:`INTERNAL_PROTOCOL` to change this.
        """
        # Intentionally abstract: steps() only includes this hook when a
        # subclass re-defines it.
        raise NotImplementedError

    def combiner_final(self):
        """Re-define this to define an action to run after the combiner reaches
        the end of input.

        Yields one or more tuples of ``(out_key, out_value)``.

        By default, ``out_key`` and ``out_value`` must be JSON-encodable;
        re-define :py:attr:`INTERNAL_PROTOCOL` to change this.
        """
        # Intentionally abstract: steps() only includes this hook when a
        # subclass re-defines it.
        raise NotImplementedError

    def combiner_cmd(self):
        """Re-define this to define the combiner for a one-step job **as a
        shell command.** If you define your combiner this way, the command
        will be passed unchanged to Hadoop Streaming, with some minor
        exceptions. For specifics, see :ref:`cmd-steps`.

        Basic example::

            def combiner_cmd(self):
                return 'cat'
        """
        # Intentionally abstract: steps() calls this to obtain the command
        # string only when a subclass re-defines it.
        raise NotImplementedError

    def combiner_pre_filter(self):
        """Re-define this to specify a shell command to filter the combiner's
        input before it gets to your job's combiner in a one-step job. For
        important specifics, see :ref:`cmd-filters`.

        Basic example::

            def combiner_pre_filter(self):
                return 'grep "ponies"'
        """
        # Intentionally abstract: only used when a subclass re-defines it.
        raise NotImplementedError

    ### Defining multi-step jobs ###

    def steps(self):
        """Re-define this to make a multi-step job.

        The default implementation builds a single step from whichever of
        :py:meth:`mapper`, :py:meth:`mapper_init`, :py:meth:`mapper_final`,
        :py:meth:`reducer_init`, :py:meth:`reducer_final`, and
        :py:meth:`reducer` you've re-defined. For a multi-step job,
        return your own list, e.g.::

            def steps(self):
                return [self.mr(mapper=self.transform_input,
                                reducer=self.consolidate_1),
                        self.mr(reducer_init=self.log_mapper_init,
                                reducer=self.consolidate_2)]

        :return: a list of steps constructed with :py:meth:`mr`
        """
        # Pick up only the step functions a subclass actually re-defined;
        # comparing the underlying functions (im_func) against MRJob's own
        # distinguishes overridden from inherited.
        kwargs = {}
        for func_name in _JOB_STEP_FUNC_PARAMS:
            bound = getattr(self, func_name)
            if bound.im_func is not getattr(MRJob, func_name).im_func:
                kwargs[func_name] = bound

        # *_cmd hooks are written as methods returning a command string,
        # but MRStep wants the string itself -- call them now. Iterate over
        # a snapshot of the keys since we mutate the dict as we go.
        for name in list(kwargs):
            if name.endswith('_cmd'):
                kwargs[name] = kwargs[name]()

        return [MRStep(**kwargs)]

    @classmethod
    def mr(cls, *args, **kwargs):
        """Deprecated shim for :py:class:`~mrjob.step.MRStep` that also
        supports the legacy positional ``(mapper, reducer)`` calling form.
        """
        # Warn differently depending on how the caller used us: positional
        # arguments have an earlier removal deadline than mr() itself.
        if args:
            log.warning('Using positional arguments to MRJob.mr() is'
                        ' deprecated and will be removed in v0.5.0')
        else:
            log.warning('mr() is deprecated and will be removed in v0.6.0.'
                        ' Use mrjob.step.MRStep directly instead.')

        if len(args) > 2:
            raise ValueError('mr() can take at most two positional arguments.')

        # Map the legacy positional form onto MRStep keyword arguments.
        for key, value in zip(('mapper', 'reducer'), args):
            kwargs[key] = value

        return MRStep(**kwargs)

    @classmethod
    def jar(cls, *args, **kwargs):
        """Alias for :py:class:`~mrjob.step.JarStep`.

        .. deprecated:: 0.4.2
        """
        # Warn on every call; this alias adds nothing over JarStep itself.
        log.warning('MRJob.jar() is deprecated and will be removed in'
                    ' v0.5.0. Use mrjob.step.JarStep directly.')
        return JarStep(*args, **kwargs)

    def increment_counter(self, group, counter, amount=1):
        """Increment a counter in Hadoop streaming by printing a reporter
        line to stderr. If either **group** or **counter** is ``unicode``,
        the counter is written as unicode; otherwise it is written as ASCII.
        Writing non-ASCII will succeed, but the resulting counter names may
        not display correctly at the end of the job.

        :type group: str
        :param group: counter group
        :type counter: str
        :param counter: description of the counter
        :type amount: int
        :param amount: how much to increment the counter by

        Commas in ``counter`` or ``group`` are automatically replaced
        with semicolons (commas confuse Hadoop streaming).
        """
        # Reject floats (and anything else non-integral) up front.
        if not isinstance(amount, (int, long)):
            raise TypeError('amount must be an integer, not %r' % (amount, ))

        # Pick the text type and output stream together: unicode input gets
        # a UTF-8-encoding writer, everything else goes to stderr as-is.
        if isinstance(group, unicode) or isinstance(counter, unicode):
            to_text = unicode
            stderr = codecs.getwriter('utf-8')(self.stderr)
        else:
            to_text = str
            stderr = self.stderr

        # Hadoop's reporter protocol can't escape commas (see incrCounter()
        # in PipeMapRed.java), so swap them for semicolons. Casting via
        # to_text() also normalizes non-string inputs (exceptions, etc.).
        group = to_text(group).replace(',', ';')
        counter = to_text(counter).replace(',', ';')

        stderr.write(u'reporter:counter:%s,%s,%d\n' % (group, counter, amount))
        stderr.flush()

    def set_status(self, msg):
        """Set the job status in hadoop streaming by printing to stderr.

        This is also a good way of doing a keepalive for a job that goes a
        long time between outputs; Hadoop streaming usually times out jobs
        that give no output for longer than 10 minutes.

        If the type of **msg** is ``unicode``, then the message will be written
        as unicode. Otherwise, it will be written as ASCII.
        """
        # Unicode messages go through a UTF-8 writer; byte strings are
        # written to stderr untouched.
        if isinstance(msg, unicode):
            out = codecs.getwriter('utf-8')(self.stderr)
            status = u'reporter:status:%s\n' % (msg, )
        else:
            out = self.stderr
            status = 'reporter:status:%s\n' % (msg, )

        # Hadoop streaming picks up "reporter:status:..." lines from stderr.
        out.write(status)
        out.flush()

    ### Running the job ###

    @classmethod
    def run(cls):
        """Entry point for running job from the command-line.

        This is also the entry point when a mapper or reducer is run
        by Hadoop Streaming.

        Does one of:

        * Print step information (:option:`--steps`). See :py:meth:`show_steps`
        * Run a mapper (:option:`--mapper`). See :py:meth:`run_mapper`
        * Run a combiner (:option:`--combiner`). See :py:meth:`run_combiner`
        * Run a reducer (:option:`--reducer`). See :py:meth:`run_reducer`
        * Run the entire job. See :py:meth:`run_job`
        """
        # build the job from sys.argv, then dispatch on the parsed options
        job = cls(args=_READ_ARGS_FROM_SYS_ARGV)
        job.execute()

    def execute(self):
        # MRJob does Hadoop Streaming stuff, or defers to Launcher (superclass)
        # if not otherwise instructed
        #
        # the streaming flags are checked in this fixed order; in practice
        # Hadoop passes at most one of them per invocation
        if self.options.show_steps:
            # --steps: print a JSON description of the job's steps
            self.show_steps()

        elif self.options.run_mapper:
            self.run_mapper(self.options.step_num)

        elif self.options.run_combiner:
            self.run_combiner(self.options.step_num)

        elif self.options.run_reducer:
            self.run_reducer(self.options.step_num)

        else:
            # no streaming flag: actually launch the job (superclass logic)
            super(MRJob, self).execute()

    def make_runner(self):
        """Make a runner based on command-line arguments, so we can
        launch this job on EMR, on Hadoop, or locally.

        :rtype: :py:class:`mrjob.runner.MRJobRunner`

        Raises :py:class:`UsageError` if any Hadoop-Streaming-only flag
        (``--steps``, ``--mapper``, etc.) appears on the command line, which
        usually means this was called from ``__main__``.
        """
        bad_words = ('--steps', '--mapper', '--reducer', '--combiner',
                     '--step-num')
        for w in bad_words:
            # match both "--step-num 1" and "--step-num=1"; a plain
            # `w in sys.argv` test missed the "--opt=value" spelling
            if any(arg == w or arg.startswith(w + '=') for arg in sys.argv):
                raise UsageError("make_runner() was called with %s. This"
                                 " probably means you tried to use it from"
                                 " __main__, which doesn't work." % w)

        # support inline runner when running from the MRJob itself
        # (local import -- presumably avoids a circular dependency between
        # mrjob.inline and this module; confirm)
        from mrjob.inline import InlineMRJobRunner

        if self.options.runner == 'inline':
            return InlineMRJobRunner(mrjob_cls=self.__class__,
                                     **self.inline_job_runner_kwargs())

        return super(MRJob, self).make_runner()

    def run_mapper(self, step_num=0):
        """Run the mapper and final mapper action for the given step.

        :type step_num: int
        :param step_num: which step to run (0-indexed)

        If we encounter a line that can't be decoded by our input protocol,
        or a tuple that can't be encoded by our output protocol, we'll
        increment a counter rather than raising an exception. If
        --strict-protocols is set, then an exception is raised

        Called from :py:meth:`run`. You'd probably only want to call this
        directly from automated tests.
        """
        steps = self.steps()
        if not 0 <= step_num < len(steps):
            raise ValueError('Out-of-range step: %d' % step_num)
        step = steps[step_num]
        mapper = step['mapper']
        mapper_init = step['mapper_init']
        mapper_final = step['mapper_final']

        # NOTE(review): unlike run_reducer()/run_combiner(), there is no
        # check that mapper is not None -- presumably steps always supply a
        # mapper here; confirm.

        # pick input and output protocol
        read_lines, write_line = self._wrap_protocols(step_num, 'mapper')

        if mapper_init:
            # mapper_init may emit (key, value) pairs before any input
            for out_key, out_value in mapper_init() or ():
                write_line(out_key, out_value)

        # run the mapper on each line
        for key, value in read_lines():
            for out_key, out_value in mapper(key, value) or ():
                write_line(out_key, out_value)

        if mapper_final:
            # mapper_final may emit (key, value) pairs after all input
            for out_key, out_value in mapper_final() or ():
                write_line(out_key, out_value)

    def run_reducer(self, step_num=0):
        """Run the reducer for the given step.

        :type step_num: int
        :param step_num: which step to run (0-indexed)

        If we encounter a line that can't be decoded by our input protocol,
        or a tuple that can't be encoded by our output protocol, we'll
        increment a counter rather than raising an exception. If
        --strict-protocols is set, then an exception is raised

        Called from :py:meth:`run`. You'd probably only want to call this
        directly from automated tests.
        """
        steps = self.steps()
        if not 0 <= step_num < len(steps):
            raise ValueError('Out-of-range step: %d' % step_num)

        step = steps[step_num]
        reducer = step['reducer']
        reducer_init = step['reducer_init']
        reducer_final = step['reducer_final']
        if reducer is None:
            raise ValueError('No reducer in step %d' % step_num)

        # protocol-wrapped I/O: read_lines() yields decoded (key, value)
        # pairs; write_line() encodes and emits one output pair
        read_lines, write_line = self._wrap_protocols(step_num, 'reducer')

        if reducer_init:
            # reducer_init may emit pairs before any input is read
            for out_key, out_value in reducer_init() or ():
                write_line(out_key, out_value)

        # Group consecutive pairs sharing a key (input arrives key-sorted
        # from the shuffle) and hand the reducer a lazy value iterator, so
        # arbitrarily large groups never have to fit in memory.
        for key, pairs in itertools.groupby(read_lines(),
                                            key=lambda kv: kv[0]):
            lazy_values = (value for _, value in pairs)
            for out_key, out_value in reducer(key, lazy_values) or ():
                write_line(out_key, out_value)

        if reducer_final:
            # reducer_final may emit pairs after all input is consumed
            for out_key, out_value in reducer_final() or ():
                write_line(out_key, out_value)

    def run_combiner(self, step_num=0):
        """Run the combiner for the given step.

        :type step_num: int
        :param step_num: which step to run (0-indexed)

        If we encounter a line that can't be decoded by our input protocol,
        or a tuple that can't be encoded by our output protocol, we'll
        increment a counter rather than raising an exception. If
        --strict-protocols is set, then an exception is raised

        Called from :py:meth:`run`. You'd probably only want to call this
        directly from automated tests.
        """
        steps = self.steps()
        if not 0 <= step_num < len(steps):
            raise ValueError('Out-of-range step: %d' % step_num)

        step = steps[step_num]
        combiner = step['combiner']
        combiner_init = step['combiner_init']
        combiner_final = step['combiner_final']
        if combiner is None:
            raise ValueError('No combiner in step %d' % step_num)

        # protocol-wrapped I/O: read_lines() yields decoded (key, value)
        # pairs; write_line() encodes and emits one output pair
        read_lines, write_line = self._wrap_protocols(step_num, 'combiner')

        if combiner_init:
            # combiner_init may emit pairs before any input is read
            for out_key, out_value in combiner_init() or ():
                write_line(out_key, out_value)

        # Group consecutive pairs sharing a key and hand the combiner a lazy
        # value iterator, so arbitrarily large groups never have to fit in
        # memory.
        for key, pairs in itertools.groupby(read_lines(),
                                            key=lambda kv: kv[0]):
            lazy_values = (value for _, value in pairs)
            for out_key, out_value in combiner(key, lazy_values) or ():
                write_line(out_key, out_value)

        if combiner_final:
            # combiner_final may emit pairs after all input is consumed
            for out_key, out_value in combiner_final() or ():
                write_line(out_key, out_value)

    def show_steps(self):
        """Print information about how many steps there are, and whether
        they contain a mapper or reducer. Job runners (see
        :doc:`guides/runners`) use this to determine how Hadoop should call
        this script.

        Called from :py:meth:`run`. You'd probably only want to call this
        directly from automated tests.

        We currently output something like ``MR M R``, but expect this to
        change!
        """
        # NOTE(review): the "MR M R" example above predates this JSON
        # format -- the code below emits one JSON line describing all steps
        print >> self.stdout, json.dumps(self._steps_desc())

    def _steps_desc(self):
        # JSON-serializable description of each step, in order (each step
        # object supplies its own description(step_num))
        return [step.description(step_num)
                for step_num, step in enumerate(self.steps())]

    @classmethod
    def mr_job_script(cls):
        """Path of this script. This returns the file containing
        this class.

        Uses :py:func:`inspect.getsourcefile`, which may return ``None``
        for classes defined interactively or without a source file.
        """
        return inspect.getsourcefile(cls)

    ### Other useful utilities ###

    def _read_input(self):
        """Read from stdin, or one or more files, or directories.
        Yield one line at time.

        - Resolve globs (``foo_*.gz``).
        - Decompress ``.gz`` and ``.bz2`` files.
        - If path is ``-``, read from STDIN.
        - Recursively read all files in a directory
        """
        # self.args holds the input paths (see _process_args); no args
        # means read from stdin
        paths = self.args or ['-']
        for path in paths:
            # read_input handles the glob/decompress/dir behavior above
            for line in read_input(path, stdin=self.stdin):
                yield line

    def _wrap_protocols(self, step_num, step_type):
        """Pick the protocol classes to use for reading and writing
        for the given step, and wrap them so that bad input and output
        trigger a counter rather than an exception unless --strict-protocols
        is set.

        Returns a tuple of ``(read_lines, write_line)``

        ``read_lines()`` is a function that reads lines from input, decodes
            them, and yields key, value pairs.
        ``write_line()`` is a function that takes key and value as args,
            encodes them, and writes a line to output.

        :param step_num: which step to run (e.g. 0)
        :param step_type: ``'mapper'``, ``'reducer'``, or ``'combiner'`` from
                          :py:mod:`mrjob.step`
        """
        read, write = self.pick_protocols(step_num, step_type)

        def read_lines():
            # generator: decode each input line into a (key, value) pair
            for line in self._read_input():
                try:
                    key, value = read(line.rstrip('\r\n'))
                    yield key, value
                except Exception as e:
                    if self.options.strict_protocols:
                        raise
                    else:
                        # count the failure instead of dying mid-job
                        self.increment_counter('Undecodable input',
                                               e.__class__.__name__)

        def write_line(key, value):
            # encode one (key, value) pair and emit it (Py2 print statement)
            try:
                print >> self.stdout, write(key, value)
            except Exception as e:
                if self.options.strict_protocols:
                    raise
                else:
                    # count the failure instead of dying mid-job
                    self.increment_counter('Unencodable output',
                                           e.__class__.__name__)

        return read_lines, write_line

    def _step_key(self, step_num, step_type):
        # canonical "<step_num>-<step_type>" key used by the
        # protocol-picking maps (see _script_step_mapping)
        return '{0:d}-{1}'.format(step_num, step_type)

    def _script_step_mapping(self, steps_desc):
        """Return a mapping of ``self._step_key(step_num, step_type)`` ->
        (place in sort order of all *script* steps), for the purposes of
        choosing which protocols to use for input and output.

        Non-script steps do not appear in the mapping.
        """
        mapping = {}
        next_rank = 0
        for step_num, desc in enumerate(steps_desc):
            # only mapper/reducer substeps participate; combiners never do
            for substep in ('mapper', 'reducer'):
                if substep in desc and desc[substep]['type'] == 'script':
                    mapping[self._step_key(step_num, substep)] = next_rank
                    next_rank += 1
        return mapping

    def _mapper_output_protocol(self, step_num, step_map):
        """Protocol instance that the mapper of step *step_num* writes,
        given *step_map* (see :py:meth:`_script_step_mapping`)."""
        map_key = self._step_key(step_num, 'mapper')
        if map_key in step_map:
            if step_map[map_key] >= (len(step_map) - 1):
                # last script substep overall: write the final output protocol
                return self.output_protocol()
            else:
                # more script substeps follow: use the between-steps protocol
                return self.internal_protocol()
        else:
            # mapper is not a script substep, so protocols don't apply at all
            return RawValueProtocol()

    def _pick_protocol_instances(self, step_num, step_type):
        """Return ``(read_protocol, write_protocol)`` instances for the
        given substep, based on where it falls among the *script* substeps
        of the job."""
        steps_desc = self._steps_desc()

        # maps step-key -> rank among script substeps (see above)
        step_map = self._script_step_mapping(steps_desc)

        # pick input protocol

        if step_type == 'combiner':
            # Combiners read and write the mapper's output protocol because
            # they have to be able to run 0-inf times without changing the
            # format of the data.
            # Combiners for non-script substeps can't use protocols, so this
            # function will just give us RawValueProtocol() in that case.
            previous_mapper_output = self._mapper_output_protocol(
                step_num, step_map)
            return previous_mapper_output, previous_mapper_output
        else:
            step_key = self._step_key(step_num, step_type)

            if step_key not in step_map:
                # It's unlikely that we will encounter this logic in real life,
                # but if asked what the protocol of a non-script step is, we
                # should just say RawValueProtocol because we have no idea what
                # the jars or commands are doing with our precious data.
                # If --strict-protocols, though, we won't stand for these
                # shenanigans!
                if self.options.strict_protocols:
                    raise ValueError(
                        "Can't pick a protocol for a non-script step")
                else:
                    p = RawValueProtocol()
                    return p, p

            real_num = step_map[step_key]
            # last script substep writes final output; earlier ones write
            # the internal (between-steps) protocol
            if real_num == (len(step_map) - 1):
                write = self.output_protocol()
            else:
                write = self.internal_protocol()

            # first script substep reads raw job input; later ones read
            # the internal protocol written by the previous substep
            if real_num == 0:
                read = self.input_protocol()
            else:
                read = self.internal_protocol()
            return read, write

    def pick_protocols(self, step_num, step_type):
        """Pick the protocol classes to use for reading and writing for the
        given step.

        :type step_num: int
        :param step_num: which step to run (e.g. ``0`` for the first step)
        :type step_type: str
        :param step_type: one of `'mapper'`, `'combiner'`, or `'reducer'`
        :return: (read_function, write_function)

        By default, we use one protocol for reading input, one
        internal protocol for communication between steps, and one
        protocol for final output (which is usually the same as the
        internal protocol). Protocols can be controlled by setting
        :py:attr:`INPUT_PROTOCOL`, :py:attr:`INTERNAL_PROTOCOL`, and
        :py:attr:`OUTPUT_PROTOCOL`.

        Re-define this if you need fine control over which protocols
        are used by which steps.
        """
        # instance selection lives in _pick_protocol_instances(); exposing
        # just the bound read/write methods keeps callers (and tests) simple
        reader, writer = self._pick_protocol_instances(step_num, step_type)
        return reader.read, writer.write

    ### Command-line arguments ###

    def configure_options(self):
        """Define command-line options.

        Extends the launcher's options with a "Running specific parts of
        the job" group (``--mapper``/``--combiner``/``--reducer``/
        ``--step-num``/``--steps``), which Hadoop Streaming uses to invoke
        this script.
        """
        super(MRJob, self).configure_options()

        # To run mappers or reducers
        # (kept on self so all_option_groups()/_help_main() can reference it)
        self.mux_opt_group = OptionGroup(self.option_parser,
                                         'Running specific parts of the job')
        self.option_parser.add_option_group(self.mux_opt_group)

        self.mux_opt_group.add_option('--mapper',
                                      dest='run_mapper',
                                      action='store_true',
                                      default=False,
                                      help='run a mapper')

        self.mux_opt_group.add_option('--combiner',
                                      dest='run_combiner',
                                      action='store_true',
                                      default=False,
                                      help='run a combiner')

        self.mux_opt_group.add_option('--reducer',
                                      dest='run_reducer',
                                      action='store_true',
                                      default=False,
                                      help='run a reducer')

        self.mux_opt_group.add_option(
            '--step-num',
            dest='step_num',
            type='int',
            default=0,
            help='which step to execute (default is 0)')

        # To describe the steps
        self.mux_opt_group.add_option(
            '--steps',
            dest='show_steps',
            action='store_true',
            default=False,
            help=('print the mappers, combiners, and reducers that this job'
                  ' defines'))

    def all_option_groups(self):
        # extend the launcher's groups with our Hadoop-Streaming mux group
        return super(MRJob, self).all_option_groups() + (self.mux_opt_group, )

    def is_mapper_or_reducer(self):
        """True if this is a mapper/reducer.

        This is mostly useful inside :py:meth:`load_options`, to disable
        loading options when we aren't running inside Hadoop Streaming.
        """
        opts = self.options
        return any((opts.run_mapper, opts.run_combiner, opts.run_reducer))

    def _process_args(self, args):
        """mrjob.launch takes the first arg as the script path, but mrjob.job
        uses all args as input files. This method determines the behavior:
        MRJob uses all args as input files.
        """
        # consumed by _read_input() as the list of input paths
        self.args = args

    def _help_main(self):
        # Show only the most relevant option groups, then exit.
        # NOTE(review): self.proto_opt_group is defined outside this chunk
        # (superclass or elsewhere in the file) -- confirm.
        self.option_parser.option_groups = [
            self.mux_opt_group,
            self.proto_opt_group,
        ]
        self.option_parser.print_help()
        sys.exit(0)

    ### protocols ###

    def input_protocol(self):
        """Instance of the protocol to use to convert input lines to Python
        objects. Default behavior is to return an instance of
        :py:attr:`INPUT_PROTOCOL`.
        """
        if not isinstance(self.INPUT_PROTOCOL, type):
            # non-fatal: warn, then try to call it anyway.
            # log.warning(), not the deprecated log.warn() alias, for
            # consistency with the rest of this module
            log.warning('INPUT_PROTOCOL should be a class, not %s' %
                        self.INPUT_PROTOCOL)
        return self.INPUT_PROTOCOL()

    def internal_protocol(self):
        """Instance of the protocol to use to communicate between steps.
        Default behavior is to return an instance of
        :py:attr:`INTERNAL_PROTOCOL`.
        """
        if not isinstance(self.INTERNAL_PROTOCOL, type):
            # non-fatal: warn, then try to call it anyway.
            # log.warning(), not the deprecated log.warn() alias, for
            # consistency with the rest of this module
            log.warning('INTERNAL_PROTOCOL should be a class, not %s' %
                        self.INTERNAL_PROTOCOL)
        return self.INTERNAL_PROTOCOL()

    def output_protocol(self):
        """Instance of the protocol to use to convert Python objects to output
        lines. Default behavior is to return an instance of
        :py:attr:`OUTPUT_PROTOCOL`.
        """
        if not isinstance(self.OUTPUT_PROTOCOL, type):
            # non-fatal: warn, then try to call it anyway.
            # log.warning(), not the deprecated log.warn() alias, for
            # consistency with the rest of this module
            log.warning('OUTPUT_PROTOCOL should be a class, not %s' %
                        self.OUTPUT_PROTOCOL)
        return self.OUTPUT_PROTOCOL()

    #: Protocol for reading input to the first mapper in your job.
    #: Default: :py:class:`RawValueProtocol`.
    #:
    #: For example, if you know your input data are in JSON format, you
    #: could set::
    #:
    #:     INPUT_PROTOCOL = JSONValueProtocol
    #:
    #: in your class, and your initial mapper would receive decoded JSONs
    #: rather than strings.
    #:
    #: See :py:data:`mrjob.protocol` for the full list of protocols.
    INPUT_PROTOCOL = RawValueProtocol

    #: Protocol for communication between steps and final output.
    #: Default: :py:class:`JSONProtocol`.
    #:
    #: For example, if your step output weren't JSON-encodable, you could
    #: set::
    #:
    #:     INTERNAL_PROTOCOL = PickleProtocol
    #:
    #: and step output would be encoded as string-escaped pickles.
    #:
    #: See :py:data:`mrjob.protocol` for the full list of protocols.
    INTERNAL_PROTOCOL = JSONProtocol

    #: Protocol to use for writing output. Default: :py:class:`JSONProtocol`.
    #:
    #: For example, if you wanted the final output in repr, you could set::
    #:
    #:     OUTPUT_PROTOCOL = ReprProtocol
    #:
    #: See :py:data:`mrjob.protocol` for the full list of protocols.
    OUTPUT_PROTOCOL = JSONProtocol

    def parse_output_line(self, line):
        """
        Parse a line from the final output of this MRJob into
        ``(key, value)``. Used extensively in tests like this::

            runner.run()
            for line in runner.stream_output():
                key, value = mr_job.parse_output_line(line)
        """
        # decode with the same protocol that wrote the final output
        protocol = self.output_protocol()
        return protocol.read(line)

    ### Hadoop Input/Output Formats ###

    #: Optional name of a Hadoop ``InputFormat`` class, e.g.
    #: ``'org.apache.hadoop.mapred.lib.NLineInputFormat'``.
    #:
    #: Passed to Hadoop with the *first* step of this job with the
    #: ``-inputformat`` option.
    #:
    #: If you require more sophisticated behavior, try
    #: :py:meth:`hadoop_input_format` or the *hadoop_input_format* argument to
    #: :py:meth:`mrjob.runner.MRJobRunner.__init__`.
    HADOOP_INPUT_FORMAT = None

    def hadoop_input_format(self):
        """Optional Hadoop ``InputFormat`` class to parse input for
        the first step of the job.

        Normally, setting :py:attr:`HADOOP_INPUT_FORMAT` is sufficient;
        redefining this method is only for when you want to get fancy.
        """
        # default: just expose the class-level constant
        return self.HADOOP_INPUT_FORMAT

    #: Optional name of a Hadoop ``OutputFormat`` class, e.g.
    #: ``'org.apache.hadoop.mapred.FileOutputFormat'``.
    #:
    #: Passed to Hadoop with the *last* step of this job with the
    #: ``-outputformat`` option.
    #:
    #: If you require more sophisticated behavior, try
    #: :py:meth:`hadoop_output_format` or the *hadoop_output_format* argument
    #: to :py:meth:`mrjob.runner.MRJobRunner.__init__`.
    HADOOP_OUTPUT_FORMAT = None

    def hadoop_output_format(self):
        """Optional Hadoop ``OutputFormat`` class to write output for
        the last step of the job.

        Normally, setting :py:attr:`HADOOP_OUTPUT_FORMAT` is sufficient;
        redefining this method is only for when you want to get fancy.
        """
        # default: just expose the class-level constant
        return self.HADOOP_OUTPUT_FORMAT

    ### Partitioning ###

    #: Optional Hadoop partitioner class to use to determine how mapper
    #: output should be sorted and distributed to reducers. For example:
    #: ``'org.apache.hadoop.mapred.lib.HashPartitioner'``.
    #:
    #: If you require more sophisticated behavior, try :py:meth:`partitioner`.
    #: (:py:attr:`SORT_VALUES` can also cause a default partitioner to be
    #: chosen; see :py:meth:`partitioner`.)
    PARTITIONER = None

    def partitioner(self):
        """Optional Hadoop partitioner class to use to determine how mapper
        output should be sorted and distributed to reducers.

        By default, returns whatever is passed to :option:`--partitioner`,
        or if that option isn't used, :py:attr:`PARTITIONER`, or if that
        isn't set, and :py:attr:`SORT_VALUES` is true, it's set to
        ``'org.apache.hadoop.mapred.lib.KeyFieldBasedPartitioner'``.

        You probably don't need to re-define this; it's just here for
        completeness.
        """
        # precedence: command line, then class attribute, then the
        # secondary-sort default (if SORT_VALUES is on)
        if self.options.partitioner:
            return self.options.partitioner
        if self.PARTITIONER:
            return self.PARTITIONER
        if self.SORT_VALUES:
            return 'org.apache.hadoop.mapred.lib.KeyFieldBasedPartitioner'
        return None

    ### Jobconf ###

    #: Optional jobconf arguments we should always pass to Hadoop. This
    #: is a map from property name to value. e.g.:
    #:
    #: ``{'stream.num.map.output.key.fields': '4'}``
    #:
    #: It's recommended that you only use this to hard-code things that
    #: affect the semantics of your job, and leave performance tweaks to
    #: the command line or whatever you use to launch your job.
    #:
    #: Merged with :option:`--jobconf` command-line values by
    #: :py:meth:`jobconf` (command-line values take precedence).
    JOBCONF = {}

    def jobconf(self):
        """``-jobconf`` args to pass to hadoop streaming. This should be a map
        from property name to value.

        By default, this combines :option:`jobconf` options from the command
        lines with :py:attr:`JOBCONF`, with command line arguments taking
        precedence.

        If :py:attr:`SORT_VALUES` is set, we also set these jobconf values::

            stream.num.map.output.key.fields=2
            mapred.text.key.partitioner.options=k1,1

        We also blank out ``mapred.output.key.comparator.class``
        and ``mapred.text.key.comparator.options`` to prevent interference
        from :file:`mrjob.conf`.

        :py:attr:`SORT_VALUES` *can* be overridden by :py:attr:`JOBCONF`, the
        command line, and step-specific ``jobconf`` values.

        For example, if you know your values are numbers, and want to sort
        them in reverse, you could do::

            SORT_VALUES = True

            JOBCONF = {
              'mapred.output.key.comparator.class':
                  'org.apache.hadoop.mapred.lib.KeyFieldBasedComparator',
              'mapred.text.key.comparator.options': '-k1 -k2nr',
            }

        If you want to re-define this, it's strongly recommended that do
        something like this, so as not to inadvertently disable
        the :option:`jobconf` option::

            def jobconf(self):
                orig_jobconf = super(MyMRJobClass, self).jobconf()
                custom_jobconf = ...

                return mrjob.conf.combine_dicts(orig_jobconf, custom_jobconf)
        """

        # deal with various forms of bad behavior by users
        unfiltered_jobconf = combine_dicts(self.JOBCONF, self.options.jobconf)
        filtered_jobconf = {}

        def format_hadoop_version(v_float):
            # render a float as the conventional Hadoop version string
            if v_float >= 1.0:
                # e.g. 1.0
                return '%.1f' % v_float
            else:
                # e.g. 0.18 or 0.20
                return '%.2f' % v_float

        for key, unfiltered_val in unfiltered_jobconf.items():
            filtered_val = unfiltered_val

            # boolean values need to be lowercased (Java-style booleans)
            if isinstance(unfiltered_val, bool):
                filtered_val = 'true' if unfiltered_val else 'false'

            # hadoop_version should be a string
            elif (key == 'hadoop_version'
                  and isinstance(unfiltered_val, float)):
                # log.warning(), not the deprecated log.warn() alias, for
                # consistency with the rest of this module
                log.warning('hadoop_version should be a string, not %s' %
                            unfiltered_val)
                filtered_val = format_hadoop_version(unfiltered_val)

            filtered_jobconf[key] = filtered_val

        if self.SORT_VALUES:
            # secondary-sort settings; explicit values above take precedence
            filtered_jobconf = combine_dicts(_SORT_VALUES_JOBCONF,
                                             filtered_jobconf)

        return filtered_jobconf

    ### Secondary Sort ###

    #: Set this to ``True`` if you would like reducers to receive the values
    #: associated with any key in sorted order (sorted by their *encoded*
    #: value). Also known as secondary sort.
    #:
    #: This can be useful if you expect more values than you can fit in memory
    #: to be associated with one key, but you want to apply information in
    #: a small subset of these values to information in the other values.
    #: For example, you may want to convert counts to percentages, and to do
    #: this you first need to know the total count.
    #:
    #: Even though values are sorted by their *encoded* value, most encodings
    #: preserve the natural ordering of strings. For example, you could have
    #: values like:
    #: ``['A', <total>]``, ``['B', <count_name>, <count>]``, and the value
    #: containing the total should come first regardless of what protocol
    #: you're using.
    #:
    #: See :py:meth:`jobconf()` and :py:meth:`partitioner()` for more about
    #: how this works.
    #:
    #: .. versionadded:: 0.4.1
    SORT_VALUES = None

    ### Testing ###

    def parse_counters(self, counters=None):
        """.. deprecated:: 0.4.2

        Parse the counters from the given sandboxed job's ``self.stderr``;
        superseded :py:func:`mrjob.parse.parse_mr_job_stderr`.

        This was only useful for testing individual mappers/reducers
        without a runner; normally you'd just use
        :py:meth:`runner.counters() <mrjob.runner.MRJobRunner.counters()>`.
        """
        # only meaningful on a sandboxed job, where stderr is a buffer
        # whose contents we can read back
        if self.stderr == sys.stderr:
            raise AssertionError('You must call sandbox() first;'
                                 ' parse_counters() is for testing only.')

        log.warning(
            'parse_counters() is deprecated and will be removed in v0.5.0')

        return parse_mr_job_stderr(self.stderr.getvalue(),
                                   counters)['counters']

    def parse_output(self, protocol=None):
        """.. deprecated:: 0.4.2

        Parse the output from the given sandboxed job's ``self.stdout``.

        This was only useful for testing individual mappers/reducers
        without using a runner; normally you'd just use
        :py:meth:`runner.stream_output()
        <mrjob.runner.MRJobRunner.stream_output()>`

        :type protocol: protocol
        :param protocol: A protocol instance to use. Defaults to
                         ``JSONProtocol()``.
        """
        # only meaningful on a sandboxed job, where stdout is a buffer
        # whose contents we can read back
        if self.stdout == sys.stdout:
            raise AssertionError('You must call sandbox() first;'
                                 ' parse_output() is for testing only.')

        log.warning(
            'parse_output() is deprecated and will be removed in v0.5.0')

        protocol = JSONProtocol() if protocol is None else protocol

        return [protocol.read(line)
                for line in StringIO(self.stdout.getvalue())]
コード例 #50
0
def main():
    """Command-line driver: fit SELAP subtype models to a pathway-probability
    dataset, predict subgroups, and write heatmaps/summaries.

    Expects one positional argument: a pathway x sample matrix file.
    One analysis is run per penalty in the (possibly multi-valued)
    --penalty option; results land under --outpath.  Exits via
    parser.error() on bad arguments.

    NOTE(review): this is a Python 2 script (print statements).
    """
    from optparse import OptionParser, OptionGroup

    # matrix_file should be a pathway x sample file.
    usage = "usage: %prog [options] <dataset>"
    parser = OptionParser(usage=usage, version="%prog 01")

    parser.add_option("",
                      "--selap",
                      dest="selap_path",
                      default=None,
                      help="Specify the path to SELAPv3.")
    parser.add_option("",
                      "--matlab",
                      dest="matlab",
                      default="matlab",
                      help="Specify the command to run matlab.")
    parser.add_option("",
                      "--python",
                      dest="python",
                      default=None,
                      help="Specify the command to run python (optional).")
    parser.add_option("",
                      "--arrayplot",
                      dest="arrayplot",
                      default=None,
                      help="Specify the command to run arrayplot.")
    parser.add_option("",
                      "--cluster",
                      dest="cluster",
                      default=None,
                      help="Specify the command to run cluster.")
    # This doesn't give as much control over exactly which python
    # version is run.
    #parser.add_option(
    #    "", "--binpath", dest="binpath", action="append", default=[],
    #    help="Add to the binary search path.")
    parser.add_option("",
                      "--libpath",
                      dest="libpath",
                      action="append",
                      default=[],
                      help="Add to the Python library search path.")
    parser.add_option("-o",
                      "--outpath",
                      dest="outpath",
                      type="string",
                      default=None,
                      help="Save files in this path.")
    parser.add_option("-z",
                      "--archive",
                      dest="archive",
                      action="store_true",
                      default=None,
                      help="Archive the raw output.  Helpful for GenePattern.")

    group = OptionGroup(parser, "Model Parameters")
    # Higher numbers have more groups.
    # Range from 0 and lower.
    group.add_option(
        "-p",
        "--penalty",
        dest="penalty",
        default="-33",
        help="Penalty for tuning number of subgroups (default -33).")
    group.add_option(
        "-m",
        "--model",
        dest="model_file",
        default=None,
        help="Specify a file that contains a pre-built subtype model.")
    parser.add_option_group(group)

    # Parse the input arguments.
    options, args = parser.parse_args()
    if len(args) != 1:
        parser.error("Please specify a file with pathway probabilities.")
    filename, = args
    if not os.path.exists(filename):
        parser.error("I could not find file %s." % filename)

    # Penalties must be integers; a "." anywhere means a float was given.
    if options.penalty.find(".") >= 0:
        parser.error("Penalties should be integers.")

    if options.libpath:
        sys.path = options.libpath + sys.path
    # Import after the library path is set.
    import arrayio
    from genomicode import genepattern
    from genomicode import archive
    from genomicode import parselib

    genepattern.fix_environ_path()

    # Maximum number of models that someone can create at a time.
    MAX_MODELS = 50

    # Allow people to supply more than one penalty.  Parse into a list
    # of ranges.  Penalties must be integers.
    penalties = []
    for (start, end) in parselib.parse_ranges(options.penalty):
        penalties.extend(range(start, end + 1))
    assert len(penalties) <= MAX_MODELS, "Too many penalties (max is %d)." % \
           MAX_MODELS
    assert penalties, "At least one penalty must be specified."
    # A pre-built model makes sense only with exactly one penalty.
    assert not (options.model_file and len(penalties) != 1)
    for p in penalties:
        assert p <= 0, "Penalties should be negative."

    num_analyses = len(penalties)

    # Set up the files.
    file_layout = make_file_layout(options.outpath, num_analyses, penalties[0])
    init_paths(file_layout)

    # Read the matrix and convert to GCT format.
    MATRIX = arrayio.read(filename)
    MATRIX = arrayio.convert(MATRIX, to_format=arrayio.gct_format)

    # Align this matrix to the SELAP model, if it already exists.
    if options.model_file:
        MATRIX = align_dataset(MATRIX, options.model_file)
    # Write out the data set.
    write_dataset(file_layout.DATASET, MATRIX)

    for penalty in penalties:
        # Set up the files.
        file_layout = make_file_layout(options.outpath, num_analyses, penalty)
        init_paths(file_layout)

        # Make the model.
        write_selap_dataset(file_layout)
        if options.model_file:
            write_model(options.model_file, file_layout)
        else:
            make_model(options.selap_path, penalty, file_layout,
                       options.matlab)

        # Predict the subgroups.
        predict_subgroups(options.selap_path, file_layout, options.matlab)

        # Generate some files for output.
        summarize_predictions(file_layout)
        summarize_heatmap(options.python, options.arrayplot, options.cluster,
                          file_layout, options.libpath)

        # Archive the SELAP stuff, and any other big files.
        if options.archive:
            print "Archiving results."
            archive.zip_path(file_layout.SELAP, noclobber=False)
            archive.zip_path(file_layout.ATTIC, noclobber=False)

        if num_analyses <= 1:
            continue
        # Now do some cleanup if multiple analyses were requested.

        # If there were multiple penalties specified, make a copy of
        # some files for convenience.
        fl = file_layout
        files_to_copy = [
            (fl.PREDICTIONS_PCL, fl.GLOBAL_PREDICTIONS_PCL),
            (fl.PREDICTIONS_PNG, fl.GLOBAL_PREDICTIONS_PNG),
        ]
        # NOTE(review): single-quote shell quoting breaks on paths that
        # themselves contain "'"; shutil.copy2 would be safer — confirm
        # inputs are controlled before changing.
        for src, dst in files_to_copy:
            assert os.path.exists(src)
            os.system("cp -p '%s' '%s'" % (src, dst))

        if options.archive:
            archive.zip_path(file_layout.ANALYSIS)
        sys.stdout.flush()

    if num_analyses > 1:
        summarize_subgroups(options.outpath, num_analyses, penalties)

    print "Done."
コード例 #51
0
ファイル: cli.py プロジェクト: WooodHead/plugins-1
    def __init__(self):
        """Build the command-line parser for the GNTP test client.

        Options are split into network settings (host/port/password,
        defaulting to the ``gntp`` section of the config) and
        notification content, plus a verbosity flag.
        """
        OptionParser.__init__(self, version="%%prog %s" % __version__)

        # Where and how to reach the GNTP server.
        net = OptionGroup(self, "Network Options")
        net.add_option(
            "-H", "--host", dest="host",
            default=config.get('gntp', 'hostname'),
            help="Specify a hostname to which to send a remote notification. [%default]")
        net.add_option(
            "--port", dest="port", type="int",
            default=config.getint('gntp', 'port'),
            help="port to listen on [%default]")
        net.add_option(
            "-P", "--password", dest='password',
            default=config.get('gntp', 'password'),
            help="Network password")
        self.add_option_group(net)

        # What the notification says and how it is presented.
        note = OptionGroup(self, "Notification Options")
        note.add_option(
            "-n", "--name", dest="app",
            default='Python GNTP Test Client',
            help="Set the name of the application [%default]")
        note.add_option(
            "-s", "--sticky", dest='sticky',
            default=False, action="store_true",
            help="Make the notification sticky [%default]")
        note.add_option(
            "--image", dest="icon", default=None,
            help="Icon for notification (URL or /path/to/file)")
        note.add_option(
            "-m", "--message", dest="message", default=None,
            help="Sets the message instead of using stdin")
        note.add_option(
            "-p", "--priority", dest="priority",
            default=0, type="int",
            help="-2 to 2 [%default]")
        note.add_option(
            "-d", "--identifier", dest="identifier",
            help="Identifier for coalescing")
        note.add_option(
            "-t", "--title", dest="title", default=None,
            help="Set the title of the notification [%default]")
        note.add_option(
            "-N", "--notification", dest="name",
            default='Notification',
            help="Set the notification name [%default]")
        note.add_option("--callback", dest="callback", help="URL callback")
        self.add_option_group(note)

        # Extra Options
        self.add_option(
            '-v', '--verbose', dest='verbose',
            default=0, action='count',
            help="Verbosity levels")
コード例 #52
0
    # python gaussianize.py test_data.csv
    import csv
    import sys, os
    import traceback
    from optparse import OptionParser, OptionGroup

    parser = OptionParser(
        usage="usage: %prog [options] data_file.csv \n"
        "It is assumed that the first row and first column of the data CSV file are labels.\n"
        "Use options to indicate otherwise.")
    group = OptionGroup(parser, "Input Data Format Options")
    group.add_option(
        "-c",
        "--no_column_names",
        action="store_true",
        dest="nc",
        default=False,
        help="We assume the top row is variable names for each column. "
        "This flag says that data starts on the first row and gives a "
        "default numbering scheme to the variables (1,2,3...).")
    group.add_option(
        "-r",
        "--no_row_names",
        action="store_true",
        dest="nr",
        default=False,
        help="We assume the first column is a label or index for each sample. "
        "This flag says that data starts on the first column.")
    group.add_option(
        "-d",
        "--delimiter",
コード例 #53
0
ファイル: pyevolve_graph.py プロジェクト: ati-ozgur/Pyevolve
                      default="jet")

    parser.add_option(
        "-m",
        "--minimize",
        action="store_true",
        help="Sets the 'Minimize' mode, default is the Maximize mode. "
        "This option makes sense if you are minimizing your evaluation function.",
        dest="minimize")

    group = OptionGroup(parser, "Graph types",
                        "This is the supported graph types")

    group.add_option(
        "-0",
        action="store_true",
        help="Write all graphs to files. Graph types: 1, 2, 3, 4 and 5.",
        dest="all_graphs")

    group.add_option("-1",
                     action="store_true",
                     help="Error bars graph (raw scores).",
                     dest="errorbars_raw")
    group.add_option("-2",
                     action="store_true",
                     help="Error bars graph (fitness scores).",
                     dest="errorbars_fitness")
    group.add_option("-3",
                     action="store_true",
                     help="Max/min/avg/std. dev. graph (raw scores).",
                     dest="maxmin_raw")
コード例 #54
0
ファイル: PageRank.py プロジェクト: jialicatherine/PathLinker
def main(args):
    '''
    Main method, so this can be used on the command line.

    Parses options, reads the NETWORK edge-list file (tab-separated,
    tail<TAB>head[<TAB>weight]), optionally reads per-node teleportation
    weights, runs PageRank, and writes the node weights to --output.
    '''

    usage = '''
PageRank.py [options] NETWORK
REQUIRED arguments:
    NETWORK - A tab-delimited file with one directed interaction per line. Each
        line should have at least 2 columns: tail, head. Edges are directed from
        tail->head. This file can optionally have a third column specifying the
        edge weight

'''
    parser = OptionParser(usage=usage)

    # General Options
    parser.add_option('-o', '--output', type='string', default='pageranks.txt', metavar='STR',\
        help='Filename to print the resulting weights. (default="pageranks.txt")')

    parser.add_option('-v', '--verbose', action='store_true', default=False,\
        help='Print statistics about each iteration and other information')

    # Random Walk Parameter Group
    group = OptionGroup(parser, 'Random Walk Options')

    group.add_option('-q', '--q-param', action='store', type='float', default=0.5,\
        help='The value of q indicates the probability that the random walker teleports back to a source node during the random walk process. (default=0.5)')

    group.add_option('-e', '--epsilon', action='store', type='float', default=0.01,\
        help='A small value used to test for relative convergence of the iteration. (default=0.01)')

    group.add_option('', '--max-iters', action='store', type='int', default=500,\
        help='Maximum number of iterations to run the PageRank algorithm. (default=500)')

    parser.add_option('', '--tele-weights', type='string', default=None, metavar='STR',\
        help='Incoming teleportation weights for each node, in a tab-separated file ' + \
        'with the form "nodename[tab]weight". If not given, uniform weights are used.')

    parser.add_option_group(group)

    # Parse the command line arguments
    (opts, args) = parser.parse_args()

    # Get the required arguments
    num_req_args = 1
    if len(args) != num_req_args:
        parser.print_help()
        sys.exit(
            '\nERROR: PageRank.py requires %d positional arguments, %d given.'
            % (num_req_args, len(args)))
    NETWORK_FILE = args[0]

    ## Read in the graph from file
    net = nx.DiGraph()

    # Read the network file; 'with' guarantees the handle is closed
    # even if parsing raises.
    print('\nReading the network from %s' % (NETWORK_FILE))
    with open(NETWORK_FILE, 'r') as infile:
        for line in infile:
            # Skip blank lines and '#' comments *before* splitting, so a
            # whitespace-only line can't raise an IndexError below.
            stripped = line.strip()
            if not stripped or stripped.startswith('#'):
                continue

            items = [x.strip() for x in line.rstrip().split('\t')]
            id1 = items[0]
            id2 = items[1]

            # If no weight is given for the edge, assign it a weight of 1.
            eWeight = 1
            if (len(items) > 2):
                eWeight = float(items[2])

            net.add_edge(id1, id2, weight=eWeight)

    ## Read teleportation weights if given
    teleProbs = {}  # Node --> weight
    if opts.tele_weights is not None:

        print('\nReading incoming teleportation probabilities from %s' %
              (opts.tele_weights))
        with open(opts.tele_weights, 'r') as infile:
            for line in infile:
                # Same skip logic as for the network file.
                stripped = line.strip()
                if not stripped or stripped.startswith('#'):
                    continue

                items = [x.strip() for x in line.rstrip().split('\t')]
                node = items[0]
                weight = float(items[1])

                # Every weighted node must exist in the graph.
                if node not in net:
                    print(
                        "Error: Node %s from teleportation probability file is not in graph."
                        % (node))
                    exit(-2)

                teleProbs[node] = weight

    ## Run PageRank
    finalProbs = pagerank(net,
                          weights=teleProbs,
                          q=opts.q_param,
                          eps=opts.epsilon,
                          maxIters=opts.max_iters,
                          verbose=opts.verbose)

    ## Print the result
    print("\nWriting results to " + opts.output)
    writePageRankWeights(finalProbs, filename=opts.output)
コード例 #55
0
        del dataset[data_element.tag]
    if data_element.tag.group == 0x0007:
        del dataset[data_element.tag]


parser = OptionParser()

required = OptionGroup(parser, "Required parameters",
                       "These parameters are required")
optional = OptionGroup(parser, "Optional parameters",
                       "These parameters are optional")

required.add_option(
    "-i",
    "--input-directory",
    action="store",
    type="string",
    dest="inDir",
    help=
    "Directory containing subdirectories of dicom files, organized by series")
optional.add_option("-o",
                    "--output-directory",
                    action="store",
                    type="string",
                    dest="outDir",
                    help="Output directory")
optional.add_option("-r",
                    "--rename",
                    action="store_true",
                    default=False,
                    help="Rename files by SOPInstanceUID [default=%default]")
optional.add_option("-v",
コード例 #56
0
def get_option_parser():
    """Build and return the cutadapt command-line option parser.

    Options are organized into groups: adapter finding, additional read
    modifications, filtering of processed reads, output, and paired-end
    handling.  Several hidden or deprecated options are kept for
    backwards compatibility with their help text suppressed.
    """
    parser = CutadaptOptionParser(usage=__doc__, version=__version__)

    parser.add_option("--debug",
                      action='store_true',
                      default=False,
                      help="Print debugging information.")
    parser.add_option(
        "-f",
        "--format",
        help="Input file format ('fasta' or 'fastq'). Default: auto-detect.")
    parser.add_option(
        '-j',
        '--cores',
        type=int,
        default=1,
        help=
        'Number of CPU cores to use. Use 0 to auto-detect. Default: %default')

    # Hidden options
    parser.add_option(
        "--gc-content",
        type=float,
        default=50,  # it's a percentage
        help=SUPPRESS_HELP)
    parser.add_option(
        "--buffer-size", type=int, default=4000000, help=SUPPRESS_HELP
    )  # buffer size for the reader process when running in parallel

    group = OptionGroup(
        parser,
        "Finding adapters",
        description="Parameters -a, -g, -b specify adapters to be removed from "
        "each read (or from the first read in a pair if data is paired). "
        "If specified multiple times, only the best matching adapter is "
        "trimmed (but see the --times option). When the special notation "
        "'file:FILE' is used, adapter sequences are read from the given "
        "FASTA file.")
    group.add_option(
        "-a",
        "--adapter",
        action="append",
        default=[],
        metavar="ADAPTER",
        dest="adapters",
        help="Sequence of an adapter ligated to the 3' end (paired data: of the "
        "first read). The adapter and subsequent bases are trimmed. If a "
        "'$' character is appended ('anchoring'), the adapter is only "
        "found if it is a suffix of the read.")
    group.add_option(
        "-g",
        "--front",
        action="append",
        default=[],
        metavar="ADAPTER",
        help="Sequence of an adapter ligated to the 5' end (paired data: of the "
        "first read). The adapter and any preceding bases are trimmed. "
        "Partial matches at the 5' end are allowed. If a '^' character is "
        "prepended ('anchoring'), the adapter is only found if it is a "
        "prefix of the read.")
    group.add_option(
        "-b",
        "--anywhere",
        action="append",
        default=[],
        metavar="ADAPTER",
        help="Sequence of an adapter that may be ligated to the 5' or 3' end "
        "(paired data: of the first read). Both types of matches as "
        "described under -a and -g are allowed. If the first base of the "
        "read is part of the match, the behavior is as with -g, otherwise "
        "as with -a. This option is mostly for rescuing failed library "
        "preparations - do not use if you know which end your adapter was "
        "ligated to!")
    group.add_option(
        "-e",
        "--error-rate",
        type=float,
        default=0.1,
        metavar="RATE",
        help="Maximum allowed error rate as value between 0 and 1 (no. of "
        "errors divided by length of matching region). Default: %default (=10%)"
    )
    group.add_option("--no-indels",
                     action='store_false',
                     dest='indels',
                     default=True,
                     help="Allow only mismatches in alignments. "
                     "Default: allow both mismatches and indels")
    group.add_option(
        "-n",
        "--times",
        type=int,
        metavar="COUNT",
        default=1,
        help="Remove up to COUNT adapters from each read. Default: %default")
    group.add_option(
        "-O",
        "--overlap",
        type=int,
        metavar="MINLENGTH",
        default=3,
        help="Require MINLENGTH overlap between read and adapter for an adapter "
        "to be found. Default: %default")
    group.add_option(
        "--match-read-wildcards",
        action="store_true",
        default=False,
        help="Interpret IUPAC wildcards in reads. Default: %default")
    group.add_option("-N",
                     "--no-match-adapter-wildcards",
                     action="store_false",
                     default=True,
                     dest='match_adapter_wildcards',
                     help="Do not interpret IUPAC wildcards in adapters.")
    group.add_option(
        "--action",
        choices=('trim', 'mask', 'none'),
        default='trim',
        help="What to do with found adapters: 'trim', 'mask' or 'none'. "
        "mask: replace with 'N' characters; "
        "none: leave unchanged (useful with "
        "--discard-untrimmed). Default: trim")
    group.add_option("--no-trim",
                     dest='action',
                     action='store_const',
                     const='none',
                     help=SUPPRESS_HELP)  # Deprecated, use --action=none
    group.add_option("--mask-adapter",
                     dest='action',
                     action='store_const',
                     const='mask',
                     help=SUPPRESS_HELP)  # Deprecated, use --action=mask
    parser.add_option_group(group)

    group = OptionGroup(parser, "Additional read modifications")
    group.add_option(
        "-u",
        "--cut",
        action='append',
        default=[],
        type=int,
        metavar="LENGTH",
        help="Remove bases from each read (first read only if paired). "
        "If LENGTH is positive, remove bases from the beginning. "
        "If LENGTH is negative, remove bases from the end. "
        "Can be used twice if LENGTHs have different signs. "
        "This is applied *before* adapter trimming.")
    group.add_option(
        "--nextseq-trim",
        type=int,
        default=None,
        metavar="3'CUTOFF",
        help="NextSeq-specific quality trimming (each read). Trims also dark "
        "cycles appearing as high-quality G bases.")
    group.add_option(
        "-q",
        "--quality-cutoff",
        default=None,
        metavar="[5'CUTOFF,]3'CUTOFF",
        help="Trim low-quality bases from 5' and/or 3' ends of each read before "
        "adapter removal. Applied to both reads if data is paired. If one "
        "value is given, only the 3' end is trimmed. If two "
        "comma-separated cutoffs are given, the 5' end is trimmed with "
        "the first cutoff, the 3' end with the second.")
    group.add_option(
        "--quality-base",
        type=int,
        default=33,
        metavar='N',
        help="Assume that quality values in FASTQ are encoded as ascii(quality "
        "+ N). This needs to be set to 64 for some old Illumina "
        "FASTQ files. Default: %default")
    group.add_option(
        "--length",
        "-l",
        type=int,
        default=None,
        metavar="LENGTH",
        help="Shorten reads to LENGTH. Positive values remove bases at the end "
        "while negative ones remove bases at the beginning. This and the following modifications "
        "are applied after adapter trimming.")
    group.add_option("--trim-n",
                     action='store_true',
                     default=False,
                     help="Trim N's on ends of reads.")
    group.add_option(
        "--length-tag",
        metavar="TAG",
        help="Search for TAG followed by a decimal number in the description "
        "field of the read. Replace the decimal number with the correct "
        "length of the trimmed read. For example, use --length-tag 'length=' "
        "to correct fields like 'length=123'.")
    group.add_option(
        "--strip-suffix",
        action='append',
        default=[],
        help=
        "Remove this suffix from read names if present. Can be given multiple times."
    )
    group.add_option(
        "-x",
        "--prefix",
        default='',
        help=
        "Add this prefix to read names. Use {name} to insert the name of the matching adapter."
    )
    group.add_option(
        "-y",
        "--suffix",
        default='',
        help="Add this suffix to read names; can also include {name}")
    parser.add_option_group(group)

    group = OptionGroup(
        parser,
        "Filtering of processed reads",
        description="Filters are applied after above read modifications. "
        "Paired-end reads are always discarded pairwise (see also "
        "--pair-filter).")
    group.add_option("-m",
                     "--minimum-length",
                     default=None,
                     metavar="LEN[:LEN2]",
                     help="Discard reads shorter than LEN. Default: 0")
    group.add_option("-M",
                     "--maximum-length",
                     default=None,
                     metavar="LEN[:LEN2]",
                     help="Discard reads longer than LEN. Default: no limit")
    group.add_option(
        "--max-n",
        type=float,
        default=None,
        metavar="COUNT",
        help=
        "Discard reads with more than COUNT 'N' bases. If COUNT is a number "
        "between 0 and 1, it is interpreted as a fraction of the read length.")
    group.add_option(
        "--discard-trimmed",
        "--discard",
        action='store_true',
        default=False,
        help="Discard reads that contain an adapter. Also use -O to avoid "
        "discarding too many randomly matching reads!")
    group.add_option("--discard-untrimmed",
                     "--trimmed-only",
                     action='store_true',
                     default=False,
                     help="Discard reads that do not contain an adapter.")
    group.add_option(
        "--discard-casava",
        action='store_true',
        default=False,
        help=
        "Discard reads that did not pass CASAVA filtering (header has :Y:).")
    group.add_option("--zero-cap",
                     "-z",
                     action='store_true',
                     default=False,
                     help="Change negative quality values to zero.")
    parser.add_option_group(group)

    group = OptionGroup(parser, "Output")
    group.add_option("--quiet",
                     default=False,
                     action='store_true',
                     help="Print only error messages.")
    group.add_option(
        "--report",
        choices=('full', 'minimal'),
        default=None,
        help="Which type of report to print: 'full' or 'minimal'. Default: full"
    )
    group.add_option(
        "-o",
        "--output",
        metavar="FILE",
        help="Write trimmed reads to FILE. FASTQ or FASTA format is chosen "
        "depending on input. The summary report is sent to standard output. "
        "Use '{name}' in FILE to demultiplex reads into multiple "
        "files. Default: write to standard output")
    group.add_option(
        "--info-file",
        metavar="FILE",
        help=
        "Write information about each read and its adapter matches into FILE. "
        "See the documentation for the file format.")
    group.add_option(
        "-r",
        "--rest-file",
        metavar="FILE",
        help="When the adapter matches in the middle of a read, write the "
        "rest (after the adapter) to FILE.")
    group.add_option(
        "--wildcard-file",
        metavar="FILE",
        help="When the adapter has N wildcard bases, write adapter bases "
        "matching wildcard positions to FILE. (Inaccurate with indels.)")
    group.add_option(
        "--too-short-output",
        metavar="FILE",
        help="Write reads that are too short (according to length specified by "
        "-m) to FILE. Default: discard reads")
    group.add_option(
        "--too-long-output",
        metavar="FILE",
        help="Write reads that are too long (according to length specified by "
        "-M) to FILE. Default: discard reads")
    group.add_option(
        "--untrimmed-output",
        default=None,
        metavar="FILE",
        help="Write reads that do not contain any adapter to FILE. Default: "
        "output to same file as trimmed reads")
    parser.add_option_group(group)

    group = OptionGroup(
        parser,
        "Paired-end options",
        description="The "
        "-A/-G/-B/-U options work like their -a/-b/-g/-u counterparts, but "
        "are applied to the second read in each pair.")
    group.add_option(
        "-A",
        dest='adapters2',
        action='append',
        default=[],
        metavar='ADAPTER',
        help="3' adapter to be removed from second read in a pair.")
    group.add_option(
        "-G",
        dest='front2',
        action='append',
        default=[],
        metavar='ADAPTER',
        help="5' adapter to be removed from second read in a pair.")
    group.add_option(
        "-B",
        dest='anywhere2',
        action='append',
        default=[],
        metavar='ADAPTER',
        help="5'/3' adapter to be removed from second read in a pair.")
    group.add_option("-U",
                     dest='cut2',
                     action='append',
                     default=[],
                     type=int,
                     metavar="LENGTH",
                     help="Remove LENGTH bases from second read in a pair.")
    group.add_option("-p",
                     "--paired-output",
                     metavar="FILE",
                     help="Write second read in a pair to FILE.")
    # Setting the default for pair_filter to None allows us to find out whether
    # the option was used at all.
    group.add_option(
        "--pair-filter",
        metavar='(any|both|first)',
        default=None,
        choices=("any", "both", "first"),
        help="Which of the reads in a paired-end read have to match the "
        "filtering criterion in order for the pair to be filtered. "
        "Default: any")
    group.add_option("--interleaved",
                     action='store_true',
                     default=False,
                     help="Read and write interleaved paired-end reads.")
    group.add_option(
        "--untrimmed-paired-output",
        metavar="FILE",
        help="Write second read in a pair to this FILE when no adapter "
        "was found. Use with --untrimmed-output. Default: output "
        "to same file as trimmed reads")
    group.add_option(
        "--too-short-paired-output",
        metavar="FILE",
        default=None,
        help="Write second read in a pair to this file if pair is too short. "
        "Use also --too-short-output.")
    group.add_option(
        "--too-long-paired-output",
        metavar="FILE",
        default=None,
        help="Write second read in a pair to this file if pair is too long. "
        "Use also --too-long-output.")
    parser.add_option_group(group)

    # Obsolete colorspace options, accepted (and ignored except for the
    # shared 'colorspace' flag) for backwards compatibility.
    for opt in ("--colorspace", "-c", "-d", "--double-encode", "-t",
                "--trim-primer", "--strip-f3", "--maq", "--bwa",
                "--no-zero-cap"):
        parser.add_option(opt,
                          dest='colorspace',
                          action='store_true',
                          default=False,
                          help=SUPPRESS_HELP)
    parser.set_defaults(colorspace=False)

    return parser
コード例 #57
0
ファイル: Options.py プロジェクト: rockkoca/Nuitka
    default = False,
    help    = """\
Given warnings for implicit exceptions detected at compile time.""",
)


recurse_group = OptionGroup(
    parser,
    "Control the recursion into imported modules"
)


recurse_group.add_option(
    "--recurse-stdlib",
    action  = "store_true",
    dest    = "recurse_stdlib",
    default = False,
    help    = """\
Also descend into imported modules from standard library. Defaults to off."""
)

recurse_group.add_option(
    "--recurse-none",
    action  = "store_true",
    dest    = "recurse_none",
    default = False,
    help    = """\
When --recurse-none is used, do not descend into any imported modules at all,
overrides all other recursion options. Defaults to off."""
)

recurse_group.add_option(
コード例 #58
0
                  help="Anatomical dataset. ex: -a mprage.nii.gz",
                  default='')
parser.add_option('-o',
                  "",
                  action="store_true",
                  dest='oblique',
                  help="Oblique acqusition",
                  default=False)

regropts = OptionGroup(
    parser,
    "Denoising (baseline regression) options, can specify multiple options. Requires anatomical."
)
regropts.add_option('',
                    "--rall",
                    action="store_true",
                    dest='rall',
                    help="Regress all (except white matter).",
                    default=False)
regropts.add_option('',
                    "--rmot",
                    action="store_true",
                    dest='rmot',
                    help="Regress motion.",
                    default=False)
regropts.add_option('',
                    "--rmotd",
                    action="store_true",
                    dest='rmotd',
                    help="Regress motion derivatives.",
                    default=False)
regropts.add_option('',
コード例 #59
0
        help=
        'Directory where models will be saved, defaults to same as --outdir if not specified'
    )
    parser.add_option("--params",
                      metavar="FILE",
                      default="params.pickle",
                      help="Parameters file")
    parser.add_option("--model",
                      metavar="FILE",
                      default="barchybrid.model",
                      help="Load/Save model file")

    group = OptionGroup(parser, "Experiment options")
    group.add_option("--include",
                     metavar="LIST",
                     help="List of languages by ISO code to be run \
if using UD. If not specified need to specify trainfile at least. When used in combination with \
--multiling, trains a common parser for all languages. Otherwise, train monolingual parsers for \
each")
    group.add_option("--json-isos",
                     metavar="FILE",
                     help="JSON file with treebank to ISO dictionary",
                     default="./src/utils/ud2.2_iso.json")
    group.add_option("--trainfile",
                     metavar="FILE",
                     help="Annotated CONLL(U) train file")
    group.add_option("--devfile",
                     metavar="FILE",
                     help="Annotated CONLL(U) dev file")
    group.add_option("--testfile",
                     metavar="FILE",
                     help="Annotated CONLL(U) test file")
コード例 #60
0
def main():
    """Command-line entry point for vireo donor demultiplexing.

    Parses CLI options, loads cell genotype data (a VCF file, a cellSNP
    output folder, or vartrix sparse matrices), optionally loads donor
    genotypes, runs the vireo model, and writes donor assignments, GT
    distance plots, and (when learned) donor genotypes to the output
    directory. Exits with status 1 on invalid/missing input.
    """
    # parse command line options
    parser = OptionParser()
    parser.add_option(
        "--cellData",
        "-c",
        dest="cell_data",
        default=None,
        help=("The cell genotype file in VCF format or cellSNP folder with "
              "sparse matrices."))
    parser.add_option("--nDonor",
                      "-N",
                      type="int",
                      dest="n_donor",
                      default=None,
                      help=("Number of donors to demultiplex; can be larger "
                            "than provided in donor_file"))
    parser.add_option(
        "--outDir",
        "-o",
        dest="out_dir",
        default=None,
        help=("Directory for output files [default: $cellFilePath/vireo]"))

    group0 = OptionGroup(parser, "Optional input files")
    group0.add_option(
        "--vartrixData",
        dest="vartrix_data",
        default=None,
        help=("The cell genotype files in vartrix outputs (three/four files, "
              "comma separated): alt.mtx,ref.mtx,barcodes.tsv,SNPs.vcf.gz. "
              "This will suppress cellData argument."))
    group0.add_option(
        "--donorFile",
        "-d",
        dest="donor_file",
        default=None,
        help=(
            "The donor genotype file in VCF format. Please filter the sample "
            "and region with bcftools -s and -R first!"))
    group0.add_option(
        "--genoTag",
        "-t",
        dest="geno_tag",
        default='PL',
        help=("The tag for donor genotype: GT, GP, PL [default: %default]"))

    group1 = OptionGroup(parser, "Optional arguments")
    group1.add_option("--noDoublet",
                      dest="no_doublet",
                      action="store_true",
                      default=False,
                      help="If use, not checking doublets.")
    group1.add_option(
        "--nInit",
        "-M",
        type="int",
        dest="n_init",
        default=50,
        help=("Number of random initializations, when GT needs to learn "
              "[default: %default]"))
    group1.add_option("--extraDonor",
                      type=int,
                      dest="n_extra_donor",
                      default=0,
                      help=("Number of extra donor in pre-cluster, when GT "
                            "needs to learn [default: %default]"))
    group1.add_option(
        "--extraDonorMode",
        dest="extra_donor_mode",
        default="distance",
        help=("Method for searching from extra donors. "
              "size: n_cell per donor; distance: GT distance between donors "
              "[default: %default]"))
    group1.add_option("--forceLearnGT",
                      dest="force_learnGT",
                      default=False,
                      action="store_true",
                      help="If use, treat donor GT as prior only.")
    group1.add_option("--ASEmode",
                      dest="ASE_mode",
                      default=False,
                      action="store_true",
                      help="If use, turn on SNP specific allelic ratio.")
    group1.add_option("--noPlot",
                      dest="no_plot",
                      default=False,
                      action="store_true",
                      help="If use, turn off plotting GT distance.")
    group1.add_option(
        "--randSeed",
        type="int",
        dest="rand_seed",
        default=None,
        help="Seed for random initialization [default: %default]")
    group1.add_option(
        "--cellRange",
        type="str",
        dest="cell_range",
        default=None,
        help="Range of cells to process, eg. 0-10000 [default: all]")
    group1.add_option("--callAmbientRNAs",
                      dest="check_ambient",
                      default=False,
                      action="store_true",
                      help=("If use, detect ambient RNAs in each cell "
                            "(under development)"))
    group1.add_option(
        "--nproc",
        "-p",
        type="int",
        dest="nproc",
        default=1,
        help=("Number of subprocesses for computing - this sacrifices memory "
              "for speedups [default: %default]"))

    parser.add_option_group(group0)
    parser.add_option_group(group1)
    (options, args) = parser.parse_args()

    if len(sys.argv[1:]) == 0:
        print("Welcome to vireoSNP v%s!\n" % (__version__))
        print("use -h or --help for help on argument.")
        sys.exit(1)

    ## input data must exist before we derive paths from it, otherwise
    ## os.path.abspath(None) below raises an opaque TypeError instead of
    ## this intended error message.
    if options.cell_data is None and options.vartrix_data is None:
        print(
            "Error: need cell data in vcf file, or cellSNP output folder, or "
            "vartrix's alt.mtx,ref.mtx,barcodes.tsv.")
        sys.exit(1)

    ## out directory
    if options.out_dir is None:
        print("Warning: no outDir provided, we use $cellFilePath/vireo.")
        # Fix: previously this always used options.cell_data, crashing with
        # TypeError when only --vartrixData was given; derive the default
        # from whichever input was actually provided.
        if options.cell_data is not None:
            input_path = os.path.abspath(options.cell_data)
        else:
            input_path = os.path.abspath(options.vartrix_data.split(",")[0])
        out_dir = os.path.dirname(input_path) + "/vireo"
    elif os.path.dirname(options.out_dir) == "":
        out_dir = "./" + options.out_dir
    else:
        out_dir = options.out_dir
    if not os.path.exists(out_dir):
        os.mkdir(out_dir)

    ## load input data (VCF.gz or a folder with sparse matrices).
    ## Note: per the help text, vartrixData suppresses cellData.
    if options.vartrix_data is not None:
        print("[vireo] Loading vartrix files ...")
        vartrix_files = options.vartrix_data.split(",")
        if len(vartrix_files) < 3 or len(vartrix_files) > 4:
            print("Error: vartrixData requires 3 or 4 comma separated files")
            sys.exit(1)
        elif len(vartrix_files) == 3:
            # optional SNPs.vcf.gz not supplied
            vartrix_files.append(None)

        cell_dat = read_vartrix(vartrix_files[0], vartrix_files[1],
                                vartrix_files[2], vartrix_files[3])
    elif os.path.isdir(os.path.abspath(options.cell_data)):
        print("[vireo] Loading cell folder ...")
        cell_dat = read_cellSNP(options.cell_data)
    else:
        print("[vireo] Loading cell VCF file ...")
        cell_vcf = load_VCF(options.cell_data, biallelic_only=True)
        cell_dat = read_sparse_GeneINFO(cell_vcf['GenoINFO'],
                                        keys=['AD', 'DP'])
        for _key in [
                'samples', 'variants', 'FixedINFO', 'contigs', 'comments'
        ]:
            cell_dat[_key] = cell_vcf[_key]

    ## subset input cell data if necessary
    if options.cell_range is not None:
        cellRange = options.cell_range.split("-")
        cellRange[0] = int(cellRange[0])
        cellRange[1] = int(cellRange[1])
        cell_dat['AD'] = cell_dat['AD'][:, cellRange[0]:cellRange[1]]
        cell_dat['DP'] = cell_dat['DP'][:, cellRange[0]:cellRange[1]]
        cell_dat['samples'] = cell_dat['samples'][cellRange[0]:cellRange[1]]

    ## input donor genotype
    n_donor = options.n_donor
    if options.donor_file is not None:
        if "variants" not in cell_dat.keys():
            print(
                "No variants information is loaded, please provide base.vcf.gz"
            )
            sys.exit(1)

        print("[vireo] Loading donor VCF file ...")
        donor_vcf = load_VCF(options.donor_file,
                             biallelic_only=True,
                             sparse=False,
                             format_list=[options.geno_tag])

        # Require the requested genotype tag on at least 10% of SNPs;
        # otherwise the tag is likely absent from this VCF.
        if (donor_vcf['n_SNP_tagged'][0] <
            (0.1 * len(donor_vcf['GenoINFO'][options.geno_tag]))):
            print("[vireo] No " + options.geno_tag + " tag in donor genotype; "
                  "please try another tag for genotype, e.g., GT")
            print("        %s" % options.donor_file)
            sys.exit(1)

        cell_dat, donor_vcf = match_donor_VCF(cell_dat, donor_vcf)
        donor_GPb = parse_donor_GPb(donor_vcf['GenoINFO'][options.geno_tag],
                                    options.geno_tag)

        if n_donor is None or n_donor == donor_GPb.shape[1]:
            # use all provided donors with known genotypes
            n_donor = donor_GPb.shape[1]
            donor_names = donor_vcf['samples']
            learn_GT = False
        elif n_donor < donor_GPb.shape[1]:
            # demultiplex a subset; matched back to samples after fitting
            learn_GT = False
            donor_names = ['donor%d' % x for x in range(n_donor)]
        else:
            # more donors than genotyped samples: learn the extra GTs
            learn_GT = True
            donor_names = (
                donor_vcf['samples'] +
                ['donor%d' % x for x in range(donor_GPb.shape[1], n_donor)])
    else:
        learn_GT = True
        donor_GPb = None
        # Fix: range(None) raised TypeError here; fail with a clear message.
        if n_donor is None:
            print("Error: --nDonor is required when no donorFile is given.")
            sys.exit(1)
        donor_names = ['donor%d' % x for x in range(n_donor)]

    # number of expressed variants per cell (for the output summary)
    n_vars = np.array(np.sum(cell_dat['DP'] > 0, axis=0)).reshape(-1)

    if options.force_learnGT:
        learn_GT = True

    # extra donor for initial search, only for learn_GT
    n_extra_donor = 0
    if learn_GT:
        # defensive: type="int" makes None/"None" unlikely, but kept cheap
        if options.n_extra_donor is None or options.n_extra_donor == "None":
            n_extra_donor = int(round(np.sqrt(n_donor)))
        else:
            n_extra_donor = options.n_extra_donor

    # number of initials, only for learn_GT
    n_init = options.n_init if learn_GT else 1

    check_doublet = not options.no_doublet

    ## run vireo model (try multiple initializations)
    print("[vireo] Demultiplex %d cells to %d donors with %d variants." %
          (cell_dat['AD'].shape[1], n_donor, cell_dat['AD'].shape[0]))
    res_vireo = vireo_wrap(cell_dat['AD'],
                           cell_dat['DP'],
                           n_donor=n_donor,
                           GT_prior=donor_GPb,
                           learn_GT=learn_GT,
                           n_init=n_init,
                           n_extra_donor=n_extra_donor,
                           extra_donor_mode=options.extra_donor_mode,
                           check_doublet=check_doublet,
                           random_seed=options.rand_seed,
                           ASE_mode=options.ASE_mode,
                           check_ambient=options.check_ambient,
                           nproc=options.nproc)

    # when a subset of genotyped donors was requested, recover which
    # provided samples the fitted donors correspond to
    if (n_donor is not None and donor_GPb is not None
            and n_donor < donor_GPb.shape[1]):
        idx = optimal_match(res_vireo['GT_prob'], donor_GPb)[1]
        donor_names = [donor_vcf['samples'][x] for x in idx]

    ## save donor id for each cell
    write_donor_id(out_dir, donor_names, cell_dat['samples'], n_vars,
                   res_vireo)

    if not options.no_plot and options.vartrix_data is None:
        # only plot variants with enough total coverage
        idx = np.array(
            np.sum(cell_dat['DP'], axis=1) > (3 * n_donor)).reshape(-1)
        if learn_GT and donor_GPb is not None:
            plot_GT(out_dir, res_vireo['GT_prob'][idx, :, :], donor_names,
                    donor_GPb[idx, :, :], donor_vcf['samples'])
        else:
            plot_GT(out_dir, res_vireo['GT_prob'][idx, :, :], donor_names)

    ## save inferred donor genotype
    if learn_GT and 'variants' in cell_dat.keys():
        donor_vcf_out = cell_dat
        donor_vcf_out['samples'] = donor_names
        donor_vcf_out['GenoINFO'] = GenoINFO_maker(
            res_vireo['GT_prob'], cell_dat['AD'] * res_vireo['ID_prob'],
            cell_dat['DP'] * res_vireo['ID_prob'])
        write_VCF(out_dir + "/GT_donors.vireo.vcf.gz", donor_vcf_out)

    run_time = time.time() - START_TIME
    print("[vireo] All done: %d min %.1f sec" %
          (int(run_time / 60), run_time % 60))
    print()