Example #1
class Args:
    def __init__(self, Prog=Prog(), args_nbr=0, Opts=[]):
        self.parser = OptionParser(version="%prog " + Prog.version,
                                   usage=Prog.usage)
        self.prog = self.parser.get_prog_name()

        self._process_options(Opts)
        (self.options, self.args) = self.parser.parse_args()
        self._verify_args(args_nbr)

    def _verify_args(self, nbr):
        """Verify that the number of arguments is correct."""
        if len(self.args) != nbr:
            self._show_error('incorrect number of arguments')

    def _show_error(self, error_msg):
        """Print the error message and usage"""
        print '%s: error: %s' % (self.prog, error_msg)
        self.parser.print_help()
        sys.exit(1)

    def _process_options(self, Opts):
        """Add the options to the option parser"""
        for Opt in Opts:
            self.parser.add_option(Opt.short,
                                   Opt.long,
                                   action=Opt.action,
                                   dest=Opt.dest,
                                   metavar=Opt.metavar,
                                   default=Opt.default,
                                   help=Opt.help
                                  )
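The Prog and Opt classes this wrapper depends on are not part of the excerpt. A minimal sketch of what they need to provide and how the wrapper might be driven; the stand-in classes below are hypothetical, and in the original module they would have to be defined before Args, since Prog() is evaluated in the default argument:

import sys
from optparse import OptionParser

class Prog(object):
    # Stand-in: only the attributes Args reads (version, usage).
    version = "1.0"
    usage = "%prog [options] FILE"

class Opt(object):
    # Stand-in container mirroring the attributes _process_options() reads.
    def __init__(self, short, long, action="store", dest=None,
                 metavar=None, default=None, help=None):
        self.short, self.long = short, long
        self.action, self.dest = action, dest
        self.metavar, self.default, self.help = metavar, default, help

# Expects exactly one positional argument and offers a single -o/--output option.
args = Args(Prog(), args_nbr=1,
            Opts=[Opt("-o", "--output", dest="output", metavar="FILE",
                      help="write results to FILE")])
print args.options.output, args.args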
Example #2
def main():
    """
    Main
    """

    # Parse the command-line arguments
    usage   = "Usage: %prog [option ...]"
    version ="%%prog %s\nCopyright (C) 2014 Yuichiro SAITO." % ( PROGRAM_VERSION )
    parser  = OptionParser( usage = usage, version = version )
    parser.add_option("-w", "--warning",
                      type="string",
                      dest="warning",
                      metavar="<free>",
                      help="Exit with WARNING status if less than value of space is free. You can choice kilobyte (integer) or percent (%).")
    parser.add_option("-c", "--critical",
                      type="string",
                      dest="critical",
                      metavar="<free>",
                      help="Exit with CRITICAL status if less than value of space is free. You can choice kilobyte (integer) or percent (%).")
    parser.add_option("-s", "--without_swap",
                      action="store_true",
                      dest="withoutswap",
                      default=False,
                      help="Calculate without swap. Default is False.")
    parser.add_option("-V", "--verbose",
                      action="store_true",
                      dest="verbose",
                      default=False,
                      help="Verbose mode. (For debug only)")
    ( options, args ) = parser.parse_args()
    prog_name = parser.get_prog_name()

    if len( sys.argv ) < 4:
        OptionParser.print_version( parser )
        return _MemFree.STATE_UNKNOWN

    if options.verbose:
        logging.basicConfig( level=logging.DEBUG, format = LOG_FORMAT )
    else:
        logging.basicConfig( level=logging.WARNING, format = LOG_FORMAT )

    logging.debug( "START" )

    # Run the checks
    mem_free = _MemFree()
    ret = mem_free.setWarning( options.warning )
    if ret != _MemFree.STATE_OK:
        logging.debug( "EXIT" )
        return ret
    ret = mem_free.setCritical( options.critical )
    if ret != _MemFree.STATE_OK:
        logging.debug( "EXIT" )
        return ret
    ret = mem_free.checkMemFree( options.withoutswap )
    if ret != _MemFree.STATE_OK:
        logging.debug( "EXIT" )
        return ret

    logging.debug( "END" )
Example #3
def main():
    """
    Main
    """

    # Parse the command-line arguments
    usage   = "Usage: %prog [option ...]"
    version ="%%prog %s\nCopyright (C) Yuichiro SAITO." % ( PROGRAM_VERSION )
    parser  = OptionParser( usage = usage, version = version )
    parser.add_option("-w", "--warning",
                      type="int",
                      dest="warning",
                      metavar="<pages>",
                      default=100,
                      help="Exit with WARNING status if more than major page faults per sec. Default value is 100.")
    parser.add_option("-c", "--critical",
                      type="int",
                      dest="critical",
                      metavar="<pages>",
                      default=1000,
                      help="Exit with CRITICAL status if more than major page faults per sec. Default value is 1000.")
    parser.add_option("-i", "--interval",
                      type="int",
                      dest="interval",
                      metavar="<seconds>",
                      default=2,
                      help="Check interval. Default value is 2 (sec).")
    parser.add_option("-V", "--verbose",
                      action="store_true",
                      dest="verbose",
                      default=False,
                      help="Verbose mode. (For debug only)")
    ( options, args ) = parser.parse_args()
    prog_name = parser.get_prog_name()

    if options.verbose:
        logging.basicConfig( level=logging.DEBUG, format = LOG_FORMAT )
    else:
        logging.basicConfig( level=logging.WARNING, format = LOG_FORMAT )

    logging.debug( "START" )

    # Run the checks
    pf = _PageFault( options.interval )
    ret = pf.setWarning( options.warning )
    if ret != _PageFault.STATE_OK:
        logging.debug( "EXIT" )
        return ret
    ret = pf.setCritical( options.critical )
    if ret != _PageFault.STATE_OK:
        logging.debug( "EXIT" )
        return ret
    ret = pf.checkMajorPageFaluts()
    if ret != _PageFault.STATE_OK:
        logging.debug( "EXIT" )
        return ret

    logging.debug( "END" )
Example #4
class ConfigureTextProducts:
    """Command Line Interface for the TextProductsGenerator
    """ 

    USAGE = """Usage: %prog [OPTIONS...] SITE_ID [DESTINATION_DIR]
    
    This script automatically configures the GFESuite set of text formatters 
    based on an afos2awips PIL/WMOID table and a given SITE_ID.  Text formatters
    are placed into the given DESTINATION_DIR.
    
    For example: %prog OAX ~/awips/edex/opt/data/utility/cave_static/site/OAX/gfe/userPython/textProducts"""

    def __init__(self):
        """Class constructor
        
        This constructor initializes the OptionParser
        """
        from optparse import OptionParser
        
        self.__optionParser = OptionParser(ConfigureTextProducts.USAGE)
        
        self.programName = self.__optionParser.get_prog_name()

    def main(self):
        """System entry point.
        
        Executes this script from the command line.
        """

        # get the command line options
        (option, arg) = self.__parseOptions()
        
        request = ConfigureTextProductsRequest()
        request.mode = option.mode.lower()
        request.template = option.template
        request.site = arg[0]
        if (len(arg) > 1):
            request.destinationDir = arg[1]
        else:
            request.destinationDir = None
        
        response = None
        try:
            thriftClient = ThriftClient.ThriftClient(option.host)
            response = thriftClient.sendRequest(request)
        except Exception, e:
            self.__error(e, 2)
        
        if response is not None and \
           response.msg is not None and \
           not "" == response.msg:
            self.__error(response.msg, 2)
Example #5
def main():
	parser = OptionParser()
	parser.set_usage(parser.get_prog_name() + " -a exp1.searchConfig.xml exp2.searchConfig.xml\n"
				+ " OR " + parser.get_prog_name() + " -m exp1.YYY.csv exp2.YYY.csv [...]")
	parser.set_description("""Combines BehaviorSearch .csv files (that were created using the same search configuration.)

In -a (auto mode), it will use the *.searchConfig.xml files you specify to find all of the matching CSV search results files, and combine them
into a new file, named based on the common filename stem of the combined files (e.g. the files xxxx_00.yyy.csv, xxxx_01.yyy.csv => xxxx.yyy.csv).

In -m (manual mode), only those CSV files that you manually specify will be combined, and the results will go to stdout.
(Note that you can specify wildcards, such as "data*.csv")
""")
	parser.add_option("-m", "--manual", action="store_true", dest="manual", help="manual mode")
	parser.add_option("-a", "--autosort", action="store_true", dest="autosort", help="(auto-sort mode) use XXX.searchConfig.xml files to automatically choose which CSV files should be combined.")
	parser.add_option("-p", "--preserve", action="store_true", dest="preserve", help="keep the original search number indexes, instead of renumbering consecutively.")
	parser.add_option("-d", "--delete", action="store_true", dest="delete", help="delete the input files, after combining")

	options , filepatterns = parser.parse_args()
	if (options.manual == options.autosort):
		print "ERROR: You must specify EITHER -m (manual) or -a (autosort) mode."
		print
		parser.print_help()
		sys.exit(0)

	if (len(filepatterns) == 0):
		parser.print_help()
		sys.exit(0)
		
	filenames = []
	for fPat in filepatterns:
		filenames.extend(glob.glob(fPat))
	
	filenames = uniq(filenames)
	
	if (len(filenames) < 1):
		parser.print_help()
		sys.exit(1)

	if (options.autosort):
		autosort_and_combine(filenames, options.preserve, options.delete)
	else: # (options.manual == True)
		combine(filenames, options.preserve, options.delete, sys.stdout)
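The uniq() helper called above is not shown in this excerpt. A minimal order-preserving version might look like this (an assumption, not the project's actual code):

def uniq(items):
    # Drop duplicates while keeping each item's first position, so the
    # order in which the files were matched is preserved.
    seen = set()
    result = []
    for item in items:
        if item not in seen:
            seen.add(item)
            result.append(item)
    return result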
Example #6
def main():
    """
    Main
    """

    # Parse the command-line arguments
    usage   = "Usage: %prog [option ...]"
    version ="%%prog %s\nCopyright (C) 2014 Yuichiro SAITO." % ( PROGRAM_VERSION )
    parser  = OptionParser( usage = usage, version = version )
    parser.add_option("-w", "--warning",
                      type="string",
                      dest="warning",
                      metavar="<free>",
                      help="Exit with WARNING status if less than value of space is free. You can choice kilobyte (integer) or percent (%).")
    parser.add_option("-c", "--critical",
                      type="string",
                      dest="critical",
                      metavar="<free>",
                      help="Exit with CRITICAL status if less than value of space is free. You can choice kilobyte (integer) or percent (%).")
    parser.add_option("-V", "--verbose",
                      action="store_true",
                      dest="verbose",
                      default=False,
                      help="Verbose mode. (For debug only)")
    ( options, args ) = parser.parse_args()
    prog_name = parser.get_prog_name()

    if len( sys.argv ) < 4:
        OptionParser.print_version( parser )
        return _MemFree.STATE_UNKNOWN

    if options.verbose:
        logging.basicConfig( level=logging.DEBUG, format = LOG_FORMAT )
    else:
        logging.basicConfig( level=logging.WARNING, format = LOG_FORMAT )

    logging.debug( "START" )

    # Run the checks
    mem_free = _MemFree()
    ret = mem_free.setWarning( options.warning )
    if ret != _MemFree.STATE_OK:
        logging.debug( "EXIT" )
        return ret
    ret = mem_free.setCritical( options.critical )
    if ret != _MemFree.STATE_OK:
        logging.debug( "EXIT" )
        return ret
    ret = mem_free.checkMemFree()
    if ret != _MemFree.STATE_OK:
        logging.debug( "EXIT" )
        return ret

    logging.debug( "END" )
Example #7
File: vzmix.py Project: amr/vzmix
def main():
    from optparse import OptionParser

    cli = OptionParser(usage="%prog [options] <base-file>",
                       description="Generate OpenVZ container configuration files based on an existing file")
    cli.add_option("-m", "--multiply", dest="multiply", type="float", metavar="FACTOR",
                      help="multiply by given factor")
    cli.add_option("-a", "--add", dest="add", type="string", action="append", metavar="FILE",
                      help="add (as in sum) given file, you can add as many files as you need by specifying this option multiple times")
    cli.add_option("-s", "--substract", dest="substract", type="string", action="append", metavar="FILE",
                      help="substract given file, you can add as many files as you need by specifying this option multiple times")
    cli.add_option("-d", "--debug", dest="debug", action="store_true",
                      help="do not catch python exceptions, useful for debugging")
    (options, args) = cli.parse_args()

    if not len(args):
        cli.error("No base file provided")

    try:
        # Require Python >= 2.4
        import sys
        if sys.version_info < (2, 4):
            cli.error("Python 2.4.0 or higher is required")

        c = CTConfig(args[0])

        # Multiply
        if options.multiply:
            if options.multiply <= 0:
                cli.error("Invalid multiplication factor %s" % str(options.multiply))
        
            c.multiply(options.multiply)

        # Add
        if options.add is not None:
            for f in options.add:
                c.add(CTConfig(f))

    # Subtract
        if options.substract is not None:
            for f in options.substract:
                c.substract(CTConfig(f))

        # Output results
        print c
    except Exception, e:
        if options.debug:
            raise
        else:
            cli.print_usage()
            cli.exit(2, "%s: %s\n" % (cli.get_prog_name(), e))
Example #8
def main():
    parser = OptionParser(usage="usage: %prog [options]", version="%prog 0.1")
    parser.add_option("-d", "--dir", dest="dir",
                      action="store",
                      type="string",
                      help="component folder")
    parser.add_option("-n", "--new", dest="new",
                      action="store_true",
                      default=False,
                      help="generate new skeleton for component")

    (options, args) = parser.parse_args()

    if options.new:
        new(options.dir)
    else:
        parser.print_help(sys.stderr)
        parser.exit(2, "\n%s: error: %s\n" % (parser.get_prog_name(), "please enter options"))
Example #9
def main(argv):
    parser = OptionParser()
    parser.add_option("-a", "--article", dest="url",
                      help="Url of the article to be processed")
    parser.add_option("-o", "--ofile", dest="outputfile",
                      help="Define the output file where the result will be stored - if not set will be defined automatically")
    parser.add_option("-c", "--config", dest="config",
                      help="config file location")
    parser.add_option("-v", "--verbose", dest="verbose",action="store_true", default=False,
                      help="Print HTML parser output to stdout")

    (options, args) = parser.parse_args(argv)
    if (len(argv) < 2):
        progName = parser.get_prog_name()
        if ".py" in progName:
            print 'Please see help : python', progName, '-h'
        else:
            print 'Please see help :', progName, '-h'
        sys.exit()

    print 'Url to process "', options.url, '"'

    if not uri_validator(options.url):
        print 'Url is not valid. Exit.'
        sys.exit(1)

    configurator = Configurator(options.config)
    config = configurator.getConfig()

    textArray = TextFromHtmlExtractor.getTextArrayFromUrl(options.url, config, options.verbose)

    fileGenerator = FilePathGenerator.FilePathGenerator(options.url)
    if (options.outputfile is not None):
        file = fileGenerator.generateProvided(options.outputfile)
    else:
        file = fileGenerator.generateByUrl('./out')

    print 'Output file is "', file.name, '"'

    with TextPrettyWriter(file, config) as prettyWriter:
        prettyWriter.write(textArray)

    print 'Work completed.'
Example #10
def main():
    parser = OptionParser(usage="%prog [options] <input CSS file> <absolute URL of CSS file> " "<ouptut file>")
    parser.add_option(
        "-m",
        "--max-image-size",
        type="int",
        dest="max_image_size",
        help="The size (in bytes) which images must be under to be encoded",
    )

    options, arguments = parser.parse_args()

    if len(arguments) != 3:
        print "%s takes exactly three arguments!\n" % parser.get_prog_name()
        print parser.get_usage()
        sys.exit(1)

    css_file_path = path.abspath(path.expanduser(arguments[0]))
    css_file_url = arguments[1]
    output_file_name = path.abspath(path.expanduser(arguments[2]))

    if not path.exists(css_file_path):
        print "'%s' cannot be found - aborting conversion" % css_file_path
        sys.exit(2)

    output_dir = path.dirname(output_file_name)
    if not path.exists(output_dir):
        print "The directory to contain the output file '%s' does not exist " "- aborting conversion" % output_dir
        sys.exit(2)

    print "Converting '%s'" % css_file_path

    try:
        converted_css = make_css_images_inline(css_file_path, css_file_url, options.max_image_size)
    except CssFileError, exc:
        print str(exc)
        sys.exit(3)
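Neither make_css_images_inline nor CssFileError is shown in this excerpt. A rough sketch of the technique the example relies on, data-URI inlining of images referenced from a CSS file (hypothetical, not the project's actual implementation):

import base64
import mimetypes
import re
from urllib2 import urlopen
from urlparse import urljoin

def make_css_images_inline(css_file_path, css_file_url, max_image_size=None):
    """Rewrite url(...) references in the CSS as base64 data: URIs."""
    css = open(css_file_path).read()

    def inline(match):
        ref = match.group(1).strip('\'"')
        if ref.startswith('data:'):
            return match.group(0)          # already inlined
        data = urlopen(urljoin(css_file_url, ref)).read()
        if max_image_size and len(data) > max_image_size:
            return match.group(0)          # too large, leave the URL alone
        mime = mimetypes.guess_type(ref)[0] or 'application/octet-stream'
        return 'url(data:%s;base64,%s)' % (mime, base64.b64encode(data))

    return re.sub(r'url\(([^)]+)\)', inline, css)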
Example #11
import lalsimulation

# Other imports.
import numpy as np
from scipy import linalg
import lalinspiral.sbank.tau0tau3  # provides m1m2_to_mchirp used below

# LIGO_LW imports (module paths assumed from the names used below).
from glue.ligolw import lsctables
from glue.ligolw import table as ligolw_table
from glue.ligolw import utils as ligolw_utils
from glue.ligolw.utils import process as ligolw_process

# BAYESTAR imports.
from lalinference import bayestar
from lalinference.bayestar import ligolw  # binds the bayestar.ligolw submodule used below


# Read input file.
xmldoc = ligolw_utils.load_filename(
    infilename, contenthandler=bayestar.ligolw.LSCTablesContentHandler)

# Write process metadata to output file.
process = ligolw_process.register_to_xmldoc(xmldoc, parser.get_prog_name(),
    opts.__dict__)

# Determine the low frequency cutoff from the template bank file.
f_low = bayestar.ligolw.get_template_bank_f_low(xmldoc)

# Get the SnglInspiral table.
sngl_inspiral_table = ligolw_table.get_table(xmldoc,
    lsctables.SnglInspiralTable.tableName)

# Determine central values of intrinsic parameters.
mchirp0 = lalinspiral.sbank.tau0tau3.m1m2_to_mchirp(opts.mass1, opts.mass2)
eta0 = opts.mass1 * opts.mass2 / np.square(opts.mass1 + opts.mass2)
chi0 = 0.

# Transform to chirp times.
Example #12
        parser.error("action must be one of: %s" % " ".join(ACTIONS))

    if len(args) < 1:
        parser.error("no URI specified")
    uri = args.pop(0)

    if action in ("get", "set", "delete", "find"):
        if len(args) < 1:
            parser.error("no name specified")
        name = args.pop(0)
    else:
        name = None

    if action == "set":
        if len(args) < 1:
            parser.error("no value specified")
        value = args.pop(0)
    else:
        value = None

    try:
        main(action, uri, name, value)
    except SocketError as e:
        print("%s: error with connection to MPD: %s" % \
                (parser.get_prog_name(), e[1]), file=stderr)
    except MPDError as e:
        print("%s: error executing action: %s" % \
                (parser.get_prog_name(), e), file=stderr)

# vim: set expandtab shiftwidth=4 softtabstop=4 textwidth=79:
Example #13
                      help="do not output error messages")
    parser.add_option("-v",
                      "--verbose",
                      dest="log_level",
                      action="store_const",
                      const=3,
                      help="output warnings and informational messages")
    parser.add_option("-d",
                      "--debug",
                      dest="log_level",
                      action="store_const",
                      const=4,
                      help="output debug messages")
    parser.add_option("-f",
                      "--force",
                      dest="force",
                      action="store_true",
                      default=False,
                      help="force overwriting of existing ID3v2 "
                      "ReplayGain tags")
    prog_name = parser.get_prog_name()
    options, args = parser.parse_args()
    if len(args) < 1:
        parser.error("no files specified")
    try:
        main(prog_name, options, args)
    except KeyboardInterrupt:
        pass

# vim: set expandtab shiftwidth=4 softtabstop=4 textwidth=79:
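The top of the Example #13 fragment is cut off. A minimal sketch of the parser setup it implies, with a numeric log_level and the -q option whose help text appears as the first line of the fragment (option names and defaults are assumptions):

from optparse import OptionParser

parser = OptionParser(usage="%prog [options] FILE ...")
parser.set_defaults(log_level=2)           # assumed default: errors only
parser.add_option("-q", "--quiet",
                  dest="log_level",
                  action="store_const",
                  const=1,
                  help="do not output error messages")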
Example #14
    if action not in ACTIONS:
        parser.error("action must be one of: %s" % " ".join(ACTIONS))

    if len(args) < 1:
        parser.error("no URI specified")
    uri = args.pop(0)

    if action in ("get", "set", "delete", "find"):
        if len(args) < 1:
            parser.error("no name specified")
        name = args.pop(0)
    else:
        name = None

    if action == "set":
        if len(args) < 1:
            parser.error("no value specified")
        value = args.pop(0)
    else:
        value = None

    try:
        main(action, uri, name, value)
    except SocketError as e:
        print >> stderr, "%s: error with connection to MPD: %s" % \
                         (parser.get_prog_name(), e[1])
    except MPDError as e:
        print >> stderr, "%s: error executing action: %s" % \
                         (parser.get_prog_name(), e)

# vim: set expandtab shiftwidth=4 softtabstop=4 textwidth=79:
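Examples #12 and #14 are two versions of the same script, and both start after the option parsing. A minimal sketch of the preamble they imply; ACTIONS, the usage string, and the way the action argument is read are assumptions based on the code that follows:

from optparse import OptionParser
from sys import stderr

ACTIONS = ("get", "set", "delete", "find", "list")   # assumed set of actions

parser = OptionParser(usage="%prog [options] ACTION URI [NAME [VALUE]]")
(options, args) = parser.parse_args()

if len(args) < 1:
    parser.error("no action specified")
action = args.pop(0)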
Example #15
def cmd_call(argv):
    '''Make a single Cascade call.'''

    op = OptionParser(
        usage = '''%prog call <method> [options]
        
Calls the Cascade method <method>. Parameters to the call should be passed on
stdin as a JSON blob. The results of the call are written to stdout.'''
    )
    op.add_option(
        '-i', dest = 'input', default = None,
        help = '''set the file from which to read input (default: stdin)'''
    )
    op.add_option(
        '-u', '--url-params', dest = 'urlParams', action = 'store_true',
        default = False,
        help = '''send OAuth parameters as URL query parameters, rather than 
using an Authorization header.'''
    )
    op.add_option(
        '-k', '--oauth-consumer-key', dest = 'oauthConsumerKey', default = None,
        help = '''the OAuth consumer key; required'''
    )
    op.add_option(
        '-s', '--oauth-consumer-secret', dest = 'oauthConsumerSecret', default = None,
        help = '''the OAuth consumer secret; required'''
    )
    op.add_option(
        '-a', '--oauth-access-token', dest = 'oauthAccessToken', default = None,
        help = '''the OAuth access token to use, in the form of a query string
from oauth_token_to_query_string(); this is the preferred method of specifying
an access token, over --oauth-access-token-* (default: %default)'''
    )
    op.add_option(
        '--oauth-access-token-key', dest = 'oauthAccessTokenKey', default = None,
        help = '''the OAuth access token key; required'''
    )
    op.add_option(
        '--oauth-access-token-secret', dest = 'oauthAccessTokenSecret',
        default = None,
        help = '''the OAuth access token secret; required'''
    )
    op.add_option(
        '--oauth-timestamp', dest = 'oauthTimestamp', type = 'int',
        default = oauth.generate_timestamp(),
        help = '''the timestamp to use for OAuth signing, in epoch seconds
(default: %default)'''
    )
    op.add_option(
        '--oauth-nonce', dest = 'oauthNonce', default = oauth.generate_nonce(),
        help = '''the nonce to use for OAuth signing (default: %default)'''
    )

    opts, args = op.parse_args(argv)
    PROG_NAME = op.get_prog_name()

    # Get our method name and parameters
    if len(args) != 1:
        op.print_usage(sys.stderr)
        sys.exit(1)

    methodName = args[0]

    inputFile = sys.stdin
    if opts.input:
        inputFile = open(opts.input, 'r')
    methodParams = simplejson.loads(''.join(inputFile.readlines()))
    if opts.input:
        inputFile.close()

    # Create our OAuth consumer
    if opts.oauthConsumerKey and \
       opts.oauthConsumerSecret:
        oaConsumer = oauth.OAuthConsumer(
            opts.oauthConsumerKey,
            opts.oauthConsumerSecret
        )
    else:
        sys.stderr.write(
            '\'%s\': consumer key options not specified\n' % (PROG_NAME)
        )
        op.print_usage(sys.stderr)
        sys.exit(1)

    # Create our OAuth access token
    if opts.oauthAccessToken:
        oaTok = oauth_token_from_query_string(opts.oauthAccessToken)
    elif opts.oauthAccessTokenKey and \
            opts.oauthAccessTokenSecret:
        oaTok = oauth.OAuthToken(
            opts.oauthAccessTokenKey,
            opts.oauthAccessTokenSecret
        )
    else:
        sys.stderr.write(
            '\'%s\': access token options not specified\n' % (PROG_NAME)
        )
        op.print_usage(sys.stderr)
        sys.exit(1)

    # Make the call, overriding any OAuth parameters as specified
    jc = JSON11Client(oaConsumer, oaTok, not opts.urlParams)

    oauthDefaults = {}
    if opts.oauthNonce:
        oauthDefaults['oauth_nonce'] = opts.oauthNonce
    if opts.oauthTimestamp:
        oauthDefaults['oauth_timestamp'] = opts.oauthTimestamp

    try:
        result = jc.call(
            methodName, 
            params = methodParams,
            oauthDefaults = oauthDefaults
        )
    except CascadeHTTPError, e:
        sys.stderr.write('\'%s\': call failed %s\n' % (PROG_NAME, str(e)))
        sys.exit(1)
Example #16
    "--parset_time",
    action="store_false",
    default=True,
    help=
    "Use this option to choose between the clock_offsets from the parset files and the static values."
)
parser.add_option("-x",
                  "--extra_parameters",
                  type="str",
                  default='',
                  help="Extra parameters from the pipeline.")

(options, args) = parser.parse_args()

#----------------------------------------------------
if not parser.get_prog_name() == "frats_events.py":
    #   Program was run from within python
    station_centered = False
    stations = ['superterp']
    dump_version = 'R000'
    rfi_find = 2
    extra_parameters = ''
    parset_time = True
    verbose = 2
else:
    station_centered = options.station_centered
    stations = options.stations
    dump_version = options.dump_version
    rfi_find = options.rfi_find
    extra_parameters = options.extra_parameters
    parset_time = options.parset_time
Example #17
class Mathgenealogy:
    """
	A class for parsing the command-line information and checking it. This
	information is then used to call the correct function.
	"""

    def __init__(self):
        self.passedIDs = []
        self.passedName = None
        self.updateByID = False
        self.updateByName = False
        self.forceNaive = False
        self.ancestors = False
        self.descendants = False
        self.lca = False
        self.ie = False
        self.pk = False
        self.aa = False
        self.ad = False
        self.web = False
        self.writeFilename = None
        self.noDetails = False
        self.database = ""

    def parseInput(self):
        """
		Parse command-line information.
		"""
        self.parser = OptionParser()

        self.parser.set_usage("%prog [options] LastName or IDs")
        self.parser.set_description(
            "Update local database from the Mathematics Genealogy Project. Create a \
									Graphviz dot-file for a mathematics genealogy by querying the local database, \
									where ID is a record identifier from the Mathematics Genealogy Project. Multiple \
									IDs may be passed in case of search queries. Choose one update method OR one \
									search method with the allowed options. You need online access for updates. You \
									don't need online access for search queries."
        )

        self.parser.add_option(
            "-i",
            "--update-by-ID",
            action="store_true",
            dest="updateByID",
            default=False,
            help="Update method: Update the local database entries of the entered ID(s) (and of the \
							   descendants and/or ancestors). INPUT: ID(s)",
        )

        self.parser.add_option(
            "-n",
            "--update-by-name",
            action="store_true",
            dest="updateByName",
            default=False,
            help="Update method: Find the corresponding ID in the online database of a \
							   mathematician. Besides, the tool will also update the records of all found \
							   mathematicians. INPUT: last name of one mathematician",
        )

        self.parser.add_option(
            "-f",
            "--force",
            action="store_true",
            dest="forceNaive",
            default=False,
            help="Force the tool to use naive update logic, which downloads all records of every \
							   mathematician you want to update without comparing the online number of descendants \
							   with the stored local one and stores every entry in the local database (replaces \
							   existing ones). Only available for update methods, not for search methods!",
        )

        self.parser.add_option(
            "-a",
            "--with-ancestors",
            action="store_true",
            dest="ancestors",
            default=False,
            help="Retrieve ancestors of IDs and include in graph. Only available for update-by-ID!",
        )

        self.parser.add_option(
            "-d",
            "--with-descendants",
            action="store_true",
            dest="descendants",
            default=False,
            help="Retrieve descendants of IDs and include in graph. Only available for update-by-ID!",
        )

        self.parser.add_option(
            "-w",
            "--web-front-end",
            action="store_true",
            dest="web",
            default=False,
            help="Don't use! Needed for web front-end",
        )

        self.parser.add_option(
            "-L",
            "--least-common-advisor",
            action="store_true",
            dest="lca",
            default=False,
            help="Search method: Search for the lowest common advisor of an arbitrary number of \
							   mathematicians. INPUT: IDs of the mathematicians separated by spaces",
        )

        self.parser.add_option(
            "-A",
            "--all-ancestors",
            action="store_true",
            dest="aa",
            default=False,
            help="Search method: Search for all ancestors of one mathematician. INPUT: ID of one \
							   mathematician",
        )

        self.parser.add_option(
            "-D",
            "--all-descendants",
            action="store_true",
            dest="ad",
            default=False,
            help="Search method: Search for all descendants of one mathematician. INPUT: ID of one \
							   mathematician",
        )

        self.parser.add_option(
            "-T",
            "--use-interval-encoding",
            action="store_true",
            dest="ie",
            default=False,
            help="Use interval encoding to compute the LSCA. Works only together with '-L'",
        )

        self.parser.add_option(
            "-P",
            "--create-pickle-file",
            action="store_true",
            dest="pk",
            default=False,
            help="Create or replace pickle-file to use interval encoding. Works only together with '-T'",
        )

        self.parser.add_option(
            "-s",
            "--save-to-file",
            dest="filename",
            metavar="FILE",
            default=None,
            help="Write output to a dot-file [default: stdout]. Only available for search methods, \
							   not for update methods!",
        )

        self.parser.add_option(
            "-b",
            "--use-different-database",
            action="store",
            type="string",
            dest="database",
            default="MGDB",
            help="Define the SQLite database name and/or path. This database will be created, \
							   updated and/or queried.",
        )

        self.parser.add_option(
            "-u",
            "--no-details",
            action="store_true",
            dest="noDetails",
            default=False,
            help="Don't add university for each mathematician to the DOT-file.",
        )

        self.parser.add_option(
            "-V", "--version", action="store_true", dest="print_version", default=False, help="Print version and exit."
        )

        (options, args) = self.parser.parse_args()

        self.updateByID = options.updateByID
        self.updateByName = options.updateByName
        self.forceNaive = options.forceNaive
        self.ancestors = options.ancestors
        self.descendants = options.descendants
        self.lca = options.lca
        self.ie = options.ie
        self.pk = options.pk
        self.aa = options.aa
        self.ad = options.ad
        self.web = options.web
        self.writeFilename = options.filename
        self.noDetails = options.noDetails
        self.database = options.database

        if options.print_version:
            print(u"Math-Genealogy-DB Version 1.0".encode("utf-8"))
            self.parser.exit()

        # Check for no arguments
        if len(args) == 0:
            raise SyntaxError("%s: error: no IDs or no last name passed" % (self.parser.get_prog_name()))

        # Check for the correct combination of options
        if (self.updateByName or self.updateByID or self.forceNaive or self.ancestors or self.descendants) and (
            self.lca or self.aa or self.ad or (self.writeFilename is not None)
        ):
            raise SyntaxError("%s: error: invalid combination of options" % (self.parser.get_prog_name()))

        if self.updateByName and (self.ancestors or self.descendants):
            raise SyntaxError("%s: error: invalid combination of options" % (self.parser.get_prog_name()))

        if self.updateByName and self.updateByID:
            raise SyntaxError("%s: error: you can only choose one update method" % (self.parser.get_prog_name()))

        if self.lca and (self.aa or self.ad):
            raise SyntaxError("%s: error: you can only choose one search method" % (self.parser.get_prog_name()))

        if not (self.updateByName or self.updateByID or self.lca or self.aa or self.ad):
            raise SyntaxError(
                "%s: error: you have to choose one update method or one search method" % (self.parser.get_prog_name())
            )

        # Check for the correct content (updateByName may contain anything)
        if not self.updateByName:
            for arg in args:
                for digit in arg:
                    if digit not in string.digits:
                        raise SyntaxError("%s: error: all arguments have to be numbers" % (self.parser.get_prog_name()))

        # Check for the correct number of arguments
        if self.aa or self.ad:
            if len(args) != 1:
                raise SyntaxError("%s: error: enter only one ID" % (self.parser.get_prog_name()))

        if self.updateByName:
            if len(args) != 1:
                raise SyntaxError("%s: error: enter only one Name" % (self.parser.get_prog_name()))

        if self.updateByID:
            if len(args) < 1:
                raise SyntaxError("%s: error: you have to enter at least one ID" % (self.parser.get_prog_name()))

        if self.lca:
            if len(args) < 2:
                raise SyntaxError(
                    "%s: error: you have to enter at least two IDs to execute this search method"
                    % (self.parser.get_prog_name())
                )

        # If no error occurred, then the options and arguments are correct. Hence, we can continue:
        # Read the arguments
        if self.updateByName:
            self.passedName = str(args[0])

        else:
            for arg in args:
                self.passedIDs.append(int(arg))

        databaseConnector = databaseConnection.DatabaseConnector()
        connector = databaseConnector.connectToSQLite(self.database)

        # Call the correct function depending on the options which have been passed
        if self.updateByName:
            updater = update.Updater(connector, self.forceNaive, self.web)
            updater.findID(self.passedName)

        if self.updateByID:
            updater = update.Updater(connector, self.forceNaive, self.web)
            updater.updateByID(self.passedIDs, self.ancestors, self.descendants)

        if self.lca:
            if self.ie:  # BL: ... using interval encoding
                if self.pk:  # BL: ... pk = true means: create pickle file
                    createPickle = intervalEncoding.coding(connector)
                    createPickle.mainfun()

                searcher = intervalQuery.query()
                searcher.LCA(self.passedIDs)

            else:
                searcher = search.Searcher(connector, self.writeFilename, self.noDetails)
                searcher.lca(self.passedIDs)

        if self.aa and not self.ad:
            searcher = search.Searcher(connector, self.writeFilename, self.noDetails)
            searcher.allAncestors(self.passedIDs)

        if self.ad and not self.aa:
            searcher = search.Searcher(connector, self.writeFilename, self.noDetails)
            searcher.allDescendants(self.passedIDs)

        if self.aa and self.ad:
            searcher = search.Searcher(connector, self.writeFilename, self.noDetails)
            searcher.allAncestorsDescendants(self.passedIDs)

        connection = connector[0]
        cursor = connector[1]

        cursor.close()
        connection.close()
Example #18
    # perform sky localization
    log.info("starting sky localization")
    sky_map, epoch, elapsed_time, instruments = gracedb_sky_map(
        coinc_file, psd_file, "TaylorF2threePointFivePN", 10)
    log.info("sky localization complete")

    # upload FITS file
    fitsdir = tempfile.mkdtemp()
    try:
        fitspath = os.path.join(fitsdir, "skymap.fits.gz")
        fits.write_sky_map(
            fitspath,
            sky_map,
            gps_time=float(epoch),
            creator=parser.get_prog_name(),
            objid=str(graceid),
            url='https://gracedb.ligo.org/events/{0}'.format(graceid),
            runtime=elapsed_time,
            instruments=instruments,
            origin='LIGO/Virgo',
            nest=True)
        gracedb.writeLog(graceid,
                         "INFO:BAYESTAR:uploaded sky map",
                         filename=fitspath,
                         tagname="sky_loc")
    finally:
        shutil.rmtree(fitsdir)
except:
    # Produce log message for any otherwise uncaught exception
    log.exception("sky localization failed")
Example #19
class Geneagrapher:
	"""
	A class for building Graphviz "dot" files for math genealogies
	extracted from the Mathematics Genealogy Project website.
	"""
	def __init__(self):
		self.graph = GGraph.Graph()
		self.leaf_ids = []
		self.get_ancestors = False
		self.get_descendants = False
		self.verbose = False
		self.write_filename = None

	def parseInput(self):
		"""
		Parse command-line information.
		"""
		self.parser = OptionParser()

		self.parser.set_usage("%prog [options] ID ...")
		self.parser.set_description('Create a Graphviz "dot" file for a mathematics genealogy, where ID is a record identifier from the Mathematics Genealogy Project. Multiple IDs may be passed.')

		self.parser.add_option("-f", "--file", dest="filename",
				       help="write output to FILE [default: stdout]", metavar="FILE", default=None)
		self.parser.add_option("-a", "--with-ancestors", action="store_true", dest="get_ancestors",
				       default=False, help="retrieve ancestors of IDs and include in graph")
		self.parser.add_option("-d", "--with-descendants", action="store_true", dest="get_descendants",
				       default=False, help="retrieve descendants of IDs and include in graph")
		self.parser.add_option("--verbose", "-v", action="store_true", dest="verbose", default=False,
				       help="list nodes being retrieved")
		self.parser.add_option("--version", "-V", action="store_true", dest="print_version", default=False,
				       help="print version and exit")

		(options, args) = self.parser.parse_args()
		
		if options.print_version:
			print "Geneagrapher Version 0.2.1-r2"
			self.parser.exit()
		
		if len(args) == 0:
			raise SyntaxError("%s: error: no record IDs passed" % (self.parser.get_prog_name()))

		self.get_ancestors = options.get_ancestors
		self.get_descendants = options.get_descendants
		self.verbose = options.verbose
		self.write_filename = options.filename
		for arg in args:
			self.leaf_ids.append(int(arg))
		
	def buildGraph(self):
		"""
		Populate the graph member by grabbing the mathematician
		pages and extracting relevant data.
		"""
		leaf_grab_queue = list(self.leaf_ids)
		ancestor_grab_queue = []
		descendant_grab_queue = []

		# Grab "leaf" nodes.
		while len(leaf_grab_queue) != 0:
			id = leaf_grab_queue.pop()
			if not self.graph.hasNode(id):
				# Then this information has not yet been grabbed.
				grabber = grab.Grabber(id)
				if self.verbose:
					print "Grabbing record #%d" % (id)
				try:
					[name, institution, year, advisors, descendants] = grabber.extractNodeInformation()
				except ValueError:
					# The given id does not exist in the Math Genealogy Project's database.
					raise
				self.graph.addNode(name, institution, year, id, advisors, descendants, True)
				if self.get_ancestors:
					ancestor_grab_queue += advisors
				if self.get_descendants:
					descendant_grab_queue += descendants

		# Grab ancestors of leaf nodes.
		if self.get_ancestors:
			while len(ancestor_grab_queue) != 0:
				id = ancestor_grab_queue.pop()
				if not self.graph.hasNode(id):
					# Then this information has not yet been grabbed.
					grabber = grab.Grabber(id)
					if self.verbose:
						print "Grabbing record #%d" % (id)
					try:
						[name, institution, year, advisors, descendants] = grabber.extractNodeInformation()
					except ValueError:
						# The given id does not exist in the Math Genealogy Project's database.
						raise
					self.graph.addNode(name, institution, year, id, advisors, descendants)
					ancestor_grab_queue += advisors
						
		# Grab descendants of leaf nodes.
		if self.get_descendants:
			while len(descendant_grab_queue) != 0:
				id = descendant_grab_queue.pop()
				if not self.graph.hasNode(id):
					# Then this information has not yet been grabbed.
					grabber = grab.Grabber(id)
					if self.verbose:
						print "Grabbing record #%d" % (id)
					try:
						[name, institution, year, advisors, descendants] = grabber.extractNodeInformation()
					except ValueError:
						# The given id does not exist in the Math Genealogy Project's database.
						raise
					self.graph.addNode(name, institution, year, id, advisors, descendants)
					descendant_grab_queue += descendants
					
	def generateDotFile(self):
		dotfile = self.graph.generateDotFile(self.get_ancestors, self.get_descendants)
		if self.write_filename is not None:
			outfile = open(self.write_filename, "w")
			outfile.write(dotfile)
			outfile.close()
		else:
			print dotfile
Example #20
def main():
    from devflow.version import __version__  # pylint: disable=E0611,F0401
    parser = OptionParser(usage="usage: %prog [options] mode",
                          version="devflow %s" % __version__,
                          add_help_option=False)
    parser.add_option("-h", "--help",
                      action="store_true",
                      default=False,
                      help="show this help message")
    parser.add_option("-k", "--keep-repo",
                      action="store_true",
                      dest="keep_repo",
                      default=False,
                      help="Do not delete the cloned repository")
    parser.add_option("-b", "--build-dir",
                      dest="build_dir",
                      default=None,
                      help="Directory to store created pacakges")
    parser.add_option("-r", "--repo-dir",
                      dest="repo_dir",
                      default=None,
                      help="Directory to clone repository")
    parser.add_option("-d", "--dirty",
                      dest="force_dirty",
                      default=False,
                      action="store_true",
                      help="Do not check if working directory is dirty")
    parser.add_option("-c", "--config-file",
                      dest="config_file",
                      help="Override default configuration file")
    parser.add_option("--no-sign",
                      dest="sign",
                      action="store_false",
                      default=True,
                      help="Do not sign the packages")
    parser.add_option("--key-id",
                      dest="keyid",
                      help="Use this keyid for gpg signing")
    parser.add_option("--dist",
                      dest="dist",
                      default=None,
                      help="Force distribution in Debian changelog")
    parser.add_option("-S", "--source-only",
                      dest="source_only",
                      default=False,
                      action="store_true",
                      help="Specifies a source-only build, no binary packages"
                           " need to be made.")
    parser.add_option("--debian-branch",
                      dest="debian_branch",
                      default=None,
                      help="Use this debian branch, instead of"
                           "auto-discovering the debian branch to use")
    parser.add_option("--push-back",
                      dest="push_back",
                      default=False,
                      action="store_true",
                      help="Automatically push branches and tags to repo.")
    parser.add_option("--color",
                      dest="color_output",
                      default="auto",
                      help="Enable/disable colored output. Default mode is" +
                           " auto, available options are yes/no")

    (options, args) = parser.parse_args()

    if options.color_output == "yes":
        use_colors = True
    elif options.color_output == "no":
        use_colors = False
    else:
        if sys.stdout.isatty():
            use_colors = True
        else:
            use_colors = False

    red = lambda x: x
    green = lambda x: x

    if use_colors:
        try:
            import colors
            red = colors.red
            green = colors.green
        except (ImportError, AttributeError):
            pass

    print_red = lambda x: sys.stdout.write(red(x) + "\n")
    print_green = lambda x: sys.stdout.write(green(x) + "\n")

    if options.help:
        print_help(parser.get_prog_name())
        parser.print_help()
        return

    # Get build mode
    try:
        mode = args[0]
    except IndexError:
        mode = utils.get_build_mode()
    if mode not in AVAILABLE_MODES:
        raise ValueError(red("Invalid argument! Mode must be one: %s"
                         % ", ".join(AVAILABLE_MODES)))

    # Load the repository
    original_repo = utils.get_repository()

    # Check that repository is clean
    toplevel = original_repo.working_dir
    if original_repo.is_dirty() and not options.force_dirty:
        raise RuntimeError(red("Repository %s is dirty." % toplevel))

    # Get packages from configuration file
    config = utils.get_config(options.config_file)
    packages = config['packages'].keys()
    print_green("Will build the following packages:\n" + "\n".join(packages))

    # Get current branch name and type and check if it is a valid one
    branch = original_repo.head.reference.name
    branch = utils.undebianize(branch)
    branch_type_str = utils.get_branch_type(branch)

    if branch_type_str not in BRANCH_TYPES.keys():
        allowed_branches = ", ".join(BRANCH_TYPES.keys())
        raise ValueError("Malformed branch name '%s', cannot classify as"
                         " one of %s" % (branch, allowed_branches))

    # Fix needed environment variables
    v = utils.get_vcs_info()
    os.environ["DEVFLOW_BUILD_MODE"] = mode
    os.environ["DEBFULLNAME"] = v.name
    os.environ["DEBEMAIL"] = v.email

    # Check that base version file and branch are correct
    versioning.get_python_version()

    # Get the debian branch
    if options.debian_branch:
        debian_branch = options.debian_branch
    else:
        debian_branch = utils.get_debian_branch(branch)
    origin_debian = "origin/" + debian_branch

    # Clone the repo
    repo_dir = options.repo_dir or create_temp_directory("df-repo")
    repo_dir = os.path.abspath(repo_dir)
    repo = original_repo.clone(repo_dir, branch=branch)
    print_green("Cloned repository to '%s'." % repo_dir)

    build_dir = options.build_dir or create_temp_directory("df-build")
    build_dir = os.path.abspath(build_dir)
    print_green("Build directory: '%s'" % build_dir)

    # Create the debian branch
    repo.git.branch(debian_branch, origin_debian)
    print_green("Created branch '%s' to track '%s'" % (debian_branch,
                origin_debian))

    # Go to debian branch
    repo.git.checkout(debian_branch)
    print_green("Changed to branch '%s'" % debian_branch)

    # Merge with starting branch
    repo.git.merge(branch)
    print_green("Merged branch '%s' into '%s'" % (branch, debian_branch))

    # Compute python and debian version
    cd(repo_dir)
    python_version = versioning.get_python_version()
    debian_version = versioning.\
        debian_version_from_python_version(python_version)
    print_green("The new debian version will be: '%s'" % debian_version)

    # Update the version files
    versioning.update_version()

    if not options.sign:
        sign_tag_opt = None
    elif options.keyid:
        sign_tag_opt = "-u=%s" % options.keyid
    elif mode == "release":
        sign_tag_opt = "-s"
    else:
        sign_tag_opt = None

    # Tag branch with python version
    branch_tag = python_version
    tag_message = "%s version %s" % (mode.capitalize(), python_version)
    try:
        repo.git.tag(branch_tag, branch, sign_tag_opt, "-m %s" % tag_message)
    except GitCommandError:
        # Tag may already exist, if only the debian branch has changed
        pass
    upstream_tag = "upstream/" + branch_tag
    repo.git.tag(upstream_tag, branch)

    # Update changelog
    dch = git_dch("--debian-branch=%s" % debian_branch,
                  "--git-author",
                  "--ignore-regex=\".*\"",
                  "--multimaint-merge",
                  "--since=HEAD",
                  "--new-version=%s" % debian_version)
    print_green("Successfully ran '%s'" % " ".join(dch.cmd))

    if options.dist is not None:
        distribution = options.dist
    elif mode == "release":
        distribution = utils.get_distribution_codename()
    else:
        distribution = "unstable"

    f = open("debian/changelog", 'r+')
    lines = f.readlines()
    lines[0] = lines[0].replace("UNRELEASED", distribution)
    lines[2] = lines[2].replace("UNRELEASED", "%s build" % mode)
    f.seek(0)
    f.writelines(lines)
    f.close()

    if mode == "release":
        call("vim debian/changelog")

    # Add changelog to INDEX
    repo.git.add("debian/changelog")
    # Commit Changes
    repo.git.commit("-s", "debian/changelog",
                    m="Bump version to %s" % debian_version)
    # Tag debian branch
    debian_branch_tag = "debian/" + utils.version_to_tag(debian_version)
    tag_message = "%s version %s" % (mode.capitalize(), debian_version)
    if mode == "release":
        repo.git.tag(debian_branch_tag, sign_tag_opt, "-m %s" % tag_message)

    # Create debian packages
    cd(repo_dir)
    version_files = []
    for _, pkg_info in config['packages'].items():
        if pkg_info.get("version_file"):
            version_files.extend(pkg_info.as_list('version_file'))

    # Add version.py files to repo
    repo.git.add("-f", *version_files)

    # Export version info to the debuild environment
    os.environ["DEB_DEVFLOW_DEBIAN_VERSION"] = debian_version
    os.environ["DEB_DEVFLOW_VERSION"] = python_version
    build_cmd = "git-buildpackage --git-export-dir=%s"\
                " --git-upstream-branch=%s --git-debian-branch=%s"\
                " --git-export=INDEX --git-ignore-new -sa"\
                " --source-option=--auto-commit"\
                " --git-upstream-tag=%s"\
                % (build_dir, branch, debian_branch, upstream_tag)
    if options.source_only:
        build_cmd += " -S"
    if not options.sign:
        build_cmd += " -uc -us"
    elif options.keyid:
        build_cmd += " -k\"'%s'\"" % options.keyid
    call(build_cmd)

    # Remove cloned repo
    if mode != 'release' and not options.keep_repo:
        print_green("Removing cloned repo '%s'." % repo_dir)
        rm("-r", repo_dir)

    # Print final info
    info = (("Version", debian_version),
            ("Upstream branch", branch),
            ("Upstream tag", branch_tag),
            ("Debian branch", debian_branch),
            ("Debian tag", debian_branch_tag),
            ("Repository directory", repo_dir),
            ("Packages directory", build_dir))
    print_green("\n".join(["%s: %s" % (name, val) for name, val in info]))

    # Print help message
    if mode == "release":
        origin = original_repo.remote().url
        repo.create_remote("original_origin", origin)
        print_green("Created remote 'original_origin' for the repository '%s'"
                    % origin)

        print_green("To update repositories '%s' and '%s' go to '%s' and run:"
                    % (toplevel, origin, repo_dir))
        for remote in ['origin', 'original_origin']:
            objects = [debian_branch, branch_tag, debian_branch_tag]
            print_green("git push %s %s" % (remote, " ".join(objects)))
        if options.push_back:
            objects = [debian_branch, branch_tag, debian_branch_tag]
            repo.git.push("origin", *objects)
            print_green("Automatically updated origin repo.")
Example #21
    if verbose:
        os.system("grep 'Crystal family' '%s'" % (jobname+'.gulp.out'))
        os.system("grep 'Space group' '%s'" % (jobname+'.gulp.out'))

if __name__ == "__main__":

    # Parse cmd line args
    parser = OptionParser( usage = "usage: %prog [options] filename.cif" )
    pdf_file = os.path.splitext(os.path.basename(sys.argv[0]))[0] + '.pdf'
    parser.add_option('-v', '--verbose', action='store_true', dest = 'verbose', default = False, help = 'increase verbosity')
    parser.add_option('-f', action='store_true', dest = 'auto_fractions', default = False, help = 'try to identify fractional numbers (special positions) from coordinates with less than six decimals.')
    parser.add_option('-e', '--engine', dest = 'engine', default = 'ase', help = 'the backend to use for the conversion (gulp, ase or cctbx)')
    (options, args) = parser.parse_args()
    if len(args) != 1:
        print "No filename given. Run %s -h for help" % (parser.get_prog_name())
        sys.exit(1)
    
    filename = args[0]
    if filename[-4:] == '.xyz':
        jobname = filename[0:-4]
        #cif2vaspUsingCCTBX(jobname)
        xyz2vaspUsingGULP(jobname, verbose = options.verbose, auto_fractions = options.auto_fractions)
    elif filename[-4:] == '.cif':
        jobname = filename[0:-4]
        if options.engine == 'cctbx':
            cif2vaspUsingCCTBX(jobname)
        elif options.engine == 'gulp':
            cif2vaspUsingGULP(jobname, verbose = options.verbose, auto_fractions = options.auto_fractions)
        elif options.engine == 'ase':
            cif2vaspUsingASE(jobname)
Example #22
class Project(object):
    environment_class = mirbuild.environment.Environment
    test_runner_class = mirbuild.test.BoostTestRunner
    default_dependency_class = None
    nocache_commands = set('meta'.split())
    noapply_commands = set('meta clean realclean distclean'.split())

    def __init__(self, name, **opts):
        self.__configurecache = mirbuild.cache.Cache(filename='configure.json')
        self.__options = opts
        self.__tests = None
        self.__versions = []
        self.__plugins = []
        self.__install = []
        self.__packagers = {}
        self.__test_runners = {}

        try:
            self.__configurecache.load()
        except Exception:
            # if we can't load the cache, so be it
            pass

        self.__parser = OptionParser(add_help_option=False)
        self.__general_options = OptionGroup(self.__parser, "General Options")
        self.__parser.add_option_group(self.__general_options)

        self.opt = LocalOptions('general')
        self.__configurecache.register(self.opt)

        if self.has_build_configs:
            self.add_option('-c|--configuration',
                            dest='configuration',
                            type='string',
                            defaultstr=False,
                            metavar='CFG',
                            help='selected build configuration')

        self.add_bool_option('-h|--help',
                             dest='help',
                             help='show this help message and exit',
                             cache=False)

        for opt, dest, help in [
            ('-d|--debug', 'debug', 'debug build.py execution'),
            ('-q|--quiet', 'quiet', 'be quiet'),
            ('-v|--verbose', 'verbose', 'verbose compiler/packaging output'),
            ('--trace', 'trace',
             'trace build process (if supported by the builder)'),
            ('--nodeps', 'nodeps', "don't use dependencies from .mirbuildrc"),
            ('--noconfig', 'noconfig', "don't use .mirbuildrc files at all"),
            ('--noenv', 'noenv', "don't honour environment variables"),
            ('--called-by-packager', 'called_by_packager',
             "option indicating that build.py is being invoked by a mirbuild packager"
             ),
        ]:
            self.add_bool_option(opt, dest=dest, help=help, cache=False)
            for o in opt.split('|'):
                if o in sys.argv:
                    self.opt.set_value(dest, True)

        self.__env = self.environment_class(name)
        self.__env.set_options(self.opt)
        self._deps = mirbuild.dependency.Dependencies(
            self.__env, self.default_dependency_class)

        if not self.opt.noconfig:
            self.__env.read_config()
            if self.__env.has('build', 'prefix'):
                self.opt.state_merge(
                    {'prefix': self.__env.get('build', 'prefix')})

        self.opt.ensure_value('jobs',
                              self.__env.get('build', 'parallel', 'auto'))
        self.add_option('-j|--jobs',
                        dest='jobs',
                        type='string',
                        metavar='NUM',
                        cache=False,
                        help='number of parallel jobs to execute if possible')

        self.add_option('--prefix',
                        dest='prefix',
                        type='string',
                        default=self.default_install_path,
                        metavar='PATH',
                        help='install prefix for this project')
        self.add_option('--install-destdir',
                        dest='install_destdir',
                        type='string',
                        metavar='PATH',
                        help='install files to this path')

        self.add_option('-b|--build-mode',
                        dest='build_mode',
                        type='choice',
                        choices=['in', 'out'],
                        default='in',
                        metavar="MODE",
                        help='[in|out] source build mode')

        for o in [('-I|--include-path', 'include',
                   'C_INCLUDE_PATH CPLUS_INCLUDE_PATH'.split()),
                  ('-L|--library-path', 'library', 'LIBRARY_PATH'.split())]:
            var = o[1] + '_path'
            if hasattr(self, 'add_' + var):
                path = []
                if not self.opt.noenv:
                    for e in o[2]:
                        path += [
                            x for x in os.environ.get(e, '').split(
                                os.path.pathsep) if x
                        ]
                path += [
                    x for x in self.env.get('build', var, '').split(
                        os.path.pathsep) if x
                ]
                self.opt.state_merge({var: path})
                self.add_option(o[0],
                                type='string',
                                dest=var,
                                multi=True,
                                metavar='PATH',
                                help='use additional ' + o[1] + ' path')

    @property
    def _configure_cache(self):
        return self.__configurecache

    @property
    def _option_parser(self):
        return self.__parser

    @property
    def default_install_path(self):
        return '/usr/local'

    @property
    def ident(self):
        return self.__parser.get_prog_name()

    @property
    def options(self):
        return self.__options

    @property
    def env(self):
        return self.__env

    @property
    def commands(self):
        return self.methlist(r'run_(\w+)')

    @property
    def build_configurations(self):
        return self.methlist(r'configure_(\w+)')

    @property
    def tests(self):
        return self.__tests

    @property
    def packager(self):
        return self.__packagers[self.opt.packager]

    @property
    def project_name(self):
        return self.__env.project_name

    @property
    def build_config(self):
        return getattr(self.opt, 'configuration', None)

    @property
    def has_thrift_dependency(self):
        # We need to know if there's a thrift dependency as we'll need to
        # configure some additional things for Visual Studio if we do
        # [mhx] We could cache the result, but I'd rather not bother with that now...
        return self._deps.any_is_a(mirbuild.ThriftDependency)

    def prefixpath(self, path):
        if os.path.isabs(path):
            return path
        else:
            return os.path.join(self.opt.prefix, path)

    def installpath(self, path, isdir=False, mkdir=False):
        destdir = path if isdir else os.path.split(path)[0]
        if os.path.isabs(destdir):
            # An absolute destination only needs re-rooting when an
            # install destdir was requested.
            if self.opt.install_destdir is not None:
                destdir = os.path.join(self.opt.install_destdir,
                                       rootrelpath(destdir))
        else:
            if self.opt.install_destdir is not None:
                destdir = os.path.join(self.opt.install_destdir,
                                       rootrelpath(self.opt.prefix), destdir)
            else:
                destdir = os.path.join(self.opt.prefix, destdir)

        if mkdir:
            try:
                os.makedirs(destdir)
            except OSError as ex:
                if ex.errno != errno.EEXIST:
                    raise

        return destdir if isdir else os.path.join(destdir,
                                                  os.path.split(path)[1])

    def __usage(self):
        usage = 'Usage: %prog [Options] <Command>'
        usage += '\n\nCommands: {0}'.format(', '.join(self.commands))
        if self.has_build_configs:
            usage += '\n\nBuild Configurations: {0}'.format(', '.join(map(lambda x: (x + ' [*]') \
                   if x == self.__default_build_config() else x, self.build_configurations)))
        return usage

    def __default_build_config(self):
        if not self.has_build_configs:
            return None
        if self.opt.configuration is not None:
            return self.opt.configuration
        try:
            return self.env.get('build', 'configuration')
        except Exception:
            pass
        if 'release' in self.build_configurations:
            return 'release'
        return self.build_configurations[0]

    def methlist(self, match):
        names = []
        run = re.compile(match)
        for method in dir(self):
            m = run.match(method)
            if m is not None and getattr(self, method) is not None:
                names.append(m.group(1))
        names.sort()
        return names

    def add_option(self, *args, **kw):
        self.opt.add_option(self.__general_options, *args, **kw)

    def add_bool_option(self, *args, **kw):
        self.opt.add_bool_option(self.__general_options, *args, **kw)

    def depends(self, *deps):
        self._deps.add(*deps)

    def test(self, *args, **kwargs):
        filt = kwargs.get('filter', lambda x: True)
        recurse = kwargs.get('recurse', True)
        runner_class = kwargs.get('runner', self.test_runner_class)
        test_builders = []
        if self.__tests is None:
            self.__tests = []

        for arg in args:
            if isinstance(arg, mirbuild.test.TestBuilder):
                test_builders.append(arg)
            elif isinstance(arg, basestring):
                dirs = []
                for e in glob.glob(arg):
                    if os.path.isdir(e):
                        if (filt(e) and
                                self.test_builder_class.looks_like_test_dir(e)):
                            dirs.append(e)
                        if recurse:
                            for root, ds, fs in os.walk(e):
                                for d in ds:
                                    path = os.path.join(root, d)
                                    if (filt(path) and
                                            self.test_builder_class.looks_like_test_dir(path)):
                                        dirs.append(path)
                dirs.sort()
                for d in dirs:
                    test_builders.append(self.test_builder_class(self.env, d))
            else:
                test_builders.append(self.test_builder_class(self.env, *arg))

        if test_builders:
            if runner_class.name not in self.__test_runners:
                self.__test_runners[runner_class.name] = runner_class(self.env)
            runner = self.__test_runners[runner_class.name]

            for tb in test_builders:
                self.__tests.append(
                    mirbuild.test.TestWrapper(builder=tb, runner=runner))

    def package(self, *args):
        for arg in args:
            assert isinstance(arg, mirbuild.packaging.Packaging)
            assert not self.__packagers.has_key(arg.name)
            self.__packagers[arg.name] = arg

    def install(self, source, destdir, glob=True):
        i = InstallRule()
        i.source = [source] if isinstance(source, basestring) else source
        i.destdir = destdir
        i.glob = glob
        self.__install.append(i)

    def __install_files(self):
        for i in self.__install:
            destdir = self.installpath(i.destdir, isdir=True, mkdir=True)

            source = []

            for src in i.source:
                if i.glob:
                    source += glob.glob(src)
                else:
                    source.append(src)

            for src in source:
                dst = os.path.join(destdir, os.path.split(src)[1])
                self.env.vsay('installing {0} -> {1}'.format(src, dst))
                if os.path.isdir(src):
                    shutil.copytree(src, dst, symlinks=True)
                else:
                    shutil.copy2(src, dst)

    def version(self,
                file=os.path.join('src', 'version.h'),
                info=None,
                **opts):
        if isinstance(file, basestring):
            file = mirbuild.version.VersionFileFactory.create(
                self.env, file, **opts)
        if info is None:
            info = mirbuild.version.VersionInfoFactory.create()
        assert isinstance(file, mirbuild.version.VersionFile)
        assert isinstance(info, mirbuild.version.VersionInfo)
        self.__versions.append({'file': file, 'info': info})

    def add_plugin(self, *args):
        for arg in args:
            assert isinstance(arg, mirbuild.plugin.Plugin)
            self.__plugins.append(arg)

    def _run_plugins(self, meth, reverse=False):
        for plugin in reversed(self.__plugins) if reverse else self.__plugins:
            self.env.dbg("running plugin method {0}.{1}".format(
                plugin.__class__.__name__, meth))
            getattr(plugin, meth)(self)

    @property
    def has_build_configs(self):
        return len(self.build_configurations) > 0

    def __expand_command(self, raw):
        if raw in self.commands:
            return raw
        cand = [cmd for cmd in self.commands if cmd.startswith(raw)]
        if len(cand) == 1:
            return cand[0]
        raise RuntimeError('{0} command "{1}".'.format(
            'Invalid' if not cand else 'Ambiguous', raw))

    def run_has(self, what, arg):
        if what in ['command']:
            raise SystemExit(0 if arg in self.commands else 1)
        if what in ['config', 'configuration']:
            raise SystemExit(0 if arg in self.build_configurations else 1)
        raise SystemExit(2)

    def run(self):
        try:
            if self.__tests is None:
                self.test('test')

            if not self.__packagers:
                self.package(
                    *mirbuild.packaging.PackagingFactory.create_all(self.env))

            dc = mirbuild.cache.Cache('dependencies')
            self._deps.set_cache(dc)
            self.__configurecache.register(dc)

            rc = mirbuild.cache.Cache('test_runners')
            for runner in self.__test_runners.itervalues():
                try:
                    runner.set_cache(rc)
                except Exception as ex:
                    sys.stderr.write(str(ex) + '\n')
            self.__configurecache.register(rc)

            self._deps.add_options(self.__parser, nomerge=self.opt.nodeps)

            for name, runner in self.__test_runners.iteritems():
                if self.env.has_section('test:' + name):
                    runner.state_merge(self.env.get_section('test:' + name))
                runner.add_options(self.__parser)

            if self.__packagers:
                self.add_option('--packager',
                                dest='packager',
                                type='choice',
                                choices=self.__packagers.keys(),
                                defaultstr=len(self.__packagers) == 1,
                                default=self.__packagers.keys()[0]
                                if len(self.__packagers) == 1 else None,
                                metavar='PKG',
                                help='selected packager')
                for name, pkg in self.__packagers.iteritems():
                    sec = 'packaging:' + name
                    if self.env.has_section(sec):
                        pkg.state_merge(self.env.get_section(sec))
                    pkg.add_options(self.__parser)
                self.run_package = self.do_package

            self.__parser.set_usage(self.__usage())

            args = self.__parser.parse_args()[1]

            if self.has_build_configs:
                self.opt.ensure_value('configuration',
                                      self.__default_build_config())

            if self.opt.help or len(args) < 1:
                self.__parser.print_help()
                raise SystemExit(0)

            if self.has_build_configs and self.build_config not in self.build_configurations:
                raise RuntimeError('Invalid build configuration "{0}".'.format(
                    self.build_config))

            command = self.__expand_command(args[0])
            command_method = getattr(self, 'run_' + command)

            if command not in self.noapply_commands:
                self.__apply_paths()
                self._deps.apply(self)

            if command not in self.nocache_commands:
                self.__configurecache.save()

            self.env.vsay('''******************************
   Config : {0}
   Action : {1}
******************************'''.format(
                self.build_config if self.has_build_configs else '(none)',
                command))

            command_method(*args[1:])

        except RuntimeError as ex:
            if self.opt.debug:
                raise
            sys.stderr.write('*** ERROR: ' + str(ex) + '\n')
            raise SystemExit(1)

        except KeyboardInterrupt:
            if self.opt.debug:
                raise
            sys.stderr.write('*** INTERRUPTED\n')
            raise SystemExit(1)

    def run_meta(self):
        meta = {
            'project': self.project_name,
            'commands': self.commands,
            'dependencies': self._deps.meta,
        }
        if self.__packagers:
            meta['packaging'] = {}
            for name, p in self.__packagers.iteritems():
                meta['packaging'][name] = p.meta
        try:
            info = mirbuild.version.VersionInfoFactory.create()
            meta['version'] = info.upstream_version()
        except RuntimeError:
            pass
        print json.dumps(meta, indent=4)

    def run_build(self):
        self.run_configure()
        self._run_plugins('pre_build')
        self._run_plugins('build')
        self.do_build()
        self._run_plugins('post_build')

    def run_test(self):
        self.run_build()
        self._run_plugins('pre_test')
        self._run_plugins('test')
        self.do_test()
        self._run_plugins('post_test')

    def run_install(self):
        self.run_build()
        self._run_plugins('pre_install')
        self._run_plugins('install')
        self.do_install()
        self.__install_files()
        self._run_plugins('post_install')

    # TODO
    # def run_coverage(self):
    #     self.run_test()
    #     self.do_coverage()

    # this is just an alias
    def run_distclean(self):
        self.run_realclean()

    def run_realclean(self):
        for t in self.tests:
            t.clean()
        self._run_plugins('pre_realclean', reverse=True)
        self.do_realclean()
        self._run_plugins('realclean', reverse=True)
        self._run_plugins('post_realclean', reverse=True)
        self.env.remove_files(self.__configurecache.filename)
        self.env.remove_trees('build')
        for v in self.__versions:
            v['file'].clean()

    def run_clean(self):
        self._run_plugins('pre_clean', reverse=True)
        self.do_clean()
        self._run_plugins('clean', reverse=True)
        self._run_plugins('post_clean', reverse=True)
        for t in self.tests:
            t.clean()

    def __apply_paths(self):
        for opt in ['include_path', 'library_path']:
            meth = getattr(self, 'add_' + opt, None)
            if meth is not None:
                for path in getattr(self.opt, opt):
                    meth(
                        mirbuild.dependency.CLibraryDependency.validated_path(
                            path, env=self.env))

    def run_configure(self):
        for v in self.__versions:
            v['file'].generate(v['info'])
        self._run_plugins('pre_configure')
        self._run_plugins('configure')
        self.do_configure()
        self._run_plugins('post_configure')

    def do_test(self):
        for t in self.tests:
            t.configure()
            t.build()
        obs = mirbuild.test.TestObserver()
        for t in self.tests:
            t.run(obs)
        if obs.num_total > 0:
            self.env.say(obs.report())
            if obs.num_failed > 0:
                raise SystemExit(1)
        elif self.tests:
            raise RuntimeError('No test runs observed.')

    def do_package(self):
        self._run_plugins('pre_package')
        self.prepare_package()
        self._run_plugins('package')
        self.packager.package()
        self._run_plugins('post_package')

    def prepare_package(self):
        pass
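
The Project class above discovers its commands by reflection: the commands property collects every run_* method via methlist(r'run_(\w+)'), and __expand_command resolves unambiguous prefixes before dispatching in run(). The self-contained sketch below reduces just that mechanism to a few lines; MiniDriver and its commands are illustrative and are not part of mirbuild's API.

import re
import sys

class MiniDriver(object):
    """Illustrative reduction of the run_* discovery and prefix expansion above."""

    def run_build(self):
        print("building")

    def run_clean(self):
        print("cleaning")

    def run_test(self):
        print("testing")

    @property
    def commands(self):
        # Collect the suffix of every run_* method, as Project.methlist does.
        pat = re.compile(r'run_(\w+)')
        return sorted(m.group(1) for m in (pat.match(n) for n in dir(self)) if m)

    def expand(self, raw):
        # Accept any unambiguous prefix of a command name.
        cand = [c for c in self.commands if c.startswith(raw)]
        if len(cand) != 1:
            raise SystemExit('%s command "%s"' %
                             ('Ambiguous' if cand else 'Invalid', raw))
        return cand[0]

    def run(self, argv):
        if not argv:
            raise SystemExit('commands: ' + ', '.join(self.commands))
        getattr(self, 'run_' + self.expand(argv[0]))()

if __name__ == '__main__':
    MiniDriver().run(sys.argv[1:])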
Example #23
0
               'gsv': GITSave,
               'gld': GITLoad,
               'gdi': GITDiff,
               'gsm': GITSummary,
               'gcf': GITConfig}

if __name__ == '__main__':
    """
    the main function
    this tool works like busybox: all the symbolic links point to the same file.
    depending on which command name is invoked, the corresponding service is provided.
    """
    invoke('export LANG=en_US.UTF-8')
    #get the service requested by the user
    parser = OptionParser()
    service = parser.get_prog_name()
    if DEBUG:
        #a major service will always be a 3-character key word
        if service == 'ghelp':
            try:
                help = CALL_TABLE[sys.argv[1][:3]].__doc__
            except Exception:
                help = __doc__
            help = help.replace("`", color['quote_left'])
            help = help.replace("'", color['quote_right'])
            print(help)
        else:
            if len(sys.argv) == 2 and sys.argv[1] == '--help':
                help = CALL_TABLE[service[:3]].__doc__
                help = help.replace("`", color['quote_left'])
                help = help.replace("'", color['quote_right'])
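
The docstring above describes a busybox-style layout: several symlink names all resolve to one script, and parser.get_prog_name() picks the service from the invoked name. A minimal, self-contained sketch of that dispatch pattern follows; CALL_TABLE and its placeholder services are illustrative stand-ins for the tool's real GIT* classes.

import sys
from optparse import OptionParser

# Hypothetical stand-ins for the real service implementations (GITSave, GITLoad, ...).
def git_save():
    print("gsv: saving (placeholder)")

def git_load():
    print("gld: loading (placeholder)")

CALL_TABLE = {'gsv': git_save, 'gld': git_load}

if __name__ == '__main__':
    # Every symlink points at this one script; the first three characters
    # of the invoked program name select the service, as in the example above.
    parser = OptionParser()
    service = parser.get_prog_name()[:3]
    handler = CALL_TABLE.get(service)
    if handler is None:
        sys.exit("%s: unknown service" % parser.get_prog_name())
    handler()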
Example #24
0
def main():
    args = sys.argv
    err = 0
    if 'id3help' in args:
        from mutagen.easyid3 import EasyID3
        for key in EasyID3.valid_keys.keys():
            print(key, )

    from optparse import OptionParser as OP

    OP = OP()
    OP.usage = ("%prog [options] filenames")
    OP.epilog = '%s id3help: for help with id3 tags' % os.path.basename(
        args[0])
    OP.add_option('-t',
                  '--tag',
                  dest='tag',
                  action='append',
                  help="set a tag",
                  metavar='tag=value')
    OP.add_option(
        '-a',
        '--add',
        dest='add',
        action='append',
        help='set/add values to a tag, without removing any existing values',
        metavar='tag=value')
    OP.add_option('-p',
                  '--pattern',
                  dest='pattern',
                  action='store',
                  help='substitution pattern from filename',
                  metavar="'%n %t.flac'")
    OP.add_option('--fn2tag',
                  dest='pattern',
                  action='store',
                  help='same as -p | --pattern')
    OP.add_option('-r',
                  '--remove',
                  dest='remove',
                  action='append',
                  help='remove a tag value or entire tag',
                  metavar="'tag' or 'tag=value'")
    OP.add_option('-j',
                  '--justify',
                  dest='justify',
                  action='store_true',
                  help='zero-justify tracknumbers')
    OP.add_option('--clear',
                  dest='clear',
                  action='store_true',
                  help='clear all tags')
    OP.add_option('-n',
                  '--noact',
                  dest='noact',
                  action='store_true',
                  help="just show what changes would be made")
    OP.add_option('-c',
                  '--confirm',
                  dest='confirm',
                  action='store_true',
                  help='show changes and prompt for confirmation to save')
    OP.add_option('-f',
                  '--files',
                  dest='filenames',
                  action='append',
                  help='one or more filenames/globs')
    OP.add_option('-q',
                  '--quiet',
                  dest='quiet',
                  action='store_true',
                  help='no output to stdout')
    OP.add_option('--tag2fn',
                  dest='tag2fn',
                  action='store',
                  help='substitution pattern from tags',
                  metavar="'%n %t.flac'")
    OP.add_option(
        '-s',
        '--filter',
        dest='symbols',
        action='store',
        help=
        'one or more characters to filter from tags used to build filenames',
        metavar="'!@$&*/\?'")
    OP.add_option(
        '-m',
        '--map',
        dest='map',
        action='store',
        help=
        'replace all instances of a char with another char\nin conjunction with --tag2fn',
        metavar="/ -")
    OP.add_option('-i',
                  '--index',
                  dest='idx',
                  action='store_true',
                  help='index files by filename order (persistent file order)')
    OP.add_option('-v',
                  '--version',
                  dest='vers',
                  action='store_true',
                  help='show version')

    argstr = ' '.join(args)

    if len(args) < 2:
        OP.print_usage()
        #        print("version %s" % __version__)
        print('-h|--help for help')
        sys.exit(1)

    p = r'(-t|--tag|-a|--add|-p|--pattern|-r|--remove|-f|--files)\ +?\-[^\ ]*'
    mo = re.search(p, argstr)
    if mo:
        print('illegal option combination: ', mo.group())
        sys.exit(1)

    (opt, fnames) = OP.parse_args()
    if opt.vers:
        print('%s %s' % (OP.get_prog_name(), __version__))
    if opt.filenames:
        fnames += opt.filenames

    for fname in fnames:
        if not os.path.exists(fname):
            print('%s: no such file' % fname)
            err += 1
    if err:
        sys.exit(err)

    cfmr = Confirmer(opt)
    fnum = 0
    idx = 0
    if opt.pattern:
        subster = Subster(opt.pattern)
    elif opt.tag2fn:
        subster = Subster(opt.tag2fn, 'tag2fn')
    else:
        subster = Subster('', '')

    modded = any(
        [opt.clear, opt.remove, opt.add, opt.tag, opt.pattern, opt.justify])
    spkr = Speaker(opt.quiet)
    top_length = 0
    for fname in fnames:
        bfname = os.path.basename(fname)
        top_length = len(bfname) if len(bfname) > top_length else top_length

    for fname in fnames:
        fnum += 1
        vals = {}
        keys = []
        origfn = fname

        if os.path.splitext(fname)[1] == '.mp3':
            try:
                mf = MP3(fname)
            except IOError:
                spkr.speak("\ncan't open %s" % fname)
                continue
            spkr.speak("processing %s" % fname)
            if opt.clear:
                mf.clear()
            for action in opt.remove or []:
                k, v = (action.split('=', 1) + [''])[:2]
                vals[k] = mf.pop(k, [])
                if k and not v:
                    vals[k] = []

                elif v and v in vals[k]:
                    vals[k].remove(v)
            for action in opt.tag or []:
                k, v = (action.split('=', 1) + [''])[:2]
                vals[k] = [v]
            for action in opt.add or []:
                k, v = (action.split('=', 1) + [''])[:2]
                if vals.get(k, []):
                    vals[k] += mf.pop(k, [])
                else:
                    vals[k] = mf.pop(k, [])
                vals[k].extend([v])
            if subster.pattern:
                d = subster.getdict(fname)
                for k in d:
                    values = d.get(k, [])
                    if not isinstance(values, list):
                        values = [values]
                    try:
                        vals[k].extend(values)
                    except KeyError:
                        vals[k] = values
            if opt.justify:
                if not vals.get('tracknumber'):
                    vals['tracknumber'] = fnum
                width = len(str(len(fnames)))
                n = width - len(str(vals['tracknumber']))
                vals['tracknumber'] = [n * '0' + str(vals['tracknumber'])]

            if not modded:
                if not opt.quiet:
                    print(mf.pprint())
                    continue

            if opt.noact or opt.confirm:
                for k in vals:
                    print(k + '=' + str(vals[k]))
            if opt.noact:
                continue
            if opt.confirm and not cfmr.confirm():
                continue
            for k in vals:
                try:
                    mf.update({k: vals[k]})
#                mf.save( )
                except ValueError:
                    pass
            mf.save()
        else:
            try:
                #            print(fname)
                mf = File(fname)
            except IOError:
                spkr.speak("can't open %s" % fname)
                continue
            spkr.speak(os.path.basename(fname))

            if opt.idx:
                trn = mf.get('tracknumber', None)
                mf['idx'] = unicode(fnum)
                if trn:
                    mf['idx'] += trn
                mf.save()
                print(' indexed')

            if opt.clear:
                mf.clear()
                spkr.speak('\n\ttags cleared..')
            for action in opt.remove or []:
                k, v = (action.split('=', 1) + [''])[:2]
                t = mf.pop(k, [])
                if v and v in t:
                    t.remove(v)
                    spkr.speak(str(k) + ' removes ' + str(v))
                if v and t:
                    mf.update({k: t})
            for action in opt.tag or []:
                if '=' in action:
                    k, v = action.split('=', 1)
                    if k and v:
                        mf.update({k: [v]})
                        spkr.speak('\t\ttag set: ' + k + '=' + v)
            for action in opt.add or []:
                if '=' in action:
                    k, v = action.split('=', 1)
                    mf.update({k: mf.get(k, []) + [v]})
                    spkr.speak('\n\ttag appended: ' + k + '=' + v)
            if subster.mode == 'fn2tag':
                d = subster.getdict(fname)
                for k in d:
                    mf.update({k: d[k]})
                    spkr.speak('\n\tfrom filename: ' + k + '=' + d[k])

            if subster.mode == 'tag2fn':
                fname = ''
                fnlist = subster.getfnlist()
                if 'tracknumber' in fnlist:
                    tn = 1
                else:
                    tn = 0
                lit = True
                for item in fnlist:
                    lit = not lit
                    if lit:
                        if not tn and item == 'tracknumber':
                            item = 'track'
                        if tn and item == 'track':
                            item = 'tracknumber'
                        if item.startswith('track') and opt.justify:
                            subst = mf[item][0].rjust(2, '0')
                        else:
                            subst = mf[item][0]

                        if opt.symbols:
                            pat = '[' + opt.symbols + ']'
                            subst = re.sub(pat, '', subst)
                            subst = subst.strip()

                        fname += subst
                    else:
                        fname += item

                    if '/' in fname:
                        fname = re.sub('/', '-', fname)

#            if opt.map:
#                fname = map(fname,opt.map)

                if opt.noact or opt.confirm:
                    pass

            if not any([modded, opt.tag2fn, opt.quiet]):
                print(mf.pprint(), )

            if cfmr.confirm():
                if opt.tag2fn:
                    if opt.map:
                        a, b = opt.map.split()
                        fname = re.sub(a, b, fname)

                    pth = os.path.join(os.path.dirname(origfn), fname)
                    second_column = top_length + 2
                    tab = (second_column - len(os.path.basename(origfn))) * ' '
                    try:
                        os.rename(origfn, pth)
                        print(tab + '--> ' + fname),
#                    spkr.speak( 'renamed...   ' + fname )
                    except IOError:
                        raise IOError
                else:
                    mf.save()
                    spkr.speak('\tsaved!')
Example #25
0
            source = node_from + ':' + filename_from
            destination = filename_to


#------------------------------------------------------
#Command line options
#------------------------------------------------------
parser = OptionParser()
parser.add_option("-l",
                  "--locus",
                  type="str",
                  default='',
                  help="Locus node from which you want to copy data.")
(options, args) = parser.parse_args()

if not parser.get_prog_name() == "listfratsfiles.py":
    #   Program was run from within python
    locus = ""
else:
    locus = options.locus

node_local = socket.gethostname()

if locus == '':
    locus = node_local

if locus != node_local and node_local != 'locus013':
    print '**Warning**: Using ' + node_local + ' as locus instead of ' + locus + ', since node_local is not locus013.'
    locus = node_local

#------------------------------------------------------
Example #26
0
def main():
    from optparse import OptionParser

    cli = OptionParser(
        usage="%prog [options] <base-file>",
        description=
        "Generate OpenVZ container configuration files based on an existing file"
    )
    cli.add_option("-m",
                   "--multiply",
                   dest="multiply",
                   type="float",
                   metavar="FACTOR",
                   help="multiply by given factor")
    cli.add_option(
        "-a",
        "--add",
        dest="add",
        type="string",
        action="append",
        metavar="FILE",
        help=
        "add (as in sum) given file, you can add as many files as you need by specifying this option multiple times"
    )
    cli.add_option(
        "-s",
        "--substract",
        dest="substract",
        type="string",
        action="append",
        metavar="FILE",
        help=
        "substract given file, you can add as many files as you need by specifying this option multiple times"
    )
    cli.add_option("-d",
                   "--debug",
                   dest="debug",
                   action="store_true",
                   help="do not catch python exceptions, useful for debugging")
    (options, args) = cli.parse_args()

    if not len(args):
        cli.error("No base file provided")

    try:
        # Require Python >= 2.4
        import sys
        if sys.version_info < (2, 4):
            cli.error("Python 2.4.0 or higher is required")

        c = CTConfig(args[0])

        # Multiply
        if options.multiply:
            if options.multiply <= 0:
                cli.error("Invalid multiplication factor %s" %
                          str(options.multiply))

            c.multiply(options.multiply)

        # Add
        if options.add is not None:
            for f in options.add:
                c.add(CTConfig(f))

        # Subtract
        if options.substract is not None:
            for f in options.substract:
                c.substract(CTConfig(f))

        # Output results
        print c
    except Exception, e:
        if options.debug:
            raise
        else:
            cli.print_usage()
            cli.exit(2, "%s: %s\n" % (cli.get_prog_name(), e))
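
CTConfig itself is defined outside this excerpt. The arithmetic the options describe, scaling every limit by a factor and adding or subtracting the limits of another configuration, might look roughly like the sketch below; it assumes a plain dict of integer parameters rather than the real OpenVZ bean-counter file format, so it is only an approximation of what CTConfig does.

class SimpleCTConfig(object):
    """Toy stand-in for CTConfig: a mapping of parameter name to integer limit."""

    def __init__(self, values):
        self.values = dict(values)

    def multiply(self, factor):
        # Scale every limit by the given factor.
        for key in self.values:
            self.values[key] = int(self.values[key] * factor)

    def add(self, other):
        # Sum limits key by key.
        for key, val in other.values.items():
            self.values[key] = self.values.get(key, 0) + val

    def substract(self, other):  # spelled as in the command-line option above
        # Subtract limits key by key, clamping at zero.
        for key, val in other.values.items():
            self.values[key] = max(0, self.values.get(key, 0) - val)

# Double a base configuration, then add headroom from a second one.
base = SimpleCTConfig({'KMEMSIZE': 14372700, 'NUMPROC': 240})
extra = SimpleCTConfig({'NUMPROC': 60})
base.multiply(2.0)
base.add(extra)
print(base.values)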
Example #27
0
	if (len(filenames) < 1):
		print "input files not found."
		sys.exit(1)
		
	for f in filenames:
		graphTitle = options.title
		if (graphTitle == None):
			graphTitle = f.replace(".finalBests.csv","").replace(".finalCheckedBests.csv","")
		plotFromFile(f, graphTitle, options)

	

if __name__ == '__main__':
	parser = OptionParser()
	parser.set_usage(parser.get_prog_name() + " file1.finalBests.csv ...")
	parser.set_description("""Takes xxx.finalBests.csv or xxx.finalCheckedBests.csv files and 
for each file creates a corresponding xxx.finalBests.{png,pdf,eps,svg} boxplot of the parameter settings
(Note filename wildcards are allowed for input files.)""")
	parser.add_option("-o", action="store", type="choice", choices=["png","pdf","eps","svg"], dest="filetype", help="type of output file to be created")
	parser.add_option("--dpi", action="store", type="int", dest="dpi", help="DPI to use when exporting graphics (applicable to raster formats, like PNG)")
	parser.add_option("--title", action="store", dest="title", help="title for all graphs created")
	parser.add_option("-s", "--show", action="store_true", dest="show", help="display the created plots")
	
	options , args = parser.parse_args()
	main(options, args)




Example #28
0
    unittest       run unittests

Help on these subcommands can be retrieved by invoking the subcommand with the
-h option (e.g. %prog call -h).'''
    )
    op.add_option(
        '-v', action = 'count', dest = 'verbosity', default = 0,
        help = '''increase logging verbosity; this option can be used multiple
times to increase verbosity even more (default: %default)'''
    )
    op.allow_interspersed_args = False

    # Parse arguments, processing global options
    globalOpts, globalArgs = op.parse_args()

    PROG_NAME = op.get_prog_name()

    if len(globalArgs) < 1:
        op.print_usage(sys.stderr)
        sys.exit(1)

    if not globalArgs[0] in CMD_TAB:
        sys.stderr.write(
            '%s: no such command \'%s\'\n' % (PROG_NAME, globalArgs[0])
        )
        op.print_usage(sys.stderr)
        sys.exit(1)

    # Set up logging
    logHandler = logging.StreamHandler(sys.stderr)
    logHandler.setFormatter(logging.Formatter('%(message)s'))
Example #29
0
def main():
    from optparse import OptionParser, OptionGroup

    descr  = ("A simple tool for archiving fanfiction for offline reading " +
    "and converting said archives into ready-to-read eBooks for pocket " +
    "reading devices.")

    epilog = ("As an alternative to explicitly specifying a personality, " +
    "this command will alter its behaviour if called by the following names:" +
    " " + ', '.join(sorted(Personality.personalities)))

    parser = OptionParser(version="%%prog v%s" % __version__,
        usage="%prog [options] <url> ...", description=descr, epilog=epilog)
    parser.add_option('-b', '--bundle', action="store_true", dest="bundle",
        default=False, help="Also bundle the entire story into a single file" +
                            "with chapter headings and a table of contents.")
    parser.add_option('-t', '--target', action="store", dest="target", metavar="DIR",
        default=os.getcwd(), help="Specify a target directory other than the current working directory.")
    parser.add_option('--list_supported', action="store_true", dest="list_supported",
        default=False, help="List installed scrapers and personalities.")
    parser.add_option('-P', '--personality', action="store", dest="persona", metavar="NAME",
        default=None, help="Set the personality the conversion will operate under. See --list_supported.")

    #pre_group = OptionGroup(parser, "Pre-Processing Options")
    #pre_group.add_option('--strip-accents', action="store_true", dest="strip_accents",
    #    default=False, help="Remove diacritics for compatibility with readers with " +
    #    "limited fonts and no internal fallback mechanism. (eg. Sony PRS-505)")

    pp_group = OptionGroup(parser, "Post-Processing Options")
    pp_group.add_option('-p', '--postproc', action="append", dest="postproc", metavar="CMD",
        default=[], help="Call the specified post-processor after each retrieval " +
                         "completes. Can be used multiple times. Implies --bundle.")
    pp_group.add_option('-e', '--final_ext', action="store", dest="final_ext", metavar="EXT",
        default='.out', help="Set the extension to be used in the output filename " +
                           "available to post-processor templates.")
    parser.add_option_group(pp_group)

    opts, args = parser.parse_args()
    cmd = parser.get_prog_name()

    if opts.list_supported:
        names = sorted(Scraper.scrapers[x].site_name for x in Scraper.scrapers)
        print "Scrapers:\n\t" + '\n\t'.join(names)
        print
        print "Personalities:\n\t" + '\n\t'.join(sorted(Personality.personalities))
        parser.exit()

    if not args:
        parser.print_help()
        parser.exit()

    persona = Personality.get(opts.persona or cmd)()
    for option in persona.opts:
        setattr(opts, option, persona.opts[option])

    if opts.postproc:
        opts.bundle = True

    for url_arg in args:
        scraper = Scraper.get(url_arg)(opts.target, opts.bundle, opts.final_ext)
        try:
            downloaded_story = scraper.download_fic(url_arg)
        except Exception, err:
            print "Failed to retrieve story %s" % url_arg
            print "TODO: Handle this properly"
            continue

        persona.postproc(downloaded_story)

        if opts.postproc:
            inputs = {
                'appname'   : "%s v%s" % (__appname__, __version__),
                'author'    : downloaded_story.author,
                'bundle'    : downloaded_story.path,
                'category'  : downloaded_story.category,
                'coverfile' : downloaded_story.cover,
                'outfile'   : downloaded_story.final_path,
                'site_name' : downloaded_story.site_name,
                'title'     : downloaded_story.title
            }

            for pp_cmdline in opts.postproc:
                cmdlist = pp_cmdline.strip().split()
                print "Calling post-processor: %s" % cmdlist[0]
                subprocess.call([r % inputs for r in cmdlist])
Example #30
0
   for filename in args:
       ape2id3.copy_replaygain_tags(filename)

if __name__ == "__main__":
   parser = OptionParser(version="0.1", usage="%prog [OPTION]... FILE...",
                         description="Copy APEv2 ReplayGain tags on "
                                     "FILE(s) to ID3v2.")
   parser.add_option("-q", "--quiet", dest="log_level",
                     action="store_const", const=0, default=1,
                     help="do not output error messages")
   parser.add_option("-v", "--verbose", dest="log_level",
                     action="store_const", const=3,
                     help="output warnings and informational messages")
   parser.add_option("-d", "--debug", dest="log_level",
                     action="store_const", const=4,
                     help="output debug messages")
   parser.add_option("-f", "--force", dest="force",
                     action="store_true", default=False,
                     help="force overwriting of existing ID3v2 "
                          "ReplayGain tags")
   prog_name = parser.get_prog_name()
   options, args = parser.parse_args()
   if len(args) < 1:
       parser.error("no files specified")
   try:
       main(prog_name, options, args)
   except KeyboardInterrupt:
       pass

# vim: set expandtab shiftwidth=4 softtabstop=4 textwidth=79:
Example #31
0
def main(args):
    parser = OptionParser(
        prog="vdautomount",
        usage="%prog [options] machine-name-or-uuid mountpoint",
        version="%prog " + globals()["VERSION"])
    parser.add_option("-p",
                      help="path to vdfuse",
                      type="string",
                      default=globals()["VDFUSE_COMMAND"],
                      metavar="vdfuse")
    parser.add_option("-r", help="readonly", action="store_true")
    parser.add_option("-g", help="run in foreground", action="store_true")
    parser.add_option("-v", help="verbose", action="store_true")
    parser.add_option("-d", help="debug", action="store_true")
    parser.add_option("-a",
                      help="allow all users to read disk",
                      action="store_true")
    parser.add_option("-w",
                      help="allow all users to read and write to disk",
                      action="store_true")
    parser.add_option(
        "-m",
        help=
        "specify which disk to mount, required if machine has more than one disk",
        type="int",
        default=-1,
        metavar="NUMBER")

    options, args = parser.parse_args(args=args)

    if len(args) != 2:
        parser.error("invalid machine specifier or mountpoint")

    spec = args[0]
    mountpoint = args[1]
    vbm = VirtualBoxManager(None, None)

    if not (os.access(mountpoint, os.R_OK | os.W_OK)
            and os.path.isdir(mountpoint)):
        parser.error("mountpoint cannot be accessed or is not a directory")

    try:
        machine = vbm.vbox.getMachine(spec)
    except:
        try:
            machine = vbm.vbox.findMachine(spec)
        except:
            parser.error("couldn't find machine \"%s\"" % spec)

    mediums = [x.medium for x in machine.getMediumAttachments() if x.type == 3]

    if len(mediums) == 1:
        medium = mediums[0]
    elif options.m != -1:
        medium = mediums[options.m - 1]
    else:
        ss = sys.stdout
        sys.stdout = sys.stderr
        print "Multiple disks on machine:"
        for index, medium in enumerate(mediums):
            print "%d:\tbase:\t%s" % (index + 1, medium.base.location)
            if medium.id != medium.base.id:
                print "\tsnap:\t%s" % medium.location
        sys.stdout = ss
        parser.exit(
            2, "%s: specify the disk number with the -m option\n" %
            parser.get_prog_name())

    paths = []
    while True:
        paths.append(medium.location)
        if medium.parent:
            medium = medium.parent
        else:
            break

    paths.reverse()
    base = paths[0]
    diffs = paths[1:]

    if len(diffs) > 100:
        parser.error("too many levels of snapshots")

    args = [options.p]
    for option, value in options.__dict__.iteritems():
        if option in ("p", "m"):
            continue
        if value:
            args.append("-" + option)

    args.append("-f")
    args.append(base.encode("UTF-8"))

    for x in diffs:
        args.append("-s")
        args.append(x.encode("UTF-8"))

    args.append(mountpoint)

    try:
        os.execvp(options.p, args)
    except OSError as e:
        parser.error("error running vdfuse. wrong path (-p) ?")
Example #32
0
def main():
    """
    Main
    """

    # 引数のパース
    usage = "Usage: %prog [option ...]"
    version = "%%prog %s\nCopyright (C) Yuichiro SAITO." % (PROGRAM_VERSION)
    parser = OptionParser(usage=usage, version=version)
    parser.add_option(
        "-w",
        "--warning",
        type="int",
        dest="warning",
        metavar="<pages>",
        default=100,
        help="Exit with WARNING status if more than this many major page "
        "faults occur per second. Default value is 100."
    )
    parser.add_option(
        "-c",
        "--critical",
        type="int",
        dest="critical",
        metavar="<pages>",
        default=1000,
        help="Exit with CRITICAL status if more than this many major page "
        "faults occur per second. Default value is 1000."
    )
    parser.add_option("-i",
                      "--interval",
                      type="int",
                      dest="interval",
                      metavar="<seconds>",
                      default=2,
                      help="Check interval. Default value is 2 (sec).")
    parser.add_option("-V",
                      "--verbose",
                      action="store_true",
                      dest="verbose",
                      default=False,
                      help="Verbose mode. (For debug only)")
    (options, args) = parser.parse_args()
    prog_name = parser.get_prog_name()

    if options.verbose:
        logging.basicConfig(level=logging.DEBUG, format=LOG_FORMAT)
    else:
        logging.basicConfig(level=logging.WARNING, format=LOG_FORMAT)

    logging.debug("START")

    # 評価を実施
    pf = _PageFault(options.interval)
    ret = pf.setWarning(options.warning)
    if ret != _PageFault.STATE_OK:
        logging.debug("EXIT")
        return ret
    ret = pf.setCritical(options.critical)
    if ret != _PageFault.STATE_OK:
        logging.debug("EXIT")
        return ret
    ret = pf.checkMajorPageFaluts()
    if ret != _PageFault.STATE_OK:
        logging.debug("EXIT")
        return ret

    logging.debug("END")
Example #33
0
def main (args):
	parser = OptionParser (prog="vdautomount",
		usage="%prog [options] machine-name-or-uuid mountpoint", version="%prog " + globals()["VERSION"])
	parser.add_option ("-p", help="path to vdfuse",
		type="string", default=globals()["VDFUSE_COMMAND"], metavar="vdfuse")
	parser.add_option ("-r", help="readonly", action="store_true")
	parser.add_option ("-g", help="run in foreground", action="store_true")
	parser.add_option ("-v", help="verbose", action="store_true")
	parser.add_option ("-d", help="debug", action="store_true")
	parser.add_option ("-a", help="allow all users to read disk", action="store_true")
	parser.add_option ("-w", help="allow all users to read and write to disk", action="store_true")
	parser.add_option ("-m", help="specify which disk to mount, required if machine has more than one disk",
		type="int", default=-1, metavar="NUMBER")
	
	options, args = parser.parse_args (args=args)
	
	if len (args) != 2:
		parser.error ("invalid machine specifier or mountpoint")
	
	spec = args[0]
	mountpoint = args[1]
	vbm = VirtualBoxManager (None, None)
	
	if not (os.access (mountpoint, os.R_OK | os.W_OK) and os.path.isdir (mountpoint)):
		parser.error ("mountpoint cannot be accessed or is not a directory")
	
	try:
		machine = vbm.vbox.getMachine (spec)
	except:
		try:
			machine = vbm.vbox.findMachine (spec)
		except:
			parser.error ("couldn't find machine \"%s\"" % spec)
	
	mediums = [x.medium for x in machine.getMediumAttachments () if x.type == 3]
	
	if len (mediums) == 1:
		medium = mediums[0]
	elif options.m != -1:
		medium = mediums[options.m - 1]
	else:
		ss = sys.stdout
		sys.stdout = sys.stderr
		print "Multiple disks on machine:"
		for index, medium in enumerate (mediums):
			print "%d:\tbase:\t%s" % (index + 1, medium.base.location)
			if medium.id != medium.base.id:
				print "\tsnap:\t%s" % medium.location
		sys.stdout = ss
		parser.exit (2, "%s: specify the disk number with the -m option\n" % parser.get_prog_name ())
	
	paths = []
	while True:
		paths.append (medium.location)
		if medium.parent:
			medium = medium.parent
		else:
			break
	
	paths.reverse ()
	base = paths[0]
	diffs = paths[1:]
	
	if len (diffs) > 100:
		parser.error ("too many levels of snapshots")
	
	args = [options.p]
	for option, value in options.__dict__.iteritems ():
		if option in ("p", "m"):
			continue
		if value:
			args.append ("-" + option)
	
	args.append ("-f")
	args.append (base.encode ("UTF-8"))
	
	for x in diffs:
		args.append ("-s")
		args.append (x.encode ("UTF-8"))
	
	args.append (mountpoint)
	
	try:
		os.execvp (options.p, args)
	except OSError as e:
		parser.error ("error running vdfuse. wrong path (-p) ?")
Example #34
0
    if (len(filenames) < 1):
        print "input files not found."
        sys.exit(1)

    for f in filenames:
        graphTitle = options.title
        if (graphTitle == None):
            graphTitle = f.replace(".finalBests.csv",
                                   "").replace(".finalCheckedBests.csv", "")
        plotFromFile(f, graphTitle, options)


if __name__ == '__main__':
    parser = OptionParser()
    parser.set_usage(parser.get_prog_name() + " file1.finalBests.csv ...")
    parser.set_description(
        """Takes xxx.finalBests.csv or xxx.finalCheckedBests.csv files and 
for each file creates a corresponding xxx.finalBests.{png,pdf,eps,svg} boxplot of the parameter settings
(Note filename wildcards are allowed for input files.)""")
    parser.add_option("-o",
                      action="store",
                      type="choice",
                      choices=["png", "pdf", "eps", "svg"],
                      dest="filetype",
                      help="type of output file to be created")
    parser.add_option(
        "--dpi",
        action="store",
        type="int",
        dest="dpi",
Example #35
0
    if len(args) < 1:
        parser.error("no URI specified")
    uri = args.pop(0)

    if action in ("get", "set", "delete", "find"):
        if len(args) < 1:
            parser.error("no name specified")
        name = args.pop(0)
    else:
        name = None

    if action == "set":
        if len(args) < 1:
            parser.error("no value specified")
        value = args.pop(0)
    else:
        value = None

    try:
        main(action, uri, name, value)
    except SocketError as e:
        print("%s: error with connection to MPD: %s" % \
                (parser.get_prog_name(), e[1]), file=stderr)
    except MPDError as e:
        print("%s: error executing action: %s" % \
                (parser.get_prog_name(), e), file=stderr)


# vim: set expandtab shiftwidth=4 softtabstop=4 textwidth=79:
Example #36
0
    'gld': GITLoad,
    'gdi': GITDiff,
    'gsm': GITSummary,
    'gcf': GITConfig
}

if __name__ == '__main__':
    """
    the main function
    this tool works like busybox: all the symbolic links point to the same file.
    depending on which command name is invoked, the corresponding service is provided.
    """
    invoke('export LANG=en_US.UTF-8')
    #get the service requested by the user
    parser = OptionParser()
    service = parser.get_prog_name()
    if DEBUG:
        #a major service will always be a 3-character key word
        if service == 'ghelp':
            try:
                help = CALL_TABLE[sys.argv[1][:3]].__doc__
            except Exception:
                help = __doc__
            help = help.replace("`", color['quote_left'])
            help = help.replace("'", color['quote_right'])
            print(help)
        else:
            if len(sys.argv) == 2 and sys.argv[1] == '--help':
                help = CALL_TABLE[service[:3]].__doc__
                help = help.replace("`", color['quote_left'])
                help = help.replace("'", color['quote_right'])
Example #37
0
    if len(args) < 1:
        parser.error("no URI specified")
    uri = args.pop(0)

    if action in ("get", "set", "delete", "find"):
        if len(args) < 1:
            parser.error("no name specified")
        name = args.pop(0)
    else:
        name = None

    if action == "set":
        if len(args) < 1:
            parser.error("no value specified")
        value = args.pop(0)
    else:
        value = None

    try:
        main(action, uri, name, value)
    except SocketError as e:
        print >> stderr, "%s: error with connection to MPD: %s" % \
                         (parser.get_prog_name(), e[1])
    except MPDError as e:
        print >> stderr, "%s: error executing action: %s" % \
                         (parser.get_prog_name(), e)


# vim: set expandtab shiftwidth=4 softtabstop=4 textwidth=79:
Example #38
0
             default=None,
             help="The email address in the ReplyTo: and From: fields of the email you want to send out with this image.")
p.add_option('--email-body', '--body',
             dest='email_body',
             #default ='*****@*****.**',
             default=None,
             help="Email body text to send instead of the image EXIF comments.")
p.add_option('--email-server', '--email_server', '--email-url', '--email_url', '--email-smtp', '--email_smtp',
             dest='email_server',
             default='smtp.gmail.com',
             help="The smtp email server URL (e.g. smtp.gmail.com or localhost)")

(o, a) = p.parse_args()

if o.email_pw:
    warn("Make sure you invoked "+p.get_prog_name()+" in such a way that history won't record this command (with a plaintext password) in the history file. It would be much  better if you didn't supply the password on the command line and instead allowed this script to securely prompt you for it later.")  # ,UserWarning) #,RuntimeWarning)

if not o.email_from:
    o.email_from = o.email_user

lat_label = 'Exif.GPSInfo.GPSLatitude'
lon_label = 'Exif.GPSInfo.GPSLongitude'
ref_suffix = 'Ref'

#exiv2 -M"set Exif.GPSInfo.GPSLatitude 4/1 15/1 33/1" \
#-M"set Exif.GPSInfo.GPSLatitudeRef N" image.jpg
#Sets the latitude to 4 degrees, 15 minutes and 33 seconds north. The Exif
#standard  stipulates  that the GPSLatitude tag consists of three Rational
#numbers for the degrees, minutes and seconds of the latitude and GPSLati
#tudeRef  contains  either  'N' or 'S' for north or south latitude respec
#tively.
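
The man-page excerpt above spells out the Exif representation: three rational values for degrees, minutes and seconds plus an 'N'/'S' reference. Converting a signed decimal latitude into that form is a small piece of arithmetic, sketched here independently of any Exif library.

def decimal_to_exif_latitude(lat):
    """Split a signed decimal latitude into (degrees, minutes, seconds, ref)."""
    ref = 'N' if lat >= 0 else 'S'
    lat = abs(lat)
    degrees = int(lat)
    minutes = int((lat - degrees) * 60)
    seconds = int(round((lat - degrees - minutes / 60.0) * 3600))
    return degrees, minutes, seconds, ref

# 4.2592 degrees north corresponds to the man-page example:
# 4 degrees, 15 minutes and 33 seconds, reference 'N'.
print(decimal_to_exif_latitude(4.2592))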
Example #39
0
    # download coinc.xml
    coinc_file = gracedb.files(graceid, "coinc.xml")

    # download psd.xml.gz
    psd_file = gracedb.files(graceid, "psd.xml.gz")

    # perform sky localization
    log.info("starting sky localization")
    sky_map, epoch, elapsed_time, instruments = gracedb_sky_map(
        coinc_file, psd_file, "TaylorF2threePointFivePN", 10)
    log.info("sky localization complete")

    # upload FITS file
    fitsdir = tempfile.mkdtemp()
    try:
        fitspath = os.path.join(fitsdir, "skymap.fits.gz")
        fits.write_sky_map(fitspath, sky_map, gps_time=float(epoch),
            creator=parser.get_prog_name(), objid=str(graceid),
            url='https://gracedb.ligo.org/events/{0}'.format(graceid),
            runtime=elapsed_time, instruments=instruments,
            origin='LIGO/Virgo', nest=True)
        gracedb.writeLog(graceid, "INFO:BAYESTAR:uploaded sky map",
            filename=fitspath, tagname="sky_loc")
    finally:
        shutil.rmtree(fitsdir)
except:
    # Produce log message for any otherwise uncaught exception
    log.exception("sky localization failed")
    # Then re-raise the exception
    raise
Example #40
0
def main(args):
    p = OptionParser(prog='gpaw install-data',
                     usage=usage,
                     description=description)
    add = p.add_option
    add('--version', metavar='VERSION',
        help='download VERSION of package.  '
        'Run without arguments to display a list of versions.  '
        'VERSION can be the full URL or a part such as  '
        '\'0.8\' or \'0.6.6300\'')
    add('--tarball', metavar='FILE',
        help='unpack and install from local tarball FILE '
        'instead of downloading')
    add('--list-all', action='store_true',
        help='list packages from all sources')
    g = OptionGroup(p, 'Sources')
    for name, help in sources:
        g.add_option('--%s' % name, action='store_const',
                     const=name, dest='source',
                     help=help)
    p.add_option_group(g)
    add('--register', action='store_true',
        help='run non-interactively and register install path in '
        'GPAW setup search paths.  This is done by adding lines to '
        '~/.gpaw/rc.py')
    add('--no-register', action='store_true',
        help='run non-interactively and do not register install path in '
        'GPAW setup search paths')
    opts, args = p.parse_args(args)
    nargs = len(args)

    if opts.source is None:
        opts.source = sources[0][0]

    if opts.register and opts.no_register:
        p.error('Conflicting options specified on whether to register '
                'setup install paths in configuration file.  Try not '
                'specifying some options.')

    # The sg15 file is a tarbomb.  We will later defuse it by untarring
    # into a subdirectory, so we don't leave a ghastly mess on the
    # unsuspecting user's system.

    if not opts.tarball:
        if opts.list_all:
            urls = []
            for source in names:
                urls1 = get_urls(source)
                urls.extend(urls1)
        else:
            urls = get_urls(opts.source)

        def print_urls(urls, marked=None):
            for url in urls:
                pageurl, fname = url.rsplit('/', 1)
                if url == marked:
                    marking = ' [*]'
                else:
                    marking = '    '
                print(' %s %s' % (marking, url))

        if len(urls) == 0:
            url = baseurls[opts.source]
            p.error(notfound_msg.format(url=url))

        if opts.version:
            matching_urls = [url for url in urls if opts.version in url]
            if len(matching_urls) > 1:
                p.error('More than one setup file matches version "%s":\n'
                        '%s' % (opts.version, '\n'.join(matching_urls)))
            elif len(matching_urls) == 0:
                p.error('\nNo setup matched the specified version "%s".\n'
                        'Available setups are:\n'
                        '%s' % (opts.version, '\n'.join(urls)))
            assert len(matching_urls) == 1
            url = matching_urls[0]
        else:
            url = urls[0]

        print('Available setups and pseudopotentials')
        print_urls(urls, url)
        print()

    if nargs == 0:
        print_setups_info(p)
        print()
        progname = p.get_prog_name()
        print('Run %s DIR to install newest setups into DIR.' % progname)
        print('Run %s DIR --version=VERSION to install VERSION (from above).'
              % progname)
        print('See %s --help for more info.' % progname)
        raise SystemExit
    elif len(args) != 1:
        p.error('No more than one DIR expected.  Please try --help.')

    targetpath = args[0]

    if opts.tarball:
        print('Reading local tarball %s' % opts.tarball)
        targzfile = tarfile.open(opts.tarball)
        tarfname = opts.tarball
    else:
        tarfname = url.rsplit('/', 1)[1]
        print('Selected %s.  Downloading...' % tarfname)
        response = urlopen(url)
        targzfile = tarfile.open(fileobj=BytesIO(response.read()))

    if not os.path.exists(targetpath):
        os.makedirs(targetpath)

    assert tarfname.endswith('.tar.gz')
    setup_dirname = tarfname.rsplit('.', 2)[0]  # remove .tar.gz ending
    setup_path = os.path.abspath(os.path.join(targetpath, setup_dirname))
    if tarfname.startswith('sg15'):
        # Defuse tarbomb
        if not os.path.isdir(setup_path):
            os.mkdir(setup_path)
        targetpath = os.path.join(targetpath, setup_dirname)

    print('Extracting tarball into %s' % targetpath)
    targzfile.extractall(targetpath)
    assert os.path.isdir(setup_path)
    print('Setups installed into %s.' % setup_path)

    # Okay, now we have to maybe edit people's rc files.
    rcfiledir = os.path.join(os.environ['HOME'], '.gpaw')
    rcfilepath = os.path.join(rcfiledir, 'rc.py')

    # We could do all this by importing the rcfile as well and checking
    # whether things are okay or not.
    rcline = "setup_paths.insert(0, '%s')" % setup_path

    # Run interactive mode unless someone specified a flag requiring otherwise
    interactive_mode = not (opts.register or opts.no_register)

    register_path = False

    if interactive_mode:
        answer = input('Register this setup path in %s? [y/n] ' % rcfilepath)
        if answer.lower() in ['y', 'yes']:
            register_path = True
        elif answer.lower() in ['n', 'no']:
            print('As you wish.')
        else:
            print('What do you mean by "%s"?  Assuming "n".' % answer)
    else:
        if opts.register:
            assert not opts.no_register
            register_path = True
        else:
            assert opts.no_register

    if register_path:
        # First we create the file
        if not os.path.exists(rcfiledir):
            os.makedirs(rcfiledir)
        if not os.path.exists(rcfilepath):
            tmpfd = open(rcfilepath, 'w')  # Just create empty file
            tmpfd.close()

        for line in open(rcfilepath):
            if line.startswith(rcline):
                print('It looks like the path is already registered in %s.'
                      % rcfilepath)
                print('File will not be modified at this time.')
                break
        else:
            rcfd = open(rcfilepath, 'a')
            print(rcline, file=rcfd)
            print('Setup path registered in %s.' % rcfilepath)
            # Need to explicitly flush/close the file so print_setups_info
            # sees the change in rc.py
            rcfd.close()

            print_setups_info(p)
    else:
        print('You can manually register the setups by adding the')
        print('following line to %s:' % rcfilepath)
        print()
        print(rcline)
        print()
    print('Installation complete.')
Example #41
0
                  type="str",
                  action='callback',
                  callback=make_list,
                  help="Bad antennas to flag.")
parser.add_option('-d',
                  '--foo',
                  type='string',
                  action='callback',
                  callback=foo_callback)

(options, args) = parser.parse_args()

print 'options', options
print 'args', args

if not parser.get_prog_name() == "test_OptionParser.py":
    #   Program was run from within python
    substation = 1
    flag_antenna = None
else:
    substation = options.substation
    flag_antenna = options.flag_antenna

print 'substation', substation
print 'flag_antenna', flag_antenna

#for item in sys.argv:
#    print item, type(item)

# def record_foo_seen(option, opt_str, value, parser):
#     parser.saw_foo = True
Example #42
0
class Project(object):
    environment_class = mirbuild.environment.Environment
    test_runner_class = mirbuild.test.BoostTestRunner
    default_dependency_class = None
    nocache_commands = set('meta'.split())
    noapply_commands = set('meta clean realclean distclean'.split())

    def __init__(self, name, **opts):
        self.__configurecache = mirbuild.cache.Cache(filename = 'configure.json')
        self.__options = opts
        self.__tests = None
        self.__versions = []
        self.__plugins = []
        self.__install = []
        self.__packagers = {}
        self.__test_runners = {}

        try:
            self.__configurecache.load()
        except Exception:
            # if we can't load the cache, so be it
            pass

        self.__parser = OptionParser(add_help_option = False)
        self.__general_options = OptionGroup(self.__parser, "General Options")
        self.__parser.add_option_group(self.__general_options)

        self.opt = LocalOptions('general')
        self.__configurecache.register(self.opt)

        if self.has_build_configs:
            self.add_option('-c|--configuration', dest = 'configuration', type = 'string', defaultstr = False,
                            metavar = 'CFG', help = 'selected build configuration')

        self.add_bool_option('-h|--help', dest = 'help', help = 'show this help message and exit', cache = False)

        for opt, dest, help in [('-d|--debug', 'debug', 'debug build.py execution'),
                                ('-q|--quiet', 'quiet', 'be quiet'),
                                ('-v|--verbose', 'verbose', 'verbose compiler/packaging output'),
                                ('--trace', 'trace', 'trace build process (if supported by the builder)'),
                                ('--nodeps', 'nodeps', "don't use dependencies from .mirbuildrc"),
                                ('--noconfig', 'noconfig', "don't use .mirbuildrc files at all"),
                                ('--noenv', 'noenv', "don't honour environment variables"),
                                ('--called-by-packager', 'called_by_packager', "option indicating that build.py is being invoked by a mirbuild packager"),
                                ]:
            self.add_bool_option(opt, dest = dest, help = help, cache = False)
            for o in opt.split('|'):
                if o in sys.argv:
                    self.opt.set_value(dest, True)

        self.__env = self.environment_class(name)
        self.__env.set_options(self.opt)
        self._deps = mirbuild.dependency.Dependencies(self.__env, self.default_dependency_class)

        if not self.opt.noconfig:
            self.__env.read_config()
            if self.__env.has('build', 'prefix'):
                self.opt.state_merge({ 'prefix': self.__env.get('build', 'prefix') })

        self.opt.ensure_value('jobs', self.__env.get('build', 'parallel', 'auto'))
        self.add_option('-j|--jobs', dest = 'jobs', type = 'string', metavar = 'NUM', cache = False,
                        help = 'number of parallel jobs to execute if possible')

        self.add_option('--prefix', dest = 'prefix', type = 'string', default = self.default_install_path,
                        metavar = 'PATH', help = 'install prefix for this project')
        self.add_option('--install-destdir', dest = 'install_destdir', type = 'string',
                        metavar = 'PATH', help = 'install files to this path')

        self.add_option('-b|--build-mode', dest = 'build_mode', type = 'choice', choices = ['in', 'out'], default = 'in',
                        metavar = "MODE", help = '[in|out] source build mode')

        for o in [('-I|--include-path', 'include', 'C_INCLUDE_PATH CPLUS_INCLUDE_PATH'.split()),
                  ('-L|--library-path', 'library', 'LIBRARY_PATH'.split())]:
            var = o[1] + '_path'
            if hasattr(self, 'add_' + var):
                path = []
                if not self.opt.noenv:
                    for e in o[2]:
                        path += [x for x in os.environ.get(e, '').split(os.path.pathsep) if x]
                path += [x for x in self.env.get('build', var, '').split(os.path.pathsep) if x]
                self.opt.state_merge({ var: path })
                self.add_option(o[0], type = 'string', dest = var, multi = True,
                                metavar = 'PATH', help = 'use additional ' + o[1] + ' path')

    @property
    def _configure_cache(self):
        return self.__configurecache

    @property
    def _option_parser(self):
        return self.__parser

    @property
    def default_install_path(self):
        return '/usr/local'

    @property
    def ident(self):
        return self.__parser.get_prog_name()

    @property
    def options(self):
        return self.__options

    @property
    def env(self):
        return self.__env

    @property
    def commands(self):
        return self.methlist('run_(\w+)')

    @property
    def build_configurations(self):
        return self.methlist('configure_(\w+)')

    @property
    def tests(self):
        return self.__tests

    @property
    def packager(self):
        return self.__packagers[self.opt.packager]

    @property
    def project_name(self):
        return self.__env.project_name

    @property
    def build_config(self):
        return getattr(self.opt, 'configuration', None)

    @property
    def has_thrift_dependency(self):
        # We need to know if there's a thrift dependency as we'll need to
        # configure some additional things for Visual Studio if we do
        # [mhx] We could cache the result, but I'd rather not bother with that now...
        return self._deps.any_is_a(mirbuild.ThriftDependency)

    def prefixpath(self, path):
        if os.path.isabs(path):
            return path
        else:
            return os.path.join(self.opt.prefix, path)

    def installpath(self, path, isdir = False, mkdir = False):
        destdir = path if isdir else os.path.split(path)[0]
        if os.path.isabs(destdir):
            if self.opt.install_destdir is not None:
                destdir = os.path.join(self.opt.install_destdir, rootrelpath(destdir))
            else:
                destdir = destdir
        else:
            if self.opt.install_destdir is not None:
                destdir = os.path.join(self.opt.install_destdir, rootrelpath(self.opt.prefix), destdir)
            else:
                destdir = os.path.join(self.opt.prefix, destdir)

        if mkdir:
            try:
                os.makedirs(destdir)
            except OSError as ex:
                if ex.errno != errno.EEXIST:
                    raise

        return destdir if isdir else os.path.join(destdir, os.path.split(path)[1])

    def __usage(self):
        usage = 'Usage: %prog [Options] <Command>'
        usage += '\n\nCommands: {0}'.format(', '.join(self.commands))
        if self.has_build_configs:
            usage += '\n\nBuild Configurations: {0}'.format(', '.join(map(lambda x: (x + ' [*]') \
                   if x == self.__default_build_config() else x, self.build_configurations)))
        return usage

    def __default_build_config(self):
        if not self.has_build_configs:
            return None
        if self.opt.configuration is not None:
            return self.opt.configuration
        try:
            return self.env.get('build', 'configuration')
        except Exception:
            pass
        if 'release' in self.build_configurations:
            return 'release'
        return self.build_configurations[0]

    def methlist(self, match):
        list = []
        run = re.compile(match)
        for method in dir(self):
            m = run.match(method)
            if m is not None and getattr(self, method) is not None:
                list.append(m.group(1))
        list.sort()
        return list

    def add_option(self, *args, **kw):
        self.opt.add_option(self.__general_options, *args, **kw)

    def add_bool_option(self, *args, **kw):
        self.opt.add_bool_option(self.__general_options, *args, **kw)

    def depends(self, *deps):
        self._deps.add(*deps)

    def test(self, *args, **kwargs):
        filt = kwargs.get('filter', lambda x: True)
        recurse = kwargs.get('recurse', True)
        runner_class = kwargs.get('runner', self.test_runner_class)
        test_builders = []
        if self.__tests is None:
            self.__tests = []

        for arg in args:
            if isinstance(arg, mirbuild.test.TestBuilder):
                test_builders.append(arg)
            elif isinstance(arg, basestring):
                dirs = []
                for e in glob.glob(arg):
                    if os.path.isdir(e):
                        if filt(e) and self.test_builder_class.looks_like_test_dir(e):
                            dirs.append(e)
                        if recurse:
                            for root, ds, fs in os.walk(e):
                                for d in ds:
                                    path = os.path.join(root, d)
                                    if filt(path) and self.test_builder_class.looks_like_test_dir(path):
                                        dirs.append(path)
                dirs.sort()
                for d in dirs:
                    test_builders.append(self.test_builder_class(self.env, d))
            else:
                test_builders.append(self.test_builder_class(self.env, *arg))

        if test_builders:
            if runner_class.name not in self.__test_runners:
                self.__test_runners[runner_class.name] = runner_class(self.env)
            runner = self.__test_runners[runner_class.name]

            for tb in test_builders:
                self.__tests.append(mirbuild.test.TestWrapper(builder = tb, runner = runner))

    def package(self, *args):
        for arg in args:
            assert isinstance(arg, mirbuild.packaging.Packaging)
            assert not self.__packagers.has_key(arg.name)
            self.__packagers[arg.name] = arg

    def install(self, source, destdir, glob = True):
        i = InstallRule()
        i.source = [source] if isinstance(source, basestring) else source
        i.destdir = destdir
        i.glob = glob
        self.__install.append(i)

    def __install_files(self):
        for i in self.__install:
            destdir = self.installpath(i.destdir, isdir = True, mkdir = True)

            source = []

            for src in i.source:
                if i.glob:
                    source += glob.glob(src)
                else:
                    source.append(src)

            for src in source:
                dst = os.path.join(destdir, os.path.split(src)[1])
                self.env.vsay('installing {0} -> {1}'.format(src, dst))
                if os.path.isdir(src):
                    shutil.copytree(src, dst, symlinks = True)
                else:
                    shutil.copy2(src, dst)

    def version(self, file = os.path.join('src', 'version.h'), info = None, **opts):
        if isinstance(file, basestring):
            file = mirbuild.version.VersionFileFactory.create(self.env, file, **opts)
        if info is None:
            info = mirbuild.version.VersionInfoFactory.create()
        assert isinstance(file, mirbuild.version.VersionFile)
        assert isinstance(info, mirbuild.version.VersionInfo)
        self.__versions.append({ 'file': file, 'info': info })

    def add_plugin(self, *args):
        for arg in args:
            assert isinstance(arg, mirbuild.plugin.Plugin)
            self.__plugins.append(arg)

    def _run_plugins(self, meth, reverse = False):
        for plugin in reversed(self.__plugins) if reverse else self.__plugins:
            self.env.dbg("running plugin method {0}.{1}".format(plugin.__class__.__name__, meth))
            getattr(plugin, meth)(self)

    @property
    def has_build_configs(self):
        return len(self.build_configurations) > 0

    def __expand_command(self, raw):
        if raw in self.commands:
            return raw
        cand = [cmd for cmd in self.commands if cmd.startswith(raw)]
        if len(cand) == 1:
            return cand[0]
        raise RuntimeError('{0} command "{1}".'.format('Invalid' if not cand else 'Ambiguous', raw))

    def run_has(self, what, arg):
        if what in ['command']:
            raise SystemExit(0 if arg in self.commands else 1)
        if what in ['config', 'configuration']:
            raise SystemExit(0 if arg in self.build_configurations else 1)
        raise SystemExit(2)

    def run(self):
        try:
            if self.__tests is None:
                self.test('test')

            if not self.__packagers:
                self.package(*mirbuild.packaging.PackagingFactory.create_all(self.env))

            dc = mirbuild.cache.Cache('dependencies')
            self._deps.set_cache(dc)
            self.__configurecache.register(dc)

            rc = mirbuild.cache.Cache('test_runners')
            for runner in self.__test_runners.itervalues():
                try:
                    runner.set_cache(rc)
                except Exception as ex:
                    sys.stderr.write(str(ex) + '\n')
            self.__configurecache.register(rc)

            self._deps.add_options(self.__parser, nomerge = self.opt.nodeps)

            for name, runner in self.__test_runners.iteritems():
                if self.env.has_section('test:' + name):
                    runner.state_merge(self.env.get_section('test:' + name))
                runner.add_options(self.__parser)

            if self.__packagers:
                self.add_option('--packager', dest = 'packager', type = 'choice',
                                choices = self.__packagers.keys(), defaultstr = len(self.__packagers) == 1,
                                default = self.__packagers.keys()[0] if len(self.__packagers) == 1 else None,
                                metavar = 'PKG', help = 'selected packager')
                for name, pkg in self.__packagers.iteritems():
                    sec = 'packaging:' + name
                    if self.env.has_section(sec):
                        pkg.state_merge(self.env.get_section(sec))
                    pkg.add_options(self.__parser)
                self.run_package = self.do_package

            self.__parser.set_usage(self.__usage())

            args = self.__parser.parse_args()[1]

            if self.has_build_configs:
                self.opt.ensure_value('configuration', self.__default_build_config())

            if self.opt.help or len(args) < 1:
                self.__parser.print_help()
                raise SystemExit(0)

            if self.has_build_configs and self.build_config not in self.build_configurations:
                raise RuntimeError('Invalid build configuration "{0}".'.format(self.build_config))

            command = self.__expand_command(args[0])
            command_method = getattr(self, 'run_' + command)

            if command not in self.noapply_commands:
                self.__apply_paths()
                self._deps.apply(self)

            if command not in self.nocache_commands:
                self.__configurecache.save()

            self.env.vsay('''******************************
   Config : {0}
   Action : {1}
******************************'''.format(self.build_config if self.has_build_configs else '(none)', command))

            command_method(*args[1:])

        except RuntimeError as ex:
            if self.opt.debug:
                raise
            sys.stderr.write('*** ERROR: ' + str(ex) + '\n')
            raise SystemExit(1)

        except KeyboardInterrupt:
            if self.opt.debug:
                raise
            sys.stderr.write('*** INTERRUPTED\n')
            raise SystemExit(1)

    def run_meta(self):
        meta = {
            'project': self.project_name,
            'commands': self.commands,
            'dependencies': self._deps.meta,
        }
        if self.__packagers:
            meta['packaging'] = {}
            for name, p in self.__packagers.iteritems():
                meta['packaging'][name] = p.meta
        try:
            info = mirbuild.version.VersionInfoFactory.create()
            meta['version'] = info.upstream_version()
        except RuntimeError:
            pass
        print json.dumps(meta, indent = 4)

    def run_build(self):
        self.run_configure()
        self._run_plugins('pre_build')
        self._run_plugins('build')
        self.do_build()
        self._run_plugins('post_build')

    def run_test(self):
        self.run_build()
        self._run_plugins('pre_test')
        self._run_plugins('test')
        self.do_test()
        self._run_plugins('post_test')

    def run_install(self):
        self.run_build()
        self._run_plugins('pre_install')
        self._run_plugins('install')
        self.do_install()
        self.__install_files()
        self._run_plugins('post_install')

    # TODO
    # def run_coverage(self):
    #     self.run_test()
    #     self.do_coverage()

    # this is just an alias
    def run_distclean(self):
        self.run_realclean()

    def run_realclean(self):
        for t in self.tests:
            t.clean()
        self._run_plugins('pre_realclean', reverse = True)
        self.do_realclean()
        self._run_plugins('realclean', reverse = True)
        self._run_plugins('post_realclean', reverse = True)
        self.env.remove_files(self.__configurecache.filename)
        self.env.remove_trees('build')
        for v in self.__versions:
            v['file'].clean()

    def run_clean(self):
        self._run_plugins('pre_clean', reverse = True)
        self.do_clean()
        self._run_plugins('clean', reverse = True)
        self._run_plugins('post_clean', reverse = True)
        for t in self.tests:
            t.clean()

    def __apply_paths(self):
        for opt in ['include_path', 'library_path']:
            meth = getattr(self, 'add_' + opt, None)
            if meth is not None:
                for path in getattr(self.opt, opt):
                    meth(mirbuild.dependency.CLibraryDependency.validated_path(path, env = self.env))

    def run_configure(self):
        for v in self.__versions:
            v['file'].generate(v['info'])
        self._run_plugins('pre_configure')
        self._run_plugins('configure')
        self.do_configure()
        self._run_plugins('post_configure')

    def do_test(self):
        for t in self.tests:
            t.configure()
            t.build()
        obs = mirbuild.test.TestObserver()
        for t in self.tests:
            t.run(obs)
        if obs.num_total > 0:
            self.env.say(obs.report())
            if obs.num_failed > 0:
                raise SystemExit(1)
        elif self.tests:
            raise RuntimeError('No test runs observed.')

    def do_package(self):
        self._run_plugins('pre_package')
        self.prepare_package()
        self._run_plugins('package')
        self.packager.package()
        self._run_plugins('post_package')

    def prepare_package(self):
        pass
Example #43
0
        Option("--samples-per-bin", type=int, default=30,
            help="Samples per bin [default: %default]"),
        Option("--objid",
            help="Event ID to be stored in FITS header [default: %default]"),
    ]
)
opts, args = parser.parse_args()
infilename = command.get_input_filename(parser, args)

if opts.output is None:
    parser.error('--output: missing required argument')


# Late imports.
import numpy as np
import lalinference.fits
import lalinference.bayestar.postprocess

samples = np.recfromtxt(infilename, names=True)
theta = 0.5*np.pi - samples['dec']
phi = samples['ra']

p = lalinference.bayestar.postprocess.adaptive_healpix_histogram(
    theta, phi, opts.samples_per_bin,
    nside=opts.nside, max_nside=opts.max_nside)

# Write output to FITS file.
lalinference.fits.write_sky_map(opts.output, p,
    creator=parser.get_prog_name(), objid=opts.objid,
    gps_time=samples['time'].mean())
from lalinference.bayestar import filter
from lalinference.bayestar import timing

# Other imports.
import numpy as np


progress = pylal.progress.ProgressBar()

# Open output file.
progress.update(-1, 'setting up output document')
out_xmldoc = ligolw.Document()
out_xmldoc.appendChild(ligolw.LIGO_LW())

# Write process metadata to output file.
process = ligolw_process.register_to_xmldoc(out_xmldoc, parser.get_prog_name(),
    opts.__dict__, ifos=opts.detector, comment="Simulated coincidences")

# Add search summary to output file.
all_time = segments.segment(
    [glue.lal.LIGOTimeGPS(0), glue.lal.LIGOTimeGPS(2e9)])
search_summary_table = lsctables.New(lsctables.SearchSummaryTable)
out_xmldoc.childNodes[0].appendChild(search_summary_table)
summary = ligolw_search_summary.append_search_summary(out_xmldoc, process,
    inseg=all_time, outseg=all_time)

# Read PSDs.
progress.update(-1, 'reading ' + opts.reference_psd)
xmldoc = ligolw_utils.load_filename(
    opts.reference_psd, contenthandler=lal.series.PSDContentHandler)
psds = lal.series.read_psd_xmldoc(xmldoc)
Example #45
0
						 simplestats.stdError(cfitnesses) ]
			out.writerow(row)
		fout.flush()

def determineMaxCountFromData(inName):
	try:
		finalBest = inName.replace(".bestHistory.csv", ".finalBests.csv").replace(".objectiveFunctionHistory.csv", ".finalBests.csv")
		with open(finalBest,"r") as fin:
			dr = csv.DictReader(fin, quoting=csv.QUOTE_ALL)	
			return int(dr.next()['evaluation'])
	except:
		return None
		
if __name__ == '__main__':
	parser = OptionParser()
	parser.set_usage(parser.get_prog_name() + " input_file_or_wildcard_pattern")
	parser.set_description("""Takes BehaviorSearch CSV output files (either xxx.bestHistory.csv or 
xxx.objectiveFunctionHistory.csv) that contains data from multiple search runs, 
and processes them into a new xxx.performance.csv file which gives a record of 
'average fitness' as the search progresses.""")
	parser.add_option("--min", action="store", type="int", dest="minEvalCount", default=0, help="# of evaluations to start at")
	parser.add_option("--inc", action="store", type="int", dest="increment", default=1, help="increment for the # of evaluations")
	parser.add_option("--max", action="store", type="int", dest="maxEvalCount", help="highest number of evaluations to go up to.")

	options , filepatterns = parser.parse_args()
	if (len(filepatterns) == 0): # or not options.maxEvalCount):
		parser.print_help()
		sys.exit(0)

	filenames = []
	for fPat in filepatterns:
Example #46
0
                  help="File size to copy (M or G) for >Mb or >Gb sizes.")
parser.add_option("-t",
                  "--test",
                  action="store_true",
                  default=False,
                  help="During test will not do rsync")
parser.add_option(
    "-i",
    "--id_obs",
    type="str",
    default=None,
    help="Give an the observation ID, with format D20120718T1215.")

(options, args) = parser.parse_args()

if not parser.get_prog_name() == "rsyncfratsfiles.py":
    #   Program was run from within python
    locus = ""
    size = 'G'
    test = False
    id_obs = None
else:
    locus = options.locus
    size = options.size
    test = options.test
    id_obs = options.id_obs

#------------------------------------------------------

if size != 'M':
    size_str = "'G 2'"
Example #47
0
        # Require Python >= 2.4
        import sys
        # Compare as a tuple so that, e.g., Python 3.x is not rejected merely
        # because its minor version number is below 4.
        if sys.version_info < (2, 4):
            cli.error("Python 2.4.0 or higher is required")

        report = DiskSpaceReport(args[0])
        if options.format not in ['text', 'html']:
            cli.error("Invalid format specified (%s)" % options.format)

        func = getattr(report, 'as_%s' % options.format)
        output = func()
        if not options.archive:
            print output
        else:
            if options.format == 'html':
                ext = 'html'
            else:
                ext = 'txt'
            t = time.localtime()
            d = os.path.join(os.path.expanduser(options.archive), time.strftime("%Y/%m", t))
            subprocess.call(['mkdir', '-p', d])
            f = open(os.path.join(d, time.strftime("report.%Y-%m-%d." + ext, t)), 'w')
            f.write(output)
            f.close()
    except Exception, e:
        if options.debug:
            raise
        else:
            cli.print_usage()
            cli.exit(2, "%s: %s\n" % (cli.get_prog_name(), e))
Example #48
0
        for sngl_inspiral in sngl_inspirals)

    # Loop over sky localization methods
    for method in opts.method:
        log.info("%s:method '%s':computing sky map", coinc.coinc_event_id, method)
        if opts.chain_dump:
            chain_dump = '%s.chain.npy' % int(coinc.coinc_event_id)
        else:
            chain_dump = None
        try:
            sky_map, epoch, elapsed_time = ligolw_sky_map.ligolw_sky_map(
                sngl_inspirals, approximant, amplitude_order, phase_order, f_low,
                opts.min_distance, opts.max_distance, opts.prior_distance_power,
                psds=psds, method=method, nside=opts.nside, chain_dump=chain_dump)
        except (ArithmeticError, ValueError):
            log.exception("%s:method '%s':sky localization failed", coinc.coinc_event_id, method)
            count_sky_maps_failed += 1
            if not opts.keep_going:
                raise
        else:
            log.info("%s:method '%s':saving sky map", coinc.coinc_event_id, method)
            fits.write_sky_map('%s.%s.fits.gz' % (int(coinc.coinc_event_id), method),
                sky_map, objid=str(coinc.coinc_event_id), gps_time=float(epoch),
                creator=parser.get_prog_name(), runtime=elapsed_time,
                instruments=instruments, nest=True)


if count_sky_maps_failed > 0:
    raise RuntimeError("{0} sky map{1} did not converge".format(
        count_sky_maps_failed, 's' if count_sky_maps_failed > 1 else ''))
parser = OptionParser()
parser.add_option('-p',
                  '--pol',
                  dest='poldeg',
                  type="int",
                  default=1,
                  help="polinom degree")

(options, args) = parser.parse_args()

if __name__ == "__main__":
    if options.poldeg == 1:
        pol01()
    elif options.poldeg == 2:
        pol02()
    else:
        parser.print_help()
        print('parser.usage = ', parser.usage)
        print(options)
        print(type(options))
        print(args)
        print(type(args))
        print('len(args) = ', len(args))
        print('type(options.poldeg)        = ', type(options.poldeg))
        print('parser.get_usage()          = ', parser.get_usage())
        print('parser.get_default_values() = ', parser.get_default_values())
        print('parser.get_prog_name()      = ', parser.get_prog_name())
        #print(parser.get_version())
        #print(parser.get_description())
        #print(parser.get_option_group())
Example #50
0
    fits_nest = True

    if not args.enable_distance_map:
        hpmap = skypost.as_healpix(args.nside, nest=fits_nest, fast=not(args.slowsmoothskymaps))
    else:
        print('Constructing 3D clustered posterior.')
        try:
          skypost3d = sac.Clustered3DKDEPosterior(np.column_stack((data['ra'], data['dec'], data['dist'])))
        except:
          print("ERROR, cannot use skypost3d with LIB output. Exiting..\n")
          import sys
          sys.exit(1)

        print('Producing distance map')
        hpmap = skypost3d.as_healpix(args.nside, nest=fits_nest)
    names=data.dtype.names 
    if 'time' in names:
      gps_time=data['time'].mean()
    elif 'time_mean' in names:
      gps_time=data['time_mean'].mean()
    elif 'time_maxl' in names:
      gps_time=data['time_maxl'].mean()
    else:
      print("Cannot find time, time_mean, or time maxl variable in posterior. Not saving sky_pos obj.\n")
      exit(0)

    fits.write_sky_map(os.path.join(args.outdir, args.fitsoutname),
                       hpmap, creator=parser.get_prog_name(),
                       objid=args.objid, gps_time=gps_time,
                       nest=fits_nest)
Example #51
0
def uniq(alist):  # remove duplicates, preserve order
    seen = {}  # avoid shadowing the built-in 'set'
    return [seen.setdefault(e, e) for e in alist if e not in seen]


def stripQuotes(s):
    return s.strip('" \n\t')


####################################################
## Main program starts here...
####################################################

if __name__ == '__main__':
    parser = OptionParser()
    parser.set_usage(parser.get_prog_name() +
                     " bsdata1.csv bsdata2.csv [...] ")
    parser.set_description(
        """Combines two or more NetLogo BehaviorSpace .csv files, and spits to stdout.

Note that you can specify wildcards, such as "bsdata*.csv" 
""")
    parser.add_option("-f",
                      "--final",
                      action="store_true",
                      dest="finalTicksOnly",
                      help="Only pull the final tick row of each experiment.")

    options, filepatterns = parser.parse_args()
    if (len(filepatterns) == 0):
        parser.print_help()
Example #52
0
def main():
    parser = OptionParser()
    parser.set_usage(parser.get_prog_name() +
                     " -a exp1.searchConfig.xml exp2.searchConfig\n" + " OR " +
                     parser.get_prog_name() +
                     " -m exp1.YYY.csv exp2.YYY.csv [...]")
    parser.set_description(
        """Combines BehaviorSearch .csv files (that were created using the same search configuration.)

In -a (auto mode), it will use the *.searchConfig.xml files you specify to find all of the matching CSV search results files, and combine them
into a new file, named based on the common filename stem of the combined files. (i.e. the files: xxxx_00.yyy.csv, xxxx_01.yyy.csv,  => xxxx.yyy.csv)

In -m (manual mode), only those CSV files that you manually specify will be combined, and the results will go to stdout.
(Note that you can specify wildcards, such as "data*.xml") 
""")
    parser.add_option("-m",
                      "--manual",
                      action="store_true",
                      dest="manual",
                      help="manual mode")
    parser.add_option(
        "-a",
        "--autosort",
        action="store_true",
        dest="autosort",
        help=
        "(auto-sort mode) use XXX.searchConfig.xml files to automatically choose which CSV files should be combined."
    )
    parser.add_option(
        "-p",
        "--preserve",
        action="store_true",
        dest="preserve",
        help=
        "keep the original search number indexes, instead of renumbering consecutively."
    )
    parser.add_option("-d",
                      "--delete",
                      action="store_true",
                      dest="delete",
                      help="delete the input files, after combining")

    options, filepatterns = parser.parse_args()
    if (options.manual == options.autosort):
        print "ERROR: You must specify EITHER -m (manual) or -a (autosort) mode."
        print
        parser.print_help()
        sys.exit(0)

    if (len(filepatterns) == 0):
        parser.print_help()
        sys.exit(0)

    filenames = []
    for fPat in filepatterns:
        filenames.extend(glob.glob(fPat))

    filenames = uniq(filenames)

    if (len(filenames) < 1):
        parser.print_help()
        sys.exit(1)

    if (options.autosort):
        autosort_and_combine(filenames, options.preserve, options.delete)
    else:  # (options.manual == True)
        combine(filenames, options.preserve, options.delete, sys.stdout)
	numSearches = [str(x) for x in sorted(numSearches)]
	figtext(0.97,0.02, "[avg. of %s searches]"%(" or ".join(numSearches)), 
			fontsize=9, ha='right')
	#legend(loc = 'lower right')
	import matplotlib.font_manager
	prop = matplotlib.font_manager.FontProperties(size=10)
	legend(loc=options.legendloc, prop=prop)
	#axis(ymax=0.6)

	savefig(outputFileName, dpi=options.dpi)

	###########################

if __name__ == '__main__':
	parser = OptionParser()
	parser.set_usage(parser.get_prog_name() + " output_graph.png file1.performance.csv ...")
	parser.set_description("""Takes xxx.performance.csv files and creates a graphic plot of the data.
The output type is determined by the output_graph file extension (.png, .pdf, .eps, .svg, etc).
(Note filename wildcards are allowed for input files.)""")
	parser.add_option("-e", "--errorbars", action="store_true", dest="errorbars", help="include error bars (95% confidence interval on the mean)")
	parser.add_option("--stdevbars", action="store_true", dest="stdevbars", help="include error bars using stdev (95% conf. interval for each search)")
	parser.add_option("-c", "--checked", action="store_true", dest="checked", help="also include checked fitness values in the plot")
	parser.add_option("-o", "--only-checked", action="store_true", dest="onlychecked", help="only show checked fitness values in the plot")	
	parser.add_option("-i", "--interval", action="store", type="int", dest="interval", default=1, help="only plot every Nth row of the input data file.")
	parser.add_option("--min", action="store", type="int", dest="min", default=0, help="start plotting at the Nth row of the input data file.")
	parser.add_option("--max", action="store", type="int", dest="max", default=None, help="stop plotting at the Nth row of the input data file.")
	parser.add_option("--ymin", action="store", type="float", dest="ymin", default=None, help="min y-value for plot window")
	parser.add_option("--ymax", action="store", type="float", dest="ymax", default=None, help="max y-value for plot window")
	parser.add_option("--alpha", action="store", type="float", dest="alpha", default=1.0, help="alpha transparency for plotting")
	parser.add_option("--ealpha", action="store", type="float", dest="ealpha", default=1.0, help="alpha transparency for plotting error bars")