Example #1
0
def checkArguments():
    """Parse and validate the command-line options for this tool.

    Returns the ``(opts, args)`` tuple from OptionParser on success.  Exits
    via ``argumentExit()`` when a mandatory option (token, search pattern)
    is missing or when any supplied email id is malformed.  On success
    ``opts.Users`` is normalized to a de-duplicated list of email ids.
    """
    ynOptions = ['y', 'n', 'Y', 'N']
    optparser = OptionParser()
    optparser.add_option(
        "-e",
        "--Env",
        dest="Env",
        default="api",
        help="Mandatory - provide the Qubole environment, default value is api"
    )
    optparser.add_option("-t",
                         "--Token",
                         dest="Token",
                         default="",
                         help="Mandatory - provide your Qubole account token")
    optparser.add_option(
        "-p",
        "--SearchPattern",
        dest="SrchPattern",
        default="",
        help=
        "Mandatory - search multiple string or pattern separated by ~ character, case insensitive search by default"
    )
    optparser.add_option("-u",
                         "--Users",
                         dest="Users",
                         default="",
                         help="Optional - comma separated list of email ids")
    optparser.add_option(
        "-s",
        "--CaseSensitiveSearch",
        dest="CaseSensitiveSrch",
        default="N",
        choices=ynOptions,
        help="Optional - case sensitive search , default value is N")
    optparser.add_option(
        "-c",
        "--CommonDirectory",
        dest="CommonDirectory",
        default="Y",
        choices=ynOptions,
        help="Optional - include common directory , default value is Y")
    (opts, args) = optparser.parse_args()
    # Normalize the environment name: lowercase, no embedded spaces.
    opts.Env = opts.Env.lower().replace(" ", "")
    if opts.Env == "":
        # Note: this only warns; execution continues with an empty Env.
        print("\nPlease specify an environment.")
    # The Y/N choice options are matched case-insensitively downstream.
    opts.CaseSensitiveSrch = opts.CaseSensitiveSrch.upper()
    opts.CommonDirectory = opts.CommonDirectory.upper()
    if (opts.Token is None or opts.Token == '' or opts.SrchPattern is None
            or opts.SrchPattern == ''):
        # Mandatory options missing: print usage and exit.
        argumentExit(optparser)
    elif len(opts.Users) > 0:
        # remove spaces and create a list
        userList = opts.Users.replace(" ", "").split(",")
        # remove duplicates
        userList = list(set(userList))
        # Drop empty strings left by stray/trailing commas.  (The previous
        # ''.join(userList).split() concatenated every address into a single
        # string, so the empty-string filter never worked and multiple
        # addresses were fused together.)
        userList = [u for u in userList if u]
        if len(userList) > 0:
            # Minimal sanity check: something@something.something
            p = re.compile(r"[^@]+@[^@]+\.[^@]+")
            for user in userList:
                if not p.match(user):
                    print("\nInvalid email id -  %s \n" % user)
                    argumentExit(optparser)
        else:
            print("\nEmail ids cannot be empty strings.\n")
            argumentExit(optparser)
        opts.Users = userList
        return (opts, args)
    else:
        return (opts, args)
Example #2
0
from array import array

# Run ROOT in batch (headless) mode so no GUI windows are opened.
# NOTE(review): ROOT is not imported in this chunk - presumably imported
# earlier in the file; verify.
ROOT.gROOT.SetBatch(True)

import utilities
# The module name is immediately shadowed by a util() helper instance.
utilities = utilities.util()

import sys
# NOTE(review): 'os' is not imported in this chunk either - presumably
# imported earlier or provided by the star import below; confirm.
sys.path.append(os.getcwd() + "/plotUtils/")
from utility import *

if __name__ == "__main__":

    # Hide the per-histogram statistics box on plots.
    ROOT.gStyle.SetOptStat(0)
    from optparse import OptionParser
    parser = OptionParser(usage='%prog th2.root [options] ')
    parser.add_option('-o','--outdir',     dest='outdir',     default='',   type='string', help='outdput directory to save plots')
    parser.add_option(     '--palette'  , dest='palette',      default=55, type=int, help='Set palette: use a negative number to select a built-in one, otherwise the default is 55 (kRainbow)')
    parser.add_option(     '--rebinEtaPt'  , dest='rebinEtaPt',      default=(0,0), nargs=2, type=int, help='Rebinnign factor for eta-pt distribution. Default is none, equivalent to 1,1')
    (options, args) = parser.parse_args()

    # Track sum of squared weights so new histograms carry proper errors.
    ROOT.TH1.SetDefaultSumw2()

    # At least one positional argument (the input ROOT file) is required.
    if len(args) < 1:
        parser.print_usage()
        quit()

    outdir = options.outdir
    # NOTE(review): strings are immutable, and the return value of
    # addStringToEnd() is discarded here, so outdir may still lack the
    # trailing "/"; confirm whether this should read
    # outdir = addStringToEnd(outdir, "/", notAddIfEndswithMatch=True).
    addStringToEnd(outdir,"/",notAddIfEndswithMatch=True)
    createPlotDirAndCopyPhp(outdir)
Example #3
0
def new_option_parser():
    """Build and return the command-line parser for this script.

    The only option is -N, the number of stars to simulate (default 1000).
    """
    from optparse import OptionParser
    parser = OptionParser()
    parser.add_option(
        "-N",
        dest="N",
        type="int",
        default=1000,
        help="number of stars [1000]",
    )
    return parser
Example #4
0
        See Analyzer.Write for more information.
        """
        for analyzer in self._analyzers:
            analyzer.write(self.setup)
        self.setup.close()


if __name__ == '__main__':

    import pickle
    import sys
    import os
    from heppy.framework.heppy_loop import _heppyGlobalOptions
    from optparse import OptionParser
    parser = OptionParser(
        usage='%prog cfgFileName compFileName [--options=optFile.json]')
    parser.add_option('--options',
                      dest='options',
                      default='',
                      help='options json file')
    (options, args) = parser.parse_args()

    if options.options != '':
        jsonfilename = options.options
        jfile = open(jsonfilename, 'r')
        opts = json.loads(jfile.readline())
        for k, v in opts.iteritems():
            _heppyGlobalOptions[k] = v
        jfile.close()

    if len(args) == 1:
Example #5
0
def main():
    """Parse command-line options and run the echo client."""
    # Wrap stdout so the unicode default message prints cleanly.
    sys.stdout = codecs.getwriter('utf-8')(sys.stdout)

    parser = OptionParser()
    parser.add_option('-s', '--server_host', dest='server_host', type='string',
                      default='localhost', help='server host')
    parser.add_option('-p', '--server_port', dest='server_port', type='int',
                      default=_UNDEFINED_PORT, help='server port')
    parser.add_option('-o', '--origin', dest='origin', type='string',
                      default='http://localhost/', help='origin')
    parser.add_option('-r', '--resource', dest='resource', type='string',
                      default='/echo', help='resource path')
    parser.add_option('-m', '--message', dest='message', type='string',
                      help='comma-separated messages to send excluding "%s" '
                           'that is always sent at the end' % _GOODBYE_MESSAGE)
    parser.add_option('-q', '--quiet', dest='verbose', action='store_false',
                      default=True, help='suppress messages')
    parser.add_option('-t', '--tls', dest='use_tls', action='store_true',
                      default=False, help='use TLS (wss://)')
    parser.add_option('-k', '--socket_timeout', dest='socket_timeout',
                      type='int', default=_TIMEOUT_SEC,
                      help='Timeout(sec) for sockets')

    (options, unused_args) = parser.parse_args()

    # The default port number depends on whether TLS is used.
    if options.server_port == _UNDEFINED_PORT:
        options.server_port = (_DEFAULT_SECURE_PORT if options.use_tls
                               else _DEFAULT_PORT)

    # optparse doesn't seem to handle non-ascii default values, so the
    # default message ("Hello, Japan" in Japanese) is set here instead.
    if not options.message:
        options.message = u'Hello,\u65e5\u672c'

    EchoClient(options).run()
def run():
	"""Command-line front end for the dispatch server (Python 2).

	Depending on the options given, this uploads the working directory to
	the hosts listed in the mac-list file (-u), slices an input file and
	dispatches the pieces to worker threads on those hosts (-f), downloads
	results back (-d), slices a file locally (-s), or runs a status or
	maintenance command on every machine (-c).
	"""
	usage = "dispatch_server.py [-u][-f][-d][-p][-U]|[-c]|[-s]"
	optParser = OptionParser(usage)
	group = OptionGroup(optParser,"Auxiliary Funciton Options")
	group.add_option("-c","--command",action = "store",type="str",dest = "cmd",
						help='''status for process status
		 					kill for kill process 
							clresult for clear result directory 
							other string for you can input cmd yourself''')
	group.add_option("-s","--slice",action = "store",type="str",dest = "slicefile",nargs=2, help="file to slice")
	optParser.add_option_group(group)
	optParser.add_option("-d","--download",action = "store_true",dest = "downdir",help="input which directory you want to download")
	optParser.add_option("-f","--file",action = "store",type="str",dest = "filename", help="file to check")
	optParser.add_option("-p","--print",action = "store_true",dest = "outputinfo", help="if on, output the process infomation")
	optParser.add_option("-u","--upload",action = "store_false",dest = "uploaddir", help="if update")
	optParser.add_option("-U","--user",action = "store",type="str",dest = "username", default="ffff", help="username")
	options, args = optParser.parse_args()
	
	#pdb.set_trace()
	# The host list lives in the user's home directory; the first entry is
	# used as the host for the password lookup below.
	username = options.username
	dir = '/home/%s' %username
	maclistfile ='%s/dispatch/ffff_maclistfile.txt' %dir
	freelist = read_file(maclistfile)
	hostname = freelist[0]
	busylist = []	
	
	# NOTE(review): passwd is only assigned when -s/--slice is absent; if
	# -u, -f or -d were ever combined with -s the branches below would hit
	# an unbound 'passwd' - confirm those combinations never occur.
	if options.slicefile == None:	
		passwd = get_passwd(username, hostname)
	
	'''update executable program'''
	# -u: push the local directory to every host.  An errcode of 0 signals
	# failure here and aborts the loop early.
	if options.uploaddir != None:
		for i in range(0, len(freelist)):
			hostname = freelist[i].strip()
			errcode, errmsg = dir_upload(username, hostname, dir, passwd)
			if errcode == 0:
				print errmsg
				break
			print '%d.%s: %s' %(i, hostname, errmsg)
	
		print '%d host upload done' %(len(freelist))
	
	'''dispatch file and data check'''
	# -f: slice the input file across at most 20 workers, upload one piece
	# per host, and process each piece in its own thread.
	if options.filename != None:
		if(len(freelist)) > 20:
			worker = 20
		else:
			worker = len(freelist)
		filelist = data_slice(worker, options.filename)
		work_threads = []	


		for i in range(0, worker):
			hostname = freelist[i].strip()
		
			errcode, errmsg, todir = mk_todir(username, hostname, passwd, dir, worker)
			if errcode == 0:
				print errmsg
				break
			errcode, errmsg = file_upload(filelist[i], username, hostname, todir, passwd)
			if errcode == 0:
				print errmsg
				break
			
			work_thread = threading.Thread(target = work, args = (username, hostname, passwd, filelist[i]))
			work_threads.append(work_thread)
			print work_thread, hostname
			work_thread.start()
	
		# Wait for every dispatched worker thread to finish.
		for job in work_threads:
			print str(job)+' waiting!'
			job.join()
	
	#time.sleep(10)
	'''download result'''
	# -d: pull the result directory back from every host.
	if options.downdir != None:
		for i in range(0, len(freelist)):
			hostname = freelist[i].strip()
			errcode, errmsg = dir_download(username, hostname, dir, passwd)
			if errcode == 0:
				print errmsg
				break
			print '%d.%s: %s' %(i, hostname, errmsg)
	
		print '%d host download done' %(len(freelist))
	
	'''file slicing'''
	# -s: slice the file locally only; arguments are (filename, count).
	if options.slicefile != None:
		filelist = data_slice(int(options.slicefile[1]), options.slicefile[0])
		
	'''check the progress status or other command'''
	# -c: run the given command / status check on all machines.
	if options.cmd != None: 
		machines_stat_det(username, freelist, passwd, options.cmd)		
Example #7
0
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Author: chaomy
# @Date:   2017-06-28 00:35:14
# @Last Modified by:   chaomy
# @Last Modified time: 2018-07-23 15:14:07


from optparse import OptionParser
import gn_dd_data_hcp
import gn_dd_data_bcc

if __name__ == '__main__':
    # -t selects which dataset generator to run; -p passes extra parameters.
    usage = "usage:%prog [options] arg1 [options] arg2"
    opt_parser = OptionParser(usage=usage)
    opt_parser.add_option('-t', "--mtype", action="store", type="string",
                          dest="mtype")
    opt_parser.add_option('-p', "--param", action="store", type='string',
                          dest="fargs")
    (options, args) = opt_parser.parse_args()

    bcc_gen = gn_dd_data_bcc.gn_dd_data_bcc()
    hcp_gen = gn_dd_data_hcp.gn_dd_data_hcp()

    # Map the lowercased --mtype value to its data-generation routine.
    handlers = {
        'hcp': hcp_gen.write_hcp_straight_data,
        'hcporowan': hcp_gen.write_hcp_orawan_data,
        'hcpprec': hcp_gen.inplane_hcp_beta1_prec,
        'hcpten': hcp_gen.write_hcp_tensile_data,
        'bccscrew': bcc_gen.write_straight_screw_data,
    }
    handlers[options.mtype.lower()]()
Example #8
0
def main():
    """Extract phastcons conservation scores around the centers of the
    given BED regions and draw an averaged conservation plot.

    Exits with status 1 on missing/invalid arguments, missing BED files, or
    a missing/empty phastcons score directory.
    """
    usage = "usage: %prog <-d path> [options] <bed files> ..."
    description = "Draw conservation plot for many bed files."

    optparser = OptionParser(version="%prog 0.1",description=description,usage=usage,add_help_option=False)
    optparser.add_option('-H','--height', dest='height',type='int',default=10, help="height of plot")
    optparser.add_option('-W','--width',dest='width',type='int',default=10, help="width of plot")
    optparser.add_option('-w',dest='w',type='int',default=1000, help="window width centered at middle of bed regions,default: 1000")
    optparser.add_option('-t','--title',dest='title',help="title of the figure. Default: 'Average Phastcons around the Center of Sites'",default= 'Average Phastcons around the Center of Sites')
    optparser.add_option('-d','--phasdb',dest='phasdb',help= 'The directory to store phastcons scores in the server')
    optparser.add_option('-o','--outimg',dest='outimg',help= 'output image file prefix')
    optparser.add_option("-l","--bed-label",dest="bedlabel",type="string",action="append",
                         help="the BED file labels in the figure. No space is allowed. This option should be used same times as -w option, and please input them in the same order as BED files. default: will use the BED file filename as labels.")
    optparser.add_option("-h","--help",action="help",help="Show this help message and exit.")

    (options,bedfiles) = optparser.parse_args()
    # Use integer division so pf_res stays an int on Python 3 as well
    # ("/" would yield a float and break downstream indexing); identical
    # behavior on Python 2.
    options.pf_res = options.w // 100 # get 100 points to plot
    options.w = options.pf_res * 100 # trim

    # Materialize a real list: on Python 3, map() returns a lazy iterator
    # that has no len() and can only be consumed once.
    bedfiles = [os.path.abspath(f) for f in bedfiles]

    bedfilenum = len(bedfiles)

    if bedfilenum < 1 or not options.phasdb:
        optparser.print_help()
        sys.exit(1)

    # Use the explicit labels only when one was supplied per BED file;
    # otherwise fall back to the file names.
    if options.bedlabel and len(options.bedlabel) == bedfilenum:
        bedlabel = options.bedlabel
    else:                               # or use the filename
        bedlabel = [os.path.basename(f) for f in bedfiles]

    if options.height < 10:
        error("Height can not be lower than 10!")
        sys.exit(1)
    if options.width < 10:
        error("Width can not be smaller than 10!")
        sys.exit(1)

    # check the files
    for f in bedfiles:
        if not os.path.isfile(f):
            error("%s is not valid!" % f)
            sys.exit(1)

    # check phastcons db
    if not os.path.isdir(options.phasdb):
        error("%s is not valid!" % options.phasdb)
        sys.exit(1)

    # change wd to phastcons db path; remember where we started so the
    # output image can be written back there.
    olddir = os.path.abspath('.')
    os.chdir(options.phasdb)

    phas_chrnames = []

    # Collect chromosome names from the available bigWig (.bw) score files.
    files_phasdb = os.listdir('.')
    for file_phasdb in files_phasdb:
        if file_phasdb.endswith('.bw'):
            # str.rstrip() strips a *set of characters*, not a suffix, and
            # would also eat trailing 'b'/'w'/'.' from chromosome names
            # (e.g. "chrb.bw" -> "chr"); slice the extension off instead.
            name = file_phasdb[:-len('.bw')]
            phas_chrnames.append(name)

    if not phas_chrnames:
        error("%s has no valid phastcons db bw files!" % options.phasdb)
        sys.exit(1)

    info("number of bed files: %d" % bedfilenum)

    avgValues = []

    # for each bed file
    for f in bedfiles:
        info("extract phastcons scores using %s" % f)
        scores = extract_phastcons(f,phas_chrnames, options.w, options.pf_res)
        avgValues.append(scores)
    if options.w == 4000:
        ## 100 points for 4000, 40bp resolution
        print("\t".join([str(avgValues[0][i]) for i in [12,25,38,50,62,75,88]]))
    elif options.w == 400:
        print("\t".join([str(avgValues[0][i]) for i in [45,48,50,52,55]]))

    makeBmpFile(avgValues,olddir, options.outimg ,options.height,options.width,options.w,options.pf_res,options.title,bedlabel)
################################################################################

from __future__ import print_function

import aerospike
import sys

from optparse import OptionParser

################################################################
# Option Parsing
################################################################

usage = "usage: %prog [options] key"

optparser = OptionParser(usage=usage, add_help_option=False)

optparser.add_option(
  "--help", dest="help", action="store_true",
  help="Displays this message.")

optparser.add_option(
  "-h", "--host", dest="host", type="string", default="127.0.0.1", metavar="<ADDRESS>",
  help="Address of Aerospike server.")

optparser.add_option(
  "-p", "--port", dest="port", type="int", default=3000, metavar="<PORT>",
  help="Port of the Aerospike server.")

optparser.add_option(
  "-n", "--namespace", dest="namespace", type="string", default="test", metavar="<NS>",
Example #10
0
from optparse import OptionParser

import pickle
import time
import sys


# Program identity used in the usage/version banners.
prog = 'pyRSA'
cmd = 'python3 pyrsa.py'
version = '0.1'
div = '\n------------------------------'

usage = cmd + ' -e <arquivo_em_texto_aberto> -o <saída_encriptada> [options]'
# Fix: a space was missing between the command and "-d", which rendered the
# second usage line as "...pyrsa.py-d ...".
usage += '\n' + cmd + ' -d <arquivo_encriptado> -o <saída_em_texto_aberto> [options].'

parser = OptionParser(prog=prog, usage=usage, version='%prog ' + version)

parser.add_option("-e", "--encrypt", dest="msg_filename", type='str',
                  help='arquivo aberto para ser encriptado')
parser.add_option("-d", "--decrypt", dest="encrypt_filename", type='str',
                  help='arquivo encriptado para ser decifrado')
parser.add_option('-o', '--output', dest='dest_filename', type='str',
                  help='arquivo de destino/saída.')
# Fix: the flag defaults were the *string* 'False', which is truthy, so the
# three boolean flags appeared enabled even when absent on the command line;
# use the boolean False instead.
parser.add_option("-l", "--list", dest="list", action='store_true', default=False,
                  help='não utiliza map-reduce para encriptar/desencriptar (mais lento - desligado por padrão)')
parser.add_option("-k", "--keys", dest="genkeys", action='store_true', default=False,
                  help='gera um novo par de chaves')
parser.add_option("-v", "--verbose", dest="verbose", action='store_true',
                  default=False, help='imprime o arquivo aberto')
Example #11
0
def main():
    """Entry point: parse the autoformat options and process the files.

    In --from-commit mode only the staged files are formatted; otherwise
    the positional shell patterns (or a default set) are expanded, scanned
    for supported suffixes, and each file is autoformatted.
    """
    parser = OptionParser()

    parser.add_option("--verbose", action="store_true", dest="verbose",
                      default=False, help="""Default is %default.""")
    parser.add_option("--from-commit", action="store_true", dest="from_commit",
                      default=False,
                      help="""From commit hook, do not descend into directories. Default is %default.""")
    parser.add_option("--check-only", action="store_true", dest="check_only",
                      default=False,
                      help="""For CI testing, check if it's properly formatted. Default is %default.""")
    parser.add_option("--no-progressbar", action="store_false", dest="progress_bar",
                      default=True,
                      help="""Disable progress bar outputs (if tqdm is installed).
Defaults to off.""")

    options, positional_args = parser.parse_args()

    if options.from_commit:
        # Commit-hook mode: format exactly the staged changes, nothing else.
        assert not positional_args
        for change in getStagedFileChangeDesc():
            autoformat(change["src_path"], git_stage=change)
        return

    if not positional_args:
        positional_args = ["bin", "nuitka", "setup.py", "tests/*/run_all.py"]

    my_print("Working on:", positional_args)

    # Expand each shell pattern into concrete filenames.
    expanded = []
    for pattern in positional_args:
        expanded.extend(resolveShellPatternToFilenames(pattern))

    filenames = list(
        scanTargets(
            expanded,
            suffixes=(".py", ".scons", ".rst", ".txt", ".j2", ".md", ".c", ".h"),
        )
    )

    if not filenames:
        tools_logger.sysexit("No files found.")

    changed_count = 0

    if options.progress_bar:
        enableProgressBar()

    for filename in wrapWithProgressBar(
        filenames, stage="Autoformat", unit="files"
    ):
        if autoformat(filename, git_stage=False, check_only=options.check_only):
            changed_count += 1

    if options.check_only and changed_count > 0:
        tools_logger.sysexit(
            """Error, bin/autoformat-nuitka-source would make changes to %d files, \
make sure to have commit hook installed."""
            % changed_count
        )
    elif changed_count > 0:
        tools_logger.info("autoformat: Changes to formatting of %d files" % changed_count)
    else:
        tools_logger.info("autoformat: No files needed formatting changes.")
Example #12
0
        return self.freq

    def set_freq(self, freq):
        # Cache the new frequency and retune channel 0 of the USRP sink.
        self.freq = freq
        self.uhd_usrp_sink_0.set_center_freq(self.freq, 0)

    def get_bw(self):
        # Return the currently configured bandwidth.
        return self.bw

    def set_bw(self, bw):
        # Cache the new bandwidth and apply it to channel 0 of the USRP sink.
        self.bw = bw
        self.uhd_usrp_sink_0.set_bandwidth(self.bw, 0)


if __name__ == '__main__':
    parser = OptionParser(option_class=eng_option, usage="%prog: [options]")
    parser.add_option("-s",
                      "--samplerate",
                      dest="samplerate",
                      help="Sample Rate",
                      default=100000)
    parser.add_option("-f",
                      "--freq",
                      dest="freq",
                      help="Frequency",
                      default=433000)
    parser.add_option("-g", "--gain", dest="gain", help="Gain", default=30)
    parser.add_option("-b",
                      "--bandwidth",
                      dest="bw",
                      help="Bandwidth",
Example #13
0
    rdOptions.put("rho", str(kwargs['eratio']))
    RemoveDegeneratedTriangles(liaison, rdOptions).compute()

    # Output
    MeshWriter.writeObject3D(liaison.getMesh(), kwargs['out_dir'], "")
    if kwargs['recordFile']:
        liaison.getMesh().getTrace().finish()


if __name__ == "__main__":
    """
    Clean an existing mesh: swap+valence+smooth+remove degenerated.
    """

    cmd = ("clean", "<inputDir> <outputDir>", "Clean an existing mesh")
    parser = OptionParser(usage="amibebatch %s [OPTIONS] %s\n\n%s" % cmd,
                          prog="clean")
    parser.add_option(
        "-a",
        "--angle",
        metavar="FLOAT",
        default=15.0,
        action="store",
        type="float",
        dest="coplanarityAngle",
        help="angle (in degrees) between face normals to detect "
        "feature edges (default: 15, superseded if -c is defined)")
    parser.add_option(
        "-c",
        "--coplanarity",
        metavar="FLOAT",
        action="store",
Example #14
0
        state = self.state
        self.dragpos = wx.Point(0, 0)
        client_area = state.frame.GetClientSize()
        self.zoom = min(float(client_area.x) / self.img.GetWidth(),
                        float(client_area.y) / self.img.GetHeight())
        self.need_redraw = True

    def full_size(self):
        '''show image at full size'''
        # Reset the drag offset, restore 1:1 zoom, and request a redraw.
        self.dragpos = wx.Point(0, 0)
        self.zoom = 1.0
        self.need_redraw = True

if __name__ == "__main__":
    from optparse import OptionParser
    parser = OptionParser("mp_image.py <file>")
    parser.add_option("--zoom", action='store_true', default=False, help="allow zoom")
    parser.add_option("--drag", action='store_true', default=False, help="allow drag")
    parser.add_option("--autosize", action='store_true', default=False, help="auto size window")
    (opts, args) = parser.parse_args()

    im = MPImage(mouse_events=True,
                 key_events=True,
                 can_drag = opts.drag,
                 can_zoom = opts.zoom,
                 auto_size = opts.autosize)
    img = cv.LoadImage(args[0])
    im.set_image(img, bgr=True)

    while im.is_alive():
        for event in im.events():
Example #15
0
  cand = [1] + [product(allfactors[:i+1]) for i in range(len(allfactors))]
 #return cand[-1], n/cand[-1]
  best = [cand[len(cand)/2], n/cand[len(cand)/2]]
  best.sort(reverse=True)
  return tuple(best)
# if len(cand)%2:
#   return cand[len(cand)/2], cand[len(cand)/2]
# return cand[len(cand)/2], cand[len(cand)/2 - 1]


if __name__ == '__main__':
  #print __doc__

  #XXX: note that 'argparse' is new as of python2.7
  from optparse import OptionParser
  parser = OptionParser(usage=__doc__)
  parser.add_option("-i","--iter",action="store",dest="step",metavar="INT",\
                    default=None,help="the largest iteration to plot")
  parser.add_option("-p","--param",action="store",dest="param",\
                    metavar="STR",default="[':']",
                    help="indicator string to select parameters")
  parser.add_option("-l","--label",action="store",dest="label",\
                    metavar="STR",default="['']",
                    help="string to assign label to y-axis")
  parser.add_option("-n","--nid",action="store",dest="id",\
                    metavar="INT",default=None,
                    help="id # of the nth simultaneous points to plot")
  parser.add_option("-c","--cost",action="store_true",dest="cost",\
                    default=False,help="also plot the parameter cost")
  parser.add_option("-g","--legend",action="store_true",dest="legend",\
                    default=False,help="show the legend")
Example #16
0
def do_main():
    """Drive the sqlcmdtest suite (Python 2).

    Walks the script directory for ".in" sqlcmd scripts, runs each one
    against a freshly launched VoltDB server, fuzzes the captured outputs
    for stable comparison, and diffs them against the stored baselines.
    Exits non-zero when any diff is found (unless refreshing baselines) or
    after purging scratch files in --purge_only mode.
    """
    parser = OptionParser()
    parser.add_option("-s", "--scripts", dest="script_dir", default="./scripts",
                      help="top level test case script directory")
    parser.add_option("-b", "--baselines", dest="baseline_dir", default="./baselines",
                      help="top level test output baseline directory")
    parser.add_option("-o", "--report_file", dest="report_file",
                      default="./sqlcmdtest.report",
                      help="report output file")
    parser.add_option("-r", "--refresh", dest="refresh",
                      action="store_true", default=False,
                      help="enable baseline refresh")
    parser.add_option("-p", "--purge_only", dest="purge_only",
                      action="store_true", default=False,
                      help="instead of running tests, purge temp scratch files from prior runs")
    # TODO add a way to pass non-default options to the VoltDB server and tweak the sqlcmd
    # command line options if/when these settings effect the connection string
    # (non-default ports. security, etc.)
    # TODO add a way to pass sqlcmd command line options to be used with all test scripts.
    (options, args) = parser.parse_args()

    # Purge mode: clean up scratch files and stop without running any tests.
    if options.purge_only:
        purgeonly(options.script_dir)
        sys.exit("The -p/--purge_only option does not run tests. It purged %d scratch files." % (purge_only_count))

    # TODO Output jenkins-friendly html-formatted report artifacts as an alternative to plain text.
    reportout = open(options.report_file, 'w+')

    # TODO -- support different server modes  -- either by explicit command line
    # option or automatic-but-verbosely -- to detect and use an already running
    # VoltDB server and remember to leave it running on exit.
    launch_and_wait_on_voltdb(reportout)

    # Except in refresh mode, any diffs change the scripts exit code to fail ant/jenkins
    haddiffs = False
    try:
        for parent, dirs, files in os.walk(options.script_dir):
            # Process each ".in" file found in the recursive directory walk.
            # Ignore other files -- these may be scratch files that (FIXME) really should be
            # written to a temp directory instead, or they may be backup files (like from a text editor)
            # or in the future they may be other kinds of input like a ".options" file that
            # could provide sqlcmd command line options to use with a corresponding ".in" file.
            for inpath in files:
                if not inpath.endswith(".in"):
                    continue
                print "Running ", os.path.join(parent, inpath)
                # Pipe the ".in" script into sqlcmd, capturing stdout/stderr
                # next to the script as "<prefix>.out" / "<prefix>.err".
                prefix = inpath[:-3]
                childin = open(os.path.join(parent, inpath))
                # TODO use temp scratch files instead of local files to avoid polluting the git
                # workspace. Ideally they would be self-purging except in failure cases or debug
                # modes when they may contain useful diagnostic detail.
                childout = open(os.path.join(parent, prefix + '.out'), 'w+')
                childerr = open(os.path.join(parent, prefix + '.err'), 'w+')
                subprocess.call(['../../bin/sqlcmd'],
                        stdin=childin, stdout=childout, stderr=childerr)

                # TODO launch a hard-coded script that verifies a clean database and healthy server
                # ("show tables" or equivalent) after each test run to prevent cross-contamination.

                # fuzz the sqlcmd output for reliable comparison
                clean_output(parent, prefix + '.out')
                clean_output(parent, prefix + '.err')

                # Compare both cleaned outputs against the mirrored baseline
                # tree; a True return marks a diff (or refreshes baselines).
                baseparent = replace_parent_dir_prefix(parent, options.script_dir, options.baseline_dir)
                if compare_cleaned_to_baseline(parent, baseparent,
                        prefix + '.out', inpath,
                        options.refresh, reportout):
                    haddiffs = True;
                if compare_cleaned_to_baseline(parent, baseparent,
                        prefix + '.err', inpath,
                        options.refresh, reportout):
                    haddiffs = True;
    finally:
        # Always stop the server, even if a test run raises.
        kill_voltdb()
        print "Summary report written to file://" + os.path.abspath(options.report_file)
        # Would it be useful to dump the report file content to stdout?
        # Except in refresh mode, any diffs change the scripts exit code to fail ant/jenkins
        if haddiffs:
            sys.exit("One or more sqlcmdtest script failures or errors was detected.")
Example #17
0
def main():
    """Entry point for the hang analyzer.

    Logs environment information, parses command-line options, finds the
    running processes of interest, and collects diagnostics for each one:
    debugger dump (optionally with a core) for native processes, jstack for
    Java processes, SIGABRT for Go processes, and SIGUSR1 (or a Windows
    event object) for Python processes.
    """
    root_logger = logging.Logger("hang_analyzer", level=logging.DEBUG)

    handler = logging.StreamHandler(sys.stdout)
    handler.setFormatter(logging.Formatter(fmt="%(message)s"))
    root_logger.addHandler(handler)

    root_logger.info("Python Version: %s" % sys.version)
    root_logger.info("OS: %s" % platform.platform())

    try:
        if _is_windows or sys.platform == "cygwin":
            distro = platform.win32_ver()
            root_logger.info("Windows Distribution: %s" % str(distro))
        else:
            # NOTE(review): platform.linux_distribution() was removed in
            # Python 3.8; the AttributeError handler below also covers that.
            distro = platform.linux_distribution()
            root_logger.info("Linux Distribution: %s" % str(distro))

    except AttributeError:
        root_logger.warning("Cannot determine Linux distro since Python is too old")

    try:
        uid = os.getuid()
        root_logger.info("Current User: %s" % str(uid))
        current_login = os.getlogin()
        root_logger.info("Current Login: %s" % current_login)
    except OSError:
        root_logger.warning("Cannot determine Unix Current Login")
    except AttributeError:
        root_logger.warning("Cannot determine Unix Current Login, not supported on Windows")

    # Default process-name filter; may be replaced/extended by -p and -g below.
    interesting_processes = ["mongo", "mongod", "mongos", "_test", "dbtest", "python", "java"]
    go_processes = []
    process_ids = []

    parser = OptionParser(description=__doc__)
    parser.add_option('-m', '--process-match',
                      dest='process_match',
                      choices=['contains', 'exact'],
                      default='contains',
                      help="Type of match for process names (-p & -g), specify 'contains', or"
                           " 'exact'. Note that the process name match performs the following"
                           " conversions: change all process names to lowecase, strip off the file"
                           " extension, like '.exe' on Windows. Default is 'contains'.")
    parser.add_option('-p', '--process-names',
                      dest='process_names',
                      help='Comma separated list of process names to analyze')
    parser.add_option('-g', '--go-process-names',
                      dest='go_process_names',
                      help='Comma separated list of go process names to analyze')
    parser.add_option('-d', '--process-ids',
                      dest='process_ids',
                      default=None,
                      help='Comma separated list of process ids (PID) to analyze, overrides -p &'
                           ' -g')
    parser.add_option('-c', '--dump-core',
                      dest='dump_core',
                      action="store_true",
                      default=False,
                      help='Dump core file for each analyzed process')
    parser.add_option('-s', '--max-core-dumps-size',
                      dest='max_core_dumps_size',
                      default=10000,
                      help='Maximum total size of core dumps to keep in megabytes')
    parser.add_option('-o', '--debugger-output',
                      dest='debugger_output',
                      action="append",
                      choices=['file', 'stdout'],
                      default=None,
                      help="If 'stdout', then the debugger's output is written to the Python"
                           " process's stdout. If 'file', then the debugger's output is written"
                           " to a file named debugger_<process>_<pid>.log for each process it"
                           " attaches to. This option can be specified multiple times on the"
                           " command line to have the debugger's output written to multiple"
                           " locations. By default, the debugger's output is written only to the"
                           " Python process's stdout.")

    (options, args) = parser.parse_args()

    if options.debugger_output is None:
        options.debugger_output = ['stdout']

    if options.process_ids is not None:
        # process_ids is an int list of PIDs
        process_ids = [int(pid) for pid in options.process_ids.split(',')]

    if options.process_names is not None:
        interesting_processes = options.process_names.split(',')

    if options.go_process_names is not None:
        go_processes = options.go_process_names.split(',')
        interesting_processes += go_processes

    [ps, dbg, jstack] = get_hang_analyzers()

    if ps is None or (dbg is None and jstack is None):
        root_logger.warning("hang_analyzer.py: Unsupported platform: %s" % (sys.platform))
        # BUG FIX: use sys.exit() instead of the site-provided exit() builtin,
        # which is not guaranteed to be available (e.g. python -S, frozen apps).
        sys.exit(1)

    all_processes = ps.dump_processes(root_logger)

    # Canonicalize the process names to lowercase to handle cases where the name of the Python
    # process is /System/Library/.../Python on OS X and -p python is specified to hang_analyzer.py.
    all_processes = [(pid, process_name.lower()) for (pid, process_name) in all_processes]

    # Find all running interesting processes:
    #   If a list of process_ids is supplied, match on that.
    #   Otherwise, do a substring match on interesting_processes.
    if process_ids:
        processes = [(pid, pname) for (pid, pname) in all_processes
                     if pid in process_ids and pid != os.getpid()]

        running_pids = set([pid for (pid, pname) in all_processes])
        missing_pids = set(process_ids) - running_pids
        if missing_pids:
            root_logger.warning("The following requested process ids are not running %s" %
                                list(missing_pids))
    else:
        processes = [(pid, pname) for (pid, pname) in all_processes
                     if pname_match(options.process_match, pname, interesting_processes) and
                     pid != os.getpid()]

    root_logger.info("Found %d interesting processes %s" % (len(processes), processes))

    max_dump_size_bytes = int(options.max_core_dumps_size) * 1024 * 1024

    # Dump all other processes including go programs, except python & java.
    for (pid, process_name) in [(p, pn) for (p, pn) in processes
                                if not re.match("^(java|python)", pn)]:
        process_logger = get_process_logger(options.debugger_output, pid, process_name)
        dbg.dump_info(
            root_logger,
            process_logger,
            pid,
            process_name,
            options.dump_core and check_dump_quota(max_dump_size_bytes, dbg.get_dump_ext()))

    # Dump java processes using jstack.
    for (pid, process_name) in [(p, pn) for (p, pn) in processes if pn.startswith("java")]:
        process_logger = get_process_logger(options.debugger_output, pid, process_name)
        jstack.dump_info(root_logger, process_logger, pid, process_name)

    # Signal go processes to ensure they print out stack traces, and die on POSIX OSes.
    # On Windows, this will simply kill the process since python emulates SIGABRT as
    # TerminateProcess.
    # Note: The stacktrace output may be captured elsewhere (i.e. resmoke).
    for (pid, process_name) in [(p, pn) for (p, pn) in processes if pn in go_processes]:
        root_logger.info("Sending signal SIGABRT to go process %s with PID %d" %
            (process_name, pid))
        signal_process(root_logger, pid, signal.SIGABRT)

    # Dump python processes by signalling them.
    for (pid, process_name) in [(p, pn) for (p, pn) in processes if pn.startswith("python")]:
        # On Windows, we set up an event object to wait on a signal. For Cygwin, we register
        # a signal handler to wait for the signal since it supports POSIX signals.
        if _is_windows:
            root_logger.info("Calling SetEvent to signal python process %s with PID %d" %
                (process_name, pid))
            signal_event_object(root_logger, pid)
        else:
            root_logger.info("Sending signal SIGUSR1 to python process %s with PID %d" %
                (process_name, pid))
            signal_process(root_logger, pid, signal.SIGUSR1)

    root_logger.info("Done analyzing all processes for hangs")
Example #18
0
def main(argv):
    """Drive the GDB-based test suite: parse options, build the test set,
    compile the test executable, and run the tests.

    argv is the full argument vector (argv[0] is ignored by parse_args'
    option handling but positional args start at argv[1]).
    """
    global OPTIONS
    script_path = os.path.abspath(__file__)
    script_dir = os.path.dirname(script_path)

    # OBJDIR is a standalone SpiderMonkey build directory. This is where we
    # find the SpiderMonkey shared library to link against.
    #
    # The [TESTS] optional arguments are paths of test files relative
    # to the jit-test/tests directory.
    from optparse import OptionParser

    op = OptionParser(usage="%prog [options] OBJDIR [TESTS...]")
    op.add_option(
        "-s",
        "--show-cmd",
        dest="show_cmd",
        action="store_true",
        help="show GDB shell command run",
    )
    op.add_option(
        "-o",
        "--show-output",
        dest="show_output",
        action="store_true",
        help="show output from GDB",
    )
    op.add_option(
        "-x",
        "--exclude",
        dest="exclude",
        action="append",
        help="exclude given test dir or path",
    )
    op.add_option(
        "-t",
        "--timeout",
        dest="timeout",
        type=float,
        default=150.0,
        help="set test timeout in seconds",
    )
    op.add_option(
        "-j",
        "--worker-count",
        dest="workercount",
        type=int,
        help="Run [WORKERCOUNT] tests at a time",
    )
    op.add_option(
        "--no-progress",
        dest="hide_progress",
        action="store_true",
        help="hide progress bar",
    )
    op.add_option(
        "--worklist",
        dest="worklist",
        metavar="FILE",
        help="Read tests to run from [FILE] (or run all if [FILE] not found);\n"
        "write failures back to [FILE]",
    )
    op.add_option(
        "-r",
        "--read-tests",
        dest="read_tests",
        metavar="FILE",
        help="Run test files listed in [FILE]",
    )
    op.add_option(
        "-w",
        "--write-failures",
        dest="write_failures",
        metavar="FILE",
        help="Write failing tests to [FILE]",
    )
    op.add_option(
        "--write-failure-output",
        dest="write_failure_output",
        action="store_true",
        help="With --write-failures=FILE, additionally write the output of failed "
        "tests to [FILE]",
    )
    op.add_option(
        "--gdb",
        dest="gdb_executable",
        metavar="EXECUTABLE",
        default="gdb",
        help="Run tests with [EXECUTABLE], rather than plain 'gdb'.",
    )
    op.add_option(
        "--srcdir",
        dest="srcdir",
        default=os.path.abspath(os.path.join(script_dir, "..")),
        help="Use SpiderMonkey sources in [SRCDIR].",
    )
    op.add_option(
        "--testdir",
        dest="testdir",
        default=os.path.join(script_dir, "tests"),
        help="Find tests in [TESTDIR].",
    )
    op.add_option(
        "--builddir", dest="builddir", help="Build test executable from [BUILDDIR]."
    )
    op.add_option("--bindir", dest="bindir", help="Run test executable from [BINDIR].")
    (OPTIONS, args) = op.parse_args(argv)
    if len(args) < 1:
        op.error("missing OBJDIR argument")
    OPTIONS.objdir = os.path.abspath(args[0])

    test_args = args[1:]

    if not OPTIONS.workercount:
        OPTIONS.workercount = get_cpu_count()

    # Compute defaults for OPTIONS.builddir and OPTIONS.bindir now, since we've
    # computed OPTIONS.objdir.
    if not OPTIONS.builddir:
        OPTIONS.builddir = os.path.join(OPTIONS.objdir, "js", "src", "gdb")
    if not OPTIONS.bindir:
        OPTIONS.bindir = os.path.join(OPTIONS.objdir, "dist", "bin")

    test_set = set()

    # All the various sources of test names accumulate.
    if test_args:
        for arg in test_args:
            test_set.update(find_tests(OPTIONS.testdir, arg))
    if OPTIONS.worklist:
        try:
            with open(OPTIONS.worklist) as f:
                for line in f:
                    # BUG FIX: set.update() on a string adds its individual
                    # characters; use add() to insert the path as one element.
                    test_set.add(os.path.join(OPTIONS.testdir, line.strip("\n")))
        except IOError:
            # With worklist, a missing file means to start the process with
            # the complete list of tests.
            sys.stderr.write(
                "Couldn't read worklist file '%s'; running all tests\n"
                % (OPTIONS.worklist,)
            )
            test_set = set(find_tests(OPTIONS.testdir))
    if OPTIONS.read_tests:
        try:
            with open(OPTIONS.read_tests) as f:
                for line in f:
                    # BUG FIX: add(), not update(), for a single path string.
                    test_set.add(os.path.join(OPTIONS.testdir, line.strip("\n")))
        except IOError as err:
            sys.stderr.write(
                "Error trying to read test file '%s': %s\n" % (OPTIONS.read_tests, err)
            )
            sys.exit(1)

    # If none of the above options were passed, and no tests were listed
    # explicitly, use the complete set.
    if not test_args and not OPTIONS.worklist and not OPTIONS.read_tests:
        test_set = set(find_tests(OPTIONS.testdir))

    if OPTIONS.exclude:
        exclude_set = set()
        for exclude in OPTIONS.exclude:
            exclude_set.update(find_tests(OPTIONS.testdir, exclude))
        test_set -= exclude_set

    if not test_set:
        sys.stderr.write("No tests found matching command line arguments.\n")
        sys.exit(1)

    summary = Summary(len(test_set))
    test_list = [Test(test_path, summary) for test_path in sorted(test_set)]

    # Build the test executable from all the .cpp files found in the test
    # directory tree.
    try:
        build_test_exec(OPTIONS.builddir)
    except subprocess.CalledProcessError as err:
        sys.stderr.write("Error building test executable: %s\n" % (err,))
        sys.exit(1)

    # Run the tests.
    try:
        summary.start()
        run_tests(test_list, summary)
        summary.finish()
    except OSError as err:
        sys.stderr.write("Error running tests: %s\n" % (err,))
        sys.exit(1)

    sys.exit(0)
Example #19
0
# Make the bundled libraries importable before any twisted/carbon imports.
sys.path.insert(0, LIB_DIR)

# Prefer the epoll reactor where available (Linux); silently fall back to the
# default reactor on platforms that lack epoll support.
try:
  from twisted.internet import epollreactor
  epollreactor.install()
except ImportError:
  pass

from twisted.internet import stdio, reactor, defer
from twisted.protocols.basic import LineReceiver
from carbon.routers import ConsistentHashingRouter, RelayRulesRouter
from carbon.client import CarbonClientManager
from carbon import log, events


# Command-line interface: each positional argument names one carbon
# destination as host:port:instance.
option_parser = OptionParser(usage="%prog [options] <host:port:instance> <host:port:instance> ...")
option_parser.add_option('--debug', action='store_true', help="Log debug info to stdout")
option_parser.add_option('--keyfunc', help="Use a custom key function (path/to/module.py:myFunc)")
option_parser.add_option('--replication', type='int', default=1, help='Replication factor')
option_parser.add_option('--routing', default='consistent-hashing',
  help='Routing method: "consistent-hashing" (default) or "relay"')
option_parser.add_option('--relayrules', default=default_relayrules,
  help='relay-rules.conf file to use for relay routing')

options, args = option_parser.parse_args()

# At least one destination is mandatory; show usage and exit non-zero.
if not args:
  print 'At least one host:port destination required\n'
  option_parser.print_usage()
  raise SystemExit(1)
Example #20
0
def main() :
    """Clean up an AWS BIG-IP autoscale deployment.

    Removes scale-in protection from the instances of the autoscale group
    named by -a, then deletes the S3 bucket named by -s (all objects first,
    then the bucket itself).

    Credential resolution order: explicit --aws_access_key/--aws_secret_key
    arguments, then AWS_ACCESS_KEY_ID/AWS_SECRET_ACCESS_KEY environment
    variables, then boto3's default ~/.aws/credentials chain.
    """
    try:

        print "Autoscale Cleanup Script started"

        parser = OptionParser()
        parser.add_option("-r", "--region", action="store", type="string", dest="region", help="aws region" )
        parser.add_option("-a", "--autoscale_group", action="store", type="string", dest="autoscale_group", help="bigip autoscale group name" )
        parser.add_option("-s", "--s3bucket", action="store", type="string", dest="s3_bucket", help="s3bucket" )
        parser.add_option("--aws_access_key", action="store", type="string", dest="aws_access_key", help="aws_access_key" )
        parser.add_option("--aws_secret_key", action="store", type="string", dest="aws_secret_key", help="aws_secret_key" )
        parser.add_option("-l", "--debug_logging", action="store", type="string", dest="debug_logging", default=False, help="debug logging: True or False" )

        (options, args) = parser.parse_args()

        # -l is a string option; only the literal "True" enables debug logging.
        debug_logging = False
        #setEnvironmentVariables()
        if options.debug_logging == "True":
            debug_logging = True

        # OVERRIDE
        # Pick credentials: CLI args win, then environment, then boto defaults.
        if options.aws_access_key:
            #print "AWS Keys are Passed as Args"
            aws_access_key = options.aws_access_key
            aws_secret_key = options.aws_secret_key
        elif 'AWS_ACCESS_KEY_ID' in os.environ:
            #print "Getting Keys from Environment Vars"
            aws_access_key = os.environ["AWS_ACCESS_KEY_ID"]
            aws_secret_key = os.environ["AWS_SECRET_ACCESS_KEY"]
        else:
            aws_access_key = None
            aws_secret_key = None
            if debug_logging == True:
                print "Boto relying on credentials from default ~/.aws/credentials"


        region = options.region
        asg_name = options.autoscale_group
        s3_bucket_name = options.s3_bucket


        if aws_access_key and aws_secret_key:

            # Override boto creds with ones passed in
            # Create ASG client
            try:
                asg_client = boto3.client('autoscaling', region_name=region, aws_access_key_id=aws_access_key, aws_secret_access_key=aws_secret_key ) # Create Autoscale client
            except botocore.exceptions.ClientError as e:
                print e
                sys.exit("Exiting...")

            # Create EC2 client
            try:
                s3_client = boto3.client('s3', region_name=region, aws_access_key_id=aws_access_key, aws_secret_access_key=aws_secret_key ) # Create Autoscale client
            except botocore.exceptions.ClientError as e:
                print e
                sys.exit("Exiting...")

        else:
            # Try using ~/.aws/credentials
            # Create ASG client
            try:
                asg_client = boto3.client('autoscaling', region_name=region ) # Create Autoscale client
            except botocore.exceptions.ClientError as e:
                print e
                sys.exit("Exiting...")

            # Create EC2 client
            try:
                s3_client = boto3.client('s3', region_name=region ) # Create Autoscale client
            except botocore.exceptions.ClientError as e:
                print e
                sys.exit("Exiting...")


        #autoscaling = boto3.client('autoscaling')
        #s3_client = boto3.client('s3')
        s3 = boto3.resource('s3')
        ec2 = boto3.resource('ec2')
        instances = []
        asg = ""

        # Removing Scale-In protection from Master 
        # Collect the instance ids belonging to the requested autoscale group.
        for instance in asg_client.describe_auto_scaling_instances()['AutoScalingInstances']:
            if asg_name == instance['AutoScalingGroupName']:
                instances.append( instance['InstanceId'])
                asg = instance['AutoScalingGroupName']
        if instances:
            print 'Auto Scale: Removing Scale-In protection from Master: ',instances
            asg_client.set_instance_protection(
                    InstanceIds = instances,
                    AutoScalingGroupName=asg,
                    ProtectedFromScaleIn=False
                    )
        # Delete S3 Bucket
        # All objects must be deleted before the bucket itself can be removed.
        buckets = s3_client.list_buckets()['Buckets']
        for bucket in buckets:
            if s3_bucket_name == bucket['Name']:
                print 'S3: deleting S3 bucket: ',bucket['Name']
                b = s3.Bucket(bucket['Name'])
                b.objects.all().delete()
                b.delete()

        print "Autoscale Cleanup Script Finished"

    except Exception, ex:
        # Broad catch: report any failure without a traceback and fall through.
        print "An exception of type " + type(ex).__name__ + \
            " with message " + str(sys.exc_info()[1])
Example #21
0
                file_name.replace(
                    "{0}{1}{2}".format(
                        self.extract_dir,
                        "integrations-extras",
                        sep,
                    ),
                    "https://github.com/DataDog/integrations-extras/blob/master/",
                )
            )

        return dependencies


if __name__ == "__main__":
    # Command-line interface for the link-rewriting entry point.
    parser = OptionParser(usage="usage: %prog [options] link_type")
    # GitHub API token used for authenticated requests (optional).
    parser.add_option("-t", "--token", default=None, help="github access token")
    # Directory containing the source files to scan; defaults to the cwd.
    parser.add_option("-s", "--source", default=curdir, help="location of src files")

    options, args = parser.parse_args()
Example #22
0
def main(argv):
    """Walk a source tree and run the registered per-extension checkers on
    every file, printing each problem found.

    argv -- full argument vector; argv[1:] is parsed for options and an
    optional single root path (default '.').  Paths given via -i/--ignore-path
    (and VCS metadata directories) are skipped.
    """
    parser = OptionParser(usage='Usage: %prog [-v] [-i ignorepath]* [path]')
    parser.add_option('-v',
                      '--verbose',
                      dest='verbose',
                      default=False,
                      action='store_true')
    parser.add_option('-i',
                      '--ignore-path',
                      dest='ignored_paths',
                      default=[],
                      action='append')
    options, args = parser.parse_args(argv[1:])

    if len(args) == 0:
        path = '.'
    elif len(args) == 1:
        path = args[0]
    else:
        print args
        parser.error('No more then one path supported')

    verbose = options.verbose
    # Ignored paths are compared by absolute path below.
    ignored_paths = set(abspath(p) for p in options.ignored_paths)

    # num counts problems found; out buffers the checker messages.
    num = 0
    out = cStringIO.StringIO()

    for root, dirs, files in os.walk(path):
        # Prune VCS metadata directories so os.walk never descends into them.
        for vcs_dir in ['.svn', '.hg', '.git']:
            if vcs_dir in dirs:
                dirs.remove(vcs_dir)
        if abspath(root) in ignored_paths:
            del dirs[:]
            continue
        # Some checkers only apply inside the sphinx package tree.
        in_check_pkg = root.startswith('./sphinx')
        for fn in files:

            fn = join(root, fn)
            if fn[:2] == './': fn = fn[2:]

            if abspath(fn) in ignored_paths:
                continue

            # Dispatch on file extension; files with no registered checker
            # are skipped.
            ext = splitext(fn)[1]
            checkerlist = checkers.get(ext, None)
            if not checkerlist:
                continue

            if verbose:
                print "Checking %s..." % fn

            try:
                f = open(fn, 'rb')
                try:
                    lines = list(f)
                finally:
                    f.close()
            except (IOError, OSError), err:
                # An unreadable file counts as one problem.
                print "%s: cannot open: %s" % (fn, err)
                num += 1
                continue

            for checker in checkerlist:
                if not in_check_pkg and checker.only_pkg:
                    continue
                for lno, msg in checker(fn, lines):
                    print >> out, "%s:%d: %s" % (fn, lno, msg)
                    num += 1
Example #23
0
                                executable="/bin/bash",
                                stdout=subprocess.PIPE, 
                                stderr=subprocess.PIPE); 
    jobcreated.wait()

    out, err = jobcreated.communicate()
    errcode  = jobcreated.returncode;
	
    if(errcode != 0): #has finished but wrong code
        print "Job failed "+cjob+" failed";
        sys.exit(1);

    return out;


# Command-line interface for the contamination/endogenous simulation driver.
# BUG FIX: optparse expands "%prog" (not "$prog") in the usage string; the
# original "$prog [options]" would have been shown literally to the user.
parser = OptionParser("%prog [options]")
#parser.add_option("-t", "--theta",       dest="theta",        help="Theta, default is 20",                          default=20,      type="float");
parser.add_option("-s", "--timesplit", dest="timesplit", type="float", default=0.10,
                  help="Split time in 2N_0 generations, default is 0.1")
parser.add_option("-f", "--destfolder", dest="destfolder", type="string", default=None,
                  help="Output folder")
parser.add_option("-n", "--numsim", dest="numsim", type="int", default=100,
                  help="Number of simulations, default is 100")
parser.add_option("", "--branchl", dest="branchlscale", type="float", default=0.00045,
                  help="Seq-gen branch scale, default is 0.00045")
parser.add_option("", "--chrlen", dest="lengthchr", type="int", default=10000,
                  help="Chromosome length, default is 10kb")
parser.add_option("-c", "--numcont", dest="numcont", type="int", default=2,
                  help="Number of present-day human contaminants, default is 2")
parser.add_option("-e", "--numendo", dest="numendo", type="int", default=2,
                  help="Number of ancient endogenous, default is 2")


#print handle_job("which ls");
Example #24
0
    sys.stderr.write('This file cannot be loaded as a module!')
    sys.exit(1)

# Directory containing this script, and the JCEF root (its parent).
script_dir = os.path.dirname(__file__)
jcef_dir = os.path.abspath(os.path.join(script_dir, os.pardir))


# parse command-line options
disc = """
This utility creates the version header file.
"""

parser = OptionParser(description=disc)
parser.add_option('--header',
                  dest='header',
                  metavar='FILE',
                  help='output version header file [required]')
parser.add_option('--cef-path',
                  dest='cefpath',
                  help='path to the CEF binary distribution [required]')
parser.add_option('-q', '--quiet',
                  dest='quiet',
                  action='store_true',
                  default=False,
                  help='do not output detailed status information')
(options, args) = parser.parse_args()

# Both --header and --cef-path must be supplied; otherwise show help and bail.
if any(value is None for value in (options.header, options.cefpath)):
    parser.print_help(sys.stdout)
    sys.exit(1)

def write_svn_header(header):
Example #25
0
 or by snail mail at:
      Cyan Worlds, Inc.
      14617 N Newport Hwy
      Mead, WA   99021

 *==LICENSE==* """

import os
import sys
import glob
import subprocess
from optparse import OptionParser


if __name__ == '__main__':
    # Command-line interface for the resource render/package pipeline.
    parser = OptionParser(usage="usage: %prog [options]")
    parser.add_option("-q", "--quiet",
                      dest="verbose", default=True, action="store_false",
                      help="Don't print status messages")
    parser.add_option("-r", "--render",
                      dest="render", default=False, action="store_true",
                      help="Perform SVG Render to images")
    parser.add_option("-p", "--package",
                      dest="package", default=False, action="store_true",
                      help="Perform packaging into resource container")
    parser.add_option("-z", "--pngcrush",
                      dest="pngcrush", type="string",
                      help="Perform PNGCrush optimization on PNG resources")
    parser.add_option("-b", "--brute",
                      dest="brute", default=False, action="store_true",
                      help="Allow brute-force optimization")
    parser.add_option("-w", "--workpath",
                      dest="workpath", default=".",
                      help="Sets working output path for image renders")
    parser.add_option("-o", "--outpath",
                      dest="outpath", default=".",
                      help="Sets output path for resource container")
    parser.add_option("-i", "--inpath",
                      dest="inpath", default=".",
                      help="Sets input path for files to add to resource file")

    (options, args) = parser.parse_args()

    ## Send output to OS's null if unwanted
    if not options.verbose:
        sys.stdout = open(os.devnull, "w")
        sys.stderr = open(os.devnull, "w")
Example #26
0
def main():
    """Merge p-values and fold-change data for the VPC and Beltran cohorts.

    Reads three input files (VPC exprs, Beltran exprs, fold-change exprs)
    named as positional arguments, collects p-values/fold-changes per cohort
    into one dictionary, and writes it to the output file in long format.
    """
    usage = 'usage: %prog vpc_gene_exprs_file '\
        'beltran_t_test_file output_file\n'\
        'Requires four input arguments:\n'\
        '1) Text file of VPC cohort gene exprs\n'\
        '2) Text file of Beltran cohort gene exprs\n'\
        '3) Text file containing exprs of 2 samples/groups for fold change\n'\
        '4) Output file.\n'
    parser = OptionParser(usage=usage)
    parser.add_option('--pval_colname',
                      dest='pval_colname',
                      default='pval',
                      help='Column name containing pvals. Default "pval".')
    parser.add_option('--gene_colname',
                      dest='gene_colname',
                      default='gene',
                      help='Column name containing pvals. Default "gene".')
    parser.add_option('--group1_colname', dest='group1_colname',
                      default='group1',
                      help='Column name containing group1 exprs values. '\
                        'Default "group1".')
    parser.add_option('--group2_colname', dest='group2_colname',
                      default='group2',
                      help='Column name containing group2 exprs values. '\
                        'Default "group2".')
    parser.add_option('--group1_fc_colname', dest='group1_fc_colname',
                      default='LTL331',
                      help='Column name of group1 exprs values in FC file. '\
                        'Default: "LTL331"')
    parser.add_option('--group2_fc_colname', dest='group2_fc_colname',
                      default='LTL331_R',
                      help='Column name of group1 exprs values in FC file. '\
                        'Default: "LTL331_R"')
    parser.add_option('--fc_gene_colname', dest='fc_gene_colname',
                      default='gene_name',
                      help='Column name of gene names in FC file. '\
                        'Default: "gene_name"')
    (options, args) = parser.parse_args()

    if len(args) < 4:
        # Single-argument print() behaves identically on Python 2 and 3.
        print('Four arguments need to be specified in command line.\n')
        print(usage)
        sys.exit()
    vpc_filepath = args[0]
    beltran_filepath = args[1]
    fold_change_filepath = args[2]
    output_filepath = args[3]

    group1_colname = options.group1_colname
    group2_colname = options.group2_colname
    pval_colname = options.pval_colname
    gene_colname = options.gene_colname
    group1_fc_colname = options.group1_fc_colname
    group2_fc_colname = options.group2_fc_colname
    gene_fc_colname = options.fc_gene_colname

    # store pvals to a dic.
    pval_fc_dic = {}
    for cohort, filepath in zip(['vpc', 'beltran'],
                                [vpc_filepath, beltran_filepath]):
        pval_fc_subdic = get_pvals_fc_from_file(filepath, group1_colname,
                                                group2_colname, gene_colname,
                                                pval_colname)
        pval_fc_dic[cohort] = pval_fc_subdic

    # Add fold change to dic
    pval_fc_subdic = get_fc_from_file(fold_change_filepath, group1_fc_colname,
                                      group2_fc_colname, gene_fc_colname)
    pval_fc_dic['foldchange'] = pval_fc_subdic

    # Write dic to file
    write_outdic_to_file(pval_fc_dic, output_filepath, shape='long')
    # BUG FIX: removed an orphaned docstring + code fragment (from an
    # unrelated URL-masking helper) that had been pasted here; it referenced
    # undefined names `url` and `start`, raising NameError right after the
    # output file was written.


if __name__ == "__main__":
    # Command-line interface for uploading a plugin archive to the plugin site.
    parser = OptionParser(usage="%prog [options] plugin.zip")
    # Credentials for the remote plugin repository.
    parser.add_option("-w", "--password", dest="password",
                      metavar="******", help="Password for plugin site")
    parser.add_option("-u", "--username", dest="username",
                      metavar="user", help="Username of plugin site")
    # Network endpoint configuration.
    parser.add_option("-p", "--port", dest="port",
                      metavar="80", help="Server port to connect to")
# Debug level
debugLevel = 0
# Number of option arguments.
numOpts = len(sys.argv)

# Usage message
usage = u'''`Usage: %prog [options...] [seqFile]'''
description = u'''\
    Calculates statistics of protein properties.\
'''
epilog = u'''For further information about the EMBOSS pepstats web service, see
http://www.ebi.ac.uk/tools/webservices/services/seqstats/emboss_pepstats_rest.'''
version = u'98c6601'

# Process command-line options
parser = OptionParser(usage=usage, description=description, epilog=epilog, version=version)

# Tool specific options (Try to print all the commands automatically)

parser.add_option('--sequence', help='The sequence to be analysed can be entered directly into this form.\
                The sequence can be in GCG, FASTA, PIR, NBRF, PHYLIP or UniProtKB/Swiss-Prot format.\
                Partially formatted sequences are not accepted..\
            ')
parser.add_option('--termini', help='Include charges from the N-terminus and C-terminus when calculating the Isoelectric Point')
parser.add_option('--mono', help='Use weight from the most abundant (prinicpal) isotope of each amino acid when calculating molecular weights. By default this is not enabled, so the average isotope weight is used instead.')
# General options
parser.add_option('--email', help='e-mail address')
parser.add_option('--title', help='job title')
parser.add_option('--outfile', help='file name for results')
parser.add_option('--outformat', help='output format for results')
# NOTE(review): 'async' is a reserved keyword from Python 3.7, so the parsed
# value cannot be read as `options.async`; use getattr(options, 'async').
parser.add_option('--async', action='store_true', help='asynchronous mode')
# --- ROOT / CMSSW environment setup (module-level side effects) ---
import math as math
import os
import CMS_lumi, tdrstyle

# Load the Higgs combination limit library from the local CMSSW release.
# Requires the CMSSW_BASE environment variable to point at a checked-out
# release area; raises KeyError if it is unset.
gSystem.Load("%s/lib/slc6_amd64_gcc481/libHiggsAnalysisCombinedLimit.so"%os.environ["CMSSW_BASE"])

# Load pre-compiled custom PDF / utility shared objects; paths are relative
# to the current working directory, so this script must be run from the
# directory containing PDFs/.
gSystem.Load("PDFs/PdfDiagonalizer_cc.so")
gSystem.Load("PDFs/Util_cxx.so")
gSystem.Load("PDFs/hyperg_2F1_c.so")
gSystem.Load("PDFs/HWWLVJRooPdfs_cxx.so")
# These ROOT-side names only exist after the gSystem.Load calls above,
# which is why the imports sit below them rather than at the top of the file.
from ROOT import draw_error_band
from ROOT import RooPoly3Pdf, RooChiSqPdf, RooErfExpPdf, RooErfPowExpPdf, RooErfPowPdf, RooErfPow2Pdf, RooExpNPdf, RooAlpha4ExpNPdf, RooExpTailPdf, RooAlpha4ExpTailPdf, Roo2ExpPdf, draw_error_band_extendPdf

# Suppress the per-histogram statistics box and plot titles globally.
gStyle.SetOptStat(0)
gStyle.SetOptTitle(0)
# Command-line interface: which parameter of interest (POI) to plot, at which
# value, plus a flag ('-c') to close plots instead of displaying them.
parser = OptionParser()
for flag, kwargs in (
        ('--POI', dict(dest='POI', help='parameter of interest')),
        ('--pval', dict(dest='pval', help='value of parameter')),
        ('-c', dict(action='store_true', dest='close', default=False)),
):
    parser.add_option(flag, **kwargs)
(options, args) = parser.parse_args()

POI = options.POI
pval = options.pval

# ROOT TLatex labels for each parameter of interest, without units; the
# three EFT coefficients additionally carry a TeV^-2 unit in par_latex.
par_noUnits = {
    'cwww': 'c_{WWW} / #Lambda^{2}',
    'ccw': 'c_{W} / #Lambda^{2}',
    'cb': 'c_{B} / #Lambda^{2}',
    'lZ': '#lambda_{Z}',
    'dg1z': '#Deltag_{1}^{Z}',
    'dkz': '#Delta#kappa_{Z}',
}
par_latex = dict(
    (key, label + (' (TeV^{-2})' if key in ('cwww', 'ccw', 'cb') else ''))
    for key, label in par_noUnits.items()
)

def plots():
	path		= './'
	
	wsNameExp	= 'higgsCombine_%s_%s.MultiDimFit.mH120.root'%(POI,pval)
	print 'Reading '+wsNameExp
Example #30
0
        else:
            print 'error: syntax of line: ' + l
    return modules


def get_collapsed(modules):
    """Collapse a dependency multigraph so each (source, target) pair
    appears at most once.

    Parameters:
        modules: a networkx.MultiDiGraph whose parallel edges should be
            merged into a single edge.

    Returns:
        A new networkx.MultiDiGraph with the same nodes and each distinct
        directed (u, v) edge added exactly once.
    """
    collapsed = networkx.MultiDiGraph()
    collapsed.add_nodes_from(modules.nodes())
    # Track already-added (u, v) pairs in a set: O(1) membership per edge
    # instead of rescanning collapsed.edges() for every edge, which made
    # the original loop O(E^2).
    seen = set()
    for u, v in modules.edges():
        if (u, v) not in seen:
            seen.add((u, v))
            collapsed.add_edge(u, v)
    return collapsed


if __name__ == '__main__':
    parser = OptionParser(usage='Usage: %prog [options] mapfile out.dot')
    parser.set_defaults(exclude_modules=[])
    parser.add_option(
        '-x',
        '--exclude-modules',
        action='append',
        dest='exclude_modules',
        metavar='FILTER',
        help=
        'ignore modules that match FILTER, expressed as a regular expression.')
    parser.add_option(
        '-e',
        '--entry',
        dest='entry',
        metavar='ENTRY',
        help=