Пример #1
0
def main(): 
    """Command-line entry point: group Redis RDB memory usage by key prefix.

    Accepts repeated -k/--key prefixes, then feeds the dump file through
    RdbParser with a MemoryCallback (second arg 64) into a
    MyStatsAggregator built from those prefixes.

    NOTE: written for Python 2 (`except Exception, e`, `print` statement).
    """
    usage = """usage: %prog [options] /path/to/dump.rdb

Example 1 : %prog -k "user-" -k "friends-" /var/redis/6379/dump.rdb
Example 2 : %prog /var/redis/6379/dump.rdb"""

    parser = OptionParser(usage=usage)

    parser.add_option("-k", "--key", dest="keys", action="append",
                  help="Keys that should be grouped together. Multiple prefix can be provided")
    
    (options, args) = parser.parse_args()
    
    if len(args) == 0:
        parser.error("Redis RDB file not specified")
    dump_file = args[0]
    # optparse leaves an unset "append" option as None; normalize to a list.
    if not options.keys:
        options.keys = []

    stat2d = MyStatsAggregator(options.keys)
    callback = MemoryCallback(stat2d, 64)
    # `parser` is deliberately rebound from the OptionParser to the RDB parser.
    parser = RdbParser(callback)
    try:
        parser.parse(dump_file)
    except Exception, e:
        # Best-effort: any parse failure is printed, not re-raised.
        print 'error: ', e
Пример #2
0
def main(): 
    """Generate an HTML memory report for a Redis RDB dump.

    Parses the dump through RdbParser/MemoryCallback into a
    StatsAggregator, renders the stats JSON into report.html.template,
    and writes the resulting HTML to the selected output file.

    Bug fix: the -f/--file option (and its default
    "redis_memory_report.html") was parsed into `output` but never used —
    the report was only printed to stdout.  The report is now written to
    the chosen file.  The template file handle is also closed promptly.
    """
    usage = """usage: %prog [options] /path/to/dump.rdb

Example 1 : %prog -k "user.*" -k "friends.*" -f memoryreport.html /var/redis/6379/dump.rdb
Example 2 : %prog /var/redis/6379/dump.rdb"""

    parser = OptionParser(usage=usage)

    parser.add_option("-f", "--file", dest="output",
                      help="Output file", metavar="FILE")
    parser.add_option("-k", "--key", dest="keys", action="append",
                      help="Keys that should be grouped together. Multiple regexes can be provided")

    (options, args) = parser.parse_args()

    if len(args) == 0:
        parser.error("Redis RDB file not specified")
    dump_file = args[0]

    output = options.output if options.output else "redis_memory_report.html"

    stats = StatsAggregator()
    callback = MemoryCallback(stats, 64)
    parser = RdbParser(callback)
    parser.parse(dump_file)
    stats_as_json = stats.get_json()

    template_path = os.path.join(os.path.dirname(__file__), "report.html.template")
    with open(template_path) as template_file:
        report_template = Template(template_file.read())
    html = report_template.substitute(REPORT_JSON=stats_as_json)
    with open(output, "w") as out:
        out.write(html)
Пример #3
0
def main():
    """Export a Redis RDB dump via a command-selected callback (diff/json/memory).

    Builds db/key/type filters from the command line, opens the requested
    destination (stdout by default), and streams the dump through RdbParser
    with the callback matching -c/--command.

    Bug fixes:
      * `destination` was assigned through the misspelled name `desitnaion`,
        so -f/--file was silently ignored and output always went to stdout.
      * the final error interpolated options.output (and misspelled
        "Caommand") instead of reporting the invalid options.command.
    """
    usage = """usage: %prog [options] /path/to/dump.rdb

Example : %prog --command json -k "user.*" /var/redis/6379/dump.rdb"""

    parser = OptionParser(usage=usage)
    parser.add_option("-c", "--command", dest="command",
                  help="Command to execute. Valid commands are json or diff", metavar="FILE")

    parser.add_option("-f", "--file", dest="output",
                  help="Output file", metavar="FILE")
    parser.add_option("-n", "--db", dest="dbs", action="append",
                  help="Database Number. Multiple databases can be provided. If not specified, all databases will be included.")
    parser.add_option("-k", "--key", dest="keys", default=None,
                  help="Keys to export. This can be a regular expression")
    parser.add_option("-t", "--type", dest="types", action="append",
                  help="""Data types to include. Possible values are string, hash, set, sortedset, list. Multiple typees can be provided. 
                    If not specified, all data types will be returned""")

    (options, args) = parser.parse_args()

    if len(args) == 0:
        parser.error("Redis RDB file not specified")
    dump_file = args[0]

    filters = {}
    if options.dbs:
        filters['dbs'] = []
        for x in options.dbs:
            try:
                filters['dbs'].append(int(x))
            except ValueError:
                raise Exception('Invalid database number %s' %x)

    if options.keys:
        filters['keys'] = options.keys

    if options.types:
        filters['types'] = []
        for x in options.types:
            if not x in VALID_TYPES:
                raise Exception('Invalid type provided - %s. Expected one of %s' % (x, (", ".join(VALID_TYPES))))
            else:
                filters['types'].append(x)

    destination = sys.stdout
    if options.output:
        # BUG FIX: previously assigned to the misspelled `desitnaion`.
        destination = open(options.output, 'wb')

    cmds = { 'diff': DiffCallback,
             'json': JSONCallback,
             'memory': lambda r : MemoryCallback(r, 64) }
    for key, cb in cmds.items():
        if key != options.command:
            continue
        # NOTE: this `with` also closes sys.stdout when no -f/--file was given.
        with destination as f:
            parser = RdbParser(cb(f), filters=filters)
            parser.parse(dump_file)
        return True
    # BUG FIX: report the offending command, not the output path.
    raise Exception('Invalid Command %s' % options.command)
Пример #4
0
def main():
    """Entry point for xstftool: parse an input file and print the results.

    Validates --file, builds a Parser from the --format/--out/--cache
    options around an HpccInfo descriptor, parses (preferring the cache
    when one is configured), and prints the output bracketed by start/end
    timestamps.  Returns 0.

    NOTE: written for Python 2 (`print` statement).
    """
    parser = OptionParser(usage="Usage: %prog [options] [[--] <application> [<args>]]")
    collection_group = OptionGroup(parser, "Collection options")
    # NOTE(review): all help strings below are empty — worth filling in.
    collection_group.add_option("--file",
                                metavar="FILE",
                                dest="file",
                                help="")

    collection_group.add_option("--func",
                                metavar="OUT",
                                dest="function",
                                help="")

    collection_group.add_option("--out",
                                metavar="OUT",
                                dest="out",
                                help="")

    collection_group.add_option("--cache",
                                metavar="CACHE",
                                dest="cache",
                                help="")

    collection_group.add_option("--format",
                                metavar="FORMAT",
                                dest="format",
                                help="")

    parser.add_option_group(collection_group)

    (options, arguments) = parser.parse_args()
    if options.file:
        if not os.path.exists(options.file):
            parser.error("file not found")
    else:
        parser.error("file not specified")

    info = HpccInfo('xstftool', options)
    # `parser` is rebound from the OptionParser to the log parser.
    parser = Parser(options.format, info, options.out, options.cache)

    print datetime.datetime.now().ctime() + ' Start'
    if options.cache is None:
        parser.parse()
    else:
        # Cache configured: fall back to a full parse only on a cache miss.
        if not parser.read_cache():
            parser.parse()

    parser.to_print()
    print "\n" + datetime.datetime.now().ctime() + ' End'
    return 0
Пример #5
0
def main():
    """Poll an AMGA instance forever, printing each parsed status log.

    The host is the first positional argument (default "localhost");
    -p/--port and -i/--interval select the endpoint and the poll period.
    Runs until interrupted.

    NOTE: written for Python 2 (`print` statement).
    """
    from optparse import OptionParser
    parser = OptionParser()
    parser.add_option("-p", "--port", dest="port", type="int", default=8822,
                      help="AMGA instance port")
    parser.add_option("-i", "--interval", dest="interval", type="int", default=1,
                      help="Poll interval")

    (opts, args) = parser.parse_args()    
    if len(args) == 0:
        host = "localhost"
    else:
        host = args[0]

    print "Querying: " + host + ":" + str(opts.port)
    # `parser` is rebound from the OptionParser to the AMGA log parser.
    parser = Parser()
    log = None
#    count = 0
    while True:
        (serverType, xmlData) = pollAMGA(host, opts.port)
        #print xmlData
        # NOTE(review): oldLog is assigned but never read — candidate for removal.
        oldLog = log
        log = parser.parse(serverType, xmlData)
        print log
#        count += 1
#        print "Queries:",count
        time.sleep(opts.interval)
Пример #6
0
def main():
    """Poll an AMGA instance forever, printing and mining each status log.

    The host is the first positional argument (default "localhost");
    -p/--port and -i/--interval select the endpoint and poll period.
    A miner matching the server type reported by the FIRST poll
    (MDSERVER or MDREPDAEMON) processes every parsed log.  Runs until
    interrupted.

    NOTE: written for Python 2 (`print` statement).
    """
    from optparse import OptionParser
    parser = OptionParser()
    parser.add_option("-p", "--port", dest="port", type="int", default=8822,
                      help="AMGA instance port")
    parser.add_option("-i", "--interval", dest="interval", type="int", default=1,
                      help="Poll interval")

    (opts, args) = parser.parse_args()
    if len(args) == 0:
        host = "localhost"
    else:
        host = args[0]

    print "Querying: " + host + ":" + str(opts.port)

    # `parser` is rebound from the OptionParser to the AMGA log parser.
    parser = amgaMonitor.Parser()
    log = None
    (serverType, xmlData) = amgaMonitor.pollAMGA(host, opts.port)
    # The miner is selected once, from the first poll's server type.
    if serverType == amgaMonitor.ServerType.MDSERVER:
        miner = ServerMiner()
    elif serverType == amgaMonitor.ServerType.MDREPDAEMON:
        miner = RepDaemonMiner()
    else:
        raise Exception("Bad id: " + str(serverType))

    while True:
#        print xmlData
        log = parser.parse(serverType, xmlData)        
        print log
        miner.processLog(log)
        time.sleep(opts.interval)
        (serverType, xmlData) = amgaMonitor.pollAMGA(host, opts.port)
Пример #7
0
def main():
    """Find repeated note patterns in a score and dump each match as MIDI.

    Parses and quantizes the input score, extracts patterns using the
    hard-coded sizes/margins below, and writes every match to
    <output_folder>/<input stem>/pat_<i>/match_<j>.mid.

    NOTE: written for Python 2 (`iteritems`); parserclass/writerclass and
    the pattern helpers come from the surrounding module.
    """
    usage= 'usage: %prog [options] infname'
    parser= OptionParser(usage=usage)
    parser.add_option('-o', '--output', 
                        dest='output_folder', default='patterns',
                        help='output folder')
    # NOTE(review): --no-cache is parsed but never passed to parser.parse below.
    parser.add_option('-c', '--no-cache', 
                        dest='cache', action='store_false', default=True, 
                        help='discard cache')

    options, args= parser.parse_args(argv[1:])
    if len(args) < 1: parser.error('not enaught args')

    # `parser` is rebound from the OptionParser to the score parser.
    parser= parserclass('models_cache')

    infname= args[0]
    score= parser.parse(infname)
    score= quantize(score)

    output_folder_prefix= options.output_folder

    # Hard-coded pattern-search parameters.
    pat_sizes= [4]
    margins= range(3)
    f= 2
    key= lambda n:('%s|%s' % (n.duration, n.is_silence),)

    writer= writerclass()

    results= {}
    patterns= get_score_patterns(score, pat_sizes, margins, f, key)
    instr, all_notes= score.notes_per_instrument.iteritems().next()

    # infname[:-4] strips the extension — assumes a 3-char suffix such as ".mid".
    output_folder= os.path.join(output_folder_prefix, infname[:-4])
    if not os.path.exists(output_folder):
        os.mkdir(output_folder)

    for i, (pat, matches) in enumerate(patterns.iteritems()):
        pattern_folder= os.path.join(output_folder, 'pat_%s' % i)
        if not os.path.exists(pattern_folder):
            os.mkdir(pattern_folder)
        for j, (start, end) in enumerate(matches):
            notes= all_notes[start:end]
            notes= [n.copy() for n in notes]

            # Re-time the copied slice so it starts at 0 with contiguous notes.
            notes[0].start= 0
            for prev, next in zip(notes, notes[1:]):
                next.start= prev.start+prev.duration
                            
            s= score.copy()
            s.notes_per_instrument={instr:notes}

            writer.dump(s, os.path.join(pattern_folder, 'match_%s.mid' % j))
Пример #8
0
def run():
    """Compile every .proto file in a source directory into a Python module.

    Usage: two positional args — the directory containing .proto files and
    the destination package directory.  For each file: parse, verify the
    whole source was consumed, generate a module (optionally with
    __slots__), and write <converted name>.py.  Prints [OKAY]/[FAIL] per
    file and returns 0 on full success, 1 if any file failed.

    Fixes: `usage` was built but never passed to OptionParser; source and
    output file handles were leaked; the bare `except:` (which also
    swallowed KeyboardInterrupt) is narrowed to Exception.
    """
    usage = "usage: %prog source dest [options]"

    # Fix: previously OptionParser() was constructed without the usage string.
    parser = OptionParser(usage=usage)
    parser.add_option("-s", "--with-slots",
                      dest="with_slots",
                      action="store_true",
                      help="generate code using __slots__")
    (options, args) = parser.parse_args()

    if len(args) != 2:
        parser.error("exactly two arguments required: path to directory with .proto files and path to destination package")

    d = args[0]
    od = args[1]
    exit_status = 0

    protos = [f for f in os.listdir(d) if f.endswith(".proto")]

    for p in protos:
        sys.stdout.write(("%s..." % p).ljust(70))
        sys.stdout.flush()

        # Close the source file promptly instead of leaking the handle.
        with open(os.path.join(d, p)) as src:
            source = src.read()

        try:
            parser = make_parser()
            r = parser.parse(source)

            _, res, l = r
            # A partial parse (l < len(source)) means a syntax error.
            if l != len(source):
                raise SyntaxError("Syntax error on line %s near %r" % (
                    source[:l].count('\n') + 1,
                    source[l:l+10]))
            s = gen_module([m for m in res if type(m) is tuple],
                    [m for m in res if type(m) is str],
                    [m for m in res if type(m) is list],
                    options.with_slots,
                    )
            with open(os.path.join(od, convert_proto_name(p) + ".py"), 'wb') as out:
                out.write(s)
        except Exception:
            # Narrowed from a bare except so Ctrl-C still aborts the run;
            # any per-file failure is reported and processing continues.
            sys.stdout.write("[FAIL]\n")
            sys.stdout.flush()
            traceback.print_exc()
            exit_status = 1
        else:
            sys.stdout.write("[OKAY]\n")
            sys.stdout.flush()

    return exit_status
Пример #9
0
def main():
    """Cluster parsed scores with LSA-reduced vectors over a grid of configs.

    Parses every input file, then for k in 2..8 (step 2) applies LSA and
    runs Pycluster.kcluster for each (nclusters, npass, method, dist)
    combination, pickling the results dict to clusters_results.pickle.

    NOTE: written for Python 2 (`print` statement, integer division).
    """
    usage= 'usage: %prog [options] infname1 [infname2 ...]'
    parser= OptionParser(usage=usage)
    parser.add_option('-o', '--output', 
                        dest='output_fname', default='cluster.out',
                        help='output fname')
    parser.add_option('-c', '--no-cache', 
                        dest='cache', action='store_false', default=True, 
                        help='discard cache')
    # NOTE(review): -k is parsed but unused; the k loop below is hard-coded.
    parser.add_option('-k', '--resulting-dimensionality', 
                        dest='k', default=150, 
                        help='el numero de dimensiones resultantes')

    options, args= parser.parse_args(argv[1:])
    if len(args) < 1: parser.error('not enaught args')

    # `parser` is rebound from the OptionParser to the score parser.
    parser= parserclass('models_cache')

    infnames= args
    # NOTE(review): outfname is never used; results go to clusters_results.pickle.
    outfname= options.output_fname

    print 'parsing scores'
    scores= [parser.parse(infname, cache=options.cache) for infname in infnames]

    nclusterss= [2] # range(2,5)
    npasss= [5,10, 15, 20, 25]
    methods= ['a', 'm']
    dists= ['e', 'b', 'c', 'a', 'u', 'x', 's', 'k']
    configs= list(combine(nclusterss, npasss, methods, dists))

    results= {}
    for k in range(2,10,2):
        print 'k=', k
        concept_vectors= apply_lsa(scores, k)
        # Progress marker roughly every 10% of the config grid.
        step= len(configs)/10
        for i, (nclusters, npass, method, dist) in enumerate(configs):
            if (i+1)%step == 0: print '\t', ((i+1)*100)/len(configs)
            # NOTE(review): npass is iterated but not forwarded to kcluster.
            r= Pycluster.kcluster(concept_vectors, 
                                  nclusters= nclusters, 
                                  method= method, dist= dist)
            results[(k, nclusters, npass, method, dist)]= r

    
    f= open('clusters_results.pickle', 'w')
    pickle.dump(results, f, 2)
    f.close()
Пример #10
0
def main():
    """Parse a score and print every note of its first instrument.

    NOTE: written for Python 2 (`print` statement; dict.values() returns a
    list, so indexing picks an arbitrary "first" instrument).
    """
    usage= 'usage: %prog [options] infname'
    parser= OptionParser(usage=usage)
    parser.add_option('-c', '--no-cache', 
                        dest='cache', action='store_false', default=True, 
                        help='discard cache')


    options, args= parser.parse_args(argv[1:])
    if len(args) < 1: parser.error('not enaught args')

    # `parser` is rebound from the OptionParser to the score parser.
    parser= parserclass('models_cache')
    
    infname= args[0]
    score= parser.parse(infname, cache=options.cache)
    #import ipdb;ipdb.set_trace()
    notes= score.notes_per_instrument.values()[0]
    for n in notes: print n
Пример #11
0
def main():
    """Render a MIDI score's note graph to an SVG file via graphviz.

    The output name is derived from the input file (.mid -> .svg).
    NOTE(review): --patch is parsed but never used, and a second
    positional argument is ignored despite the usage string.
    """
    parser= OptionParser(usage='usage: %prog [options] infname outfname')

    parser.add_option('-p', '--patch', dest='patch', type='int', help='patch to select')

    options, args= parser.parse_args(argv[1:])
    if len(args) < 1:
        parser.error('not enaught args')

    infname= args[0]
    outfname= infname.replace('.mid', '.svg')

    score= MidiScoreParser().parse(infname)

    rank(score)
    build_graph(score).draw(outfname, prog='dot') # , args='-Gsize="4096,4096"')
Пример #12
0
def main():
    usage= 'usage: %prog [options] infname [outfname]'
    parser= OptionParser(usage=usage)
    parser.add_option('-f', '--from', dest='moment_from', help='start moment in ticks', default=0)
    parser.add_option('-t', '--to', dest='moment_to', help='end moment in ticks')
    parser.add_option('-d', '--duration', dest='duration', help='duration of the slice')
    parser.add_option('-c', '--no-cache', dest='cache', action='store_false', default=True, help='discard cache')

    options, args= parser.parse_args(argv[1:])
    if len(args) < 1: parser.error('not enaught args')

    parser= parserclass('models_cache')
    moment_from= int(options.moment_from)
    if options.moment_to is None and options.duration is None:
        parser.error('falta --to y --duration')
    elif options.moment_to is None:
        moment_to= int(options.moment_from)+int(options.duration)
    else:
        moment_to= int(options.moment_to)

    infname= args[0]
    if len(args) == 1: outfname= '%s-%s-%s.mid' % (infname[:-4], moment_from, moment_to)
    else: outfname= args[1]

    print 'creating score'
    score= parser.parse(infname, cache=options.cache)
    beat_duration= float(score.tempo)/1e6
    divisions= score.divisions
    for instrument, notes in score.notes_per_instrument.iteritems():
        new_notes= [n for n in notes  \
                      if n.start/divisions*beat_duration >= moment_from and \
                         (n.start + n.duration)/divisions*beat_duration< moment_to]
        score.notes_per_instrument[instrument]= new_notes

    print 'dumping'
    writer= writerclass()
    writer.dump(score, outfname)
Пример #13
0
def main():
    """rdb-tools CLI: export a Redis RDB dump in the selected format.

    Supports the json, diff, justkeys, justkeyvals, memory and protocol
    commands, db/key/not-key/type filters, memory thresholds (-b, -l) and
    string escaping (-e).  Output goes to -f/--file, or to a binary view
    of stdout otherwise.

    Bug fix: the callback construction was wrapped in a bare `except:`,
    which reported "Invalid Command" even when the command was valid but
    the callback constructor itself failed (and swallowed the real
    traceback).  The dict lookup is now separated and only KeyError —
    i.e. an actually unknown command — triggers the error.
    """
    usage = """usage: %prog [options] /path/to/dump.rdb

Example : %prog --command json -k "user.*" /var/redis/6379/dump.rdb"""

    parser = OptionParser(usage=usage)
    parser.add_option("-c", "--command", dest="command",
                  help="Command to execute. Valid commands are json, diff, justkeys, justkeyvals, memory and protocol", metavar="FILE")
    parser.add_option("-f", "--file", dest="output",
                  help="Output file", metavar="FILE")
    parser.add_option("-n", "--db", dest="dbs", action="append",
                  help="Database Number. Multiple databases can be provided. If not specified, all databases will be included.")
    parser.add_option("-k", "--key", dest="keys", default=None,
                  help="Keys to export. This can be a regular expression")
    parser.add_option("-o", "--not-key", dest="not_keys", default=None,
                  help="Keys Not to export. This can be a regular expression")
    parser.add_option("-t", "--type", dest="types", action="append",
                  help="""Data types to include. Possible values are string, hash, set, sortedset, list. Multiple typees can be provided. 
                    If not specified, all data types will be returned""")
    parser.add_option("-b", "--bytes", dest="bytes", default=None,
                  help="Limit memory output to keys greater to or equal to this value (in bytes)")
    parser.add_option("-l", "--largest", dest="largest", default=None,
                  help="Limit memory output to only the top N keys (by size)")
    parser.add_option("-e", "--escape", dest="escape", choices=ESCAPE_CHOICES,
                      help="Escape strings to encoding: %s (default), %s, %s, or %s." % tuple(ESCAPE_CHOICES))

    (options, args) = parser.parse_args()
    
    if len(args) == 0:
        parser.error("Redis RDB file not specified")
    dump_file = args[0]
    
    filters = {}
    if options.dbs:
        filters['dbs'] = []
        for x in options.dbs:
            try:
                filters['dbs'].append(int(x))
            except ValueError:
                raise Exception('Invalid database number %s' %x)
    
    if options.keys:
        filters['keys'] = options.keys
        
    if options.not_keys:
        filters['not_keys'] = options.not_keys
    
    if options.types:
        filters['types'] = []
        for x in options.types:
            if not x in VALID_TYPES:
                raise Exception('Invalid type provided - %s. Expected one of %s' % (x, (", ".join(VALID_TYPES))))
            else:
                filters['types'].append(x)

    out_file_obj = None
    try:
        if options.output:
            out_file_obj = open(options.output, "wb")
        else:
            # Prefer not to depend on Python stdout implementation for writing binary.
            out_file_obj = os.fdopen(sys.stdout.fileno(), 'wb')

        # Look up the factory first so only an unknown command — not a
        # failing constructor — is reported as invalid.
        try:
            make_callback = {
                'diff': lambda f: DiffCallback(f, string_escape=options.escape),
                'json': lambda f: JSONCallback(f, string_escape=options.escape),
                'justkeys': lambda f: KeysOnlyCallback(f, string_escape=options.escape),
                'justkeyvals': lambda f: KeyValsOnlyCallback(f, string_escape=options.escape),
                'memory': lambda f: MemoryCallback(PrintAllKeys(f, options.bytes, options.largest),
                                                   64, string_escape=options.escape),
                'protocol': lambda f: ProtocolCallback(f, string_escape=options.escape)
            }[options.command]
        except KeyError:
            raise Exception('Invalid Command %s' % options.command)
        callback = make_callback(out_file_obj)

        if not PYTHON_LZF_INSTALLED:
            eprint("WARNING: python-lzf package NOT detected. " +
                "Parsing dump file will be very slow unless you install it. " +
                "To install, run the following command:")
            eprint("")
            eprint("pip install python-lzf")
            eprint("")

        parser = RdbParser(callback, filters=filters)
        parser.parse(dump_file)
    finally:
        # Only close files we opened ourselves — never the stdout wrapper.
        if options.output and out_file_obj is not None:
            out_file_obj.close()
Пример #14
0
def main():
    usage = """usage: %prog [options] /path/to/dump.rdb

Example : %prog --command json -k "user.*" /var/redis/6379/dump.rdb"""

    parser = OptionParser(usage=usage)
    parser.add_option("-c", "--command", dest="command",
                  help="Command to execute. Valid commands are json or diff", metavar="FILE")
                  
    parser.add_option("-f", "--file", dest="output",
                  help="Output file", metavar="FILE")
    parser.add_option("-n", "--db", dest="dbs", action="append",
                  help="Database Number. Multiple databases can be provided. If not specified, all databases will be included.")
    parser.add_option("-k", "--key", dest="keys", default=None,
                  help="Keys to export. This can be a regular expression,"
                       "When picking this mode, we will dump a deep copy of the key")
    parser.add_option("-t", "--type", dest="types", action="append",
                  help="""Data types to include. Possible values are string, hash, set, sortedset, list. Multiple typees can be provided. 
                    If not specified, all data types will be returned""")

    parser.add_option("-p", "--pos", dest="pos",
                  help="""Position in RDB file to skip to, after generated an memory index file this can be used to speed up.
                          The data starts reading at 9 bytes""",
                  default = 9)

    parser.add_option("-m", "--max", dest="max",
                  help=""" Read maximum number of keys, to limit search.""",
                  default = 2e31)


    parser.add_option("-v", "--verbose", dest="verbose",
                  help="""If true dump a deep copy of the data structure""",
                  action = "store_true",
                  default = False)

    parser.add_option("-q", "--quick", dest="quick",
                  help="""If true dump a deep copy of the data structure""",
                  action = "store_true",
                  default = False)
    
    (options, args) = parser.parse_args()
    
    if len(args) == 0:
        parser.error("Redis RDB file not specified")
    dump_file = args[0]
    
    filters = {}
    if options.dbs:
        filters['dbs'] = []
        for x in options.dbs:
            try:
                filters['dbs'].append(int(x))
            except ValueError:
                raise Exception('Invalid database number %s' %x)
    
    if options.keys:
        filters['keys'] = options.keys
    
    if options.types:
        filters['types'] = []
        for x in options.types:
            if not x in VALID_TYPES:
                raise Exception('Invalid type provided - %s. Expected one of %s' % (x, (", ".join(VALID_TYPES))))
            else:
                filters['types'].append(x)


    filters['pos'] = options.pos
    filters['max'] = options.max
    
    # TODO : Fix this ugly if-else code
    callback = None
    if options.output:
        f = open(options.output, "wb")
        if 'diff' == options.command:
            callback = DiffCallback(f)
        elif 'json' == options.command:
            callback = JSONCallback(f)
        elif 'memory' == options.command:
            reporter = PrintAllKeys(f)
            callback = MemoryCallback(reporter, 64, options.verbose)
        else:
            raise Exception('Invalid Command %s' % options.output)
    else:
        if 'diff' == options.command:
            callback = DiffCallback(sys.stdout)
        elif 'json' == options.command:
            callback = JSONCallback(sys.stdout)
        elif 'memory' == options.command:
            reporter = PrintAllKeys(sys.stdout)
            callback = MemoryCallback(reporter, 64, options.verbose)
        else:
            raise Exception('Invalid Command %s' % options.output)


    start = time.clock()
    parser = RdbParser(callback, filters=filters, quick=options.quick)
    parser.parse(dump_file)
    end =  time.clock()

    print "time=%s seconds" % (end-start)
Пример #15
0
def main():
    """Train one HarmonyHMM per input MIDI file, draw each model's transition
    graph, and write a CSV matrix of pairwise model similarities.

    For each input: parse it, keep the first voice's positive-duration
    notes, train a HarmonyHMM over --n-measures-sized intervals, and
    collect the model.  Each model's state-transition graph is rendered to
    <input basename>.png; the asymmetric similarity matrix sim(m1, m2) is
    written to the -o CSV file.

    NOTE: written for Python 2 (`print`, `iteritems`, `xrange`); everything
    after the `return` near the end is unreachable dead code.
    """
    usage= 'usage: %prog [options] midis'
    parser= OptionParser(usage=usage)
    parser.add_option('--n-measures', dest='n_measures', default=1, type='int', help='if the partition algorithm is MEASURE, especifies the number of measures to take as a unit')
    parser.add_option('-o', dest='outfname', help='the output file')


    options, args= parser.parse_args(argv[1:])
    if len(args) < 1: parser.error('not enaught args')

    outfname= options.outfname
    if outfname is None:
        parser.error('missing outfname')

    infnames= args[:]

    # `parser` is rebound from the OptionParser to the MIDI score parser.
    parser= MidiScoreParser()
    models= []
    for infname in infnames: 
        score= parser.parse(infname)
        if not score: import ipdb;ipdb.set_trace() # why could this return None?

        #########
        # PARSING ADJUSTMENTS: keep only the first voice's notes with
        # positive duration, attached to the first instrument.
        notes= score.get_first_voice()
        notes= [n for n in notes if n.duration > 0]
        instr= score.notes_per_instrument.keys()[0]
        patch= instr.patch
        channel= instr.channel
        score.notes_per_instrument= {instr:notes}
        #########


        interval_size= measure_interval_size(score, options.n_measures)
        algorithm= HarmonyHMM(interval_size, instrument=patch, channel=channel)
        algorithm.train(score)
        algorithm.create_model()
        models.append(algorithm.model)


    def sim(m1, m2):
        """
        Compute H(m1|m2) (originally: "calcula H(m1|m2)"): the negated sum of
        m2.pi[s1] * p * log(p) over m1's transition probabilities.
        """
        from math import log
        ans= 0
        for s1 in m2.state_transition:
            for s2, prob in m1.state_transition.get(s1, {}).iteritems():
                ans+= m2.pi[s1]*prob*log(prob)
        
        return -ans


    from os import path
    def get_node_name(ticks):
        # Render a tick count as "<quarters> + <fraction of a quarter>",
        # with score.divisions ticks per quarter note.
        n_quarters= 0
        while ticks >= score.divisions:
            ticks-= score.divisions
            n_quarters+= 1

        if ticks > 0:
            f= Fraction(ticks, score.divisions)
            if n_quarters > 0: return "%s + %s" % (n_quarters, f)
            else: return repr(f)
        return "%s" % n_quarters 
    # Render each model's transition graph to <input basename>.png.
    for i, m in enumerate(models):
        m.pi= m.calc_stationationary_distr()

        from pygraphviz import AGraph
        from utils.fraction import Fraction
            
           
        g= AGraph(directed=True, strict=False)
        for n1, adj in m.state_transition.iteritems():
            n1_name= get_node_name(n1)
            for n2, prob in adj.iteritems():
                n2_name= get_node_name(n2)
                g.add_edge(n1_name, n2_name)
                e= g.get_edge(n1_name, n2_name)
                e.attr['label']= str(prob)[:5]
        model_fname= path.basename(infnames[i]).replace('mid', 'png')
        g.draw(model_fname, prog='dot', args='-Grankdir=LR')                

    # Pairwise similarities in both directions (sim is not symmetric).
    sims= defaultdict(dict)
    for i, m1 in enumerate(models):
        for j in xrange(i+1, len(models)):
            m2= models[j]
            sims[path.basename(infnames[i])][path.basename(infnames[j])]= sim(m1, m2)
            sims[path.basename(infnames[j])][path.basename(infnames[i])]= sim(m2, m1)
    
    import csv

    # Write the matrix: a header row of names, then one row per model.
    f= open(outfname, 'w')
    writer= csv.writer(f)
    head= sorted(sims)
    head.insert(0, "-")
    writer.writerow(head)
    for (k, row) in sorted(sims.iteritems(), key=lambda x:x[0]):
        row= sorted(row.iteritems(), key=lambda x:x[0])
        row= [t[1] for t in row]
        row.insert(0, k)
        writer.writerow(row)

    f.close()
    return

    # NOTE(review): everything below is unreachable (after the return above).
    from pygraphviz import AGraph
    from utils.fraction import Fraction
        
       

    g= AGraph(directed=True, strict=False)
    for infname1, adj in sims.iteritems():
        for infname2, sim in adj.iteritems():
            if sim>0.2 and sim!=0: continue
            g.add_edge(infname1, infname2)
            e= g.get_edge(infname1, infname2)
            e.attr['label']= str(sim)[:5]
    g.draw(outfname, prog='dot')
      if status == BuildStatus.Success:
        pass

      # Parse log to save 'TestFailed' results.
      elif status == BuildStatus.TestFailed :
        logging.info("Checking build log for '%s' at %d (%s)" % (name, starttime, ctime(starttime)))
        try:
          failures = []
          # Grab the build log.
          log, headers = urllib.urlretrieve("http://tinderbox.mozilla.org/%s/%s" % (options.tree, build['logfile']))
          gz = GzipFile(log) # I need a list of lines from the build log

          # assured to match because we search for this above
          harness_type = re.match(harness_regex, logname).groups()[0]
          parser = parsers.get(harness_type, logparser.LogParser)()
          failures = parser.parse(gz)

          # add the failures to the database
          for failure in failures:

            # convenience variables; can probably delete
            test = failure['test']
            text = failure['text']
            reason = failure['reason']
            
            testnames_id=GetOrInsertTest(conn,test)
            if HaveFailRecord(conn,buildid,  reason, testnames_id):
              logging.info("Skipping already recorded failure '%s' in build with id '%s' with failure record '%s' " % (test, buildid, text))
            else:  
              InsertTest(conn, buildid, reason, testnames_id, text)
                      
Пример #17
0
def main():
    """Export a Redis RDB dump as diff/json/csv/memory/protocol output.

    Builds db/key/type filters, then streams the dump through RdbParser
    with the callback selected by -c/--command, writing either to the
    -f/--file target or to stdout.

    Bug fix: the file-output branch constructed RdbParser WITHOUT
    `filters=filters`, so using -f together with -n/-k/-t silently
    exported everything; both branches now pass the filters.  The
    duplicated if-else blocks (the old TODO) are merged into one helper.
    """
    usage = """usage: %prog [options] /path/to/dump.rdb

Example : %prog --command json -k "user.*" /var/redis/6379/dump.rdb"""

    parser = OptionParser(usage=usage)
    parser.add_option("-c", "--command", dest="command",
                  help="Command to execute. Valid commands are json, diff, and protocol", metavar="FILE")
    parser.add_option("-f", "--file", dest="output",
                  help="Output file", metavar="FILE")
    parser.add_option("-n", "--db", dest="dbs", action="append",
                  help="Database Number. Multiple databases can be provided. If not specified, all databases will be included.")
    parser.add_option("-k", "--key", dest="keys", default=None,
                  help="Keys to export. This can be a regular expression")
    parser.add_option("-t", "--type", dest="types", action="append",
                  help="""Data types to include. Possible values are string, hash, set, sortedset, list. Multiple typees can be provided. 
                    If not specified, all data types will be returned""")

    (options, args) = parser.parse_args()

    if len(args) == 0:
        parser.error("Redis RDB file not specified")
    dump_file = args[0]

    filters = {}
    if options.dbs:
        filters['dbs'] = []
        for x in options.dbs:
            try:
                filters['dbs'].append(int(x))
            except ValueError:
                raise Exception('Invalid database number %s' %x)

    if options.keys:
        filters['keys'] = options.keys

    if options.types:
        filters['types'] = []
        for x in options.types:
            if not x in VALID_TYPES:
                raise Exception('Invalid type provided - %s. Expected one of %s' % (x, (", ".join(VALID_TYPES))))
            else:
                filters['types'].append(x)

    def _make_callback(stream):
        # One construction point shared by the file and stdout paths
        # (resolves the old "Fix this ugly if-else code" TODO).
        if 'diff' == options.command:
            return DiffCallback(stream)
        elif 'json' == options.command:
            return JSONCallback(stream)
        elif 'csv' == options.command:
            return CSVCallback(stream)
        elif 'memory' == options.command:
            return MemoryCallback(PrintAllKeys(stream), 64)
        elif 'protocol' == options.command:
            return ProtocolCallback(stream)
        raise Exception('Invalid Command %s' % options.command)

    if options.output:
        with open(options.output, "wb") as f:
            # BUG FIX: this branch previously omitted filters=filters.
            parser = RdbParser(_make_callback(f), filters=filters)
            parser.parse(dump_file)
    else:
        parser = RdbParser(_make_callback(sys.stdout), filters=filters)
        parser.parse(dump_file)
Пример #18
0
    # These are duplicates, only found via a contents page still
    # linked to via a "Back to contents" link.
    if re.search('day-wa-01_war0816\.htm',filename):
        continue
    if re.search('day-wa-03_war1110\.htm',filename):
        continue
    if re.search('day-wa-03_war0922\.htm',filename):
        continue    
    if re.search('day-wa-03_war0805\.htm',filename):
        continue

    # This is a misnamed file, a duplicate of wa-02_wa1004.htm
    if re.search('day-wa-02_wa0104\.htm',filename):
        continue

    parser = Parser()

    parser.parse(filename)

    # try:
    #     parser.parse(filename)
    # except Exception, e:
    #     if verbose: print "An unhandled exception occured in parsing: "+filename
    #     if verbose: print "The exception was: "+str(e)
    #     traceback.print_exc(file=sys.stdout)
    #     files_and_exceptions.append( (filename,e) )

# if verbose: print "All exceptions:"
# for e in files_and_exceptions:
#     if verbose: print "%s: %s" % e
Пример #19
0
def main():
        """Generate (and optionally msub-submit) FUDGE downscaling run scripts
        from an XML experiment template.

        Steps: parse the -i XML via listVars(), then shell out (tcsh) to
        helper scripts that create the R code starters (create_runcode),
        per-slice runscripts (create_runscript), master runscripts
        (create_master_runscript), a postProc command file (create_postProc)
        and finally an experiment summary via fudgeList.py.

        NOTE(review): 'overwrite', 'preexist_glob' and 'ppn' are referenced
        below but never assigned in this function -- presumably module
        globals set elsewhere; confirm, otherwise this raises NameError.
        """
    #################### args parsing ############
        help = "#################Usage:(run from AN nodes)##################\n dsTemplater -i <input XML> \n "
        help = help + "Example 1: expergen -i dstemplate.xml \n "
        help = help + "Example 2: expergen -i examples/dstemplate.60lo0FUTURE.xml \n"
        #print usage
	basedir = os.environ.get('BASEDIR')
        if(basedir is None):
	   print "Warning: BASEDIR environment variable not set"	
	##### get version BRANCH info ######################
        branch = os.environ.get('BRANCH')
        if(branch is None):
           print "Warning: BRANCH env variable not set. BRANCH will be set to undefined"
           branch = "undefined"
	####################################################
        parser = OptionParser(usage=help)
        parser.add_option("-i", "--file", dest="uinput",
        help="pass location of XML template", metavar="FILE")
      #since we now have an XML tag  parser.add_option("-f", "--force",action="store_true",default=False, help="Force override existing output. Default is set to FALSE")
        parser.add_option("--msub", "--msub",action="store_true",default=False, help="Automatically submit the master runscripts using msub.Default is set to FALSE")
        #parser.add_option("-v", "--version",action="store_true", dest="version",default="v20120422", help="Assign version for downscaled data. Default is set to v20120422")        
        
        (options, args) = parser.parse_args()
        verOpt = True #default
	msub = False 
        forOpt = True #default
        inter = 'on' #for debugging
#########if platform is not PAN, disable expergen at this time 11/14/2014 ###############
        # NOTE(review): only 'an*' nodes pass this gate.  A 'pp*' node prints
        # its greeting but then also falls into the else below and exits --
        # looks like the second 'if' was meant to be 'elif'; confirm.
        system,node,release,version,machine = os.uname()
        if(node.startswith('pp')):
                print "Running on PP(PAN) node", node
        if(node.startswith('an')):
                print "Running on AN(PAN) node", node
        else:
                 print "\033[1;41mERROR code -5: Running on a workstation not tested for end-to-end runs yet. Please login to analysis nodes to run expergen. \033[1;m",node
		 sys.exit(-5)

   
######################################################################################### 
        if (inter == 'off'):
		uinput = 'dstemplate.xml'
	        dversion = "v20120422"
                force = False
                if os.path.exists(uinput):
                                        parser = xml.sax.make_parser(  )
                                        handler = temphandler.TempHandler(  )
                                        parser.setContentHandler(handler)
                                        parser.parse(uinput)
        if (inter == 'on'):  
        	for opts,vals in options.__dict__.items():
           	# print opts,vals
            		if(opts == 'uinput'):
                		uinput = vals
                		if os.path.exists(uinput):
				        print "XML input:",uinput
                    			#parser = xml.sax.make_parser(  )
                    			#handler = temphandler.TempHandler(  )
                    			#parser.setContentHandler(handler)
                    			#parser.parse(uinput)
                		else:
                    			print "Please pass a valid input XML filename with the -i argument and try again. See -h for syntax. Quitting now.."
                    			sys.exit()
            		if(opts == 'msub'):
                		if (vals == True):
                    			msub = True 
				else:
					msub = False
                		msub = vals 
        #########  call listVars() #############################################################
        # NOTE(review): 'late' appears twice in this unpacking -- the second
        # assignment silently wins; confirm against listVars()'s return order.
	output_grid,kfold,lone,region,fut_train_start_time,fut_train_end_time,file_j_range,hist_file_start_time,hist_file_end_time,hist_train_start_time,hist_train_end_time,lats,late,lons,late, basedir,method,target_file_start_time,target_file_end_time,target_train_start_time,target_train_end_time,spat_mask,fut_file_start_time,fut_file_end_time,predictor,target,params,outdir,dversion,dexper,target_scenario,target_model,target_freq,hist_scenario,hist_model,hist_freq,fut_scenario,fut_model,fut_freq,hist_pred_dir,fut_pred_dir,target_dir,expconfig,target_time_window,hist_time_window,fut_time_window,tstamp,ds_region,target_ver,auxcustom,qc_mask,qc_varname,qc_type,adjust_out,sbase,pr_opts,masklists= listVars(uinput,basedir,msub)
        ######### call script creators..  #######################################################
        ############################### 1 ###############################################################
        #  make.code.tmax.sh 1 748 756 /vftmp/Aparna.Radhakrishnan/pid15769 outdir 1979 2008 tasmax

	# If the runscript directory already exists, please quit
	#/home/a1r/gitlab/cew/fudge2014///scripts/tasmax/35txa-CDFt-A00X01K02
	scriptdir = [sbase+"/master",sbase+"/runcode",sbase+"/runscript"]
        for sd in scriptdir:
	    if(os.path.exists(sd)):
       		 if os.listdir(sd):
		    if (overwrite == False):    
			print '\033[1;41mERROR: Scripts Directory already exists. Clean up using cleanup_script and try again please -or- (Use <ifpreexist>erase</ifpreexist> if output/scripts already exist and you want to override this and let expergen run the cleanup_script for you; Use <ifpreexist>move</ifpreexist> to move existing output)\033[1;m',sd
                        print "\033[1;41mERROR code -6: script directory already exists.Check --\033[1;m",scriptdir
                	sys.exit(-6)
		    if (overwrite == True):
			print "\033[1;43mWarning: Scripts Directory already exists. But, since <ifpreexist> has different settings, cleanup utility will handle this\033[1;m "
			print "Now invoking cleanup utility..........."
		        cleaner_script = basedir+"/utils/bin/"+"cleanup_script.csh"
			if(preexist_glob == 'erase'):
				print "Lets erase it ........... ifpreexist"
		        	cleaner_cmd = cleaner_script+" d "+uinput 
			else: 
				if(preexist_glob != 'move') & (preexist_glob != 'exit'):
					print "CHECK ifpreexist settings, quitting now"
					sys.exit(1)
				print "Lets move it............... if preexist" 
                               	cleaner_cmd = cleaner_script+" m "+uinput
	                print "cleaner_cmd"
			print cleaner_cmd		
	                pclean = subprocess.Popen(cleaner_cmd,shell=True,stdout=PIPE,stdin=PIPE, stderr=PIPE)
			#check return code
        		output0, errors0 = pclean.communicate()
        		if(pclean.returncode != 0):
         			print "033[1;41mCleaner Step:!!!! FAILED !!!!, please contact developer.033[1;m"
         			print output0, errors0
        			sys.exit(1)
			else:
				print output0,errors0
			print "continue expergen run...................." 
			break 
	    else:
		   print "scriptdir "+sd+" does not exist. Looks like a clean slate "
        #print  "hist_freq"+hist_freq
        script1Loc = basedir+"/utils/bin/create_runcode"
        make_code_cmd = script1Loc+" "+str(predictor)+" "+str(target)+" "+str(output_grid)+" "+str(spat_mask)+" "+str(region)
        make_code_cmd = make_code_cmd+" "+str(file_j_range)+" "+str(lons)+" "+str(lats)+" "+str(late)
	make_code_cmd = make_code_cmd+" "+str(hist_file_start_time)+" "+str(hist_file_end_time)+" "+str(hist_train_start_time)+" "+str(hist_train_end_time)+" "+str(hist_scenario)+" "+str(hist_model)+" "+hist_freq+" "+str(hist_pred_dir)+" "+str(hist_time_window)
        make_code_cmd = make_code_cmd+" "+str(fut_file_start_time)+" "+str(fut_file_end_time)+" "+str(fut_train_start_time)+" "+str(fut_train_end_time)+" "+str(fut_scenario)+" "+str(fut_model)+" "+fut_freq+" "+str(fut_pred_dir)+" "+str(fut_time_window)
        make_code_cmd = make_code_cmd+" "+str(target_file_start_time)+" "+str(target_file_end_time)+" "+str(target_train_start_time)+" "+str(target_train_end_time)+" "+str(target_scenario)+" "+str(target_model)+" "+target_freq+" "+str(target_dir)+" "+str(target_time_window)
        make_code_cmd = make_code_cmd+" "+str(method)+" "+str(expconfig)+" "+str(kfold)+" "+str(outdir)+" "+str(tstamp)+" "+'na'+" "+basedir+" "+str(lone)

        print "Step 1: R code starters generation ..in progress"
        #p = subprocess.Popen('tcsh -c "'+make_code_cmd+'"',shell=True,stdout=PIPE,stdin=PIPE, stderr=PIPE)
        
        params_new =  '"'+str(params)+'\"'
	params_pr_opts = '"'+str(pr_opts)+'\"'
        make_code_cmd = make_code_cmd +" "+params_new+" "+"'"+str(ds_region)+"'"
        make_code_cmd = make_code_cmd+" "+str(auxcustom)+" "+str(qc_mask)+" "+str(qc_varname)+" "+str(qc_type)+" "+str(adjust_out)+" "+str(sbase)+" "+str(params_pr_opts)+" "+str(branch)+" "+'"'+str(masklists)+'"' 
	#cprint make_code_cmd
        #p = subprocess.Popen(make_code_cmd +" "+params_new,shell=True,stdout=PIPE,stdin=PIPE, stderr=PIPE)
        p = subprocess.Popen(make_code_cmd,shell=True,stdout=PIPE,stdin=PIPE, stderr=PIPE)
        output, errors = p.communicate() 
        if(p.returncode != 0):
         print "Step1:!!!! FAILED !!!!, please contact developer."
         print output, errors
         sys.exit(0)
        #cprint output, errors
	#cprint "----Log-----"
        #cprint output,errors
        ###############################################################################################
        print "1- completed\n"
        #print "debug............msub turned ",msub
        ############################### 2 ################################################################
	#target_time_window,hist_time_window,fut_time_window
        script2Loc = basedir+"/utils/bin/"+"create_runscript"
        create_runscript_cmd = script2Loc+" "+str(lons)+" "+str(lone)+" "+str(expconfig)+" "+str(basedir)+" "+target+" "+method+" "+target_dir+" "+hist_pred_dir+" "+fut_pred_dir+" "+outdir+" "+str(file_j_range)+" "+tstamp+" "+str(target_file_start_time)+" "+str(target_file_end_time)+" "+str(hist_file_start_time)+" "+str(hist_file_end_time)+" "+str(fut_file_start_time)+" "+str(fut_file_end_time)+" "+str(spat_mask)+" "+str(region)+" "+auxcustom+" "+target_time_window+" "+hist_time_window+" "+fut_time_window+" "+sbase
        print "Step 2: Individual Runscript generation: \n"+create_runscript_cmd
        p1 = subprocess.Popen('tcsh -c "'+create_runscript_cmd+'"',shell=True,stdout=PIPE,stdin=PIPE, stderr=PIPE)
        print "Step 2: Individual runscript  creation.. in progress"
        output1, errors1 = p1.communicate()
	#cprint output1,errors1
        print "2- completed\n"
        if(p1.returncode != 0):
         print "Step2:!!!! FAILED !!!!, please contact developer."
         print output1, errors1
         sys.exit(0)
        #print output1, errors1
        #c print errors1
        
        ###################################### 3 ################################################################
        script3Loc = basedir+"/utils/bin/"+"create_master_runscript"
        create_master_cmd= script3Loc+" "+str(lons)+" "+str(lone)+" "+str(predictor)+" "+method+" "+sbase+" "+expconfig+" "+file_j_range+" "+tstamp+" "+str(ppn)+" "+str(msub)
        print "Step 3: --------------MASTER SCRIPT GENERATION-----------------------"#+create_master_cmd
        p2 = subprocess.Popen('tcsh -c "'+create_master_cmd+'"',shell=True,stdout=PIPE,stdin=PIPE, stderr=PIPE)
        #cprint create_master_cmd
        print "Create master script .. in progress"
        output2, errors2 = p2.communicate()
        #######
        if(p2.returncode != 0):
         print "Step3:!!!! FAILED !!!!, please contact developer."
         print output2, errors2
         sys.exit(0)
        print output2, errors2
        print "3- completed"
	##################################### 4 ############################################
        # copy xml to configs dir 
        cdir = sbase+"/config/"
        try:
	  os.mkdir(cdir)
	except:
	  print "Unable to create dir. Dir may exist already", cdir 
	shutil.copy2(uinput, cdir)
        print "Config XML saved in ",cdir 
        print "RunScripts will be saved under:",sbase
	print "----See readMe in fudge2014 directory for the next steps----"
	print "Use submit_job to submit scripts"

############### crte ppscript #################
	dev = "off" 
        print(sbase+"/postProc/aux/")
        if not os.path.exists(sbase+"/postProc/aux/"):
        	os.makedirs(sbase+"/postProc/aux/")
        if (dev == "off"):
		tsuffix = ""
        	ppbase = sbase+"/postProc/aux/"+"/postProc_source"
	else:
		tsuffix = "_"+tstamp
                ppbase = sbase+"/postProc/aux/"+"/postProc_source"+tstamp
	try:
  		ppfile = open(ppbase, 'w')
#check if qc_mask is relevant  
		if(qc_mask != 'off'):
  			pp_cmnd = "python $BASEDIR/bin/postProc -i "+os.path.abspath(uinput)+" -v "+target+","+target+"_qcmask\n"
	 	else:
                        pp_cmnd = "python $BASEDIR/bin/postProc -i "+os.path.abspath(uinput)+" -v "+target+"\n"
  		ppfile.write(pp_cmnd)
  		ppfile.close()
	except:
  		print "Unable to create postProc command file. You may want to check your settings."
	#c	print create_pp_cmd
        if(os.path.exists(ppbase)):
######################### write postProc_job to be used ############################
                ppLoc = basedir+"/utils/bin/"+"create_postProc"
                create_pp_cmd= ppLoc+" "+ppbase+" "+sbase+"  "+basedir+" "+tstamp+" "+branch
                print "Step 4: --------------PP postProc SCRIPT GENERATION-----------------------"
                p4 = subprocess.Popen('tcsh -c "'+create_pp_cmd+'"',shell=True,stdout=PIPE,stdin=PIPE, stderr=PIPE)
                output4, error4 = p4.communicate()
                if(p4.returncode != 0):
                        print "Step4:!!!! FAILED !!!!, please contact developer."
                        print output4, error4
                        sys.exit(-4)
                print output4, error4
                print "4- completed"
                print "NOTE: postProc will succeed only if you're running the model for the full downscaled region. (it will fail if you're running downscaling for a single slice for example)"
                print "----------------------------------------"
		print "\033[1;42mPlease use this script to run post post-processing (or msub this script), postProc when downscaling jobs are complete \033[1;m",sbase+"postProc/postProc_command"+tsuffix
                try:  
   			NEMSemail = os.environ["NEMSemail"]
                        print "msub -m ae -M "+os.environ.get('NEMSemail')+" "+sbase+"postProc/postProc_command"+tsuffix
		except KeyError: 
   			print "NEMSemail not set. Please use your email for notification in the following msub command i.e msub -m ae -M <email> script " 
			print "msub "+sbase+"postProc/postProc_command"+tsuffix
	else:
		print "postProc_command cannot be created. postProc_source does not exist"
################ step 5 fudgeList invocation ##############################################
        slogloc = sbase+"/"+"experiment_info.txt"
        fcmd = "python "+basedir+"/bin/fudgeList.py -f -i "+uinput+" -o "+slogloc
        f = subprocess.Popen(fcmd, stdout=subprocess.PIPE, shell=True)
        out, err = f.communicate()
        #print "fudgeList out", out
        #if err is not None:
        #       print "fudgeList err", err
        print "Summary Log File: ", slogloc
Пример #20
0
def weblab_start(directory):
    parser = OptionParser(usage="%prog create DIR [options]")

    parser.add_option("-m", "--machine",           dest="machine", default=None, metavar="MACHINE",
                                                   help = "If there is more than one machine in the configuration, which one should be started.")
    parser.add_option("-l", "--list-machines",     dest="list_machines", action='store_true', default=False, 
                                                   help = "List machines.")

    parser.add_option("-s", "--script",            dest="script", default=None, metavar="SCRIPT",
                                                   help = "If the runner option is not available, which script should be used.")

    options, args = parser.parse_args()

    check_dir_exists(directory, parser)

    def on_dir(directory, configuration_files):
        db_conf = DbConfiguration(configuration_files)
        regular_url = db_conf.build_url()
        coord_url   = db_conf.build_coord_url()
        upgrader = DbUpgrader(regular_url, coord_url)
        return upgrader.check_updated()

    if not run_with_config(directory, on_dir):
        print >> sys.stderr, "Error: WebLab-Deusto instance outdated! You may have updated WebLab-Deusto recently. Run: weblab-admin.py upgrade %s" % directory
        sys.exit(-1)


    old_cwd = os.getcwd()
    os.chdir(directory)
    try:
        if options.script: # If a script is provided, ignore the rest
            if os.path.exists(options.script):
                execfile(options.script)
            elif os.path.exists(os.path.join(old_cwd, options.script)):
                execfile(os.path.join(old_cwd, options.script))
            else:
                print >> sys.stderr, "Provided script %s does not exist" % options.script
                sys.exit(-1)
        else:
            parser = GlobalParser()
            global_configuration = parser.parse('.')
            if options.list_machines:
                for machine in global_configuration.machines:
                    print ' - %s' % machine
                sys.exit(0)

            machine_name = options.machine
            if machine_name is None: 
                if len(global_configuration.machines) == 1:
                    machine_name = global_configuration.machines.keys()[0]
                else:
                    print >> sys.stderr, "System has more than one machine (see -l). Please detail which machine you want to start with the -m option."
                    sys.exit(-1)

            if not machine_name in global_configuration.machines:
                print >> sys.stderr, "Error: %s machine does not exist. Use -l to see the list of existing machines." % machine_name
                sys.exit(-1)

            machine_config = global_configuration.machines[machine_name]
            if machine_config.runner is None:
                if os.path.exists('run.py'):
                    execfile('run.py')
                else:
                    print >> sys.stderr, "No runner was specified, and run.py was not available. Please the -s argument to specify the script or add the <runner file='run.py'/> option in %s." % machine_name
                    sys.exit(-1)
            else:
                if os.path.exists(machine_config.runner):
                    execfile(machine_config.runner)
                else:
                    print >> sys.stderr, "Misconfigured system. Machine %s points to %s which does not exist." % (machine_name, os.path.abspath(machine_config.runner))
                    sys.exit(-1)
    finally:
        os.chdir(old_cwd)
Пример #21
0
                        dest='outfname', 
                        help='output fname, default=midi_fname.intervals')
    parser.add_option('-c', '--no-cache', 
                        dest='cache', action='store_false', default=True, 
                        help='discard cache')

    options, args= parser.parse_args(argv[1:])
    if len(args) < 1: parser.error('faltan argumentos')

    # NOTE(review): 'infame' is presumably a typo for 'infname'; it is used
    # consistently below, so behaviour is unaffected.
    infame= args[0]
    outfname= options.outfname
    if outfname is None:
        # Default output name: drop the 3-char extension, append 'intervals'.
        outfname= infame[:-3] + 'intervals'

    parser= parserclass()
    score= parser.parse(infame)

    # Work on the notes of the first instrument only (py2 dict-iterator idiom).
    notes= score.notes_per_instrument.itervalues().next()
    # assert there are no two consecutive silences
    assert not any((prev.is_silence and next.is_silence for (prev, next) in zip(notes, notes[1:])))

    # Fold each silence's duration into the preceding note, then drop silences.
    for prev, next in zip(notes, notes[1:]):
        if next.is_silence: prev.duration += next.duration

    notes= [n for n in notes if not n.is_silence]
    def f(e):
        # Format one interval: negative intervals get an 'm' prefix.
        if e < 0:
            return 'm%s' % abs(e)
        else:
            return str(e)
    intervals= [f(next.pitch - prev.pitch) for (prev, next) in zip(notes, notes[1:])]
Пример #22
0
def main():
    parser = OptionParser()
    parser.add_option("-m", "--master", action="store", dest="master", help="Master URI", metavar="MASTER", default=os.getenv("SW_MASTER"))
    parser.add_option("-i", "--id", action="store", dest="id", help="Job ID", metavar="ID", default="default")
    parser.add_option("-e", "--env", action="store_true", dest="send_env", help="Set this flag to send the current environment with the script as _env", default=False)
    (options, args) = parser.parse_args()
   
    if not options.master:
        parser.print_help()
        print >> sys.stderr, "Must specify master URI with --master"
        sys.exit(1)

    if len(args) != 1:
        parser.print_help()
        print >> sys.stderr, "Must specify one script file to execute, as argument"
        sys.exit(1)

    script_name = args[0]
    master_uri = options.master
    id = options.id
    
    print id, "STARTED", now_as_timestamp()

    parser = SWScriptParser()
    
    script = parser.parse(open(script_name, 'r').read())

    print id, "FINISHED_PARSING", now_as_timestamp()
    
    if script is None:
        print "Script did not parse :("
        exit()
    
    cont = SWContinuation(script, SimpleContext())
    if options.send_env:
        cont.context.bind_identifier('env', os.environ)
    
    http = httplib2.Http()
    
    master_data_uri = urlparse.urljoin(master_uri, "/data/")
    pickled_cont = pickle.dumps(cont)
    (_, content) = http.request(master_data_uri, "POST", pickled_cont)
    cont_id = simplejson.loads(content)
    
    print id, "SUBMITTED_CONT", now_as_timestamp()
    
    #print continuation_uri
    
    master_netloc = urlparse.urlparse(master_uri).netloc
    task_descriptor = {'dependencies': {'_cont' : SW2_ConcreteReference(cont_id, SWNoProvenance(), len(pickled_cont), [master_netloc])}, 'handler': 'swi'}
    
    master_task_submit_uri = urlparse.urljoin(master_uri, "/job/")
    (_, content) = http.request(master_task_submit_uri, "POST", simplejson.dumps(task_descriptor, cls=SWReferenceJSONEncoder))
    
    print id, "SUBMITTED_JOB", now_as_timestamp() 
    
    
    out = simplejson.loads(content)
    
    notify_url = urlparse.urljoin(master_uri, "/job/%s/completion" % out['job_id'])
    job_url = urlparse.urljoin(master_uri, "/browse/job/%s" % out['job_id'])

    print id, "JOB_URL", job_url
    
    #print "Blocking to get final result"
    (_, content) = http.request(notify_url)
    completion_result = simplejson.loads(content, object_hook=json_decode_object_hook)
    if "error" in completion_result.keys():
        print id, "ERROR", completion_result["error"]
        return None
    else:
        print id, "GOT_RESULT", now_as_timestamp()
        #print content
        return completion_result["result_ref"]
    # NOTE(review): fragment -- the surrounding function and the earlier
    # add_option calls (verbose, root, output_file, target_build_variant)
    # are outside this excerpt.
    parser.add_option("-d", "--key-directory", default="", dest="key_directory",
                      help="Specify a parent directory for keys")

    (options, args) = parser.parse_args()

    if len(args) < 2:
        parser.error("Must specify a config file (keys.conf) AND mac_permissions.xml file(s)!")

    logging.basicConfig(level=logging.INFO if options.verbose == True else logging.WARN)

    # Read the config file
    config = ParseConfig()
    config.read(args[0])

    os.chdir(options.root)

    output_file = sys.stdout if options.output_file == "stdout" else open(options.output_file, "w")
    logging.info("Setting output file to: " + options.output_file)

    # Generate the key list
    key_map = config.generateKeyMap(options.target_build_variant.lower(), options.key_directory)
    logging.info("Generate key map:")
    for k in key_map:
        logging.info(k + " : " + str(key_map[k]))
    # Generate the XML file with markup replaced with keys
    parser = make_parser()
    parser.setContentHandler(ReplaceTags(key_map, output_file))
    for f in args[1:]:
        parser.parse(f)
Пример #24
0
                      # NOTE(review): fragment -- the enclosing function
                      # (presumably compile_to_xml) and the opening of this
                      # add_option call are outside this excerpt.
                      action="store_true",
                      help="don't delete the temporary files created "\
                      "(useful for finding problems)",
                      default=False)

    options, files = parser.parse_args(argv[1:])

    if not files:
        print "Error: no files to process"
        print >> sys.stderr, __doc__
        return 1

    # Forward selected options under the names IncludeParser expects.
    options.flags = options.gccxml_options
    options.verbose = not options.quiet

    parser = cparser.IncludeParser(options)
    parser.parse(files)

def main(argv=None):
    if argv is None:
        argv = sys.argv

    try:
        compile_to_xml(argv)
    except cparser.CompilerError, detail:
        print >> sys.stderr, "CompilerError:", detail
        return 1

if __name__ == "__main__":
    sys.exit(main())
Пример #25
0
	
	def endElement(self, name):
		# End of an XML element: "msg" closes a CSV row (newline), any
		# other element is followed by a comma separator.
		# NOTE(review): the 'print "no"' for ch1/ch2/watts/data looks
		# like leftover debug output -- confirm before removing.
		if name == "ch1" or name == "ch2" or name == "watts" or name == "data":
			print "no"
		if name == "msg":
			self.outFile.write("\n")
		else:
			self.outFile.write(",")
	
	def characters(self, ch):
		# Write element text stripped of newlines, tabs and commas so it
		# is safe inside a CSV field.
		self.outFile.write( ch.strip("\n\t,") )


if __name__ == "__main__":
    # Parse the command line: the only option is the XML file to convert.
    opt_parser = OptionParser()
    opt_parser.add_option("-f", "--file", dest="filename",
                          help="write report to FILE", metavar="FILE", default="none")
    options, args = opt_parser.parse_args()

    # No input file given: nothing to do.
    if options.filename == "none":
        exit()

    # Stream the XML through the SAX handler, writing CSV rows to dump.csv.
    fileCSV = open("dump.csv", "w")
    sax_parser = xml.sax.make_parser()
    sax_parser.setContentHandler(XML2CSV(fileCSV))
    sax_parser.parse(options.filename)
Пример #26
0
  # NOTE(review): fragment -- the opening 'try:' and the OptionParser setup
  # (options.cores/force/events) are outside this excerpt.
  tarball = tarfile.open(args[0], 'r')
except Exception, e:
  print "Error: %s" % str(e)
  sys.exit(1)

queue = Queue.Queue()

#start the workers
for i in range(int(options.cores)):
  worker = Worker(queue)
  worker.setDaemon(True)
  worker.start()

#fill the queue
# NOTE(review): 'tarball' opened above is never used here -- ConfigParser
# re-reads args[0] directly; confirm whether the tarfile handle is needed.
parser=ConfigParser(args[0])
requests=parser.parse() 

for request in requests:
  

  # Skip existing request directories unless --force was given.
  if os.path.exists(request._dir) and options.force is False:
    print 'Directory '+request._dir+' exists already, doing nothing.'
    print 'You can overwrite with the --force option'
    continue
  
  if not os.path.exists(request._dir):
    os.mkdir(request._dir)
 

  commandBuilder = LocalCommandBuilder(request, options.events) #, options.batch)
  command = commandBuilder.build()
Пример #27
0
    # NOTE(review): fragment -- earlier add_option calls (including --mapcss /
    # options.input) and the definition of 'subparts' are outside this excerpt.
    parser.add_option("-s", "--output-sprite",
        dest="sprite", 
        help="Filename of generated CSS sprite. If not specified, [stylename].png will be uesed")    

    (options, args) = parser.parse_args()    
    
    if not options.input:
        print "--mapcss parameter is required"
        raise SystemExit

    # Style name = input filename without its extension.
    style_name = re.sub("\..*", "", options.input)

    content = open(options.input).read()
    parser = MapCSSParser(debug=False)
    mapcss = parser.parse(content) 
    
    # Render the parsed MapCSS into the body of a JS restyle() function.
    mapcss_js = mapcss.as_js()
    subparts_var = "\n".join(map(lambda subpart: "        var s_%s = {};" % subpart, subparts))
    subparts_fill = "\n".join(map(lambda subpart: "        if (s_%s) {\n            style['%s'] = s_%s;\n        }" % (subpart, subpart, subpart), subparts))

    js = """
(function(MapCSS) {
    function restyle(tags, zoom, type, selector) {
%s
%s
        var style = {};
%s        
        return style;
    }
    """ % (subparts_var, mapcss_js, subparts_fill)
Пример #28
0
    # NOTE(review): fragment -- 'selfpath', 'rst_parser', 'hdr_parser',
    # 'JavadocGenerator' and the enclosing scope are defined outside this
    # excerpt.
    parser = OptionParser()
    parser.add_option("-v", "--verbose", dest="verbose", help="Print verbose log to stdout", action="store_true", default=False)
    parser.add_option("", "--no-warnings", dest="warnings", help="Hide warning messages", action="store_false", default=True)
    parser.add_option("", "--no-errors", dest="errors", help="Hide error messages", action="store_false", default=True)
    parser.add_option("", "--modules", dest="modules", help="comma-separated list of modules to generate comments", metavar="MODS", default=",".join(rst_parser.allmodules))

    (options, args) = parser.parse_args(sys.argv)
    options.modules = options.modules.split(",")

    if len(args) < 2 or len(options.modules) < 1:
        parser.print_help()
        exit(0)

    # Parse the RST documentation of every requested module.
    parser = rst_parser.RstParser(hdr_parser.CppHeaderParser())
    for m in options.modules:
        parser.parse(m, os.path.join(selfpath, "../../" + m))

    parser.printSummary()

    # Insert javadoc comments into every .java file under the given paths,
    # writing the result next to the cwd as *-jdoc.java.
    generator = JavadocGenerator(parser.definitions, options.modules)
    generator.verbose = options.verbose
    generator.show_warnings = options.warnings
    generator.show_errors = options.errors

    for path in args:
        folder = os.path.abspath(path)
        for jfile in [f for f in glob.glob(os.path.join(folder,"*.java")) if not f.endswith("-jdoc.java")]:
            outfile = os.path.abspath(os.path.basename(jfile).replace(".java", "-jdoc.java"))
            generator.document(jfile, outfile)

    generator.printSummary()
Пример #29
0
# Driver for the "dsTemplater" downscaling workflow (Python 2 code):
# reads an XML template, extracts its settings via listVars(), then shells
# out (tcsh, via subprocess) to three generator scripts under
# $BASEDIR/utils/bin to produce R code starters, per-run scripts and a
# master runscript, and finally archives the input XML under the config dir.
# NOTE(review): indentation below mixes tabs and spaces — do not reindent
# without verifying with `python -tt`.
def main():
    #################### args parsing ############
        help = "#################Usage:##################\n dsTemplater -i <input XML> \n "
        help = help + "Example 1: dsTemplater -i dstemplate.xml \n "
        help = help + "Example 2: dsTemplater -i examples/dstemplate.60lo0FUTURE.xml \n"
        #print usage
	# BASEDIR must point at the workflow installation root; a missing
	# value is only warned about here but will break path building below.
	basedir = os.environ.get('BASEDIR')
        if(basedir is None):
	   print "Warning: BASEDIR environment variable not set"	
        parser = OptionParser(usage=help)
        parser.add_option("-i", "--file", dest="uinput",
        help="pass location of XML template", metavar="FILE")
        parser.add_option("-f", "--force",action="store_true",default=False, help="Force override existing output. Default is set to FALSE")
        #parser.add_option("-v", "--version",action="store_true", dest="version",default="v20120422", help="Assign version for downscaled data. Default is set to v20120422")        
        
        (options, args) = parser.parse_args()
        # NOTE(review): verOpt is assigned but never used below.
        verOpt = True #default 
        forOpt = True #default
        # `inter` is a hard-coded debug switch: the 'off' branch below is
        # dead code in normal operation.
        inter = 'on' #for debugging
        if (inter == 'off'):
		uinput = 'dstemplate.xml'
	        dversion = "v20120422"
                force = False
                if os.path.exists(uinput):
                                        parser = xml.sax.make_parser(  )
                                        handler = temphandler.TempHandler(  )
                                        parser.setContentHandler(handler)
                                        parser.parse(uinput)
        if (inter == 'on'):  
        	# Walk the parsed options; validate the XML path and capture -f.
        	for opts,vals in options.__dict__.items():
           	# print opts,vals
            		if(opts == 'uinput'):
                		uinput = vals
                		if os.path.exists(uinput):
				        print "XML input:",uinput
                    			#parser = xml.sax.make_parser(  )
                    			#handler = temphandler.TempHandler(  )
                    			#parser.setContentHandler(handler)
                    			#parser.parse(uinput)
                		else:
                    			print "Please pass a valid input XML filename with the -i argument and try again. See -h for syntax. Quitting now.."
                    			sys.exit()
            		if(opts == 'force'):
                		if (vals == True):
                    			forOpt = False
                		force = vals 
        #########  call listVars() #############################################################
	# listVars() (defined elsewhere) parses the XML template and returns
	# every workflow setting in one large tuple.
	# NOTE(review): `late` appears TWICE in this unpack target list
	# (positions 12 and 14) — the second assignment overwrites the first;
	# one of them is probably meant to be a different variable. Verify
	# against listVars()'s return order.
	output_grid,kfold,lone,region,fut_train_start_time,fut_train_end_time,file_j_range,hist_file_start_time,hist_file_end_time,hist_train_start_time,hist_train_end_time,lats,late,lons,late, basedir,method,target_file_start_time,target_file_end_time,target_train_start_time,target_train_end_time,spat_mask,fut_file_start_time,fut_file_end_time,predictor,target,params,outdir,dversion,dexper,target_scenario,target_model,target_freq,hist_scenario,hist_model,hist_freq,fut_scenario,fut_model,fut_freq,hist_pred_dir,fut_pred_dir,target_dir,expconfig,target_time_window,hist_time_window,fut_time_window,tstamp,ds_region,target_ver,auxcustom= listVars(uinput,basedir,force)
        ######### call script creators..  #######################################################
        ############################### 1 ###############################################################
        #  make.code.tmax.sh 1 748 756 /vftmp/Aparna.Radhakrishnan/pid15769 outdir 1979 2008 tasmax

	# If the runscript directory already exists, please quit
	#/home/a1r/gitlab/cew/fudge2014///scripts/tasmax/35txa-CDFt-A00X01K02
	scriptdir = basedir+"/scripts/"+target+"/"+expconfig

	if(os.path.exists(scriptdir)):
		print "Warning: script directory already exists.",scriptdir
		#print "ERROR: Directory already exists. Clean up and try again please:",scriptdir 
        # Step 1: build the (very long) argument string for create_runcode.
        # NOTE(review): arguments are concatenated into a shell string and run
        # with shell=True — values containing spaces/metacharacters will break
        # or be interpreted by the shell.
        script1Loc = basedir+"/utils/bin/create_runcode"
        make_code_cmd = script1Loc+" "+str(predictor)+" "+str(target)+" "+str(output_grid)+" "+str(spat_mask)+" "+str(region)
        make_code_cmd = make_code_cmd+" "+str(file_j_range)+" "+str(lons)+" "+str(lats)+" "+str(late)
	make_code_cmd = make_code_cmd+" "+str(hist_file_start_time)+" "+str(hist_file_end_time)+" "+str(hist_train_start_time)+" "+str(hist_train_end_time)+" "+str(hist_scenario)+" "+str(hist_model)+" "+hist_freq+" "+str(hist_pred_dir)+" "+str(hist_time_window)
        make_code_cmd = make_code_cmd+" "+str(fut_file_start_time)+" "+str(fut_file_end_time)+" "+str(fut_train_start_time)+" "+str(fut_train_end_time)+" "+str(fut_scenario)+" "+str(fut_model)+" "+fut_freq+" "+str(fut_pred_dir)+" "+str(fut_time_window)
        make_code_cmd = make_code_cmd+" "+str(target_file_start_time)+" "+str(target_file_end_time)+" "+str(target_train_start_time)+" "+str(target_train_end_time)+" "+str(target_scenario)+" "+str(target_model)+" "+target_freq+" "+str(target_dir)+" "+str(target_time_window)
        make_code_cmd = make_code_cmd+" "+str(method)+" "+str(expconfig)+" "+str(kfold)+" "+str(outdir)+" "+str(tstamp)+" "+'na'+" "+basedir+" "+str(lone)

        print "Step 1: R code starters generation ..in progress"
        #p = subprocess.Popen('tcsh -c "'+make_code_cmd+'"',shell=True,stdout=PIPE,stdin=PIPE, stderr=PIPE)
        
        # params and ds_region are passed as single quoted shell words.
        params_new =  '"'+str(params)+'\"'
        make_code_cmd = make_code_cmd +" "+params_new+" "+"'"+str(ds_region)+"'"
        make_code_cmd = make_code_cmd+" "+str(auxcustom)
	#cprint make_code_cmd
        #p = subprocess.Popen(make_code_cmd +" "+params_new,shell=True,stdout=PIPE,stdin=PIPE, stderr=PIPE)
        p = subprocess.Popen(make_code_cmd,shell=True,stdout=PIPE,stdin=PIPE, stderr=PIPE)
        output, errors = p.communicate() 
        if(p.returncode != 0):
         print "Step1:!!!! FAILED !!!!, please contact developer."
         print output, errors
         # NOTE(review): exits with status 0 on failure — callers/schedulers
         # cannot detect the error; sys.exit(1) was probably intended.
         sys.exit(0)
        #cprint output, errors
	#cprint "----Log-----"
        #cprint output,errors
        ###############################################################################################
        print "1- completed\n"
        ############################### 2 ################################################################
	#target_time_window,hist_time_window,fut_time_window
        # Step 2: generate the individual per-run scripts via create_runscript.
        script2Loc = basedir+"/utils/bin/"+"create_runscript"
        create_runscript_cmd = script2Loc+" "+str(lons)+" "+str(lone)+" "+str(expconfig)+" "+str(basedir)+" "+target+" "+method+" "+target_dir+" "+hist_pred_dir+" "+fut_pred_dir+" "+outdir+" "+str(file_j_range)+" "+tstamp+" "+str(target_file_start_time)+" "+str(target_file_end_time)+" "+str(hist_file_start_time)+" "+str(hist_file_end_time)+" "+str(fut_file_start_time)+" "+str(fut_file_end_time)+" "+str(spat_mask)+" "+str(region)+" "+auxcustom+" "+target_time_window+" "+hist_time_window+" "+fut_time_window
        print "Step 2: Individual Runscript generation: \n"+create_runscript_cmd
        p1 = subprocess.Popen('tcsh -c "'+create_runscript_cmd+'"',shell=True,stdout=PIPE,stdin=PIPE, stderr=PIPE)
        print "Step 2: Individual runscript  creation.. in progress"
        output1, errors1 = p1.communicate()
	#cprint output1,errors1
        # NOTE(review): "2- completed" is printed before the return code is
        # checked, so a failure still reports completion first.
        print "2- completed\n"
        if(p1.returncode != 0):
         print "Step2:!!!! FAILED !!!!, please contact developer."
         print output1, errors1
         sys.exit(0)
        #print output1, errors1
        #c print errors1
        
        ###################################### 3 ################################################################
        # Step 3: generate the master runscript that drives the per-run scripts.
        # NOTE(review): `ppn` is not defined anywhere in this function — this
        # line raises NameError unless ppn is a module-level global; verify.
        script3Loc = basedir+"/utils/bin/"+"create_master_runscript"
        create_master_cmd= script3Loc+" "+str(lons)+" "+str(lone)+" "+str(predictor)+" "+method+" "+basedir+" "+expconfig+" "+file_j_range+" "+tstamp+" "+str(ppn)
        print "Step 3: --------------MASTER SCRIPT GENERATION-----------------------"#+create_master_cmd
        p2 = subprocess.Popen('tcsh -c "'+create_master_cmd+'"',shell=True,stdout=PIPE,stdin=PIPE, stderr=PIPE)
        #cprint create_master_cmd
        #print "Create master script .. in progress"
        output2, errors2 = p2.communicate()
        #######
        if(p2.returncode != 0):
         print "Step3:!!!! FAILED !!!!, please contact developer."
         print output2, errors2
         sys.exit(0)
        print output2, errors2
        print "3- completed"
	##################################### 4 ############################################
        # copy xml to configs dir 
        # NOTE(review): step 4 builds the config path from `predictor` while
        # scriptdir above used `target` — confirm the asymmetry is deliberate.
        cdir = basedir+"/"+"/scripts/"+predictor+"/"+expconfig+"/config/"
        try:
	  os.mkdir(cdir)
	# NOTE(review): bare except — swallows every error (permissions,
	# bad path), not just "already exists".
	except:
	  print "Unable to create dir. Dir may exist already", cdir 
	shutil.copy2(uinput, cdir)
        print "Config XML saved in ",cdir 
	print "----See readMe in fudge2014 directory for the next steps----"
Пример #30
0
    sys.exit(1)

# Positional arguments: input data folder and output folder.
# (args is presumably produced by an argument parser above this fragment —
# the validation that exits on bad usage is cut off just before these lines.)
data_folder   = args[0]
output_folder = args[1]


# Import the data report
# NOTE: DataReport, DataParser and INSTRUMENT_NAME come from project modules
# imported elsewhere in this file.
report = DataReport()
if not(report.parse(data_folder)):
    print 'ERROR: Failed to parse the data report'
    sys.exit(1)


# Parse the data written by the instrument
parser = DataParser()
(data_format, classification_results) = parser.parse(report.data(INSTRUMENT_NAME))


# Only keep the images from the test set
# (Python 2: filter() returns a list here, so downstream code may iterate
# it more than once.)
classification_results = filter(lambda x: report.image(x.image_index).test_index is not None, classification_results)


# Complete the data if it was saved in an old format
if data_format != '1.1':
    label_names = []
    for label in range(0, report.nbLabels()):
        label_names.append(report.labelName(label))

    database_name = report.getExperimentSettingDatabaseName()

    for classification_result in classification_results: