Example 1
 def parseUserProperties(self, plen, prop_delta=False):
     start = self.reader.start
     properties = UserProperties()
     while not self.matchUserPropertyEnd():
         assert not self.reader.eof, msg("""
             Expected to find a user property, instead found end of file.
             """)
         if self.matchUserPropertyKey():
             key = self.parseUserPropertyKey()
             value = self.parseUserPropertyValue()
             properties[key] = value
         elif self.matchUserPropertyDelete():
             assert prop_delta, msg("""
                 Property deletion (operation 'D') is only allowed
                 when the Prop-delta dump property is true for the
                 containing node or revision.
                 """)
             key = self.parseUserPropertyDelete()
             properties[key] = None
     else:
         self.parseUserPropertyEnd()
     stop = self.reader.start
     assert plen == stop - start, msg(
         """Property-Legnth is incorrect.
         Expected %d bytes, but found %d bytes.""" % (
             plen, stop - start))
     return properties
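For reference, a minimal sketch of the property block such a parser consumes, assuming the standard Subversion dump syntax this code appears to target ('K <n>'/'V <n>' length-prefixed entries, 'D <n>' deletions in prop-delta mode, 'PROPS-END' terminator); the counts on the length lines are byte counts and must match the payloads exactly:

# Hypothetical input for parseUserProperties (not from the original source).
prop_block = (
    b"K 7\n"  b"svn:log\n"        # key of 7 bytes
    b"V 11\n" b"first draft\n"    # value of 11 bytes
    b"PROPS-END\n"                # matched by matchUserPropertyEnd
)
print(len(prop_block))  # 39 -- the value plen would carry for this block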
Example 2
def main(argv):

    # Initialize new external memory list.
    util.msg('Populating normal and external memory list')

    t1 = time.time()

    dirname = util.make_temp_name('em_list')

    l = []
    em_list = pyrsistence.EMList(dirname)
    for i in util.xrange(0x1000000):
        v = random.randrange(0x1000000)
        em_list.append(v)
        l.append(v)

    t2 = time.time()
    util.msg('Done in %d sec.' % (t2 - t1))

    util.msg('Verifying external memory list contents')

    for i in util.xrange(0x1000000):
        if em_list[i] != l[i]:
            util.msg('FATAL! Mismatch in element %d: Got %#x but expected %#x' % (i, em_list[i], l[i]))

    t3 = time.time()
    util.msg('Done in %d sec.' % (t3 - t2))

    # Close and remove external memory list from disk.
    em_list.close()
    shutil.rmtree(dirname)

    return 0
Example 3
def main(argv):

    # Initialize new external memory dictionary.
    util.msg('Populating normal and external memory dictionary')

    t1 = time.time()

    dirname = util.make_temp_name('em_dict')

    d = {}
    em_dict = pyrsistence.EMDict(dirname)
    for i in util.xrange(0x1000000):
        v = random.randrange(0x1000000)
        em_dict[i] = v
        d[i] = v

    t2 = time.time()
    util.msg('Done in %d sec.' % (t2 - t1))

    util.msg('Verifying external memory dictionary contents')

    for i in util.xrange(0x1000000):
        if em_dict[i] != d[i]:
            util.msg('FATAL! Mismatch in element %d: Got %#x but expected %#x' % (i, em_dict[i], d[i]))

    t3 = time.time()
    util.msg('Done in %d sec.' % (t3 - t2))

    # Close and remove external memory dictionary from disk.
    em_dict.close()
    shutil.rmtree(dirname)

    return 0
Example 4
def addEpisode(xbmcid, scraperid, snr, enr, title, airdate):
	f = getDatabase("r")
	soup = BeautifulSoup(f.read())
	f.close()
	serie = soup.find(scraperid = scraperid)
	#TODO check inconsistency
	if serie == None :
		return False
	season = serie.find(seasonnr = snr)
	if season == None:
		tag = Tag(soup, "season")
		tag.attrs.append(('seasonnr', snr))
		serie.append(tag)
		season = serie.find(seasonnr = snr)
	if season == None:
		util.msg(localize(50000), localize(50004))
		return False
	episode = season.find(episodenr = enr)
	if episode == None:
		episodetag = Tag(soup, "episode")
		episodetag.attrs.append(('episodenr', enr))
		titletag = Tag(soup, "title")
		titletag.insert(0,title)
		episodetag.append(titletag)
		airdatetag = Tag(soup, "airdate")
		airdatetag.insert(0,airdate)
		episodetag.append(airdatetag)
		season.append(episodetag)
		
		f = getDatabase("w")
		f.write(soup.prettify())
		f.close()
	#else:
		#check consistency
	return True
Example 5
    def __init__(self, fn):
        
        # super constructor
        util.FileCache.__init__(self, fn)

        # open and map file
        f = open(fn)
        buf = mmap.mmap(f.fileno(), 0, access=mmap.ACCESS_READ)
        at = 0

        # traverse the file, reading type 0 (metadata) and type 1 (data) chunks
        self.chunks = []
        metadata = None
        while at < len(buf):
            try:
                chunk_doc = read_bson_doc(buf, at)
                at += chunk_doc.bson_len
                if chunk_doc['type']==0:
                    metadata = chunk_doc
                elif chunk_doc['type']==1:
                    self.chunks.append(Chunk(chunk_doc, metadata))
            except Exception as e:
                util.msg('stopping at bad bson doc (%s)' % e)
                return

        # bson docs should exactly cover file
        assert(at==len(buf))
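The loop above advances through the file by each document's self-declared size. A minimal sketch of the convention read_bson_doc presumably relies on: per the BSON spec, the first four bytes of a document hold its total length as a little-endian int32.

import struct

def bson_len(buf, at=0):
    # Total BSON document length, read from the int32 at the doc's start.
    return struct.unpack_from('<i', buf, at)[0]

empty_doc = b'\x05\x00\x00\x00\x00'  # smallest valid BSON document
assert bson_len(empty_doc) == 5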
Example 6
def dbg(fn, opt, show=True):
    def pt(t):
        return time.strftime('%Y-%m-%dT%H:%M:%S', time.gmtime(t/1000)) + ('.%03d' % (t%1000))
    for metrics in read(None, fn, opt):
        if show:
            if 'serverStatus.localTime' in metrics:
                sslt = metrics['serverStatus.localTime']
                util.msg(metrics._id, pt(metrics._id), pt(sslt[0]), pt(sslt[-1]),
                         'ds', len(sslt), 'ms', len(metrics))
            else:
                #print 'no serverStatus.localTime'
                util.msg(metrics.keys())
Example 7
def initDatabase():
	addon_work_folder = os.path.join(xbmc.translatePath( "special://profile/addon_data/" ), "plugin.program.download-next-ep")
	soup = BeautifulSoup("<database><serverTime></serverTime><series></series></database>")
	try:
		if not os.path.exists(addon_work_folder):
			os.makedirs(addon_work_folder)
		f = getDatabase("w")
		f.write(soup.prettify())
		f.close()
		#util.msg("file write success","wote file to:\n" +addon_work_folder )
	except:
		util.msg(localize(50000), localize(50003))
Example 8
    def get_first(self):

        # did we already read ref doc and metadata?
        if self.state >= 1:
            assert(self.metrics)
            return self.metrics

        # map from metric names to list of values for each metric
        # metric names are paths through the sample document
        self.metrics = collections.OrderedDict()
        self.metrics.metadata = self.metadata
        self.metrics._id = self.chunk_doc['_id']
    
        # decompress chunk data field
        data = self.chunk_doc['data']
        data = data[4:] # skip uncompressed length, we don't need it
        data = zlib.decompress(data)
    
        # read reference doc from chunk data, ignoring non-metric fields
        ref_doc = read_bson_doc(data, 0, ftdc=True)
        #print_bson_doc(ref_doc)
    
        # traverse the reference document and extract metric names
        def extract_names(doc, n=''):
            for k, v in doc.items():
                nn = n + util.SEP + k if n else k
                if type(v)==util.BSON:
                    extract_names(v, nn)
                else:
                    self.metrics[nn] = [v]
        extract_names(ref_doc)
    
        # get nmetrics, ndeltas
        self.nmetrics = uint32.unpack_from(data, ref_doc.bson_len)[0]
        self.ndeltas = uint32.unpack_from(data, ref_doc.bson_len+4)[0]
        self.nsamples = self.ndeltas + 1
        at = ref_doc.bson_len + 8
        if self.nmetrics != len(self.metrics):
            # xxx remove when SERVER-20602 is fixed
            util.msg('ignoring bad chunk: nmetrics=%d, len(metrics)=%d' % (
                self.nmetrics, len(self.metrics)))
            return None
        #assert(self.nmetrics==len(metrics))

        # record data and position in data for decompressing deltas when we need them
        self.data = data[at:]

        # release the chunk_doc as the needed info is now parsed into self.metrics, self.data, etc.
        self.chunk_doc = None

        # our result, containing only the first (reference) sample
        self.state = 1
        return self.metrics
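get_first only materializes the reference sample; the deltas kept in self.data are expanded later. A sketch of that expansion step, assuming the deltas have already been decoded to plain integers (the actual FTDC stream stores them varint-compressed, with runs of zeros run-length encoded):

def expand(first, deltas):
    # Each sample is the previous sample plus its delta.
    values = [first]
    for d in deltas:
        values.append(values[-1] + d)
    return values

assert expand(10, [0, 2, -1]) == [10, 10, 12, 11]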
Example 9
    def about(self):
        """prints about message in popup"""
        util.msg('''
clark Sonic Commissioning Software

Clark Solutions
Hudson, MA 01749
www.clarksol.com
        
Version: {0}
By: {1}'''.format(__version__,__author__),'About')
        return
Example 10
def main(argv):

    # Initialize new external memory list.
    util.msg('Populating external memory list')

    t1 = time.time()

    dirname = util.make_temp_name('em_list')

    em_list = pyrsistence.EMList(dirname)
    for i in util.xrange(0x1000):
        em_list.append(random.randrange(0x100000))

    t2 = time.time()
    util.msg('Done in %d sec.' % (t2 - t1))


    # Request several iterator objects to locate possible memory leaks.
    util.msg('Testing iterator')
    for i in util.xrange(0x1000):
        for item in em_list:
            pass

    t3 = time.time()
    util.msg('Done in %d sec.' % (t3 - t2))

    # Close and remove external memory list from disk.
    em_list.close()
    shutil.rmtree(dirname)

    return 0
Example 11
def main(opt):

    do_server = opt.server
    do_html = opt.html
    do_connect = opt.connect
    do_browser = not opt.server and not opt.html and not opt.connect

    if do_browser:
        if opt.browser:
            util.msg('--browser flag is obsolete; browser mode is now the default')
        else:
            util.msg('browser mode is now the default; use --html out.html to generate static html')

    if do_browser:
        do_server = True

    # --server or browser mode
    if do_server:
        httpd = None
        for opt.port in range(opt.port, opt.port+100):
            try:
                Handler.exit_on_close = not opt.server
                httpd = BaseHTTPServer.HTTPServer(('', opt.port), Handler)
                break
            except Exception as e:
                util.msg('can\'t open port %d: %s' % (opt.port, e))
        if not httpd:
            raise e
        httpd.ses = Ses(opt, path='/0', server=True) # default session
        url = 'http://localhost:%d' % opt.port
        util.msg('listening for a browser request for %s' % url)
        if do_browser:
            browser(opt, url)
        httpd.serve_forever()

    # --connect
    elif do_connect:
        args = ' '.join(pipes.quote(s) for s in sys.argv[1:])
        args = urllib.urlencode({'args': args})
        url = opt.connect + '/open?' + args
        browser(opt, url)

    # standalone static html
    elif do_html:
        util.msg('generating html file', do_html)
        ses = Ses(opt, server=False)
        ses.out = open(do_html, 'w')
        html.page(ses)
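A worked example of the --connect URL construction above: the local command-line arguments are re-quoted, URL-encoded into a single 'args' parameter, and appended to the remote server's /open endpoint (the host and arguments below are made up for illustration):

import pipes
import urllib

argv = ['myfile.ftdc', '--after', '2015-01-01']
args = ' '.join(pipes.quote(s) for s in argv)
url = 'http://localhost:8888' + '/open?' + urllib.urlencode({'args': args})
print(url)  # http://localhost:8888/open?args=myfile.ftdc+--after+2015-01-01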
Example 12
    def parseDumpProperty(self, name=None, store=None):
        """
        A dump property consists of a name and a value on a single
        line, separated by a colon.

        name
           If specified: (1) the parsed property must have this
           name. (2) we return the value of the property as a string.

           If None, then we return a tuple (*name*, *value*), both
           strings.

        store
           If specified, it must be a dictionary.  We'll update it as
           store[name] = value before we return.

        returns
          *value* or ( *name*, *value* )
        """
        m = pat_dump_property.match(self.reader.cur)

        assert m, "Expecting a dump property, but found \n%s" % (self.reader,)
        assert name == None or name == m.group(1), msg("""
            Expected property %s, but found %s
            """ % (name, m.group(1)))

        value = m.group(2)
        if store != None:
            store[m.group(1)] = value
        self.reader.next()
        if name == None:
            return m.group(1), value
        else:
            return value
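A worked example of the line form the docstring describes, using a hypothetical stand-in for pat_dump_property (the real pattern is defined elsewhere and may differ, but must capture the name and the value around the colon):

import re

pat_dump_property = re.compile(r'([^:]+): (.*)')  # hypothetical stand-in

m = pat_dump_property.match('Node-path: trunk/README')
assert m.group(1) == 'Node-path'
assert m.group(2) == 'trunk/README'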
Example 13
 def do_POST(self):
     util.msg('POST', self.path)
     if self.path.endswith('/model'):
         path = self.path[:-len('/model')] # strip off /model to get session path
         ses = Ses.sessions[path]
         l = int(self.headers.getheader('content-length', 0))
         data = self.rfile.read(l)
         js = json.loads(data)
         for name in js:
             setattr(ses.opt, name, js[name])
     elif self.path=='/save':
         l = int(self.headers.getheader('content-length', 0))
         req = urlparse.parse_qs(self.rfile.read(l))
         fn = req['fn'][0]
         open(fn, 'w').write(ses.get_save())
         util.msg('saved to', fn)
Example 14
def main(argv):

    # Initialize new external memory list.
    util.msg('Populating external memory list')

    t1 = time.time()

    dirname = util.make_temp_name('em_list')

    em_list = pyrsistence.EMList(dirname)
    for i in util.xrange(0x1000000):
        em_list.append(i)

    t2 = time.time()
    util.msg('Done in %d sec.' % (t2 - t1))

    # Close and remove external memory list from disk.
    em_list.close()
    shutil.rmtree(dirname)

    return 0
Example 15
def main(argv):

    # Initialize new external memory dictionary.
    util.msg('Populating external memory dictionary')

    t1 = time.time()

    dirname = util.make_temp_name('em_dict')

    em_dict = pyrsistence.EMDict(dirname)
    for i in util.xrange(0x1000000):
        em_dict[i] = i

    t2 = time.time()
    util.msg('Done in %d sec.' % (t2 - t1))

    # Close and remove external memory dictionary from disk.
    em_dict.close()
    shutil.rmtree(dirname)

    return 0
Example 16
 def __str__(self):
     """
     The str(Reader) is intended for debugging.
     """
     return msg("""
         LineReader
         cur[%3d] = %s
         start    = %d
         stop     = %d
         linenr   = %d
         eof      = %s
         """ % (len(self.cur), self.cur[:72],
                self.start, self.stop, self.linenr, self.eof))
Example 17
    def getPropertyEntryContent(self):
        """
        Parses a property entry of the form:

           character space n:integer newline bytes[n] newline

        and returns the bytes[n].
        """
        assert (len(self.reader.cur) and
                self.reader.cur[0:2] in ['K ', 'V ', 'D ']), msg("""
                Expected a pair of property entry lines, where the
                first has the form (K|V|D) <number>. Found this:
                %s""" % (self.reader,))
        n = int(self.reader.cur[2:])
        self.reader.next()
        result = self.getBytes(n)
        return result
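A worked example of the entry form described above: for the two lines 'V 5' and 'hello', the length line announces n = 5 and the payload is the next five bytes.

line = 'V 5'
n = int(line[2:])      # 5, parsed from the length line
payload = 'hello'[:n]  # the n bytes that getBytes(n) would return
assert payload == 'hello'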
Example 18
def browser(opt, url):

    # what os?
    if sys.platform=='darwin':
        cmd = 'sleep 1; open -a "Google Chrome" "%s"' % url
    elif sys.platform=='linux2':
        cmd = 'sleep 1; google-chrome "%s" &' % url
    elif sys.platform=='win32':
        cmd = 'timeout 2 && start /b chrome "%s" &' % url
    else:
        raise Exception('unknown platform ' + sys.platform)

    # launch it
    if cmd:
        util.msg('opening a browser window on', url)
        rc = subprocess.call(cmd, shell=True)
        if rc != 0:
            util.msg('can\'t open browser; is Google Chrome installed?')
    else:
        util.msg('don\'t know how to open a browser on your platform')

    # go into background
    # not as robust as daemonizing, but that isn't needed here, and daemonizing would add an external dependency
    if not opt.nofork:
        log_fn = 'timeseries.%d.log' % opt.port
        util.msg('going into background; sending output to ' + log_fn)
        util.msg('will terminate when browser window closes')
        util.msg('use --nofork to run in foreground')
        if os.fork():
            os._exit(0)
        sys.stdin.close()
        sys.stderr = sys.stdout = open(log_fn, 'a')
        util.msg('\n===', url)
        signal.signal(signal.SIGHUP, signal.SIG_IGN)
Example 19
                module[task['module']].exploit(task['request'])  
                self.work_queue.task_done()
            except Exception, e:
                #print str(e)
                break
 
 

if __name__ == '__main__':
    
 
    O = {}
    NODE_KEY = "a88b92531ba974f68bc1fd5938fc77"
    NODE_DEBUG = 0
    SERVER = "http://w/uauc/playweb/"
    util.msg("PlayWeb Node 1.0")
    util.msg("Server:%s Key:%s Debug:%d" % (SERVER, NODE_KEY, NODE_DEBUG))
    util.msg("Listening server project...")
    while 1:
        r = util.http_get(SERVER + "/index.php?m=node&a=get_task")
        if r['data'] != " " :
            O = eval(util.decode_str(r['data'], NODE_KEY))
            break
        time.sleep(1)
    O['debug'] = NODE_DEBUG
    util.msg("[Project] Target:%s  Time:%s Module:%s  Thread:%s" % (O['target'], util.date(O['start_time']), O['module'], O['thread']), 1)
    O['target'] = "w"
    O['key'] = NODE_KEY
    #O['depth'] = 5  # notice
    O['server_url'] = SERVER + "?m=node&a="
    O['web-ports'] = util.csv2array(O['web-ports'])
Example 20
    def do_GET(self):

        # parse command-line args passed in url query string as an 'args' parameter
        def query2opt(query):
            args = shlex.split(query['args'][0])
            return __main__.get_opt(args)

        # parse url, extracting path and query portions
        _, _, path, _, query, _ = urlparse.urlparse(self.path)
        query = urlparse.parse_qs(query)

        # query to root is redirected to default session 0
        if path=='/':
            self.send_response(301) # permanent
            self.send_header('Location', '/0')

        # open a new view in a new window
        # expects command-line arg string in url parameter "args"
        # parse off the query, open new session based on that, then redirect to bare session url
        elif path=='/open':
            opt = query2opt(query) # parse url "args" parameter
            ses = Ses(opt, server=True) # new session
            self.send_response(302) # temporary redirect
            self.send_header('Location', ses.path)

        # top-level page: return the container, which includes
        #   progress message area - loaded via /ses/progress url in progress phase (below)
        #   content area - loaded via /ses/content url in content phase (below)
        elif path in Ses.sessions:
            ses = Ses.sessions[path]
            self.prepare(ses)
            html.container(ses)

        # info for a given time t
        elif path.endswith('/info'):
            path = path.rsplit('/', 1)[0]
            ses = Ses.sessions[path]
            t = float(query['t'][0])
            self.prepare(ses)
            html.info(ses, t)

        # raw info for a given time t
        elif path.endswith('/raw'):
            path = path.rsplit('/', 1)[0]
            ses = Ses.sessions[path]
            t = float(query['t'][0])
            self.prepare(ses)
            html.raw(ses, t, kind='raw')

        elif path.endswith('/metadata'):
            path = path.rsplit('/', 1)[0]
            ses = Ses.sessions[path]
            t = float(query['t'][0])
            self.prepare(ses)
            html.raw(ses, t, kind='metadata')

        # progress phase: load the data in preparation for generating content
        # while emitting progress messages. We also accept new view parameters to open
        # new view in current window as command-line arg string in url parameter "args"
        elif path.endswith('/progress'):
            path = path.rsplit('/', 1)[0]
            ses = Ses.sessions[path]
            if 'args' in query:
                ses.opt = query2opt(query) # parse url "args" parameter
            self.prepare(ses)
            html.load(ses)

        # content phase: generate actual html view from graph data loaded in the progress phase
        elif path.endswith('/content'):
            path = path.rsplit('/', 1)[0]
            ses = Ses.sessions[path]
            self.prepare(ses)
            html.page(ses)

        # a window closed
        elif '/close/' in path:
            if Handler.exit_on_close:
                path, _, what = path.rsplit('/', 2)
                util.msg('closing', path, '('+what+')')
                if path in Ses.sessions:
                    del Ses.sessions[path]
                if not Ses.sessions:
                    util.msg('all sessions closed, exiting')
                    os._exit(0)

        # otherwise not found
        else:
            self.send_response(404) # not found
Example 21
 def error(self, p):
     if not p:
         util.msg('parse error: unexpected EOF')
     else:
         util.msg(f'parse error: {p}')
Example 22
File: repo.py Project: tgphelps/ddu
    def verify_backups(self, only='') -> None:
        """Verify all, or only the latest, backup(s) in this repository.

        Iterate through every 'new' and 'old' entry in every (or latest)
        backup, and verify that (1) the hash exists, and (2) its contents
        match its name.
        """
        if only == '':
            bkup_list = self.all_backups
        else:
            bkup_list = [only]
        num_backups = len(bkup_list)
        # entries = 0
        num_files = 0
        hits = 0
        all_hashes: Set[str] = set()
        good: Set[str] = set()
        bad: Set[str] = set()
        missing: Set[str] = set()
        for b in bkup_list:
            util.msg(f'Checking: {b}')
            bkup = os.path.join(self.backups, b)
            with open(bkup, 'rt') as f:
                line_num = 0
                saw_end = False
                for line in f:
                    line_num += 1
                    if line_num == 1 and not line.startswith('start'):
                        util.fatal(f'Backup {bkup} corrupted. No start.')
                    if line[0:3] in ('new', 'old'):
                        num_files += 1
                        flds = line.split('\t')
                        h = flds[1]
                        this_file = flds[3]
                        all_hashes.add(h)
                        if h in good:
                            hits += 1
                            # continue
                        elif h in bad:
                            hits += 1
                            util.error(f'invalid hash {h} for {this_file}')
                            # continue
                        else:
                            # We haven't seen this hash before
                            (d, ff) = self.fname_from_hash(h)
                            if self.find_file(os.path.join(d, ff)):
                                if _verify_hash(os.path.join(self.objects, d),
                                                ff):
                                    good.add(h)
                                else:
                                    bad.add(h)
                                    t = this_file
                                    util.error(f'invalid hash {h} for {t}')
                            else:
                                missing.add(h)
                                util.error(f'missing {h} for {this_file}')
                    else:
                        # print(line_num, line)
                        # This should be a trailer line
                        if line.startswith('end'):
                            saw_end = True
                if not saw_end:
                    util.warning(f'Backup {bkup} has no end marker')

        if len(all_hashes) != len(good) + len(bad) + len(missing):
            util.fatal(f'hash bug: {len(all_hashes)} '
                       f'{len(good)} {len(bad)} {len(missing)}')
        util.msg('Verify results:')
        util.msg(f'backups checked = {num_backups}')
        util.msg(f'files checked = {num_files}')
        util.msg(f'invalid = {len(bad)}')
        util.msg(f'missing = {len(missing)}')
        util.msg(f'cache hits = {hits}')
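A hypothetical _verify_hash consistent with the loop above, assuming the repository is content-addressed so that an object file's name is the hex digest of its contents; the real helper and digest algorithm may differ:

import hashlib
import os

def _verify_hash(directory: str, fname: str) -> bool:
    # Recompute the digest of the object's contents and compare it to its name.
    h = hashlib.sha256()
    with open(os.path.join(directory, fname), 'rb') as f:
        for block in iter(lambda: f.read(65536), b''):
            h.update(block)
    return h.hexdigest() == fname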
Example 23
def main():
    parser = argparse.ArgumentParser(description="")
    parser.add_argument(
        "alignment", help="Fasta format alignment of sequences to be analysed")
    parser.add_argument("width", help="Width of nucleotide window to analyse")
    parser.add_argument(
        "run_id",
        help="Identifier for this run, to be used as a key for joining datatables")
    parser.add_argument("-p",
                        help="Overlap proportions for windows",
                        nargs="+",
                        default=[])
    parser.add_argument("-wd", help="Working directory", default=None)
    args = parser.parse_args()

    if args.wd == None:
        WD = os.getcwd() + "/"
    else:
        raise ValueError("User specified working directory not implemented!")
        #if os.path.exists(args.wd): WD = args.wd
        #else: raise ValueError("Working directory path does not exist")
    W = int(args.width)

    try:
        user_p_fractions = [float(x) for x in args.p]
    except ValueError:
        print "Error: Overlap proportion arguments must be numerical values, separated by space character"
        exit()

    if len(user_p_fractions) > util.MAX_P_FRACS:
        raise ValueError("Error: Maximum of %d user-defined window shifts" %
                         util.MAX_P_FRACS)

    p_fractions = [0.0] + user_p_fractions

    colours = dict(zip(p_fractions,
                       util.COLOURS))  # colours used for p fractions in plot

    sequences = reader.ReadInFasta(args.alignment)

    util.init_msg("motif_pattern='%s'" % util.MOTIF)
    util.init_msg("stem_loop_pattern='%s'" % util.STEM_LOOP_PATTERN)

    # NB for current setup to work, the ref seq needs to be the longest seq.
    # sites not found in the ref seq can't be described for others
    reference_seq = sequences[0][1]  # expecting this will include gaps

    # indices for reference sequence unaligned
    ref_unalign_indices = util.unalign_indices(reference_seq)
    # assuming first seq in file is the reference
    ref_unalign_len = max(ref_unalign_indices)

    util.init_msg("width=%d" % W)

    # assuming aligned properly and all the same length
    align_len = len(reference_seq)
    # most unaligned sequences will have length less than align_len, but this is the max length possible
    check_names_length([pair[0] for pair in sequences], align_len)
    util.init_msg("aln_length=%d" % align_len)

    SEQ_NAME_DELIM = "."

    HEAD = "HEADER_"
    TAG_SEQ = "SEQ:"
    TAG_WIN = "WIN:"
    TAG_STRUCT = "STRUCT:"
    TAG_LOOP = "SL:"
    print util.tabmsg([HEAD + TAG_SEQ, "run_id", "seq_id", "group", "length"])
    print util.tabmsg([
        HEAD + TAG_WIN, "run_id", "win_id", "seq_id", "shift", "start_a",
        "end_a", "start_u", "end_u", "has_motif"
    ])
    print util.tabmsg([
        HEAD + TAG_STRUCT, "run_id", "struct_id", "seq_id", "win_id",
        "struct_number", "s_energy", "dotbracket"
    ])
    print util.tabmsg([
        HEAD + TAG_LOOP, "run_id", "loop_id", "seq_id", "win_id", "struct_id",
        "loop_number", "start_a", "end_a", "start_u", "end_u"
    ])

    win_id, struct_id, loop_id = (0, 0, 0)
    run_id = args.run_id  # NB remains string
    print "run_id:\t%s" % run_id

    # do analysis
    for nameSeqPair in sequences:

        genome_with_gaps = nameSeqPair[1].upper().replace("T", "U")
        unalign_indices = util.unalign_indices(genome_with_gaps)

        genome = genome_with_gaps.replace(util.GAP, "")

        # assuming name is in format GROUP.SEQ_ID
        group, seq_id = nameSeqPair[0].split(SEQ_NAME_DELIM)

        L = len(genome)

        #util.init_msg("sequence=%s, unalign_length=%d" % (seq_id, L))
        print util.tabmsg([TAG_SEQ, run_id, seq_id, group, L])

        for P in p_fractions:

            util.init_msg("shift=%.2f" % P)

            check_window_parameters(W, P)
            windows = get_windows(L, W, P)

            for window in windows:
                # correct for zero based. Don't +1 for end so numbers are inclusive
                util.init_msg("windows:%d.%d" % (window[0] + 1, window[1]))

            windows_sanity_check(windows, genome)

            for window in windows:
                win_start_u, win_end_u = window  # unaligned numbers. NB indices are like slice numbers (upper is exclusive)
                # aligned numbers
                win_start_a = unalign_indices.index(win_start_u)
                win_end_a = unalign_indices.index(win_end_u - 1) + 1

                analysis_id = ".".join(
                    [seq_id, str(win_start_u + 1),
                     str(win_end_u)]
                )  # +1 to correct for zero based, and don't -1 for win_end_u for the same reason
                win_seq = genome[win_start_u:win_end_u]

                has_motif = pattern.has_motif(win_seq)
                print util.tabmsg([
                    TAG_WIN, run_id, win_id, seq_id, P, win_start_a, win_end_a,
                    win_start_u, win_end_u, has_motif
                ])

                #util.msg(analysis_id, "seq:"+win_seq)
                if has_motif:
                    util.msg(analysis_id, "contains motif, estimating SS")

                    seq_structs = ss_prediction.predict_ss(
                        win_seq, analysis_id,
                        WD)  # will return empty list if no .ct file available

                    util.msg(analysis_id, "%d SS produced" % len(seq_structs))
                    for iSeqStruct in range(len(seq_structs)):
                        print util.tabmsg([
                            TAG_STRUCT, run_id, struct_id, seq_id, win_id,
                            iSeqStruct + 1, seq_structs[iSeqStruct].dG,
                            seq_structs[iSeqStruct].ss
                        ])

                        motif_loops = pattern.find_motif_loops(
                            seq_structs[iSeqStruct]
                        )  # list of subsequence indices for stem loops containing pattern in this ss
                        if len(motif_loops) == 0:
                            util.msg(
                                analysis_id, "SS_%d does NOT have loop_motif" %
                                (iSeqStruct + 1))
                        else:
                            util.msg(
                                analysis_id,
                                "SS_%d DOES have %d motif_loop(s):" %
                                (iSeqStruct + 1, len(motif_loops)))
                            util.result_msg(seq_structs[iSeqStruct]
                                            )  # print the whole SeqStruct
                            for iLoop in range(
                                    len(motif_loops)
                            ):  # NB motif_loop indices are like slices: start is inclusive, end is exclusive
                                # _u meaning unaligned
                                loop_start_u = motif_loops[iLoop][0] + win_start_u
                                loop_end_u = motif_loops[iLoop][1] + win_start_u
                                loop_start_a = unalign_indices.index(loop_start_u)
                                loop_end_a = unalign_indices.index(loop_end_u)

                                print util.tabmsg([
                                    TAG_LOOP, run_id, loop_id, seq_id, win_id,
                                    struct_id, iLoop + 1, loop_start_a,
                                    loop_end_a, loop_start_u, loop_end_u
                                ])

                                #loop_id = ".".join([analysis_id, str(iSeqStruct+1), str(iLoop+1)])
                                #stem_loop_seq = win_seq[motif_loops[iLoop][0]:motif_loops[iLoop][1]]
                                # for loop_start_u/loop_end_u, +1 correct for zero based for start. Not +1 for end because index value is exclusive
                                #util.indx_msg("U", loop_id, P, (loop_start_u+1), loop_end_u, stem_loop_seq) # this seq's indices
                                #util.indx_msg("A", loop_id, P, (loop_start_a+1), loop_end_a, stem_loop_seq) # the ref seq's indices
                                loop_id += 1
                        struct_id += 1
                else:  # if has motif
                    util.msg(analysis_id, "no motif in this window")
                win_id += 1

    print "End of analysis"
Example 24
def cmd_configure(ctx, config):
	srcnode = ctx.srcnode.abspath()
	bldnode = ctx.bldnode.abspath()

	ctx.load('waf', tooldir='pylib/')
	ctx.load('waf_unit_test')

	from pylib.util import parse_version
	parse_version(config)

	ctx.env.NTPS_RELEASE = config["NTPS_RELEASE"]
	ctx.env.NTPS_VERSION_MAJOR = config["NTPS_VERSION_MAJOR"]
	ctx.env.NTPS_VERSION_MINOR = config["NTPS_VERSION_MINOR"]
	ctx.env.NTPS_VERSION_REV = config["NTPS_VERSION_REV"]

	ctx.env.NTPS_VERSION = "%s.%s.%s" % (ctx.env.NTPS_VERSION_MAJOR, ctx.env.NTPS_VERSION_MINOR, ctx.env.NTPS_VERSION_REV)
	ctx.define("NTPS_VERSION_MAJOR", ctx.env.NTPS_VERSION_MAJOR, comment="Major version number")
	ctx.define("NTPS_VERSION_MINOR", ctx.env.NTPS_VERSION_MINOR, comment="Minor version number")
	ctx.define("NTPS_VERSION_REV", ctx.env.NTPS_VERSION_REV, comment="Revision version number")

	ctx.env.OPT_STORE = config["OPT_STORE"]

	opt_map = {}
	# Wipe out and override flags with those from the commandline
	for flag in ctx.env.OPT_STORE:
		opt = flag.replace("--", "").upper() # XXX: find a better way.
		opt_map[opt] = ctx.env.OPT_STORE[flag]

	msg("--- Configuring host ---")
	ctx.setenv('host', ctx.env.derive())

	ctx.load('compiler_c')

	if not ctx.env.NTPS_RELEASE:
		ctx.load('bison')

	for opt in opt_map:
		ctx.env[opt] = opt_map[opt]

	from compiler import check_compiler
	check_compiler(ctx)



	if ctx.options.enable_rtems_trace:
		ctx.find_program("rtems-tld", var="BIN_RTEMS_TLD", path_list=[ctx.options.rtems_trace_path, ctx.env.BINDIR])
		ctx.env.RTEMS_TEST_ENABLE = True
		ctx.env.RTEMS_TEST_FLAGS = ["-C", "%s/devel/trace/ntpsec-trace.ini" % srcnode,
									"-W", "%s/ntpsec-wrapper" % bldnode,
									"-P", "%s/devel/trace/" % srcnode,
									"-f", "-I%s" % bldnode,
									"-f", "-I%s/include/" % srcnode,
									"-f", "-I%s/libisc/include/" % srcnode,
									"-f", "-I%s/libisc/unix/include/" % srcnode]

	# Not needed to build.  Used by utility scripts.
	ctx.find_program("awk", var="BIN_AWK", mandatory=False)
	ctx.find_program("perl", var="BIN_PERL", mandatory=False)
	ctx.find_program("sh", var="BIN_SH", mandatory=False)

	# used to make man and html pages
	ctx.find_program("asciidoc", var="BIN_ASCIIDOC", mandatory=False)
	ctx.find_program("a2x", var="BIN_A2X", mandatory=False)
	ctx.find_program("xsltproc", var="BIN_XSLTPROC", mandatory=False)

	ctx.env.ENABLE_DOC = False
	if ctx.env.BIN_ASCIIDOC and ctx.env.BIN_XSLTPROC and ctx.env.BIN_A2X:
		ctx.env.ENABLE_DOC = True


	if (ctx.options.enable_doc or ctx.options.enable_doc_only) and not ctx.env.ENABLE_DOC:
		ctx.fatal("asciidoc and xsltproc are required in order to build documentation")
	elif (ctx.options.enable_doc or ctx.options.enable_doc_only):
		ctx.env.ASCIIDOC_FLAGS = ["-f", "%s/docs/asciidoc.conf" % ctx.srcnode.abspath()]
		ctx.env.ENABLE_DOC_ONLY = ctx.options.enable_doc_only
		ctx.env.ENABLE_DOC_USER = ctx.options.enable_doc
		ctx.env.PATH_DOC = ctx.options.path_doc

	# XXX: conditionally build this with --disable-man?  Should it build without docs enabled?
	ctx.env.A2X_FLAGS = ["--format", "manpage", "--asciidoc-opts=--conf-file=%s/docs/asciidoc.conf" % ctx.srcnode.abspath()]
	if not ctx.options.enable_a2x_xmllint:
		ctx.env.A2X_FLAGS += ["--no-xmllint"]


	# Disable manpages within build()
	if ctx.options.disable_manpage:
		ctx.env.DISABLE_MANPAGE = True

	from os.path import exists
	from waflib.Utils import subprocess
	if exists(".git") and ctx.find_program("git", var="BIN_GIT", mandatory=False):
		ctx.start_msg("DEVEL: Getting revision")
		cmd = ["git", "log", "-1", "--format=%H"]
		p = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=None)
		ctx.env.NTPS_REVISION, stderr = p.communicate()
		ctx.env.NTPS_REVISION = ctx.env.NTPS_REVISION.replace("\n", "")
		ctx.end_msg(ctx.env.NTPS_REVISION)

	ctx.start_msg("Building version")
	ctx.env.NTPS_VERSION_STRING = ctx.env.NTPS_VERSION

	if ctx.env.NTPS_REVISION:
		ctx.env.NTPS_VERSION_STRING += "-%s" % ctx.env.NTPS_REVISION[:7]

	if ctx.options.build_version_tag:
		ctx.env.NTPS_VERSION_STRING += "-%s" % ctx.options.build_version_tag

	ctx.define("NTPS_VERSION_STRING", ctx.env.NTPS_VERSION_STRING)
	ctx.end_msg(ctx.env.NTPS_VERSION_STRING)


	msg("--- Configuring main ---")
	ctx.setenv("main", ctx.env.derive())

	# XXX: temp hack to fix --enable-doc-only
	ctx.env.ENABLE_DOC_ONLY = ctx.options.enable_doc_only

	# The rest is not needed for documentation building.
	if ctx.options.enable_doc_only:
		return

	from check_type import check_type
	from check_sizeof import check_sizeof
	from check_structfield import check_structfield

	for opt in opt_map:
		ctx.env[opt] = opt_map[opt]

	if ctx.options.cross_compiler:
		ctx.env.ENABLE_CROSS = True

		ctx.start_msg("Using Cross compiler CC:")
#		ctx.get_cc_version(ctx.env.CC, gcc=True)
		ctx.end_msg(ctx.options.cross_compiler)

		ctx.env.CC = [ctx.options.cross_compiler]
		ctx.env.LINK_CC = [ctx.options.cross_compiler]

		if ctx.env["CROSS-CFLAGS"]:
			ctx.env.CFLAGS = opt_map["CROSS-CFLAGS"]

		if ctx.env["CROSS-LDFLAGS"]:
			ctx.env.LDFLAGS = opt_map["CROSS-LDFLAGS"]


	if ctx.options.list:
		from refclock import refclock_map
		print "ID    Description"
		print "~~    ~~~~~~~~~~~"
		for id in refclock_map:
			print "%-5s %s" % (id, refclock_map[id]["descr"])

		return

	# This needs to be at the top since it modifies CC and AR
	if ctx.options.enable_fortify:
		from check_fortify import check_fortify
		check_fortify(ctx)


	if ctx.options.enable_debug_gdb:
		ctx.env.CFLAGS += ["-g"]

	if ctx.options.enable_saveconfig:
		ctx.define("SAVECONFIG", 1)

	if not ctx.options.disable_debug:
		ctx.define("DEBUG", 1, comment="Enable debug mode")
		ctx.env.BISONFLAGS += ["--debug"]

	ctx.env.CFLAGS += ["-Wall", "-Wextra"]	# Default CFLAGS.


	# Check target platform.
	ctx.start_msg("Checking build target")
	from sys import platform
	if platform == "win32":
		ctx.env.PLATFORM_TARGET = "win"
	elif platform == "darwin":
		ctx.env.PLATFORM_TARGET = "osx"
	elif platform.startswith("freebsd"):
		ctx.env.PLATFORM_TARGET = "freebsd"
	elif platform.startswith("netbsd"):
		ctx.env.PLATFORM_TARGET = "netbsd"
	elif platform.startswith("openbsd"):
		ctx.env.PLATFORM_TARGET = "openbsd"
	else:
		ctx.env.PLATFORM_TARGET = "unix"
	ctx.end_msg(ctx.env.PLATFORM_TARGET)

	ctx.define("PLATFORM_%s" % ctx.env.PLATFORM_TARGET.upper(), 1, comment="Operating system detected by Python (%s)" % platform)


	# XXX: hack
	if ctx.env.PLATFORM_TARGET in ["freebsd", "osx", "openbsd"]:
		ctx.env.PLATFORM_INCLUDES = ["/usr/local/include"]
		ctx.env.PLATFORM_LIBPATH = ["/usr/local/lib"]
	elif ctx.env.PLATFORM_TARGET == "netbsd":
		ctx.env.PLATFORM_LIBPATH = ["/usr/lib"]
	elif ctx.env.PLATFORM_TARGET == "win":
		ctx.load("msvc")

	# OS X needs this for IPv6
	if ctx.env.PLATFORM_TARGET == "osx":
		ctx.define("__APPLE_USE_RFC_3542", 1, comment="Needed for IPv6 support")

	# int32_t and uint32_t probes aren't really needed, POSIX guarantees
	# them.  But int64_t and uint64_t are not guaranteed to exist on 32-bit
	# machines.
	# Used by timevalops and timespecops in tests/libntp/
	# May go away when that is cleaned up.
	types = ["uint64_t"]

	for inttype in sorted(types):
		ctx.check_type(inttype, ["stdint.h", "sys/types.h"])

	net_types = (
		("struct if_laddrconf", ["sys/types.h", "net/if6.h"]),
		("struct if_laddrreq", ["sys/types.h", "net/if6.h"]),
		)
	for (f, h) in net_types:
		ctx.check_type(f, h)

	structures = (
		("struct timex", ["sys/time.h", "sys/timex.h"]),
		("struct ntptimeval", ["sys/time.h", "sys/timex.h"]),
		)
	for (s, h) in structures:
		ctx.check_type(s, h)

	structure_fields = (
		("time_tick", "timex", ["sys/time.h", "sys/timex.h"]),
		("modes", "timex", ["sys/time.h", "sys/timex.h"]),
		("time.tv_nsec", "ntptimeval", ["sys/time.h", "sys/timex.h"]),
		)
	for (f, s, h) in structure_fields:
		ctx.check_structfield(f, s, h)

	# mostly used by timetoa.h and timespecops.h
	sizeofs = [
		("time.h",		"time_t"),
		(None,			"int"),
		(None,			"long"),
		(None,			"long long"),
	]

	for header, sizeof in sorted(sizeofs):
		ctx.check_sizeof(header, sizeof)

	# The protocol major number
	ctx.define("NTP_API", 4, comment="Protocol major number.")

	ctx.define("NTP_KEYSDIR", "%s/etc" % ctx.env.PREFIX, comment="NTP key file directory")
	ctx.define("GETSOCKNAME_SOCKLEN_TYPE", "socklen_t", quote=False, comment="socklen type")
	ctx.define("DFLT_RLIMIT_STACK", 50, comment="Default stack size")
	ctx.define("DFLT_RLIMIT_MEMLOCK", 32, comment="Locked memory size")

	probe_multicast(ctx, "MCAST", "Checking for multicast capability")

	ctx.define("TYPEOF_IP_MULTICAST_LOOP", "u_char", quote=False, comment="Multicast loop type") #XXX: check for mcast type

	# These are helpful and don't break Linux or *BSD
	ctx.define("OPEN_BCAST_SOCKET", 1, comment="Whether to open a broadcast socket")
	ctx.define("HAS_ROUTING_SOCKET", 1, comment="Whether a routing socket exists")

	ctx.check_cc(lib="edit", mandatory=False, comment="libedit library")
	ctx.check_cc(lib="m", comment="Math library")
	ctx.check_cc(lib="ossaudio", mandatory=False, comment="ossaudio for NetBSD")  # NetBSD audio
	ctx.check_cc(lib="rt", mandatory=False, comment="realtime library")
	ctx.check_cc(lib="curses", mandatory=False, comment="curses library, required for readline on OpenBSD") # Required for readline on OpenBSD.
	ctx.check_cc(lib="readline", use="CURSES", mandatory=False, comment="readline library")
	ctx.check_cc(lib="gcc_s", mandatory=False, comment="GCC runtime library.")

	# Find OpenSSL. Must happen before function checks
	if ctx.options.enable_crypto:
		from check_openssl import configure_ssl
		configure_ssl(ctx)

	# Optional functions.  Do all function checks here, otherwise
	# we're likely to duplicate them.
	functions = (
		('adjtimex', ["sys/time.h", "sys/timex.h"]),
		('closefrom', ["stdlib.h"]),
		('clock_gettime', ["time.h"], "RT"),
		('clock_settime', ["time.h"], "RT"),
		('EVP_MD_do_all_sorted', ["openssl/evp.h"], "CRYPTO"),
		('getclock', ["sys/timers.h"]),
		('getpassphrase', ["stdlib.h"]),		# Sun systems
		('MD5Init', ["md5.h"], "CRYPTO"),
		('ntp_adjtime', ["sys/time.h", "sys/timex.h"]),		# BSD
		('ntp_gettime', ["sys/time.h", "sys/timex.h"]),		# BSD
		('res_init', ["resolv.h"]),
		("rtprio", ["sys/rtprio.h"]),		# Sun/BSD
		('sched_setscheduler', ["sched.h"]),
		('settimeofday', ["sys/time.h"], "RT"),	# BSD
		('strlcpy', ["string.h"]),
		('strlcat', ["string.h"]),
		('timer_create', ["time.h"])
		)
	for ft in functions:
		if len(ft) == 2:
			probe_function_with_prerequisites(ctx, function=ft[0],
							  prerequisites=ft[1])
		else:
			probe_function_with_prerequisites(ctx, function=ft[0],
							  prerequisites=ft[1],
							  use=ft[2])

	# Nobody uses the symbol, but this seems like a good sanity check.
	ctx.check_cc(header_name="stdbool.h", mandatory=True, comment="Sanity check.")

	# This is a list of every optional include header in the
	# codebase that is guarded by a directly corresponding HAVE_*_H symbol.
	#
	# In some cases one HAVE symbol controls inclusion of more than one
	# header; there is an example of this in ntp/audio.c.  In these cases
	# only the one header name matching the pattern of the HAVE_*_H symbol
	# name is listed here, so we can invert the relationship to generate
	# tests for all the symbols.
	#
	# Some of these are cruft from ancient big-iron systems and should
	# be removed.
	optional_headers = (
		"dns_sd.h",		# NetBSD, Apple, mDNS
		"histedit.h",		# Apple
		("ifaddrs.h", ["sys/types.h"]),
		"libscf.h",		# Solaris
		"linux/if_addr.h",
		"linux/rtnetlink.h",
		"linux/serial.h",
		#"linux/seccomp.h",	- Doesn't build yet, investigate
		"machine/soundcard.h",
		("md5.h", ["sys/types.h"]),
		"net/if6.h",
		("net/route.h", ["sys/types.h","sys/socket.h","net/if.h"]),
		"netinfo/ni.h",		# Apple
		"priv.h",               # Solaris
		("readline/readline.h",["stdio.h"]),
		("readline/history.h", ["stdio.h","readline/readline.h"]),
		("resolv.h", ["sys/types.h","netinet/in.h","arpa/nameser.h"]),
		"semaphore.h",
		"stdatomic.h",
		"sys/audioio.h",
		"sys/capability.h",     # Linux
		"sys/ioctl.h",
		"sys/modem.h",          # Apple
		"sys/prctl.h",          # Linux
		"sys/sockio.h",
		"sys/soundcard.h",
		("sys/sysctl.h", ["sys/types.h"]),
		("timepps.h", ["inttypes.h"]),
		("sys/timepps.h", ["inttypes.h", "sys/time.h"]),
		"utmpx.h",       # missing on RTEMS and OpenBSD
		("sys/timex.h", ["sys/time.h"]),
		"sys/audio.h"
	)
	for hdr in optional_headers:
		if type(hdr) == type(""):
			if ctx.check_cc(header_name=hdr, mandatory=False, comment="<%s> header" % hdr):
				continue
		else:
			(hdr, prereqs) = hdr
			if probe_header_with_prerequisites(ctx, hdr, prereqs):
				continue
		if os.path.exists("/usr/include/" + hdr):
			# Sanity check...
			print "Compilation check failed but include exists %s" % hdr

	if ctx.get_define("HAVE_TIMEPPS_H") or ctx.get_define("HAVE_SYS_TIMEPPS_H"):
		ctx.define("HAVE_PPSAPI", 1, comment="Enable the PPS API")


	# Check for Solaris capabilities
	if ctx.get_define("HAVE_PRIV_H") and sys.platform == "Solaris":
		ctx.define("HAVE_SOLARIS_PRIVS", 1, comment="Enable Solaris Privileges (Solaris only)")

	from check_sockaddr import check_sockaddr
	check_sockaddr(ctx)

	# Some systems don't have sys/timex.h eg OS X, OpenBSD...
	if ctx.get_define("HAVE_SYS_TIMEX_H"):
		ctx.env.HEADER_SYS_TIMEX_H = True

	# Some systems don't have sys/audio.h eg OS X, OpenBSD...
	if ctx.get_define("HAVE_SYS_AUDIO_H") or \
	   ctx.get_define("HAVE_SYS_SOUNDCARD_H") or \
	   ctx.get_define("HAVE_MACHINE_SOUNDCARD_H"):
		ctx.env.HAVE_AUDIO = True  # makes util/tg2

	if ctx.options.refclocks:
		from refclock import refclock_config

		# Enable audio when the right headers exist.
		if ctx.get_define("HAVE_SYS_AUDIOIO_H") or \
				ctx.get_define("HAVE_SYS_SOUNDCARD_H") or \
				ctx.get_define("HAVE_MACHINE_SOUNDCARD_H"):
			ctx.env.AUDIO_ENABLE = True

		refclock_config(ctx)

	# NetBSD (used to) need to recreate sockets on changed routing.
	# Perhaps it still does. If so, this should be set.  The autoconf
	# build set it "if the OS clears cached routes when more specifics
	# become available".
	# ctx.define("OS_MISSES_SPECIFIC_ROUTE_UPDATES", 1)

	if ctx.options.enable_leap_smear:
		ctx.define("ENABLE_LEAP_SMEAR", 1, comment="Enable experimental leap smearing code")

	if ctx.options.enable_mssntp:
		ctx.define("ENABLE_MSSNTP", 1, comment="Enable MS-SNTP extensions https://msdn.microsoft.com/en-us/library/cc212930.aspx")

	if ctx.options.enable_lockclock:
		ctx.define("ENABLE_LOCKCLOCK", 1, comment="Enable NIST 'lockclock'")

	if not ctx.options.disable_droproot:
		ctx.define("ENABLE_DROPROOT", 1, comment="Drop root after initialising")

	if not ctx.options.disable_dns_lookup:
		ctx.define("ENABLE_DNS_LOOKUP", 1, comment="Enable DNS lookup of hostnames")

	if not ctx.options.disable_dns_retry:
		ctx.define("ENABLE_DNS_RETRY", 1, comment="Retry DNS lookups after an initial failure")


	# There is an ENABLE_ASYMMETRIC that enables a section of the
	# protocol code having to do with handling very long asymmetric
	# delays, as in space communications. Likely this code has never
	# been enabled for production.

	# Won't be true under Windows, but is under every Unix-like OS.
	ctx.define("HAVE_WORKING_FORK", 1, comment="Whether a working fork() exists")

	# Does the kernel implement a phase-locked loop for timing?
	# All modern Unixes (in particular Linux and *BSD) have this.
	#
	# The README for the (now deleted) kernel directory says this:
	# "If the precision-time kernel (KERNEL_PLL define) is
	# configured, the installation process requires the header
	# file /usr/include/sys/timex.h for the particular
	# architecture to be in place."
	#
	if ctx.get_define("HAVE_SYS_TIMEX_H"):
		ctx.define("HAVE_KERNEL_PLL", 1, comment="Whether phase-locked loop for timing exists")

	# SO_REUSEADDR socket option is needed to open a socket on an
	# interface when the port number is already in use on another
	# interface. Linux needs this, NetBSD does not, status on
	# other platforms is unknown.  It is probably harmless to
	# have it on everywhere.
	ctx.define("NEED_REUSEADDR_FOR_IFADDRBIND", 1, comment="Whether SO_REUSEADDR is needed to open same sockets on alternate interfaces, required by Linux at least")

	# Not yet known how to detect HP-UX at version < 8, but that needs this.
	# Shouldn't be an issue as 8.x shipped in January 1991!
	# ctx.define("NEED_RCVBUF_SLOP", 1)

	# It should be possible to use asynchronous I/O with notification
	# by SIGIO on any Unix conformant to POSIX.1-2001. But the code to
	# do this is untested and there are historical reasons to suspect
	# it might not work reliably on all platforms.  Enable cautiously
	# and test carefully.
	# ctx.define("ENABLE_SIGNALED_IO", 1)

	# Used in libntp/audio.c:
	#	[[
	#	    #ifdef HAVE_MACHINE_SOUNDCARD_H
	#	    # include <machine/soundcard.h>
	#	    #endif
	#	    #ifdef HAVE_SYS_SOUNDCARD_H
	#	    # include <sys/soundcard.h>
	#	    #endif
	#	]],
	#	[[
	#	    extern struct snd_size *ss;
	#	    return ss->rec_size;
	#	]]
	# ctx.define("HAVE_STRUCT_SND_SIZE", 1)

	# These are required by the SHA2 code and various refclocks
	if sys.byteorder == "little":
		pass
	elif sys.byteorder == "big":
		ctx.define("WORDS_BIGENDIAN", 1)
	else:
		print "Can't determine byte order!"

	probe_vsprintfm(ctx, "VSNPRINTF_PERCENT_M",
			    "Checking for %m expansion in vsnprintf(3)")

	# Define CFLAGS/LDCFLAGS for -vv support.
	ctx.define("NTPS_CFLAGS", " ".join(ctx.env.CFLAGS).replace("\"", "\\\""), comment="CFLAGS used when compiled")
	ctx.define("NTPS_LDFLAGS", " ".join(ctx.env.LDFLAGS).replace("\"", "\\\""), comment="LDFLAGS used when compiled")


	# Check for directory separator
	if ctx.env.PLATFORM_TARGET == "win":
		sep = "\\"
	else:
		sep = "/"

	ctx.define("DIR_SEP", "'%s'" % sep, quote=False, comment="Directory separator used")

	# libisc/
	# XXX: Hack that needs to be fixed properly for all platforms
	ctx.define("ISC_PLATFORM_NORETURN_PRE", "", quote=False)
	ctx.define("ISC_PLATFORM_NORETURN_POST", "__attribute__((__noreturn__))", quote=False)
	ctx.define("ISC_PLATFORM_HAVEIFNAMETOINDEX", 1)
	ctx.define("ISC_PLATFORM_HAVEIN6PKTINFO", 1)
	ctx.define("ISC_PLATFORM_HAVEIPV6", 1)
	ctx.define("ISC_PLATFORM_HAVESCOPEID", 1)

	if ctx.get_define("HAVE_SYS_SYSCTL_H"):
		ctx.define("HAVE_IFLIST_SYSCTL", 1, comment="Whether sysctl interface exists")


	# Header checks
	from pylib.check_cap import check_cap_header
	check_cap_header(ctx)

	from pylib.check_libevent2 import check_libevent2_header
	check_libevent2_header(ctx)

	from pylib.check_pthread import check_pthread_header_lib
	check_pthread_header_lib(ctx)

	if not ctx.options.disable_mdns_registration:
		from pylib.check_mdns import check_mdns_header
		check_mdns_header(ctx)


	# Run checks
	from pylib.check_cap import check_cap_run
	check_cap_run(ctx)

	from pylib.check_libevent2 import check_libevent2_run
	check_libevent2_run(ctx)


	from pylib.check_pthread import check_pthread_run
	check_pthread_run(ctx)

	if not ctx.options.disable_mdns_registration:
		from pylib.check_mdns import check_mdns_run
		check_mdns_run(ctx)


	if ctx.env.PTHREAD_ENABLE:
		ctx.define("ISC_PLATFORM_USETHREADS", 1)


	ctx.start_msg("Writing configuration header:")
	ctx.write_config_header("config.h")
	ctx.end_msg("config.h", "PINK")


	def yesno(x):
		if x:
			return "Yes"
		return "No"


	msg("")
	msg("Build Options")
	msg_setting("CC", " ".join(ctx.env.CC))
	msg_setting("CFLAGS", " ".join(ctx.env.CFLAGS))
	msg_setting("LDFLAGS", " ".join(ctx.env.LDFLAGS))
	msg_setting("PREFIX", ctx.env.PREFIX)
	msg_setting("Debug Support", yesno(not ctx.options.disable_debug))
	msg_setting("Refclocks", ", ".join(ctx.env.REFCLOCK_LIST))
	msg_setting("Build Manpages", yesno(ctx.env.ENABLE_DOC and not ctx.env.DISABLE_MANPAGE))

	if ctx.options.enable_debug:
		msg("")
		msg("*** --enable-debug ignored.  (default on now)")
		msg("")
Example 25
    def parseNode(self):
        """
        Parse a single node, which may contain both UserProperties and
        TextContent.

          Node =
              BeginNode
              BlankLine?
              UserProperties?
                (
                TextContent
                BlankLine
                )?
        """
        chunk_pos = self.reader.start
        dump_props = BeginNode()
        node_path = self.parseDumpProperty("Node-path", dump_props)
        if self.matchDumpProperty("Node-kind"):
            node_kind = self.parseDumpProperty("Node-kind", dump_props)
        else:
            node_kind = None
        node_action = self.parseDumpProperty("Node-action", dump_props)

        # A number of optional properties follow. We know what they
        # can be, but their order isn't fixed.
        #
        # Content-length is optional, contrary to the available
        # documentation, which claims that it is always present for
        # compatibility with generic rfc822 parsers.
        #
        # e.g. a dir node with action add and no properties has no
        # lengths at all.

        tlen, plen, clen = None, None, None
        while self.matchDumpProperty():
            name, value = self.parseDumpProperty(store=dump_props)
            if name == "Text-content-length":
                tlen = int(value)
            elif name == "Prop-content-length":
                plen = int(value)
            elif name == "Content-length":
                clen = int(value)

        prop_delta = dump_props.get("Prop-delta") == "true"
        if prop_delta:
            assert self.version > 2, msg("""
                Property deltas should not occur in this dumpfile.
                Its format is too old to support them.
                """)

        text_delta = dump_props.get("Text-delta") == "true"
        if text_delta:
            assert self.version > 2, msg("""
                Text deltas should not occur in this dumpfile.
                Its format is too old to support them.
                """)

        if clen == None:
            clen = 0
        if plen == None:
            plen = 0
        if tlen != None:
            assert tlen == clen - plen, msg("""
                Content-Length must be the sum of Text-Content-Length and 
                Prop-Content-Length.  This is not what was found:
                Content-Length:      %(clen)d
                Text-Content-Length: %(tlen)d
                Prop-Content-Length: %(plen)d
                """ % locals())
        else:
            tlen = clen - plen

        yield dump_props

        if plen > 0 or tlen > 0:
            yield self.parseBlankLine()

        if plen > 0:
            yield self.parseUserProperties(plen, prop_delta)

        if tlen > 0:
            text = self.getBytes(tlen)
            assert len(text) == tlen, msg("""
                Expected text to have length %d, instead it had length %d.
                """ % (tlen, len(text)))

            # We can only verify the checksum when text_deltas are not
            # in use.  When text_deltas are being used, the checksum
            # refers to the *result* of applying the deltas and we
            # have no idea how nor desire to do that here.
            if not text_delta:
                expected_chksum = dump_props.get("Text-content-md5")
                if expected_chksum:
                    computed_chksum = md5(text).hexdigest()
                    assert expected_chksum == computed_chksum, msg("""
                           MD5 mismatch.
                           expected: %s,
                           computed: %s.
                           """ % (expected_chksum, computed_chksum))

            yield TextContent(text)
            # TextContent is always terminated by an 'extra' newline,
            # which getBytes consumes for us, but does not return.
            yield BlankLine()

        for evt in self.parseBlankLines():
            yield evt

        yield EndNode()
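A minimal sketch of driving this generator, assuming parser is an instance of the containing class; the events are the BeginNode, UserProperties, TextContent, BlankLine and EndNode objects it yields, in document order:

for event in parser.parseNode():
    print(type(event).__name__)  # e.g. BeginNode ... EndNode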
Example 26
def read(ses, fn, opt, progress=True):

    # initial progress message
    if progress:
        ses.progress('reading %s' % fn)

    # get concatenated list of chunks for all files
    chunks = []

    def get_chunks(fn):
        if os.path.isdir(fn):
            for f in sorted(os.listdir(fn)):
                get_chunks(os.path.join(fn, f))
        elif is_ftdc_file(fn):
            chunks.extend(File.get(fn))

    get_chunks(fn)
    if not chunks:
        raise Exception(fn + ' is not an ftdc file or directory')

    # compute time ranges for each chunk using _id timestamp
    for i in range(len(chunks)):
        t = chunks[i]._id
        fudge = -300  # xxx _id is end instead of start; remove when SERVER-20582 is fixed
        chunks[i].start_time = t + fudge
        if i > 0:
            chunks[i - 1].end_time = t
    chunks[-1].end_time = float('inf')  # don't know end time; will filter last chunk later

    # roughly filter by timespan using _id timestamp as extracted above
    # fine filtering will be done during processing
    in_range = lambda chunk: chunk.start_time <= opt.before and chunk.end_time >= opt.after
    filtered_chunks = [chunk for chunk in chunks if in_range(chunk)]

    # init stats for progress report
    total_bytes = sum(len(chunk) for chunk in filtered_chunks)
    read_chunks = 0
    read_samples = 0
    read_bytes = 0
    used_samples = 0

    # compute number of output samples desired for overview mode
    # uses time-filtered data sizes so resolution automatically increases for smaller timespans
    # returns a subset of the samples, aiming for each sample to represent same number of bytes,
    # except that we return at least one sample for each chunk
    if opt.overview == 'heuristic':
        overview = 1000
        util.msg('limiting output to %d samples; use --overview to override' %
                 overview)
    elif opt.overview == 'none' or opt.overview == 'all' or not opt.overview:
        overview = float('inf')
    else:
        overview = int(opt.overview)
    overview_bytes = int(max(total_bytes / overview, 1))

    # propagate sample numbers if we can
    sample_number = 0 if filtered_chunks and chunks and filtered_chunks[0] == chunks[0] else None

    # we already filtered filtered_chunk_docs by type and time range
    for chunk in filtered_chunks:

        # propagate sample numbers if we can
        if sample_number is not None:
            chunk.sample_number = sample_number

        # compute desired subset of metrics based on target number of samples
        max_samples = (read_bytes + len(chunk)) / overview_bytes - read_bytes / overview_bytes
        if max_samples <= 1:
            metrics = chunk.get_first()
            metrics = util.BSON((n, [v[0]]) for (n, v) in metrics.items())
            used_samples += 1
        else:
            metrics = chunk.get_all()
            every = int(math.ceil(float(chunk.nsamples) / max_samples))
            if every != 1:
                metrics = util.BSON((n, v[0::every]) for (n, v) in metrics.items())
            used_samples += chunk.nsamples / every
        yield metrics

        # propagate sample numbers if we can
        sample_number = chunk.sample_number + chunk.nsamples if chunk.sample_number is not None else None

        # report progress
        read_chunks += 1
        read_bytes += len(chunk)
        read_samples += chunk.nsamples
        if progress and (read_chunks % 10 == 0 or read_bytes == total_bytes):
            msg = '%d chunks, %d samples, %d bytes (%.0f%%), %d bytes/sample; %d samples used' % (
                read_chunks, read_samples, read_bytes,
                100.0 * read_bytes / total_bytes,
                read_bytes / read_samples, used_samples)
            ses.progress(msg)

    if used_samples != read_samples:
        s = 'displaying overview of ~%d of ~%d samples in selected time range (use z to zoom in or v to view all)'
        ses.advise(s % (used_samples, read_samples))
    else:
        ses.advise('displaying all ~%d samples in selected time range' %
                   used_samples)
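The per-chunk arithmetic above aims for each emitted sample to represent roughly overview_bytes of input while still keeping at least one sample per chunk. A standalone sketch of that stride computation, with hypothetical chunk sizes:

import math

chunk_sizes = [40960, 81920, 20480]  # hypothetical bytes per chunk
nsamples = 100                       # hypothetical samples per chunk
overview = 10                        # target number of output samples
overview_bytes = int(max(sum(chunk_sizes) / overview, 1))

read_bytes = 0
for size in chunk_sizes:
    # how many overview_bytes boundaries this chunk spans
    max_samples = (read_bytes + size) // overview_bytes - read_bytes // overview_bytes
    if max_samples <= 1:
        every = nsamples  # keep just the first sample of the chunk
    else:
        every = int(math.ceil(float(nsamples) / max_samples))
    print('%d-byte chunk: keep every %dth sample' % (size, every))
    read_bytes += size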
Example no. 27
def get_series(ses, spec, spec_ord):

    # parse helper
    def split(s, expect, err, full):
        m = re.split('([' + expect + err + '])', s, 1)
        s1, d, s2 = m if len(m)==3 else (m[0], '$', '')
        if d in err:
            msg = 'expected %s at pos %d in %s, found %s' % (expect, len(full)-len(s)+1, full, d)
            raise Exception(msg)
        return s1, d, s2

    # parse the spec
    left, d, s = split(spec, '(:=', ')', spec)
    if d=='=': # has tag
        tag = left
        spec_name, d, s = split(s, '(:', ')=', spec)        
    else: # no tag
        tag = None
        spec_name = left
    params = {}
    if d == '(': # has args
        while d != ')': # consume args
            name, d, s = split(s, '=)', '(', spec) # get arg name
            value, d, s = split(s, '(),', '', spec) # bare value
            p = 0
            while d=='(' or p>0: # plus balanced parens
                value += d
                if d=='(': p += 1
                elif d==')': p -= 1
                v, d, s = split(s, '(),', '', spec)
                value += v
            params[name] = value
    fn = s.lstrip(':') # xxx canonicalize fn
    util.dbg(spec_name, params, fn)
    ses.add_title(fn)

    def detect_file_type(fn):
        if ftdc.is_ftdc_file_or_dir(fn):
            return 'ftdc'
        with open(fn) as f:
            for _ in range(10):
                try:
                    json.loads(f.next())
                    return 'json'
                except Exception as e:
                    util.dbg(e)
        return 'text'

    file_type = detect_file_type(fn)
    util.msg('detected type of', fn, 'as', file_type)

    # find matching descriptors
    scored = collections.defaultdict(list)
    spec_name_words = util.words(spec_name)
    for desc in descriptors.descriptors:
        if get(desc,'file_type') != file_type:
            continue
        desc_name_words = util.words(desc['name'])
        last_i = -1
        beginning = matched = in_order = adjacent = 0
        for w, word in enumerate(spec_name_words):
            try:
                i = desc_name_words.index(word)
                if i==0 and w==0: beginning = 1
                matched += 1
                if i==last_i+1: adjacent += 1
                elif i>last_i: in_order += 1
                last_i = i
            except ValueError:
                pass
        score = (beginning, matched, adjacent, in_order)
        scored[score].append(desc)
    best_score = sorted(scored.keys())[-1]
    best_descs = scored[best_score] if best_score != (0,0,0,0) else []
    series = [Series(spec, desc, params, fn, spec_ord, tag, ses.opt) for desc in best_descs]

    # no match?
    if not series:
        util.msg('no descriptors match', spec_name)

    return series
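Descriptor matching above ranks candidates by the lexicographically compared tuple (beginning, matched, adjacent, in_order), so a spec that matches the first word of a descriptor name beats one that merely matches more words mid-name. A minimal re-implementation for illustration:

# Minimal sketch of the name scoring used above; word splitting is simplified.
def score(spec_words, desc_words):
    last_i = -1
    beginning = matched = in_order = adjacent = 0
    for w, word in enumerate(spec_words):
        try:
            i = desc_words.index(word)
        except ValueError:
            continue
        if i == 0 and w == 0:
            beginning = 1
        matched += 1
        if i == last_i + 1:
            adjacent += 1
        elif i > last_i:
            in_order += 1
        last_i = i
    return (beginning, matched, adjacent, in_order)

# A spec matching the start of the name outranks a longer mid-name match.
assert score(['cache', 'bytes'], ['cache', 'bytes', 'read']) > \
       score(['bytes', 'read'], ['cache', 'bytes', 'read'])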
Example no. 28
def main():

    # Initialize new external memory dictionary.
    util.msg('Populating external memory dictionary')

    t1 = time.time()

    dirname = util.make_temp_name('em_dict')

    em_dict = pyrsistence.EMDict(dirname)
    for i in util.xrange(random.randrange(0x1000)):
        k = random.randrange(0x100000)
        v = random.randrange(0x100000)
        em_dict[k] = v

    t2 = time.time()
    util.msg('Done in %d sec.' % (t2 - t1))


    # Request several iterator objects to locate possible memory leaks.
    util.msg('Testing item iterator')
    for i in util.xrange(0x1000):
        for item in em_dict.items():
            pass

    util.msg('Testing keys iterator')
    for i in util.xrange(0x1000):
        for key in em_dict.keys():
            pass

    util.msg('Testing values iterator')
    for i in util.xrange(0x1000):
        for value in em_dict.values():
            pass

    t3 = time.time()
    util.msg('Done in %d sec.' % (t3 - t2))


    # Close and remove external memory dictionary from disk.
    em_dict.close()
    shutil.rmtree(dirname)

    return 0
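The loops above only exercise the iterator allocation paths; to actually spot a leak you would watch process memory across iterations. A hedged sketch using the standard resource module (Unix only; ru_maxrss is reported in kilobytes on Linux):

import resource

def max_rss_kb():
    return resource.getrusage(resource.RUSAGE_SELF).ru_maxrss

before = max_rss_kb()
for _ in range(1000):
    for _item in em_dict.items():  # em_dict as created above
        pass
print('max RSS grew by %d kB' % (max_rss_kb() - before))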
Example no. 29
    def parse(self, fileLike):
        """
        Generate parse events from the bytes provided by fileLike.
        """
        try:
            self.reader = None
            self.reader = Reader(fileLike)
            self.reader.next()
            for evt in self.parseDumpfile():
                yield evt
        except AssertionError:
            sys.stderr.write(str(self.reader)+"\n")
            raise
        else:
            assert self.reader.eof, msg("Stopped parsing before end of input\n"
                                        + str(self.reader))

    def parseDumpfile(self):
        """
        A Dumpfile consists of a Version, an (optional?) UUID, and
        zero or more Revisions.  Extra blank lines may occur.  Blank
        lines before and after the version declaration are not
        reported.
        """
        self.version = None
        self.skipBlankLines()
        version = int(self.parseDumpProperty("SVN-fs-dump-format-version"))
        assert 2 <= version <= 3, \
               "Only dump format versions 2 and 3 are supported"
        self.version = version
        self.skipBlankLines()
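For reference, the preamble that parseDumpfile accepts looks like the following (values illustrative; the UUID record is optional, and the version value 2 or 3 controls delta support):

SVN-fs-dump-format-version: 2

UUID: 7bf7a5ef-cabf-0310-b7d4-93df341afa7e

Revision-number: 0
Prop-content-length: 56
Content-length: 56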
Example no. 31
    def data_point_after_splits(self, t, d, get_field, set_field):

        # may not have numeric data in the case of a count, so just use 0
        try:
            d = float(d)
        except (TypeError, ValueError):
            d = 0

        # wrapping 32-bit counter hack
        if self.wrap:
            if self.last_d > self.wrap/2 and d < -self.wrap/2:
                self.wrap_offset += 2 * self.wrap
                util.dbg('wrap', d, self.last_d, self.wrap_offset)
            elif self.last_d < -self.wrap/2 and d > self.wrap/2:
                self.wrap_offset -= 2 * self.wrap
                util.dbg('wrap', d, self.last_d, self.wrap_offset)
            self.last_d = d
            d += self.wrap_offset

        # compute a rate
        if self.rate:
            if self.last_t==t:
                return
            if self.last_t:
                dd = d - self.last_d
                if self.rate != 'delta':
                    dd /= t - self.last_t
                self.last_t = t
                self.last_d = d
                d = dd
            else:
                self.last_t = t
                self.last_d = d
                return
            if d < 0:
                self.wrapped = True
                if not self.wrap:
                    util.msg('possible wrap detected in', self.get('name'), 'at', util.f2s(t))

        # scale - xxx need general computation mechanism here instead
        if self.scale_field:
            scale_field = self.scale_field.format(**self.descriptor)
            try:
                div = float(get_field(scale_field))
            except (TypeError, ValueError):
                return
            if div:
                d /= div
        d /= self.scale

        # record the data
        if self.buckets:
            t = t // self.buckets * self.buckets  # align t to the start of its bucket
            self.ys[t] = self.op(self.ys, t, d)
        elif self.queue:
            if d>self.queue_min_ms:
                ms = datetime.timedelta(0, d/1000.0)
                self.queue_times.append((t-ms,+1))
                self.queue_times.append((t,-1))
        else:
            self.ys[t] = d

        # make data available for computation
        if self.set_field and set_field:
            set_field(self.set_field, d)

        # tell our caller what we recorded
        return d
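The wrap hack above targets signed 32-bit counters: a jump from near +2^31 to near -2^31 is treated as an overflow rather than a real drop, and an accumulated offset restores monotonicity. A self-contained sketch (WRAP = 2**31 is an assumption matching the message above):

WRAP = 2**31  # assumed signed 32-bit counter range

def unwrap(values):
    offset, last, out = 0, None, []
    for v in values:
        if last is not None:
            if last > WRAP / 2 and v < -WRAP / 2:
                offset += 2 * WRAP   # counter overflowed forward
            elif last < -WRAP / 2 and v > WRAP / 2:
                offset -= 2 * WRAP   # counter stepped backward across the wrap
        last = v
        out.append(v + offset)
    return out

print(unwrap([2147483000, -2147483000]))  # [2147483000, 2147484296]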
Example no. 32
    def progress(self, msg):
        if self.server:
            self.put(msg + '<br/><script>this.document.body.scrollIntoView(false)</script>')
        util.msg(msg)
Example no. 33
def page(ses):

    opt = ses.opt

    # support for save in server mode
    if ses.server:
        ses.start_save()

    # in server mode, graphs were already generated in the "progress" phase
    if not ses.server:
        _get_graphs(ses)

    # start page
    _head(ses)
    ses.eltend('script', {}, 'document.title="%s"' % ', '.join(ses.title))
    ses.elt('body', {
        'onkeypress': 'key()',
        'onload': 'loaded_content()',
        #'onunload': "do_unload('body')",
    })
    
    # no data - finish with empty page and return
    if not ses.graphs:
        ses.put('NO DATA')
        ses.endall()
        return

    # compute stats
    spec_matches = collections.defaultdict(int)
    for graph in ses.graphs:
        for series in graph:
            spec_matches[series.spec] += 1
    spec_empty = collections.defaultdict(int)
    spec_zero = collections.defaultdict(int)

    # provide browser with required client-side parameters
    if not hasattr(opt, 'cursors'): opt.cursors = []
    model_items = [
        'tleft', 'tright', 'cursors', 'level', 'before', 'after', 'live', 'selected', 'scrollY',
    ]
    model = dict((n, getattr(opt, n)) for n in model_items if hasattr(opt, n))
    spec_cmdline = ' '.join(pipes.quote(s) for s in opt.specs)
    model['spec_cmdline'] = spec_cmdline
    ses.advise('viewing ' + spec_cmdline + ' (use o or O to change)')
    #util.msg(model)
    ses.eltend('script', {}, 'top.model = %s' % json.dumps(model))

    # state-dependent informational message
    ses.advise('current detail level is <span id="current_level"></span> (hit 1-9 to change)', 0)
    
    # help message at the top
    ses.elt('div', {'onclick':'toggle_help()'})
    ses.put('<b>click here for help</b></br>')
    ses.elt('div', {'id':'help', 'style':'display:none'})
    ses.put(help_all)
    if ses.server:
        ses.put(help_server)
    ses.put('<br/>')
    ses.end('div')
    ses.end('div')
    ses.put('<br/>'.join(ses.advice))
    ses.put('<br/><br/>')

    # table of graphs
    ses.elt('table', {'id':'table', 'style':'position:relative;'})

    # this row holds cursor heads, cursor letters, and time labels
    ses.elt('tr')
    ses.eltend('td')
    ses.eltend('td')
    ses.elt('td')
    cursors_html(ses, opt.width, opt.tmin, opt.tmax, opt.ticks)
    ses.end('td')
    ses.end('tr')

    # this row holds data column heads (avg, max, name)
    ses.elt('tr')
    ses.td('head data', 'avg')
    ses.td('head data', 'max')
    ses.eltend('td')
    if opt.number_rows:
        ses.td('head row-number', 'row')
    ses.td('head desc', 'name')
    ses.td('', ' ')
    ses.end('tr')

    # function to emit a graph
    def emit_graph(data, ymax=None, sparse=False):
        graphing.html_graph(
            ses, data=data,
            tmin=opt.tmin, tmax=opt.tmax, width=opt.width,
            ymin=0, ymax=ymax, height=opt.height,
            #ticks=ticks, shaded=not opt.no_shade and len(data)==1)
            ticks=opt.ticks, shaded=len(data)==1, bins=opt.bins,
            sparse=sparse
        )

    # colors for merged graphs
    colors = ['rgb(50,102,204)','rgb(220,57,24)','rgb(253,153,39)','rgb(20,150,24)',
              'rgb(153,20,153)', 'rgb(200,200,200)']
    def color(i):
        return colors[i] if i < len(colors) else 'black'

    # word-by-word common prefix
    # used to factor out common prefix and suffix in merged graph names
    def commonprefix(names):
        pfx = []
        for words in zip(*[n.split() for n in names]):
            if all(w==words[0] for w in words):
                pfx.append(words[0])
            else:
                break
        return ' '.join(pfx)

    # format graph name, factoring out common prefixes and common suffixes for merged graphs
    def name_td(g):
        ses.td('name')
        pfx = commonprefix([s.name for s in g])
        sfx = commonprefix([s.name[::-1] for s in g])[::-1]
        ses.put(pfx)
        if sfx != pfx:
            for i,s in enumerate(g):
                mid = ' ' + s.name[len(pfx):len(s.name)-len(sfx)]
                ses.eltend('span', {'style':'color:%s' % color(i)}, mid)
            ses.put(sfx)
        ses.end('td')

    # determine which graphs to show, suppressing empty and uniformly zero if desired
    # emit placeholders (graph==None, generating empty tr) to facilitate maintaining order
    rows = []
    for graph in sorted(ses.graphs, key=lambda g: g[0].sort_ord):
        graph.sort(key=lambda s: s.sort_ord)
        graph.ymin = min(s.ymin for s in graph)
        graph.ymax = max(s.ymax for s in graph)
        graph.ysum = sum(s.ysum for s in graph)
        graph.wrapped = any(s.wrapped for s in graph)
        graph.ylen = sum(len(s.ys) for s in graph)
        graph.display_ymax = max(s.display_ymax for s in graph)
        if graph.ylen:
            if graph.ymax!=0 or graph.ymin!=0 or opt.show_zero:
                rows.append(graph)
            else:
                rows.append(None) # placeholder
                util.dbg('skipping uniformly zero data for', graph[0].get('name'), 'in', graph[0].fn)
                for s in graph:
                    spec_zero[s.spec] += 1
        elif opt.show_empty:
            rows.append(graph)
        else:
            rows.append(None) # placeholder
            util.dbg('no data for', graph[0].get('name'), 'in', graph[0].fn)
            for s in graph:
                spec_empty[s.spec] += 1

    # emit html for graphs we are showing, in the requested order
    if hasattr(opt,'row_order') and len(opt.row_order)==len(rows):
        row_order = opt.row_order
    else:
        row_order = range(len(rows))    
    for row in row_order:
        graph = rows[row]
        if graph is None: # placeholder
            ses.eltend('tr', {
                'class': 'row',
                '_level': 1000,
                '_row': row,
            })
        elif graph.ylen:
            ses.elt('tr', {
                'onclick': 'sel(this)',
                'class': 'row',
                '_level': graph[0].level,
                '_row': row,
            })
            avg = '{:,.3f}'.format(float(graph.ysum)/graph.ylen) if not graph.wrapped else 'WRAPPED'
            ses.td('data', avg)
            ses.td('data', '{:,.3f}'.format(graph.ymax))
            ses.td('graph')
            graph_color = lambda graph, i: color(i) if len(graph)>1 else 'black'
            data = [(s.ts, s.ys, graph_color(graph,i)) for i,s in enumerate(graph)]
            emit_graph(data, graph.display_ymax, graph.sparse)
            ses.end('td')
            if opt.number_rows:
                ses.td('row-number', str(row))
            name_td(graph)
            ses.end('tr')
        else:
            ses.elt('tr', {'onclick':'sel(this)', 'class':'row', '_level':graph[0].level})
            ses.td('data', 'n/a')
            ses.td('data', 'n/a')
            ses.td('graph')
            emit_graph([])
            ses.end('td')
            if opt.number_rows:
                ses.td('row-number', str(row))
            name_td(graph)
            ses.end('tr')

    # close it out
    ses.endall()

    for spec in opt.specs:
        util.msg('spec', repr(spec), 'matched:', spec_matches[spec],
            'zero:', spec_zero[spec], 'empty:', spec_empty[spec])
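name_td above factors the shared leading and trailing words out of merged series names; the suffix is found by applying the same word-wise prefix scan to the reversed strings. A small illustration:

def commonprefix(names):
    # word-by-word common prefix, as in page() above
    pfx = []
    for words in zip(*[n.split() for n in names]):
        if all(w == words[0] for w in words):
            pfx.append(words[0])
        else:
            break
    return ' '.join(pfx)

names = ['bytes read into cache', 'bytes written from cache']
print(commonprefix(names))                           # bytes
print(commonprefix([n[::-1] for n in names])[::-1])  # cache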
Example no. 34
        threading.Thread.__init__(self)

    def run(self):
        while 1:
            if task_queue.empty():
                break
            task = task_queue.get()
            module[task['module']].exploit(task['request'])

if __name__ == '__main__':

    O = {}
    NODE_KEY = "a88b92531ba974f68bc1fd5938fc77"
    NODE_DEBUG = 0
    SERVER = "http://w/uauc/playweb/"
    util.msg("PlayWeb Node 1.0")
    util.msg("Server:%s Key:%s Debug:%d" % (SERVER, NODE_KEY, NODE_DEBUG))
    util.msg("Listening server project...")
    while 1:
        r = util.http_get(SERVER + "/index.php?m=node&a=get_task")
        if r['data'] != " ":
            O = eval(util.decode_str(r['data'], NODE_KEY))
            break
        time.sleep(1)
    O['debug'] = NODE_DEBUG
    util.msg("[Project] Target:%s  Time:%s Module:%s  Thread:%s" % (O['target'], util.date(O['start_time']), O['module'], O['thread']), 1)
    O['target'] = "w"
    O['key'] = NODE_KEY
    #O['depth'] = 5  # notice
    O['server_url'] = SERVER + "?m=node&a="
    O['web-ports'] = util.csv2array(O['web-ports'])
Example no. 35
import util
import DB
import sqlite3
import sys

# global
space = 50

# test of database connection
util.welcome_msg(space)
try:
    DB.init_db()
    util.msg('This is your first time logging in', space)
    util.msg('Hope you are happy with my service', space)
except:
    # init_db raises if the database already exists
    util.log('DB ok')

while (1):
    cmd = input('<< ')

    if (cmd == 'e'): sys.exit()
    elif (cmd == 'h'): util.info(space)
    elif (cmd == 'n'): DB.insert(space)
    elif (cmd == 'l'): DB.list_account(space)
    elif (cmd == 't'): DB.list_tag(space)
    elif (cmd == 'u'): DB.update(space)
    elif (cmd == 'd'): DB.delete(space)
    else:
        util.welcome_msg(space)
Example no. 37
def process(series, fn, opt):

    # to track metrics present in the data but not processed by any series
    unrecognized = set()

    # xxx does time parsing belong here or in the parse routines?
    pt = util.parse_time()

    # process all chunks that we are sent
    while True:

        try:

            # get our next input
            chunk = yield

            def process_series(s, data_key):
                tz = chunk.tz if hasattr(chunk, 'tz') else s.tz
                time_key = s.time_key  # e.g. 'serverStatus.localTime'
                if data_key in chunk and time_key in chunk:
                    ts = chunk[time_key]
                    if type(ts[0]) == str or type(ts[0]) == unicode:
                        for i, t in enumerate(ts):
                            ts[i] = pt.parse_time(t, opt, tz)
                    if (ts[0] / s.time_scale > opt.before or
                            ts[-1] / s.time_scale < opt.after):
                        return
                    for i, (t, d) in enumerate(zip(ts, chunk[data_key])):
                        t = t / s.time_scale
                        if t >= opt.after and t <= opt.before:

                            def get_field(key):
                                try:
                                    return chunk[key][i]
                                except IndexError:
                                    return None

                            if d != None:
                                s.data_point(t, d, get_field, None, opt)

            # send each series our data points
            for s in series:
                if s.special:
                    s.special(chunk)
                if s.split_on_key_match:
                    for data_key in chunk:
                        if data_key == s.time_key:
                            continue
                        m = s.split_on_key_match_re.match(data_key)
                        if m:
                            description = m.groupdict()
                            ss = s.get_split(data_key, description)
                            process_series(ss, data_key)
                else:
                    process_series(s, s.data_key)

            # record every metric key seen; recognized ones are discarded below
            unrecognized.update(chunk.keys())

        except GeneratorExit:
            break

        except Exception as e:
            traceback.print_exc()
            raise Exception('error while processing ' + fn + ': ' + str(e))

    # compute and print unrecognized metrics
    ignore = re.compile('^serverStatus.(repl|start|end)|'
                        '^replSetGetStatus|slot_closure_rate')
    for s in series:
        unrecognized.discard(s.data_key)
        unrecognized.discard(s.time_key)
    unrecognized = filter(lambda x: not ignore.match(str(x)), unrecognized)
    is_str = lambda x: type(x) == str or type(x) == unicode
    unrecognized = filter(lambda x: x in chunk and not is_str(chunk[x][0]),
                          unrecognized)
    if unrecognized:
        util.msg('unrecognized metrics:')
        for u in sorted(unrecognized):
            util.msg('   ', u)
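process is a generator used as a coroutine: the caller primes it, pushes chunks with send(), and close() triggers the GeneratorExit branch that ends the loop and prints the unrecognized-metrics report. A hedged usage sketch, with series, fn, opt, and ses named as in the surrounding examples and read() as defined in Example no. 26:

proc = process(series, fn, opt)
proc.next()               # prime: run up to the first `chunk = yield`
for chunk in read(ses, fn, opt):
    proc.send(chunk)      # each send resumes the loop with one chunk
proc.close()              # raises GeneratorExit inside process()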
Example no. 38
    def do_GET(self):

        # parse command-line args passed in url query string as an 'args' parameter
        def query2opt(query):
            args = shlex.split(query['args'][0])
            return __main__.get_opt(args)

        # parse url, extracting path and query portions
        _, _, path, _, query, _ = urlparse.urlparse(self.path)
        query = urlparse.parse_qs(query)

        # query to root is redirected to default session 0
        if path == '/':
            self.send_response(301)  # permanent
            self.send_header('Location', '/0')

        # open a new view in a new window
        # expects command-line arg string in url parameter "args"
        # parse off the query, open new session based on that, then redirect to bare session url
        elif path == '/open':
            opt = query2opt(query)  # parse url "args" parameter
            ses = Ses(opt, server=True)  # new session
            self.send_response(302)  # temporary redirect
            self.send_header('Location', ses.path)

        # top-level page: return the container, which includes
        #   progress message area - loaded via /ses/progress url in progress phase (below)
        #   content area - loaded via /ses/content url in content phase (below)
        elif path in Ses.sessions:
            ses = Ses.sessions[path]
            self.prepare(ses)
            html.container(ses)

        # info for a given time t
        elif path.endswith('/info'):
            path = path.rsplit('/', 1)[0]
            ses = Ses.sessions[path]
            t = float(query['t'][0])
            self.prepare(ses)
            html.info(ses, t)

        # raw info for a given time t
        elif path.endswith('/raw'):
            path = path.rsplit('/', 1)[0]
            ses = Ses.sessions[path]
            t = float(query['t'][0])
            self.prepare(ses)
            html.raw(ses, t, kind='raw')

        elif path.endswith('/metadata'):
            path = path.rsplit('/', 1)[0]
            ses = Ses.sessions[path]
            t = float(query['t'][0])
            self.prepare(ses)
            html.raw(ses, t, kind='metadata')

        # progress phase: load the data in preparation for generating content
        # while emitting progress messages. We also accept new view parameters to open
        # new view in current window as command-line arg string in url parameter "args"
        elif path.endswith('/progress'):
            path = path.rsplit('/', 1)[0]
            ses = Ses.sessions[path]
            if 'args' in query:
                ses.opt = query2opt(query)  # parse url "args" parameter
            self.prepare(ses)
            html.load(ses)

        # content phase: generate actual html view from graph data loaded in progress phase
        elif path.endswith('/content'):
            path = path.rsplit('/', 1)[0]
            ses = Ses.sessions[path]
            self.prepare(ses)
            html.page(ses)

        # a window closed
        elif '/close/' in path:
            if Handler.exit_on_close:
                path, _, what = path.rsplit('/', 2)
                util.msg('closing', path, '(' + what + ')')
                if path in Ses.sessions:
                    del Ses.sessions[path]
                if not Ses.sessions:
                    util.msg('all sessions closed, exiting')
                    os._exit(0)

        # otherwise not found
        else:
            self.send_response(404)  # not found
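Concretely, the handler above implements a small URL scheme; with a hypothetical server on localhost:8888 the main endpoints look like:

http://localhost:8888/                        -> 301 redirect to default session /0
http://localhost:8888/open?args=...           -> new session from command-line args, 302 redirect
http://localhost:8888/0/progress              -> loads the data, streaming progress messages
http://localhost:8888/0/content               -> the generated graph page
http://localhost:8888/0/info?t=1443725400.0   -> info for time t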
Example no. 39
def browser(opt, url):

    # what os?
    if sys.platform == 'darwin':
        cmd = 'sleep 1; open -a "Google Chrome" "%s"' % url
    elif sys.platform == 'linux2':
        cmd = 'sleep 1; google-chrome "%s" &' % url
    elif sys.platform == 'win32':
        cmd = 'timeout 2 && start /b chrome "%s" &' % url
    else:
        cmd = None  # fall through to the "don't know how" message below

    # launch it
    if cmd:
        util.msg('opening a browser window on', url)
        rc = subprocess.call(cmd, shell=True)
        if rc != 0:
            util.msg('can\'t open browser; is Google Chrome installed?')
    else:
        util.msg('don\'t know how to open a browser on your platform')

    # go into background
    # not as robust as daemonizing, but that's not needed, and adds an external dependency
    if not opt.nofork:
        log_fn = 'timeseries.%d.log' % opt.port
        util.msg('going into background; sending output to ' + log_fn)
        util.msg('will terminate when browser window closes')
        util.msg('use --nofork to run in foreground')
        if os.fork():
            os._exit(0)
        sys.stdin.close()
        sys.stderr = sys.stdout = open(log_fn, 'a')
        util.msg('\n===', url)
        signal.signal(signal.SIGHUP, signal.SIG_IGN)
Example no. 40
    def parseUserPropertyEnd(self):
        assert self.matchUserPropertyEnd(), msg("""
            Expected PROPS-END, but found\n%s
            """ % (self.reader,))
        self.reader.next()
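The property block that ends with the PROPS-END asserted above is a sequence of length-prefixed K/V records (plus D records for deletions when prop-deltas are active). An illustrative block:

K 10
svn:author
V 5
alice
PROPS-END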
Example no. 41
import machine     # MicroPython hardware access (I2C, Pin); imports inferred from usage below
import util        # custom helpers (dots, msg)
import led         # custom LED wrapper
import temp007     # TMP007 temperature sensor driver
import vcnl4010    # VCNL4010 proximity sensor driver
import jsonEncoder

import networkUtil  # Custom Network functions (WIFI + MQTT)
from umqtt.simple import MQTTClient

# Entry Point Indication
util.dots()

# Setup led
green = led.Led(13)
red = led.Led(15)

# Setup i2c Bus
i2c = machine.I2C(-1, machine.Pin(5), machine.Pin(4))
util.msg('i2c bus created')

# Instantiate Temperature sensor
#Address of TMP007 Temperature Sensor
temp007Address = 64
tempSensor = temp007.Temp007(i2c, temp007Address)
util.msg('Temp007 instantiated, name: tempSensor')

# Instantiate Proximity sensor
#Address of VCNL4010 Proximity Sensor
proxAddress = 19
proxSensor = vcnl4010.Vcnl4010(i2c, proxAddress)
proxSensor.setup()
util.msg('Vcnl instantiated, name: proxSensor')

# Instantiate 2 servos