Example no. 1
1
def get_tree(request, args):
    ref_name = args[0]
    path = args[1:]
    ref = repo.refs[ref_name]
    rows = []
    tree = ref.commit.tree
    for p in path:
        tree = tree[p]
    if isinstance(tree, git.Blob):
        body = html.pre(tree.data_stream.read())
    else:
        rows.append(html.tr(html.th("Name"), html.th("Size"), html.th("Type")))
        if len(args) > 1:
            rows.append(
                html.tr(html.td(html.a("..", href="/" + "/".join(["tree"] + args[:1]))), html.td(), html.td("[DIR]"))
            )
        for d in tree.trees:
            link = html.td(html.a(d.name + "/", href="/" + "/".join(["tree"] + args + [d.name])))
            rows.append(html.tr(link, html.td(), html.td("[DIR]")))
        for blob in tree.blobs:
            link = html.td(html.a(blob.name, href="/" + "/".join(["tree"] + args + [blob.name])))
            size = html.td(bytes_to_human(blob.size))
            rows.append(html.tr(link, size, html.td(blob.mime_type)))
        body = html.table(*rows, **{"class": "list"})
    return html_page("Tree {} /{}".format(ref_name, "/".join(path)), html.div(body))
Example no. 2
0
  def output(self):
    answer = {}
    aggregate_in = {}
    aggregate_out = {}
    for h in self.hosts:
      aggregate_in[h.name] = 0.0
      aggregate_out[h.name] = 0.0

    for h1 in self.hosts:
      answer[h1.name] = {}
      for h2 in self.hosts:
        if h1 != h2:
          out = h1.open('iperf_output/%s-%s' % (h1.name, h2.name)).readlines()
          vals = out[-1].split(' ')[-2:]
          answer[h1.name][h2.name] = ' '.join(vals).strip().replace('bits/sec','')
          aggregate_in[h2.name] += float(vals[0])
          aggregate_out[h1.name] += float(vals[0])
        else:
          answer[h1.name][h2.name] = '----'
    
    IN = append_ratios({'values': aggregate_in})
    OUT = append_ratios({'values': aggregate_out})

    return '\n'.join([
      html.section("IPerf all pairs (%d sec)" % self.t, html.table(answer)),
      html.section("iperf aggregate IN", html.table(IN)),
      html.section("iperf aggregate OUT", html.table(OUT))
    ])
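
Note: Example 2 reads the throughput from the last line of each saved iperf report by keeping the last two whitespace-separated fields. A quick check of that parsing step on an assumed human-readable summary line:

line = "[  3]  0.0-30.0 sec  3.30 GBytes   944 Mbits/sec"  # assumed iperf format
vals = line.split(' ')[-2:]
print(vals)                                            # ['944', 'Mbits/sec']
print(' '.join(vals).strip().replace('bits/sec', ''))  # '944 M'
print(float(vals[0]))                                  # 944.0, fed into the aggregates
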
Example no. 3
0
	def draw(self, users):
		#print 'Content-type: text/html\n\n'

		ipaddr = socket.gethostbyname(socket.gethostname())
		baseref = 'http://%s:5000/' % ipaddr

		output = ''
		output += """
		<form>
		<td><input type="text" name="pattern" value="" /></td>
		<td><input type="submit" value="Search" /></td>
		</form>
		"""
		output += html.tr("<th>" + "</th><th>".join(users[0]) + "</th>")
		addform = """
		<form>
		<td><input type="text" name="role" value="WAITER" /></td>
		<td>Auto</td>
		<td><input type="text" name="fname" value="" /></td>
		<td><input type="text" name="lname" value="" /></td>
		<td><input type="text" name="login" value="" /></td>
		<td><input type="text" name="tel" value="+380" /></td>
		<td><input type="submit" value="Add" /></td>
		<input type="hidden" name="q" value="add"> 
		</form>
		"""
		output += html.tr(addform)
		for user in users[1:]:
			ref = baseref + 'udel?id=%d' % user[1]
			user += (html.a(ref, 'del'),)  # one-element tuple: appends the "del" link cell
			output += html.tr("<td>" + "</td><td>".join([str(x) for x in user]) + "</td>")
	 
		return html.doc(html.table(output))
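
Note: in Example 3, user += (html.a(ref, 'del'),) appends exactly one cell because the right-hand side is a one-element tuple; a bare trailing comma without the parentheses behaves identically but is easy to misread. Spelled out with plain values:

user = ('WAITER', 7, 'anna')  # hypothetical row tuple
user += ('del-link',)         # one-element tuple, so += appends one item
print(user)                   # ('WAITER', 7, 'anna', 'del-link')
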
Example no. 4
0
    def tree(self):
        z = []
        z += h.table(id=self.id,
                     class_='display',
                     cellspacing='0',
                     width='100%')
        if self.use_index_column:
            js_headers = json.dumps([{
                'title': item
            } for item in ([''] + self.headers)],
                                    ensure_ascii=False,
                                    encoding='utf8')
            js_data = json.dumps([([''] + row) for row in self.data],
                                 ensure_ascii=False,
                                 encoding='utf8')
        else:
            js_headers = json.dumps([{
                'title': item
            } for item in self.headers],
                                    ensure_ascii=False,
                                    encoding='utf8')
            js_data = json.dumps(self.data,
                                 ensure_ascii=False,
                                 encoding='utf8')
        js = '''\
$(document).ready(function() {
    var data = %(js_data)s;
    var headers = %(js_headers)s;
    var t = $('#%(id)s').DataTable({
        "data": data,
        "columns": headers,
        "columnDefs": [ {
            "searchable": false,
            "orderable": false,
            "targets": 0,
        } ],
        "order": [[ 1, 'asc' ]],
        "dom": 'lfTC<"clear">rtip',
        "tableTools": {
            "sSwfPath": "/static/datatables/extensions/TableTools/swf/copy_csv_xls_pdf.swf"
        },
        "lengthMenu": [[-1, 25], ["All", 25]],
        "autoWidth": false,
    });
    //new $.fn.dataTable.FixedHeader( t , {
    //    "offsetTop": 50
    //    });
    t.on( 'order.dt search.dt', function () {
        t.column(0, {search:'applied', order:'applied'}).nodes().each( function (cell, i) {
            cell.innerHTML = i+1;
        } );
    } ).draw();
} );
''' % {
            'js_data': js_data,
            'js_headers': js_headers,
            'id': self.id,
        }
        z += h.script(js, type='text/javascript')
        return z
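
Note: Examples 4 and 14 embed server-side JSON into a JavaScript template via Python's %(name)s dict formatting, so each placeholder is filled by key rather than by position. The pattern in isolation:

import json

template = "var data = %(js_data)s; var id = '%(id)s';"
print(template % {"js_data": json.dumps([[1, "a"], [2, "b"]]), "id": "t1"})
# var data = [[1, "a"], [2, "b"]]; var id = 't1';
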
Example no. 5
0
def histogram_grid(post):
	histogram = post["analysis"]["histogram"]
	id = post["data"]["id"]

	n = len(post["analysis"]["data"])
	p = 1 / 256.0
	expected = n * p
	stddev = math.sqrt(n * p * (1 - p))
	if stddev == 0:
		stddev = 1

	rows = []
	for y in range(16):
		row = []
		for x in range(16):
			i = y * 16 + x
			value = histogram[i]
			offset = float(value - expected) / (stddev * 8)
			bright = min(max(int(offset * 128 + 127), 0), 255)
			bgcolor = "#%02x%02x%02x" % (bright, bright, bright)
			td = html.td("&nbsp;" * 5, bgcolor=bgcolor,
			             title="0x%02x: %i" % (i, value))
			row.append(td)

		rows.append(html.tr(*row))

	return expander("histogram-grid-%s" % id, html.table(*rows))
Example no. 6
0
def histogram_grid(post):
    histogram = post["analysis"]["histogram"]
    id = post["data"]["id"]

    n = len(post["analysis"]["data"])
    p = 1 / 256.0
    expected = n * p
    stddev = math.sqrt(n * p * (1 - p))
    if stddev == 0:
        stddev = 1

    rows = []
    for y in range(16):
        row = []
        for x in range(16):
            i = y * 16 + x
            value = histogram[i]
            offset = float(value - expected) / (stddev * 8)
            bright = min(max(int(offset * 128 + 127), 0), 255)
            bgcolor = "#%02x%02x%02x" % (bright, bright, bright)
            td = html.td("&nbsp;" * 5,
                         bgcolor=bgcolor,
                         title="0x%02x: %i" % (i, value))
            row.append(td)

        rows.append(html.tr(*row))

    return expander("histogram-grid-%s" % id, html.table(*rows))
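
Note: Examples 5 and 6 treat each of the 256 histogram buckets as a binomial count with p = 1/256, so expected = n*p and stddev = sqrt(n*p*(1-p)); a cell's grey level encodes its deviation on a scale that saturates at eight standard deviations. Worked numbers, assuming n = 65536 input bytes:

import math

n, p = 65536, 1 / 256.0
expected = n * p                                 # 256.0 per bucket
stddev = math.sqrt(n * p * (1 - p))              # ~15.97
value = 300                                      # hypothetical bucket count
offset = float(value - expected) / (stddev * 8)  # ~0.34
bright = min(max(int(offset * 128 + 127), 0), 255)
print(expected, round(stddev, 2), bright)        # 256.0 15.97 171
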
Example no. 7
0
def get_refs(request, args):
    def get_ref(ref):
        commits = html.a("commits", href=html.absolute("commits", ref.name))
        tree = html.a("tree", href=html.absolute("tree", ref.name))
        return html.tr(html.td(ref.name), html.td(commits), html.td(tree))

    return html_page("Refs", html.div(html.table(*map(get_ref, repo.refs), **{"class": "list"})))
Example no. 8
0
def vert_arrangement(moves):
    html_text = ''
    for first, moves_group in moves.iteritems():
        moves_data = [[move] for move in moves_group]
        html_text += html.table(moves_data, algorithm_html,
                                MOVE_TABLE_ATTRIBUTES)
        html_text += '<BR>'
    return html_text
Example no. 9
0
def print_failed_html(data, aname, area, whitelist, area2='cloudhwname'):
    agg_data = aggregate.flat(data, area)
    table_data = []
    for name, tests in agg_data.items():
        for test in tests:
            table_data.append(test)  # one table row per test
    html_output = html.table(table_data)
    print html_output
Example no. 10
0
  def output(self):
    ret = {'values':{}}

    for h in self.hosts:
      v = int(h.open('cpu_stress_output/%s' % h.name).read().strip())
      ret['values'][h.name] = v

    return html.section("CPU Stress (seconds)", html.table(append_ratios(ret)))
Example no. 11
0
def grid_arrangement(moves):
    html_text = ''
    moves_groups = [moves.values()]

    def vert(moves_group):
        moves_data = [[move] for move in moves_group]
        return html.table(moves_data, algorithm_html, MOVE_TABLE_ATTRIBUTES)

    html_text += html.table(moves_groups, vert, BORDERLESS_TABLE_ATTRIBUTES)
    return html_text
Example no. 12
0
def zeigeJahrgang(jahr):
    import database

    db = database.Database()
    Inhalt = db.query(
        "select p.vor, p.nach, t.Fachbereich, t.title, b.nr from pupil as p, book as b, type as t, ausleihe as a where a.pnr = p.nr and b.nr = a.bnr and t.nr = b.type and p.jahrgang = %s order by t.Fachbereich, p.nach"
        % str(jahr)
    )
    Liste = html.table("Vorname", "Nachname", "Fachbereich", "Buchtitel", "Buchnummer")
    for Row in Inhalt:
        exec "Liste.createLine" + str(tuple(Row))
    return Liste.rtn()
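
Note: Liste.createLine(*Row) in Example 12 relies on argument unpacking: the tuple of column values expands into positional arguments, with no detour through source text that could choke on quotes. A small sketch with a hypothetical stand-in:

def create_line(*cols):  # hypothetical stand-in for Liste.createLine
    return ' | '.join(str(c) for c in cols)

row = ('Anna', 'Muster', 'Mathe', 'Buch "A"', 42)
print(create_line(*row))  # Anna | Muster | Mathe | Buch "A" | 42
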
Example no. 13
0
def uebersicht():
    import html
    h = ""
    t = html.table("Nr.", "Kommentar", "Datum", "Wiederherstellen", "L&ouml;schen")

    # list the backup files
    import os
    f = os.listdir(pfad)
    f.sort()

    # read the comment stored in each file's first line
    k = []
    for fs in f:
        fl = open(pfad + fs, 'r')
        k.append(fl.readline()[1:])
        fl.close()

    # admin rights? look them up once, outside the loop: the answer is
    # the same for every row
    import Cookie, database
    db = database.Database()
    c = Cookie.SimpleCookie()
    c.load(os.environ['HTTP_COOKIE'])
    bn = c['Benutzername'].value
    rights = db.query('SELECT Backend FROM benutzer WHERE Benutzername="' + bn + '"')[0][0]
    del db

    # fill the table with one line per backup
    from datetime import date
    for i in range(len(f)):
        # number
        nr = str(i + 1)

        # comment
        kom = k[i]

        # date: the file name is the backup's unix timestamp plus a
        # four-character extension
        datum = date.fromtimestamp(float(f[i][:-4])).strftime("%d.%m.%Y")

        # restore / delete links, admins only
        if rights == 1:
            wbutton = '<a href="./init.py?mn=backup&act=wh&ts=' + f[i][:-4] + '">Admin!</a>'
            lbutton = '<a href="./init.py?mn=backup&act=del&ts=' + f[i][:-4] + '">Admin!</a>'
        else:
            wbutton = "..."
            lbutton = "..."

        t.createLine(nr, kom, datum, wbutton, lbutton)
    h += t.rtn()
    import time
    h += html.paragraph('<div align="right"><a href="init.py?mn=backup&act=sp&ts=' + str(int(time.time())) + '">Neues Backup erstellen...</a></div>').rtn()
    return h
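
Note: Example 13 derives each backup's date from its file name, which is assumed to be of the form <unix-timestamp>.<ext>; slicing off the last four characters leaves the timestamp. A quick check of that step:

from datetime import date

fname = '1300000000.bak'  # hypothetical backup file name
print(date.fromtimestamp(float(fname[:-4])).strftime("%d.%m.%Y"))
# 13.03.2011 in most time zones (fromtimestamp uses local time)
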
Example no. 14
0
    def tree(self):
        z = []
        z += h.table(id=self.id, class_='display', cellspacing='0', width='100%')
        if self.use_index_column:
            js_headers = json.dumps([{'title': item} for item in ([''] + self.headers)],
                    ensure_ascii=False, encoding='utf8')
            js_data = json.dumps([([''] + row) for row in self.data], ensure_ascii=False, encoding='utf8')
        else:
            js_headers = json.dumps([{'title': item} for item in self.headers],
                    ensure_ascii=False, encoding='utf8')
            js_data = json.dumps(self.data, ensure_ascii=False, encoding='utf8')
        js = '''\
$(document).ready(function() {
    var data = %(js_data)s;
    var headers = %(js_headers)s;
    var t = $('#%(id)s').DataTable({
        "data": data,
        "columns": headers,
        "columnDefs": [ {
            "searchable": false,
            "orderable": false,
            "targets": 0,
        } ],
        "order": [[ 1, 'asc' ]],
        "dom": 'lfTC<"clear">rtip',
        "tableTools": {
            "sSwfPath": "/static/datatables/extensions/TableTools/swf/copy_csv_xls_pdf.swf"
        },
        "lengthMenu": [[-1, 25], ["All", 25]],
        "autoWidth": false,
    });
    //new $.fn.dataTable.FixedHeader( t , {
    //    "offsetTop": 50
    //    });
    t.on( 'order.dt search.dt', function () {
        t.column(0, {search:'applied', order:'applied'}).nodes().each( function (cell, i) {
            cell.innerHTML = i+1;
        } );
    } ).draw();
} );
''' % {
    'js_data': js_data,
    'js_headers': js_headers,
    'id': self.id,
}
        z += h.script(js, type='text/javascript')
        return z
Example no. 15
0
def get_commits(request, args):
    ref_name = "/".join(args)
    rows = []
    for commit in repo.iter_commits(ref_name, max_count=config["displayed_commits"]):
        check = html.input(type="checkbox", name=commit.hexsha)
        rows.append(html.tr(html.td(check, " ", *commit_to_html(commit))))
    create_review = html.input(value="Create Review", type="submit")
    reset = html.input(value="Reset", type="reset")
    body = html.form(
        create_review,
        reset,
        html.hr(),
        html.table(*rows, **{"class": "list"}),
        method="post",
        action=html.absolute("review", "create"),
    )
    return html_page("Commits {}".format(ref_name), html.div(body))
Example no. 16
0
    def draw(self, data):
        print 'Content-type: text/html\n\n'

        ipaddr = socket.gethostbyname(socket.gethostname())
        baseref = 'http://%s/cgi-bin/admin.py' % ipaddr

        output = ''
        if data == "#ERROR":
            output += "<h3>Error: invalid login or password</h3>"
        loginform = """
        <form>
        <p><input type="text" name="login" value="" /></p>
        <p><input type="text" name="pw" value="" /></p>
        <p><input type="submit" value="Login" /></p>
        </form>
        """
        output += html.tr(loginform)
        print html.doc(html.table(output))
Example no. 17
0
def diff_to_html(diff):
    def parse_segment_header(header):
        m = re.match("^@@ -(\d+),(\d+) \+(\d+),(\d+) @@", header)
        return (int(m.group(1)), int(m.group(2)), int(m.group(3)), int(m.group(4)))

    def line_to_html(left, right, content, classes):
        left_td = (
            html.td() if left == 0 else html.td(str(left), **{"class": "h", "id": "{}L{}".format(diff.a_blob, left)})
        )
        right_td = (
            html.td() if right == 0 else html.td(str(right), **{"class": "h", "id": "{}R{}".format(diff.b_blob, right)})
        )
        return html.tr(left_td, right_td, html.td(content), **{"class": classes})

    def italize_control_char(line):
        return (html.span(line[0], **{"class": "h"}), line[1:])

    rows = []
    lines = diff.diff.split("\n")
    moved_from = lines.pop(0)
    rows.append(line_to_html(0, 0, moved_from, "h r"))
    moved_to = lines.pop(0)
    rows.append(line_to_html(0, 0, moved_to, "h a"))
    for line in lines:
        if len(line) == 0:
            continue
        elif line[0] == "+":
            rows.append(line_to_html(0, right_line, italize_control_char(line), "c a"))
            right_line += 1
        elif line[0] == "-":
            rows.append(line_to_html(left_line, 0, italize_control_char(line), "c r"))
            left_line += 1
        elif line[0] == "@":
            (left_line, _, right_line, _) = parse_segment_header(line)
            rows.append(line_to_html(0, 0, line, "h"))
            continue
        elif line[0] == " ":
            rows.append(line_to_html(left_line, right_line, line, "c"))
            right_line += 1
            left_line += 1
        else:
            raise Exception()
    return html.div(html.table(*rows, **{"class": "diff"}))
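
Note: parse_segment_header in Example 17 reads the hunk header of a unified diff; the four captured numbers are the old start line, old line count, new start line, and new line count, which seed the left_line/right_line counters. The regex in isolation:

import re

def parse_segment_header(header):
    m = re.match(r"^@@ -(\d+),(\d+) \+(\d+),(\d+) @@", header)
    return (int(m.group(1)), int(m.group(2)), int(m.group(3)), int(m.group(4)))

print(parse_segment_header("@@ -10,7 +12,8 @@ def diff_to_html(diff):"))
# (10, 7, 12, 8)
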
Example no. 18
0
def do_dataset(dataset='ia21h2eaq', fileroot='observations', local='no'):
    '''
	Make html files for a single dataset

	110203	ksl	Added local switch which controls where the
			real working directory is to make testing
			easier
	140307	ksl	Added information about scans and subarray observations
	'''

    record = per_list.read_ordered_list_one(fileroot, dataset)
    if len(record) == 0:
        return 'NOK: make_html failed because it could not find dataset %s' % dataset

    work_dir = per_list.set_path(
        record[0], 'no',
        local)  # This will be the Persist directory for the dataset
    fig_dir = work_dir + '/Figs/'  # This will be the directory where figures are stored

    html_filename = work_dir + dataset + '_persist.html'

    # page=markup.page()
    title = 'Persistence Removal Evaluation for dataset %s' % dataset
    page = html.begin(title)

    # page.init(title='Persistence Removal Evaluation for dataset %s' % dataset)
    # page.h1('Persistence Removal Evaluation for %s' % dataset)

    # page.p('''This page contains images for the evaluation of how well persistence has been removed from an image''')
    page = page + html.paragraph(
        '''This page contains images for the evaluation of how well persistence has been removed from an image'''
    )

    # Look for the history file for this dataset

    history_file = dataset + '.txt'

    if os.path.exists(work_dir + history_file):
        string = '''The history file for the processing of this dataset is '''
        string = string + html.link("here", href=history_file)
        page = page + html.paragraph(string)

        # read history simply returns all of the lines in the history file that begin with !
        # And so any processing of these lines still has to be done
        lines, table1, table2 = read_history(work_dir + history_file)
        for line in lines:
            page = page + html.paragraph(line)
        if len(table1) > 0:
            page = page + html.h2(
                'Earlier exposures that could affect this image')
            page = page + html.table(table1)
        if len(table2) > 0:
            page = page + html.h2(
                'External and total persistence for this image')
            string = '''External persistence is persistence from previous visits; internal persistence
			is persistence induced by exposures in this visit.  Total persistence includes both
			internal and external persistence.  Generally, self-induced or internal persistence is
			only important if dithers larger than the psf have been used within the visit'''
            page = page + html.paragraph(string)
            page = page + html.table(table2)

    else:
        page = page + html.paragraph(
            ''' The history file for this dataset appears to be missing.  Check that the file has been processed'''
        )

    page = page + html.hline(size='3', width='100')

    string = '''The next 4-panel image shows the original flt image (upper left), the corrected flt image (upper right), 
	the persistence model (lower left) and the stimulus (lower right).  The stimulus is simply the image constructed
	from the maximum value in electrons of any of the images that went into the stimulus model'''

    # Look for the summary image

    xname = dataset + '_subtract.png'
    if os.path.exists(fig_dir + xname):
        # page.img(src='Figs/'+xname,width=600,height=600,alt="Thumbnails")
        page = page + html.image(
            image='Figs/' + xname, width=600, height=600, alt="Thumbnails")
    else:
        # page.p('''The summary image is missing''')
        page = page + html.paragraph('''The summary image is missing''')

    # page.hr(size='3',width='100%')
    page = page + html.hline(size='3', width='100')

    # Now include the evaluation images

    string = '''As a qualitative indicator of how well the persistence correction has worked, some of the regions with
	the highest predicted persistence have been examined. 
	The next two images give an indication of how well the persistence has been subtracted from the images.
	Both images have the original data in red and the persistence-subtracted data in blue.  The first image is
	a plot of flux vs the persistence model, the second is flux as a function of the stimulus. Ideally the blue
	curves would all center around 0. The utility of these plots depends on how isolated the persistence peaks
	are from stars in the image. If these plots are empty, no good regions for evaluating persistence were found.'''

    page = page + html.paragraph(string)

    xname = dataset + '.sum1.png'
    if os.path.exists(fig_dir + xname):
        # page.img(src='Figs/'+xname,width=300,height=300,alt="Thumbnails")
        page = page + html.image(
            'Figs/' + xname, width=300, height=300, alt="Thumbnails")
    else:
        # page.p('''The first evaluation image showing the subtraction is missing''')
        page = page + html.paragraph(
            '''The first evaluation image showing the subtraction is missing''')

    xname = dataset + '.sum2.png'
    if os.path.exists(fig_dir + xname):
        # page.img(src='Figs/'+xname,width=300,height=300,alt="Thumbnails")
        page = page + html.image(
            'Figs/' + xname, width=300, height=300, alt="Thumbnails")
    else:
        # page.p('''The second evaluation image showing the subtraction is missing''')
        page = page + html.paragraph(
            '''The second evaluation image showing the subtraction is missing'''
        )

    # page.hr(size='3',width='100%')
    page = page + html.hline(size=3, width=100)

    # Look for the peaks summary

    string = '''This figure indicates which regions were selected for evaluation. The two panels are
	identical except the regions selected are indicated in the lower panel. '''

    page = page + html.paragraph(string)

    xname = dataset + '_persist.peaks.png'
    if os.path.exists(fig_dir + xname):
        # page.img(src='Figs/'+xname,width=600,height=1000,alt="Thumbnails")
        page = page + html.image(
            'Figs/' + xname, width=900, height=900, alt="Thumbnails")
    else:
        # page.p('''The summary figure for peak identification is missing''')
        page = page + html.paragraph(
            '''The summary figure for peak identification is missing''')

    # Now find all of the individual peak files:

    searchstring = fig_dir + dataset + '.peak.*.1.png'
    print searchstring

    try:
        peaks_file = work_dir + dataset + '_persist.peaks.dat'
        p = open(peaks_file, 'r')
        lines = p.readlines()
        p.close()
    except IOError:
        print 'Warning: %s not found' % peaks_file
        lines = []

    xlines = []
    for one in lines:
        one = one.strip()
        if one[0] != '#' and len(one) > 0:
            xlines.append(one)

    if len(xlines) > 0:
        string = '''The results for individual regions are shown below. The four panels are a subsection of the original flt file, the predicted persistence in that region, the persistence-subtracted flt file, and a plot of pixel values as a function of predicted persistence in the region. Green points are the original values; yellow points are the corrected values. The red and blue lines show the mean values in the original and corrected images, respectively.'''
        page = page + html.paragraph(string)
        page = page + html.hline(size='3', width='100')

        for one in xlines:
            word = one.split()
            x = int(word[0])
            y = int(word[1])
            z = float(word[2])
            zz = float(word[3])
            # page.p('Persistence at x = %3d, y=%3d' %(x,y))
            page = page + html.paragraph(
                'Persistence at x = %3d, y=%3d is about %6.3f e/s compared to science image flux of %6.3f e/s'
                % (x, y, z, zz))
            xname = '%s.peak.%03d_%03d.1.png' % (dataset, x, y)
            if os.path.exists(fig_dir + xname):
                # page.img(src='Figs/'+xname,width=400,height=400,alt="Thumbnails")
                page = page + html.image(
                    'Figs/' + xname, width=400, height=400, alt="Thumbnails")
            else:
                # page.p('Figure %s not present' % (work_dir+xname))
                page = page + html.paragraph('Figure %s not present' %
                                             (work_dir + xname))
            # page.hr(size='3',width='100%')
            page = page + html.hline(size='3', width='100')
    else:
        string = '''Unfortunately, no good regions for evaluating persistence were found.'''
        page = page + html.paragraph(string)
        page = page + html.hline(size='3', width='100')

    page = page + html.end()

    # Open the html file with the appropriate permissions, and then write it
    g = per_list.open_file(html_filename)
    g.write('%s' % page)
    g.close()

    return 'OK: subtract_html: %s' % html_filename
Example no. 19
0
 def vert(moves_group):
     moves_data = [[move] for move in moves_group]
     return html.table(moves_data, algorithm_html, MOVE_TABLE_ATTRIBUTES)
Example no. 20
0
def prog_html(sum, records, filename):
    '''
	Make an html file for a specific program id, having
	already read the summary file and ls file

	111003	ksl	Fixed small error in the title 
	'''

    # first get the information we would like to put in the table

    i = 0

    title = [[
        'Rootname', 'Visit.Line', 'Obs. Date', 'Obs. Time', 'Target',
        'Date_Proc', 'Status', 'Ext1', 'Ext2', 'Ext3', 'Tot1', 'Tot2', 'Tot3'
    ]]
    lines = title
    title = [[
        '', '', 'YYYY-MM-DD', 'HH:MM:SS', '', 'YYYY-MM-DD', '', '%>0.1 e/s',
        '%>0.03 e/s', '%>0.01 e/s', '%>0.1 e/s', '%>0.03 e/s', '%>0.01 e/s'
    ]]
    lines = lines + title
    while i < len(sum):
        one_sum = sum[i]
        one_record = records[i]
        root = one_record[1]
        visit = one_record[3]
        obsdate = one_record[7]
        obstime = one_record[8]
        targ = one_record[14]
        xline = [root, visit, obsdate, obstime, targ]
        xline = xline + [one_sum[3]] + one_sum[5:12]
        lines.append(xline)
        i = i + 1

    page = html.begin('Summary Page: Persistence in program %s with PI %s' %
                      (records[0][2], records[0][16]))

    string = '''The table below gives an indication of whether persistence is likely to be a problem in any of the datasets
	obtained to date in this program.  The columns of the table are as follows:'''

    page = page + html.paragraph(string)

    comment = []
    comment.append('Rootname: Rootname of the specific exposure')
    comment.append(
        'Visit.Line: Visit and line number in phase II proposal for this exposure'
    )
    comment.append('Obs. Date: Observation date for this exposure')
    comment.append('Obs. Time: Start time for this exposure')
    comment.append('Target: Target of this exposure')
    comment.append(
        'Date Proc: Date on which persistence processing was carried out')
    comment.append(
        'Status: Status message about the persistence processing.  Normally Complete with version number'
    )
    comment.append(
        'Ext1: Percentage of pixels for which external persistence is estimated to be greater than 0.1 e/s'
    )
    comment.append(
        'Ext2: Percentage of pixels for which external persistence is estimated to be greater than 0.03 e/s'
    )
    comment.append(
        'Ext3: Percentage of pixels for which external persistence is estimated to be greater than 0.01 e/s'
    )
    comment.append(
        'Tot1: Percentage of pixels for which total persistence is estimated to be greater than 0.1 e/s'
    )
    comment.append(
        'Tot2: Percentage of pixels for which total persistence is estimated to be greater than 0.03 e/s'
    )
    comment.append(
        'Tot3: Percentage of pixels for which total persistence is estimated to be greater than 0.01 e/s'
    )

    page = page + html.add_list(comment)

    string = '''Note: External persistence refers to persistence caused by visits that precede this dataset. This is usually the type of
	persistence that causes the most trouble in terms of data analysis since it can appear anywhere in the image.  Internal persistence, in our terminology, is
	persistence that is due to earlier exposures in the same visit.  This is usually, though not always, less of a worry than external
	persistence because the patterns and dithers used within a visit are usually small, and the persistence will mainly
	occur near bright objects.  If one has used large dithers or multiple pointings within a single visit, one needs to take that into account
	when evaluating problems associated with persistence.'''

    page = page + html.paragraph(string)
    page = page + html.table(lines)
    page = page + html.end()

    g = open(filename, 'w')
    g.write(page)
    g.close()
Example no. 21
0
def worst(lines, records, nmax=500):
    '''
	Find the datasets with the most persistence and write information regarding these
	to the file subtract_eval_worst.txt.  

	The selection is made on the fraction of pixels with persistence EXT2.

	111110	ksl	Modified to eliminate certain programs that are ringers, like ksl's persistence testing programs
	111116  ksl	Moved censoring to the main routine, and changed inputs to reflect this
	'''

    xlines = lines

    xx = []
    for one in xlines:
        xx.append(float(one[7]))
    xx = numpy.array(xx)

    order = numpy.argsort(xx)

    n = 0
    i = len(order) - 1
    g = open('subtract_eval_worst.txt', 'w')
    table = []
    while i >= 0:
        # line=per_list.read_ordered_list_one(dataset=xlines[order[i]][0])
        record = records[order[i]]  # keep the record aligned with xlines[order[i]] below
        # print record
        string1 = '%50s %10s %8s %8s %10s %5s %10s %20s %20s ' % (
            record[0], record[1], record[2], record[3], record[9], record[10],
            record[11], record[14], record[16])
        one = xlines[order[i]]
        # print records[order[i]]
        string2 = '%8s %8s %8s %8s %8s %8s %30s' % (
            one[6], one[7], one[8], one[9], one[10], one[11], one[12])
        string = string1 + string2
        # print string
        g.write('%s\n' % string)
        n = n + 1
        if n > nmax:
            break
        tline = []
        tline.append(html.link(record[1], one[12]))
        tline.append(record[2])
        tline.append(record[3])
        tline.append(record[9])
        tline.append(record[10])
        tline.append(record[11])
        tline.append(record[14])
        tline.append(record[16])
        tline.append(one[6])
        tline.append(one[7])
        tline.append(one[8])
        tline.append(one[9])
        tline.append(one[10])
        tline.append(one[11])
        table.append(tline)

        i = i - 1
    g.close()

    string = html.begin('Worst Affected by Persistence')
    string = string + html.table(table)
    string = string + html.end()

    g = open('Worst.html', 'w')
    g.write(string)
    g.close()

    return
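
Note: Example 21 sorts ascending with numpy.argsort and then walks the index array from the end, visiting datasets in descending order of the EXT2 persistence fraction. The pattern in isolation:

import numpy

xx = numpy.array([0.03, 0.41, 0.12])
order = numpy.argsort(xx)  # ascending indices: [0, 2, 1]
i = len(order) - 1
while i >= 0:              # descending: 0.41, 0.12, 0.03
    print(order[i], xx[order[i]])
    i = i - 1
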
Example no. 22
0
def turns_html(move):
    return html.table([move], turn_html, TURN_TABLE_ATTRIBUTES)
Example no. 23
0
  def output(self):
    ret = ''
    bandwidth = [','.join(['Time'] + [('b%d' % i) for i in xrange(1, 10 + 1)])]
    tcpwindow = [','.join(['Time'] + [('W%d' % i) for i in xrange(1, 10 + 1)])]
    bs = {}
    tcp = {}
    losses = {}

    for t in xrange(0, self.t):
      bs[t]=["%d"%t]
      tcp[t]=["%d"%t]

    for h in self.hosts[1:]:
      # time series data from iperf, tcpstats.py
      lines = h.open('iperf_output/%s-%s' % (h.name, self.hosts[0].name)).readlines()
      t = 0
      for l in lines:
        val = int(l.split(',')[-1])*1.0/(2**20)
        bs[t].append("%.2f" % val)
        t += 1
        if t == self.t:
          break
      
      lines = map(lambda x: x.strip(), h.open('tcpstats.csv').readlines())[0:t]
      t = 0
      for l in lines[1:]:
        val = int(l.split(',')[1])
        tcp[t].append("%d" % val)
        t += 1
        if t == self.t:
          break

      # get loss packets
      losses[h.id] = {}
      lines = map(lambda x: x.strip().split(' '), 
        shell_output('vzctl exec %d cat /proc/net/netstat' % h.id).split('\n')[0:2])
        #h.open('proc/net/netstat').readlines()[0:2])
        # BUG!! theres a discrepancy between the values
        # read from /proc/net/netstat from inside the container
        # and /mountpoint/proc/net/netstat! 
        # Damn :p

      for k, v in zip(lines[0], lines[1]):
        if k in ['TCPLoss', 'TCPTimeouts']:
          losses[h.id][k] = v
      losses[h.id]['debug'] = zip(lines[0], lines[1])
      
    # put values together
    for t in xrange(0, self.t):
      bandwidth.append(','.join(bs[t]))
      tcpwindow.append(','.join(tcp[t]))

    ret += html.csv(bandwidth, 
      "Bandwidth per host", 
      ["valueRange:[0,15]"])

    ret += html.csv(tcpwindow, 
      "TCP Window size hosts",
      ["rollPeriod:1", "showRoller:true"])

    # this is related to the netstat problem
    ret += html.join([html.tag("h3", "TCP stats"), html.table(losses)])
    return ret
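
Note: the loss-counter extraction in Example 23 assumes /proc/net/netstat prints a header line followed by a matching value line, so zip pairs each field name with its count. A sketch on two assumed lines:

header = 'TcpExt: SyncookiesSent TCPLoss TCPTimeouts'.split(' ')
values = 'TcpExt: 0 12 3'.split(' ')
wanted = {}
for k, v in zip(header, values):
    if k in ['TCPLoss', 'TCPTimeouts']:
        wanted[k] = v
print(wanted)  # {'TCPLoss': '12', 'TCPTimeouts': '3'}
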
Example no. 24
0
    for Row in Inhalt:
        exec "Liste.createLine" + str(tuple(Row))
    return Liste.rtn()


def zeige_buch(pupilnummer):
    import html, ausleihe, book

    a = ausleihe.Ausleihe()
    b = book.Book()
    htm = ""

    try:
        bks = a.pupil_got(pupilnummer)
    except Exception, e:
        htm += html.paragraph(
            '<div style="background-color:red">Fehler bei der Abfrage der ausgeliehenen B&uuml;cher!</div>'
        ).rtn()
        bks = ()
    if bks is False:
        htm += html.paragraph(
            '<div style="background-color:green">Sch&uuml;ler hat kein Buch mehr ausgeliehen! :)</div>'
        ).rtn()
    else:
        t = html.table("Fachbereich", "Titel", "Buchnummer")
        for bk in bks:  # "bk", so the imported book module is not shadowed
            info = b.info(bk)
            t.createLine(info[3], info[2], str(bk))
        htm += t.rtn()
    return htm