Example #1
import unittest

# The helpers exercised here (breaks, p, div, h2, head, body, html) come from
# the HTML-builder module under test; a sketch of them follows this example.
class TestHtmlBuilder(unittest.TestCase):
    def test(self):
        self.assertEqual(breaks("hello world"), "\nhello world\n")
        self.assertEqual(p('hello world'), "<p>hello world</p>")
        self.assertEqual(div('hello world'), "<div>hello world</div>")
        self.assertEqual(div({'class': 'myclass', 'id': 'myd'}, 'hello world'),
                         """<div class="myclass" id="myd">hello world</div>""")

        self.assertEqual(div('a', 'b'), '<div>ab</div>')

        self.assertEqual(p(), '<p/>')

        self.assertEqual(
            html(
                head(),
                body(
                    h2("Header"),
                    p('para1'),
                    p('para2'))),
            """<html><head/><body><h2>Header</h2><p>para1</p><p>para2</p></body></html>""")
Example #2
def do_dataset(dataset='ia21h2eaq', fileroot='observations', local='no'):
    '''
    Make html files for a single dataset

    110203  ksl  Added local switch which controls where the real working
                 directory is, to make testing easier
    140307  ksl  Added information about scans and subarray observations
    '''

    record = per_list.read_ordered_list_one(fileroot, dataset)
    if len(record) == 0:
        return 'NOK: make_html failed because it could not find dataset %s' % dataset

    work_dir = per_list.set_path(
        record[0], 'no',
        local)  # This will be the Persist directory for the dataset
    fig_dir = work_dir + '/Figs/'  # This will be the directory where figures are stored

    html_filename = work_dir + dataset + '_persist.html'

    # page=markup.page()
    title = 'Persistence Removal Evaluation for dataset %s' % dataset
    page = html.begin(title)

    # page.init(title='Persistence Removal Evaluation for dataset %s' % dataset)
    # page.h1('Persistence Removal Evaluation for %s' % dataset)

    # page.p('''This page contains images for the evaluation of how well persistence has been removed from an image''')
    page = page + html.paragraph(
        '''This page contains images for the evaluation of how well persistence has been removed from an image'''
    )

    # Look for the history file for this dataset

    history_file = dataset + '.txt'

    if os.path.exists(work_dir + history_file):
        string = '''The history file for the processing of this dataset is '''
        string = string + html.link("here", href=history_file)
        page = page + html.paragraph(string)

        # read_history simply returns all of the lines in the history file that
        # begin with !, so any processing of those lines still has to be done here
        lines, table1, table2 = read_history(work_dir + history_file)
        for line in lines:
            page = page + html.paragraph(line)
        if len(table1) > 0:
            page = page + html.h2(
                'Earlier exposures that could affect this image')
            page = page + html.table(table1)
        if len(table2) > 0:
            page = page + html.h2(
                'External and total persistence for this image')
            string = '''External persistence is persistence from previous visits; internal persistence
            is persistence induced by exposures in this visit. Total persistence includes both
            internal and external persistence. Generally, self-induced or internal persistence is
            only important if dithers larger than the PSF have been used within the visit'''
            page = page + html.paragraph(string)
            page = page + html.table(table2)

    else:
        page = page + html.paragraph(
            ''' The history file for this dataset appears to be missing.  Check that the file has been processed'''
        )

    page = page + html.hline(size='3', width='100')

    string = '''The next 4-panel image shows the original flt image (upper left), the corrected flt image (upper right),
    the persistence model (lower left) and the stimulus (lower right).  The stimulus is simply the image constructed
    from the maximum value in electrons of any of the images that went into the stimulus model'''
    page = page + html.paragraph(string)  # add the 4-panel description to the page

    # Look for the summary image

    xname = dataset + '_subtract.png'
    if os.path.exists(fig_dir + xname):
        # page.img(src='Figs/'+xname,width=600,height=600,alt="Thumbnails")
        page = page + html.image(
            image='Figs/' + xname, width=600, height=600, alt="Thumbnails")
    else:
        # page.p('''The summary image is missing''')
        page = page + html.paragraph('''The summary image is missing''')

    # page.hr(size='3',width='100%')
    page = page + html.hline(size='3', width='100')

    # Now include the evaluation images

    string = '''As a qualitative indicator of how well the persistence correction has worked, some of the regions with
    the highest predicted persistence have been examined.
    The next two images give an indication of how well the persistence has been subtracted from the images.
    Both images have the original data in red and the persistence-subtracted data in blue.  The first image is
    a plot of flux vs the persistence model, the second is flux as a function of the stimulus. Ideally the blue
    curves would all center around 0. The utility of these plots depends on how isolated the persistence peaks
    are from stars in the image. If these plots are empty, no good regions for evaluating persistence were found.'''

    page = page + html.paragraph(string)

    xname = dataset + '.sum1.png'
    if os.path.exists(fig_dir + xname):
        # page.img(src='Figs/'+xname,width=300,height=300,alt="Thumbnails")
        page = page + html.image(
            'Figs/' + xname, width=300, height=300, alt="Thumbnails")
    else:
        # page.p('''The first evaluation image showing the subtraction is missing''')
        page = page + html.paragraph(
            '''The first evaluation image showing the subtraction is missing''')

    xname = dataset + '.sum2.png'
    if os.path.exists(fig_dir + xname):
        # page.img(src='Figs/'+xname,width=300,height=300,alt="Thumbnails")
        page = page + html.image(
            'Figs/' + xname, width=300, height=300, alt="Thumbnails")
    else:
        # page.p('''The second evaluation image showing the subtraction is missing''')
        page = page + html.paragraph(
            '''The second evaluation image showing the subtraction is missing'''
        )

    # page.hr(size='3',width='100%')
    page = page + html.hline(size='3', width='100')

    # Look for the peaks summary

    string = '''This figure indicates which regions were selected for evaluation. The two panels are
    identical except that the regions selected are indicated in the lower panel. '''

    page = page + html.paragraph(string)

    xname = dataset + '_persist.peaks.png'
    if os.path.exists(fig_dir + xname):
        # page.img(src='Figs/'+xname,width=600,height=1000,alt="Thumbnails")
        page = page + html.image(
            'Figs/' + xname, width=900, height=900, alt="Thumbnails")
    else:
        # page.p('''The summary figure for peak identification is missing''')
        page = page + html.paragraph(
            '''The summary figure for peak identification is missing''')

    # Now find all of the individual peak files:

    searchstring = fig_dir + dataset + '.peak.*.1.png'
    print(searchstring)

    try:
        peaks_file = work_dir + dataset + '_persist.peaks.dat'
        p = open(peaks_file, 'r')
        lines = p.readlines()
        p.close()
    except IOError:
        print('Warning: %s not found' % peaks_file)
        lines = []

    xlines = []
    for one in lines:
        one = one.strip()
        if len(one) > 0 and one[0] != '#':  # length check first so blank lines cannot raise IndexError
            xlines.append(one)

    if len(xlines) > 0:
        string = '''The results for individual regions are shown below. The four panels are a subsection of the original flt file, the predicted persistence in that region, the persistence-subtracted flt file, and a plot of pixel values as a function of predicted persistence in the region. Green points are the original values; yellow points are the corrected values. The red and blue lines show the mean values in the original and corrected images, respectively.'''
        page = page + html.paragraph(string)
        page = page + html.hline(size='3', width='100')

        for one in xlines:
            word = one.split()
            x = int(word[0])
            y = int(word[1])
            z = float(word[2])  # predicted persistence in e/s
            zz = float(word[3])  # science-image flux in e/s
            # page.p('Persistence at x = %3d, y=%3d' %(x,y))
            page = page + html.paragraph(
                'Persistence at x = %3d, y=%3d is about %6.3f e/s compared to science image flux of %6.3f e/s'
                % (x, y, z, zz))
            xname = '%s.peak.%03d_%03d.1.png' % (dataset, x, y)
            if os.path.exists(fig_dir + xname):
                # page.img(src='Figs/'+xname,width=400,height=400,alt="Thumbnails")
                page = page + html.image(
                    'Figs/' + xname, width=400, height=400, alt="Thumbnails")
            else:
                # page.p('Figure %s not present' % (work_dir+xname))
                page = page + html.paragraph('Figure %s not present' %
                                             (work_dir + xname))
            # page.hr(size='3',width='100%')
            page = page + html.hline(size='3', width='100')
    else:
        string = '''Unfortunately, no good regions for evaluating persistence were found.'''
        page = page + html.paragraph(string)
        page = page + html.hline(size='3', width='100')

    page = page + html.end()

    # Open the html file with the appropriate permissions, and then write it
    g = per_list.open_file(html_filename)
    g.write('%s' % page)
    g.close()

    return 'OK: subtract_html: %s' % html_filename
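
do_dataset() assembles the page purely by string concatenation, so the html helper module it relies on only has to return HTML fragments. That module is not included here; the following is a hypothetical sketch whose signatures are inferred from the calls above:

# Hypothetical sketch of the `html` helper module used by do_dataset();
# the real module is not shown, so these bodies are assumptions.
def begin(title):
    return '<html><head><title>%s</title></head><body><h1>%s</h1>' % (title, title)

def end():
    return '</body></html>'

def paragraph(text):
    return '<p>%s</p>' % text

def h2(text):
    return '<h2>%s</h2>' % text

def link(text, href):
    return '<a href="%s">%s</a>' % (href, text)

def image(image, width=600, height=600, alt=''):
    return '<img src="%s" width="%s" height="%s" alt="%s">' % (image, width, height, alt)

def hline(size='3', width='100'):
    return '<hr size="%s" width="%s%%">' % (size, width)

def table(rows):
    # rows is assumed to be an iterable of cell sequences
    cells = lambda row: ''.join('<td>%s</td>' % c for c in row)
    return '<table>' + ''.join('<tr>%s</tr>' % cells(r) for r in rows) + '</table>'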
Example #3
def main_page(state):
    global page

    startPage("Highly Experimental RIF Demonstration Page")	
    page << h.h2("Highly Experimental RIF Demonstration Page")
    page << h.p("This page currently only does translations between RIF XML and RIF PS, but the idea is to have various non-RIF languages supported as well")

    #for k in state.keys():
    #    page << h.p(`k`, '=', `state[k]`)

    form = h.form(method="GET", class_="f")	
    
    form << h.h3("Step 1: Select Input Processor") 
    select_input_processor(form, state)

    form << h.h3("Step 2: Provide Input") 
    select_input(form, state)

    form << h.h3("Step 3: (Optional) Select transform or analysis plugins") 
    select_middle(form, state)
    
    analysis_div = h.div()
    page << analysis_div

    form << h.h3("Step 4: Select Output Processor") 
    select_output_processor(form, state)

    form << h.h3("Step 5: Begin Processing") 

    form << h.br()

    output_div = h.div()
    output_done = run(output_div, state, analysis_div)
    page << form
    page << output_div

    if output_done:
        form <<  h.input(type="submit",  name="action", 
                         value="Update Output Below")
    else:
        form <<  h.input(type="submit",  name="action", 
                         value="Generate Output Below")

    #form <<  h.Raw("&nbsp;")
    #form <<  h.Raw("&nbsp;")
    #form <<  h.Raw("&nbsp;")
    #form <<  h.Raw("&nbsp;")
    #form <<  h.Raw("&nbsp;")
    #form <<  h.input(type="submit",  name="action", value="Generate Output on New Page")



    if 0:
        page << h.h3('Translates to...')

        input = input.replace("\r\n", "\n")
        action=args.getfirst("action") 
        if action:
            (notes, output) = translate(input, action)
        else:
            notes = "select a processing option"
            output = ""

        if notes:
            page << h.h4('Processor Message:')
            page << h.pre(notes, style="padding:0.5em; border: 2px solid red;")


        if output:
            page << h.pre(output, style="padding:0.5em; border: 2px solid black;")
        else:
            page << h.p("-- No Output --")

    page << h.hr()

    page << h.p("This page/software was developed by [email protected].   It's too buggy right now to use.   Please don't even bother to report bugs.")

    print(page)
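
The `h` factory and the `<<` operator belong to an HTML tree builder that is not shown here. A minimal sketch of what the usage above implies (tags are objects, `<<` appends a child, `class_` sidesteps the `class` keyword) might be:

# Hypothetical sketch of the element builder `h`; inferred from usage, not the real library.
class Element:
    def __init__(self, name, *children, **attrs):
        self.name = name
        self.children = list(children)
        self.attrs = {k.rstrip('_'): v for k, v in attrs.items()}  # class_ -> class

    def __lshift__(self, child):
        # append a child and return it, so callers can keep a handle on it
        self.children.append(child)
        return child

    def __str__(self):
        attrs = ''.join(' %s="%s"' % kv for kv in self.attrs.items())
        inner = ''.join(str(c) for c in self.children)
        return '<%s%s>%s</%s>' % (self.name, attrs, inner, self.name)

class _H:
    def __getattr__(self, name):
        return lambda *children, **attrs: Element(name, *children, **attrs)

h = _H()

startPage() would then create the global `page` element that main_page() appends to.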
Example #4
def make_pages(dbx, dirname):
    """ Nageneruj stranky a obrazky do adresare dirname """
    def add_stat_to_group(groups, groupname, statid):
        try:
            groups[groupname].append(statid)
        except KeyError:
            groups[groupname] = [statid]

    def stat_min_date(stat):
        ''' return the smallest date in the data series of statistic stat = [ (date, value), (date, value) ...] '''
        return min(func.lmap(lambda x: x[0], stat)) if stat else None

    def stat_max_date(stat):
        ''' similarly, return the largest date '''
        return max(func.lmap(lambda x: x[0], stat)) if stat else None

    # prepare the directory
    try:
        shutil.rmtree(dirname)
    except OSError:  # the directory may not exist yet
        pass
    try:
        func.makedir(dirname)
    except OSError:  # the directory may already exist
        pass
    try:
        func.makedir(dirname + "/img")
    except OSError:
        pass

    s = func.clsMyStat(dbx, '')
    stats = s.getAllStats()

    i, statnames, statnames_index, groups = 0, {}, {}, {}

    # build the list of all graphs to be generated:
    mixed_graphs = {}

    # add an automatically generated list of the most active twitter users
    best_twitters = {}
    for stat in stats:
        if re.search(r'TWITTER_(.+?)_TWEETS', stat):
            mystat = Stat(stat, get_stat_for_graph(dbx, stat))
            best_twitters[stat] = mystat.max()
    sorted_twitters = sorted(best_twitters.items(),
                             key=operator.itemgetter(1))[-7:]
    stat_id = 'BEST_TWITTERS'
    mixed_graphs[stat_id] = [x[0] for x in sorted_twitters]
    add_stat_to_group(groups, 'Porovnání', stat_id)

    # 1) load graphs from the configuration and convert them to a hash table
    for line in func.getconfig('config/graphs'):
        lineparts = func.lmap(str.strip, line.split(' '))
        mixed_graphs[lineparts[0]] = lineparts[1:]
        statnames[lineparts[0]] = lineparts[0]
        add_stat_to_group(groups, 'Porovnání', lineparts[0])

    # 2) add the automatically generated combined twitter graphs:
    # TWEETS, FOLLOWERS and LIKES
    for stat in stats:
        found = re.search(r'TWITTER_(.+?)_TWEETS', stat)
        if found:
            statid = "TWITTER_%s" % found.group(1)
            mixed_graphs[statid] = [
                stat,
                "TWITTER_%s_FOLLOWERS" % found.group(1),
                "TWITTER_%s_LIKES" % found.group(1)
            ]
            statnames[statid] = "Twitter %s" % found.group(1)  # default jmeno
            statnames_index[statid] = "%s" % found.group(
                1)  # default jmeno na titulni stranku
            add_stat_to_group(groups, 'Twitteři', statid)

    # 3) add all remaining statistics, skipping the TWITTER ones;
    # build somewhat ad-hoc default names
    for stat in stats:
        if not re.search(r'TWITTER_(.+)', stat):
            mixed_graphs[stat] = [stat]
            found = re.search(r'BALANCE_(.+)', stat)
            if found:
                statnames[stat] = "Zůstatek %s" % found.group(1)
                add_stat_to_group(groups, 'Finance', stat)
                continue
            found = re.search(r'PI_MEMBERS_(.+)', stat)
            if found:
                statnames[stat] = "Počet členů %s" % found.group(1)
                add_stat_to_group(groups, 'Členové', stat)
                continue
            found = re.search(r'YOUTUBE_(.+)', stat)
            if found:
                statnames[stat] = "Youtube %s" % found.group(1)
                add_stat_to_group(groups, 'Youtube', stat)
                continue
            found = re.search(r'PP_(.+)', stat)
            if found:
                add_stat_to_group(groups, 'Finanční tým', stat)
                continue
            found = re.search(r'REDMINE_(.+)', stat)
            if found:
                add_stat_to_group(groups, 'Odbory a složky strany na Redmine',
                                  stat)
                continue
            add_stat_to_group(groups, 'Ostatní', stat)

    # load additional statistic names from the configuration
    for line in func.getconfig('config/statnames'):
        try:
            (a, b) = line.split('\t', 1)  # the name is everything after the first tab
            statnames[a] = b
        except ValueError:
            pass

    # front page & assets
    mybody = ""
    for groupname in groups:
        paragraph = []
        for statid in groups[groupname]:
            if statid in statnames_index.keys():
                statname = statnames_index[statid]
            elif statid in statnames.keys():
                statname = statnames[statid]
            else:
                statname = statid
            paragraph.append(html.a("%s.delta.htm" % statid, statname))
        paragraph.sort()
        mybody += html.h2(groupname) + html.p(",\n".join(paragraph))

    page = func.replace_all(
        func.readfile('templates/index.htm'), {
            '%body%': mybody,
            '%stat_date%': '{0:%d.%m.%Y %H:%M:%S}'.format(
                datetime.datetime.now())
        })
    func.writefile(page, "%s/index.htm" % dirname)
    shutil.copytree('templates/assets', "%s/assets" % dirname)

    # Build all combined graphs; skip statistics with at most one value
    for statid in mixed_graphs:

        if arg('s') and statid != arg('s'):
            continue

        i += 1

        # the graph
        involved_stats, involved_deltas = {}, {}
        statInstances = []
        for invstat in mixed_graphs[statid]:
            tmpstat = get_stat_for_graph(dbx, invstat)
            involved_stats[invstat] = tmpstat
            statInstances.append(Stat(invstat, involved_stats[invstat]))

            # compute the delta statistic
            deltastat, lastvalue = [], None
            for entry in tmpstat:
                deltastat.append([
                    entry[0], 0 if lastvalue is None else entry[1] - lastvalue
                ])
                lastvalue = entry[1]
            involved_deltas[invstat] = deltastat

        singlestat = (len(involved_stats.values()) == 1)

        # involved_stats must contain at least one series with >= 1 values
        if max(func.lmap(len, involved_stats.values())) > 0:

            print("[%s/%s]: Creating %s                       \r" %
                  (i, len(mixed_graphs), statid),
                  end='\r')

            # base and delta graphs
            make_graph(involved_stats,
                       "%s/img/%s.png" % (dirname, statid),
                       delta=False)
            make_graph(involved_deltas,
                       "%s/img/%s.delta.png" % (dirname, statid),
                       delta=True)

            # data-collection methods
            method_list = ""
            for stat in involved_stats:
                try:
                    desc = involved_stats[stat][-1][2]  # method of the most recent entry
                except IndexError:
                    desc = "Neznámá metoda"
                method_list += "%s: %s<br>" % (stat, desc)

            # html page
            statname = statnames.get(statid, statid)
            # date range covered by the data
            min_date = min(
                func.lmap(stat_min_date,
                          filter(lambda x: x, involved_stats.values())))
            max_date = max(
                func.lmap(stat_max_date,
                          filter(lambda x: x, involved_stats.values())))
            bottom_links = html.h2("Metody získání dat") + \
                html.p("Vypsána je vždy poslední použitá metoda, úplný seznam je v CSV souboru." + html.br()*2 + method_list) + \
                ((html.a("%s.csv" % statid, "Zdrojová data ve formátu CSV") + html.br()) if singlestat else "") + \
                html.a("index.htm", "Všechny metriky")

            try:
                min_value = str(min(map(lambda x: x.min(), statInstances)))
            except TypeError:
                min_value = '-'
            try:
                max_value = str(max(map(lambda x: x.max(), statInstances)))
            except TypeError:
                max_value = '-'

            common_replaces = {
                '%stat_name%': statname,
                '%stat_desc%': '',
                '%stat_id%': statid,
                '%stat_date%': '{0:%d.%m.%Y %H:%M:%S}'.format(datetime.datetime.now()),
                '%bottomlinks%': bottom_links,
                '%daterange%': '%s - %s' % (min_date, max_date),
                '%max%': max_value,
                '%min%': min_value
            }

            page = func.replace_all(
                func.readfile('templates/stat.htm'),
                merge_dicts(
                    common_replaces, {
                        '%stat_image%': "img/%s.png" % statid,
                        '%stat_type%': "Absolutní hodnoty"
                    }))
            func.writefile(page, "%s/%s.htm" % (dirname, statid))
            page = func.replace_all(
                func.readfile('templates/stat.htm'),
                merge_dicts(
                    common_replaces, {
                        '%stat_image%': "img/%s.delta.png" % statid,
                        '%stat_type%': "Denní přírůstky (delta)"
                    }))
            func.writefile(page, "%s/%s.delta.htm" % (dirname, statid))

            # create a CSV file with the source data
            if singlestat:
                csv_rows = [
                    "%s;%s;%s;%s;" %
                    (statid, "{:%d.%m.%Y}".format(x[0]), x[1], x[2])
                    for x in list(involved_stats.values())[0]
                ]
                func.writefile(
                    "stat_id;date;value;method;\n" + "\n".join(csv_rows),
                    "%s/%s.csv" % (dirname, statid))