Example #1
File: webapp.py  Project: sandhawke/riftr
def select_input(div, args):
    # Append the two input widgets to div: a URL field (method 1) and a
    # free-text area (method 2), each pre-filled from the submitted CGI args.
    input_location = args.getfirst("input_location") or ""
    div << h.p('(Method 1) Web Address of Input:', h.br(),
               h.input(type="text", name="input_location",
                       size="80",
                       value=input_location))

    input_text = args.getvalue("input_text", "")
    div << h.p('(Method 2) Input Text:', h.br(),
               h.textarea(input_text,
                          cols="90", rows="10", name="input_text"))
Example #2
File: webapp.py  Project: sandhawke/riftr
def select_processor(div, state, method, field_name):
    # Render one radio button per registered plugin that implements `method`,
    # together with its docstring, an optional link to its language spec, and
    # optional "load example" submit buttons.
    for p in plugin.registry:
        if hasattr(p, method):
            desc = []
            desc.append(p.__doc__)
            if hasattr(p, 'spec'):
                desc.append(h.span('  (See ', h.a('language specification',
                                                  href=p.spec), ")"))

            if cgi_args.getfirst(field_name) == p.id:
                button = h.input(type="radio",
                                 name=field_name,
                                 checked='YES',
                                 value=p.id)
            else:
                button = h.input(type="radio",
                                 name=field_name,
                                 value=p.id)

            examples = h.span()
            if getattr(p, 'examples', []):
                examples << h.br()
                examples << "Load example input: "
                for (name, text) in p.examples:
                    examples << h.input(type="submit", name="load_example",
                                        value=name)

            div << h.p(button, desc, examples)
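
select_processor only inspects a handful of attributes on each registered plugin. A sketch of the minimal plugin shape it relies on, inferred from the attribute accesses above; every name below is hypothetical, and registering via append() assumes plugin.registry is a plain list (the loop itself only needs it to be iterable):

class ExamplePlugin(object):
    """Reads RIF XML input."""                     # p.__doc__ becomes the description
    id = 'xml_in'                                  # compared with the submitted radio value
    spec = 'http://www.w3.org/TR/rif-bld/'         # optional: rendered as a spec link
    examples = [('Tiny document', '<Document/>')]  # optional (name, text) pairs

    def parse(self, text):                         # having the requested `method`
        pass                                       # (e.g. "parse") makes it selectable

plugin.registry.append(ExamplePlugin())            # assumption: registry is a list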
Example #3
File: webapp.py  Project: sandhawke/riftr
def main_page(state):
    global page

    startPage("Highly Experimental RIF Demonstration Page")	
    page << h.h2("Highly Experimental RIF Demonstration Page")
    page << h.p("This page currently only does translations between RIF XML and RIF PS, but the idea is to have various non-RIF languages supported as well")

    #for k in state.keys():
    #    page << h.p(`k`, '=', `state[k]`)

    form = h.form(method="GET", class_="f")	
    
    form << h.h3("Step 1: Select Input Processor") 
    select_input_processor(form, state)

    form << h.h3("Step 2: Provide Input") 
    select_input(form, state)

    form << h.h3("Step 3: (Optional) Select transform or analysis plugins") 
    select_middle(form, state)
    
    analysis_div = h.div()
    page << analysis_div

    form << h.h3("Step 4: Select Output Processor") 
    select_output_processor(form, state)

    form << h.h3("Step 5: Begin Processing") 

    form << h.br()

    output_div = h.div()
    output_done = run(output_div, state, analysis_div)
    page << form
    page << output_div

    if output_done:
        form <<  h.input(type="submit",  name="action", 
                         value="Update Output Below")
    else:
        form <<  h.input(type="submit",  name="action", 
                         value="Generate Output Below")

    #form <<  h.Raw("&nbsp;")
    #form <<  h.Raw("&nbsp;")
    #form <<  h.Raw("&nbsp;")
    #form <<  h.Raw("&nbsp;")
    #form <<  h.Raw("&nbsp;")
    #form <<  h.input(type="submit",  name="action", value="Generate Output on New Page")



    if 0:
        page << h.h3('Translates to...')

        input = input.replace("\r\n", "\n")
        action=args.getfirst("action") 
        if action:
            (notes, output) = translate(input, action)
        else:
            notes = "select a processing option"
            output = ""

        if notes:
            page << h.h4('Processor Message:')
            page << h.pre(notes, style="padding:0.5em; border: 2px solid red;")


        if output:
            page << h.pre(output, style="padding:0.5em; border: 2px solid black;")
        else:
            page << h.p("-- No Output --")

    page << h.hr()

    page << h.p("This page/software was developed by [email protected].   It's too buggy right now to use.   Please don't even bother to report bugs.")

    print(page)
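
Throughout these examples the << operator is how the h module nests content: the right-hand side is appended as a child of the element on the left. A tiny sketch using only calls that already appear above; whether printing an element renders its HTML is an assumption:

page = h.div()                                    # any container element
form = h.form(method="GET", class_="f")
form << h.h3("Step 1: Select Input Processor")    # append a heading
form << h.p("Address: ", h.br(),
            h.input(type="text", name="input_location"))
page << form                                      # nest the form into the page
print(page)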
Example #4
def make_pages(dbx, dirname):
    """ Generate the pages and images into the directory dirname """
    def add_stat_to_group(groups, groupname, statid):
        try:
            groups[groupname].append(statid)
        except KeyError:
            groups[groupname] = [statid]

    def stat_min_date(stat):
        ''' return the smallest date in the statistic's data series stat = [ (date, value), (date, value) ...] '''
        return min(func.lmap(lambda x: x[0], stat)) if stat else None

    def stat_max_date(stat):
        ''' similarly, return the largest date '''
        return max(func.lmap(lambda x: x[0], stat)) if stat else None

    # prepare the output directory
    try:
        shutil.rmtree(dirname)
    except Exception:
        pass
    try:
        func.makedir(dirname)
    except Exception:
        pass
    try:
        func.makedir(dirname + "/img")
    except Exception:
        pass

    s = func.clsMyStat(dbx, '')
    stats = s.getAllStats()

    i, statnames, statnames_index, groups = 0, {}, {}, {}

    # build the list of all graphs to be generated:
    mixed_graphs = {}

    # add an automatically generated list of the most active Twitter users
    best_twitters = {}
    for stat in stats:
        if re.search(r'TWITTER_(.+?)_TWEETS', stat):
            mystat = Stat(stat, get_stat_for_graph(dbx, stat))
            best_twitters[stat] = mystat.max()
    sorted_twitters = sorted(best_twitters.items(),
                             key=operator.itemgetter(1))[-7:]
    stat_id = 'BEST_TWITTERS'
    mixed_graphs[stat_id] = [x[0] for x in sorted_twitters]
    add_stat_to_group(groups, 'Porovnání', stat_id)

    # 1) load the ones from the configuration, converting them to a hash table
    for line in func.getconfig('config/graphs'):
        lineparts = func.lmap(str.strip, line.split(' '))
        mixed_graphs[lineparts[0]] = lineparts[1:]
        statnames[lineparts[0]] = lineparts[0]
        add_stat_to_group(groups, 'Porovnání', lineparts[0])

    # 2) add the automatically generated combined Twitter graphs
    # (TWEETS, FOLLOWERS and LIKES)
    for stat in stats:
        found = re.search(r'TWITTER_(.+?)_TWEETS', stat)
        if found:
            statid = "TWITTER_%s" % found.group(1)
            mixed_graphs[statid] = [
                stat,
                "TWITTER_%s_FOLLOWERS" % found.group(1),
                "TWITTER_%s_LIKES" % found.group(1)
            ]
            statnames[statid] = "Twitter %s" % found.group(1)  # default jmeno
            statnames_index[statid] = "%s" % found.group(
                1)  # default jmeno na titulni stranku
            add_stat_to_group(groups, 'Twitteři', statid)

    # 3) add all the remaining statistics, skipping the TWITTER_* ones;
    # create somewhat ad-hoc default names
    for stat in stats:
        if not re.search(r'TWITTER_(.+)', stat):
            mixed_graphs[stat] = [stat]
            found = re.search(r'BALANCE_(.+)', stat)
            if found:
                statnames[stat] = "Zůstatek %s" % found.group(1)
                add_stat_to_group(groups, 'Finance', stat)
                continue
            found = re.search(r'PI_MEMBERS_(.+)', stat)
            if found:
                statnames[stat] = "Počet členů %s" % found.group(1)
                add_stat_to_group(groups, 'Členové', stat)
                continue
            found = re.search(r'YOUTUBE_(.+)', stat)
            if found:
                statnames[stat] = "Youtube %s" % found.group(1)
                add_stat_to_group(groups, 'Youtube', stat)
                continue
            found = re.search(r'PP_(.+)', stat)
            if found:
                add_stat_to_group(groups, 'Finanční tým', stat)
                continue
            found = re.search(r'REDMINE_(.+)', stat)
            if found:
                add_stat_to_group(groups, 'Odbory a složky strany na Redmine',
                                  stat)
                continue
            add_stat_to_group(groups, 'Ostatní', stat)

    # load additional statistic names from the configuration
    for line in func.getconfig('config/statnames'):
        try:
            (a, b) = line.split('\t', 2)
            statnames[a] = b
        except ValueError:
            pass

    # front page & assets
    mybody = ""
    for groupname in groups:
        paragraph = []
        for statid in groups[groupname]:
            if statid in statnames_index.keys():
                statname = statnames_index[statid]
            elif statid in statnames.keys():
                statname = statnames[statid]
            else:
                statname = statid
            paragraph.append(html.a("%s.delta.htm" % statid, statname))
        paragraph.sort()
        mybody += html.h2(groupname) + html.p(",\n".join(paragraph))

    page = func.replace_all(
        func.readfile('templates/index.htm'), {
            '%body%': mybody,
            '%stat_date%': '{0:%d.%m.%Y %H:%M:%S}'.format(
                datetime.datetime.now())
        })
    func.writefile(page, "%s/index.htm" % dirname)
    shutil.copytree('templates/assets', "%s/assets" % dirname)

    # Create all the combined graphs; skip statistics with at most one value
    for statid in mixed_graphs:

        if arg('s') and statid != arg('s'):
            continue

        i += 1

        # graph
        involved_stats, involved_deltas = {}, {}
        statInstances = []
        for invstat in mixed_graphs[statid]:
            tmpstat = get_stat_for_graph(dbx, invstat)
            involved_stats[invstat] = tmpstat
            statInstances.append(Stat(invstat, involved_stats[invstat]))

            # compute the delta statistic
            deltastat, lastvalue = [], None
            for entry in tmpstat:
                deltastat.append([
                    entry[0], 0 if lastvalue is None else entry[1] - lastvalue
                ])
                lastvalue = entry[1]
            involved_deltas[invstat] = deltastat

        singlestat = (len(involved_stats.values()) == 1)

        # involved_stats must contain at least one series with >= 1 values
        if max(func.lmap(len, involved_stats.values())) > 0:

            print("[%s/%s]: Creating %s                       \r" %
                  (i, len(mixed_graphs), statid),
                  end='\r')

            # basic and delta graphs
            make_graph(involved_stats,
                       "%s/img/%s.png" % (dirname, statid),
                       delta=False)
            make_graph(involved_deltas,
                       "%s/img/%s.delta.png" % (dirname, statid),
                       delta=True)

            # data acquisition methods
            method_list = ""
            for stat in involved_stats:
                try:
                    desc = involved_stats[stat][-1][2]
                except IndexError:
                    desc = "Neznámá metoda"
                method_list += "%s: %s<br>" % (stat, desc)

            # html page
            statname = statnames[statid] if statid in statnames.keys() else statid
            # date range
            min_date = min(
                func.lmap(stat_min_date,
                          filter(lambda x: x, involved_stats.values())))
            max_date = max(
                func.lmap(stat_max_date,
                          filter(lambda x: x, involved_stats.values())))
            bottom_links = html.h2("Metody získání dat") + \
                html.p("Vypsána je vždy poslední použitá metoda, úplný seznam je v CSV souboru." + html.br()*2 + method_list) + \
                ((html.a("%s.csv" % statid, "Zdrojová data ve formátu CSV") + html.br()) if singlestat else "") + \
                html.a("index.htm", "Všechny metriky")

            try:
                min_value = str(min(map(lambda x: x.min(), statInstances)))
            except TypeError:
                min_value = '-'
            try:
                max_value = str(max(map(lambda x: x.max(), statInstances)))
            except TypeError:
                max_value = '-'

            common_replaces = {
                '%stat_name%': statname,
                '%stat_desc%': '',
                '%stat_id%': statid,
                '%stat_date%': '{0:%d.%m.%Y %H:%M:%S}'.format(datetime.datetime.now()),
                '%bottomlinks%': bottom_links,
                '%daterange%': '%s - %s' % (min_date, max_date),
                '%max%': max_value,
                '%min%': min_value
            }

            page = func.replace_all(
                func.readfile('templates/stat.htm'),
                merge_dicts(
                    common_replaces, {
                        '%stat_image%': "img/%s.png" % statid,
                        '%stat_type%': "Absolutní hodnoty"
                    }))
            func.writefile(page, "%s/%s.htm" % (dirname, statid))
            page = func.replace_all(
                func.readfile('templates/stat.htm'),
                merge_dicts(
                    common_replaces, {
                        '%stat_image%': "img/%s.delta.png" % statid,
                        '%stat_type%': "Denní přírůstky (delta)"
                    }))
            func.writefile(page, "%s/%s.delta.htm" % (dirname, statid))

            # create a CSV file with the source data
            if singlestat:
                csv_rows = [
                    "%s;%s;%s;%s;" %
                    (statid, "{:%d.%m.%Y}".format(x[0]), x[1], x[2])
                    for x in list(involved_stats.values())[0]
                ]
                func.writefile(
                    "stat_id;date;value;method;\n" + "\n".join(csv_rows),
                    "%s/%s.csv" % (dirname, statid))

def googlenewsurl(s):
    return "".join(["http://www.google.com/search?q=",
                    s.replace(" ","+"),"&hl=en&gl=us&tbm=nws"])

def googlenewsurls():
    return [h.newtabopen(googlenewsurl(x),x)
            for x in u.readfilenn("/home/umar/googlenewssearches")]

import html as h
import umarutils as u
br = h.br()
def html():
    return h.h("my bookmarks",[
        h.returntohome(),
        [[h.newtabopen(x[1],x[0]),h.space()] for x in bookmarks()],br,
        [[br,h.newtabopen(x,x)]
         for x in
         u.readfilenn("/home/umar/addedbookmarks")],br,
        "reddits",br,
        h.tabularize([h.newtabopen("http://reddit.com/r/"+"+".join(x[1]),
                                  x[0]) for x in reddits()],2),br,
        "google news searches",br,
        h.tabularize(googlenewsurls(),2),
        "google video searches",br,
        googlevideosearches(),
        h.returntohome()