示例#1
0
def make_links_readable(html):
    """
    Rewrite every <a> tag in *html* into a readable form.

    Links whose href exceeds 90 characters are replaced with a goo.gl
    short link when ``options.use_short_links`` is set.  A link with no
    visible text is replaced by its bare href; any other link becomes a
    <div> holding the link text, a <br>, and "(href)" on its own line.

    timing stats:
    before multiprocess = 0m18.063s

    :param html: markup handed to BeautifulSoup for parsing
    :return: the modified BeautifulSoup tree
    """
    soup = BeautifulSoup(html)
    for link in soup.findAll('a'):
        oldlink = link
        # Shorten overly long hrefs via goo.gl when the option is enabled.
        if link and len(link.get('href', '')) > 90 and options.use_short_links:
            short_link = shorten_link(soup, link)
            if short_link is not None:
                link = short_link

        if validate_link(link) and link.get('href', None):
            if not link.text:
                # No anchor text to keep: show the raw href instead.
                oldlink.replaceWith(link.get('href', "No href link to replace with"))
            else:
                # Keep the text, then show "(href)" on the next line.
                div = Tag(soup, 'div')
                div.setString(link.text)
                br = Tag(soup, 'br')
                new_link = Tag(soup, 'a')
                new_link.setString("(%s)" % (link.get('href')))
                div.append(br)
                div.append(new_link)
                oldlink.replaceWith(div)

    return soup
示例#2
0
def make_links_readable(html):
    """
    Make the <a> tags of *html* readable.

    If a link's href is longer than 90 characters (and
    ``options.use_short_links`` is on) it is turned into a goo.gl link.
    Text-less links are replaced by their href; all other links are
    rendered as a <div> containing the text, a line break, and the href
    wrapped in parentheses.

    timing stats:
    before multiprocess = 0m18.063s

    :param html: markup parsed with BeautifulSoup
    :return: the rewritten BeautifulSoup tree
    """
    soup = BeautifulSoup(html)
    for link in soup.findAll('a'):
        oldlink = link
        # Replace very long hrefs with goo.gl short links when enabled.
        if link and len(link.get('href', '')) > 90 and options.use_short_links:
            short_link = shorten_link(soup, link)
            if short_link is not None:
                link = short_link

        if validate_link(link) and link.get('href', None):
            if not link.text:
                # Nothing to display: fall back to the bare href.
                oldlink.replaceWith(
                    link.get('href', "No href link to replace with"))
            else:
                # text, <br>, then "(href)" grouped inside one <div>.
                div = Tag(soup, 'div')
                div.setString(link.text)
                br = Tag(soup, 'br')
                new_link = Tag(soup, 'a')
                new_link.setString("(%s)" % (link.get('href')))
                div.append(br)
                div.append(new_link)
                oldlink.replaceWith(div)

    return soup
示例#3
0
def shorten_link(soup, link):
    """
    Build a goo.gl-shortened copy of *link*.

    Calls the goo.gl API with the link's href and returns a new <a> Tag
    carrying the short URL and the original link text, or None when the
    API response contains no ``id`` field.
    """
    api = googl.Googl(API_KEY)
    response = api.shorten(link.get('href'))
    shortened = Tag(soup, 'a')
    shortened['href'] = response.get('id', None)
    # Bail out early if goo.gl did not return a short URL.
    if not shortened.get('href', None):
        return None
    shortened.setString(link.text)
    return shortened
示例#4
0
def shorten_link(soup, link):
    """
    Shorten *link* through the goo.gl API.

    :param soup: BeautifulSoup tree used to construct the new Tag
    :param link: the original <a> Tag whose href should be shortened
    :return: a fresh <a> Tag (short href, original text) or None if the
             API response lacked an ``id``.
    """
    client = googl.Googl(API_KEY)
    result = client.shorten(link.get('href'))
    short = Tag(soup, 'a')
    short['href'] = result.get('id', None)
    if short.get('href', None):
        short.setString(link.text)
        return short
    return None
示例#5
0
def get_slides(args):
    """
    Render the markdown file named by ``args.file`` as an html5slides deck.

    The markdown is parsed and its top-level elements are distributed
    into <article> slides; every <hr> starts a new slide.  When
    ``args.offline`` is set the stylesheet and slide script are
    referenced locally, otherwise from gdg-xian.github.io.

    :return: the prettified HTML document string
    """
    soup = BeautifulSoup(markdown(get_file_contents(args.file)))

    hsoup = BeautifulSoup()
    html = Tag(hsoup, 'html')
    hsoup.append(html)

    head = Tag(hsoup, 'head')
    title = Tag(hsoup, 'title')
    title.setString(args.file)
    head.append(title)

    stylesheet = Tag(hsoup, 'link')
    stylesheet['rel'] = 'stylesheet'
    stylesheet['type'] = 'text/css'
    if args.offline:
        stylesheet['href'] = 'default.css'
    else:
        stylesheet['href'] = 'http://gdg-xian.github.io/html5slides-markdown/themes/default.css'
    head.append(stylesheet)

    slides_js = Tag(hsoup, 'script')
    if args.offline:
        slides_js['src'] = 'html5slides.js'
    else:
        slides_js['src'] = 'http://gdg-xian.github.io/html5slides-markdown/javascripts/html5slides.js'
    head.append(slides_js)
    html.append(head)

    body = Tag(hsoup, 'body')
    body['style'] = 'display:none'
    section = Tag(hsoup, 'section')
    section['class'] = 'slides layout-regular template-default'
    body.append(section)

    elements = [soup.first()]
    elements.extend(soup.first().findNextSiblings())
    article = Tag(hsoup, 'article')
    section.append(article)
    for element in elements:
        if element.name == 'hr':
            # An <hr> in the source markdown starts a new slide.
            article = Tag(hsoup, 'article')
            section.append(article)
        else:
            article.append(element)

    html.append(body)

    return prettify(html)
def _slides_head(hsoup, args):
    """Build the <head> (title, stylesheet, html5slides script) for the deck."""
    head = Tag(hsoup, 'head')
    title = Tag(hsoup, 'title')
    title.setString(args.file)
    head.append(title)

    link = Tag(hsoup, 'link')
    link['rel'] = 'stylesheet'
    link['type'] = 'text/css'
    if args.offline:
        link['href'] = 'default.css'
    else:
        link['href'] = 'http://gdg-xian.github.io/html5slides-markdown/themes/default.css'
    head.append(link)

    script = Tag(hsoup, 'script')
    if args.offline:
        script['src'] = 'html5slides.js'
    else:
        script['src'] = 'http://gdg-xian.github.io/html5slides-markdown/javascripts/html5slides.js'
    head.append(script)
    return head


def _slides_body(hsoup, soup):
    """Build the <body>: parsed markdown split into <article> slides at each <hr>."""
    body = Tag(hsoup, 'body')
    body['style'] = 'display:none'
    section = Tag(hsoup, 'section')
    section['class'] = 'slides layout-regular template-default'
    body.append(section)

    elements = []
    elements.append(soup.first())
    elements.extend(soup.first().findNextSiblings())
    article = Tag(hsoup, 'article')
    section.append(article)
    for element in elements:
        if element.name == 'hr':
            # Each <hr> starts a new slide.
            article = Tag(hsoup, 'article')
            section.append(article)
        else:
            article.append(element)
    return body


def get_slides(args):
    """
    Convert the markdown file named by ``args.file`` into an html5slides
    HTML document.

    When ``args.offline`` is set, the stylesheet and slide script are
    referenced locally instead of from gdg-xian.github.io.

    :return: the prettified HTML document string
    """
    contents = get_file_contents(args.file)
    soup = BeautifulSoup(markdown(contents))

    hsoup = BeautifulSoup()
    html = Tag(hsoup, 'html')
    hsoup.append(html)

    html.append(_slides_head(hsoup, args))
    html.append(_slides_body(hsoup, soup))

    return prettify(html)
示例#7
0
    def parse(self):
        """
        Build a deck.js slide document from ``self.content``.

        Parses ``self.content`` with BeautifulSoup, wraps the parsed
        top-level elements into <section class="slide"> blocks (a new
        slide begins at every <hr>), attaches the deck.js / highlight.js
        assets in <head>, and stores the finished <html> tree in
        ``self.html_content``.
        """
        soup = BeautifulSoup(self.content)

        hsoup = BeautifulSoup()
        html = Tag(hsoup, 'html')
        hsoup.append(html)

        head = Tag(hsoup, 'head')
        title = Tag(hsoup, 'title')
        title.setString(self.title)
        head.append(title)

        # deck.js core + theme + transition styles, and the highlight.js theme.
        for css_href in (
                'http://imakewebthings.com/deck.js/core/deck.core.css',
                'http://imakewebthings.com/deck.js/themes/style/swiss.css',
                'http://yandex.st/highlightjs/7.3/styles/monokai_sublime.min.css',
                'http://imakewebthings.com/deck.js/themes/transition/fade.css'):
            stylesheet = Tag(hsoup, 'link')
            stylesheet['rel'] = 'stylesheet'
            stylesheet['type'] = 'text/css'
            stylesheet['href'] = css_href
            head.append(stylesheet)

        # jQuery, deck.js core, and highlight.js.
        for js_src in (
                'http://imakewebthings.com/deck.js/jquery-1.7.min.js',
                'http://imakewebthings.com/deck.js/core/deck.core.js',
                'http://yandex.st/highlightjs/7.3/highlight.min.js'):
            script = Tag(hsoup, 'script')
            script['src'] = js_src
            head.append(script)

        # Inline bootstrap script (DECK_JS is defined elsewhere in the project).
        boot = Tag(hsoup, 'script')
        boot['type'] = 'text/javascript'
        boot.setString(DECK_JS)
        head.append(boot)

        html.append(head)

        body = Tag(hsoup, 'body')
        body['class'] = 'deck-container'
        elements = []
        elements.append(soup.first())
        elements.extend(soup.first().findNextSiblings())
        section = Tag(hsoup, 'section')
        section['class'] = 'slide'
        body.append(section)
        for element in elements:
            if element.name == 'hr':
                # Each <hr> in the source starts a new slide.
                section = Tag(hsoup, 'section')
                section['class'] = 'slide'
                body.append(section)
            else:
                section.append(element)

        html.append(body)

        self.html_content = html
示例#8
0
def modFile(soup):
    """
    Derive power ("pwr") figures from speed and write them into *soup* in place.

    For every <sample>, a <pwr> child holding POWER(spd) is inserted;
    every <segment>'s <pwr> element gets min/max/avg attributes; the
    <summarydata> <pwr> element gets the overall min/max/avg, and a
    normalized power is computed from rolling 30-sample averages (see
    the links at the bottom of this function).

    NOTE(review): POWER() and the units of <spd> are defined elsewhere;
    the 2.23694 factor in the commented-out debug prints suggests spd
    is in m/s — confirm.
    """
    sum_pwr_min = 9999      # running minimum across all segments
    sum_pwr_max = -9999     # running maximum across all segments
    sum_pwr_avg = 0         # accumulated per-segment averages (divided by count below)
    count = 0               # number of <segment> elements processed
    pwr_min = 0
    pwr_max = 0
    pwr_avg = 0
    average_window = []     # trailing window of per-sample power values
    averages = []           # rolling 30-sample averages (input to normalized power)

    #samples: insert a computed <pwr> element into each <sample>
    for seg in soup.contents[2].findAll("sample"):
        spd = float(seg.find("spd").string)
        tp = Tag(soup, "pwr")
        tn = NavigableString(u"\n")
        # Inserted at fixed child positions 7/8 — assumes every <sample>
        # has the same child layout (see the example at the bottom); TODO confirm.
        seg.insert(7, tp)
        seg.insert(8, tn)
        pwr = POWER(spd)
        tp.setString(unicode(pwr))
        #print "spd: %s => pwr: %s" % (spd * 2.23694, pwr)

        # This implicitly assumes samples are every 1 sec for 30 sec average
        average_window.append(pwr)
        if ( len(average_window) >= 30 ):
            pwr_ave = sum(average_window[-30:]) / 30
            averages.append(pwr_ave)
            average_window = average_window[-29:]

    #segments: fill in pwr min/max/avg attributes from the spd attributes
    for seg in soup.contents[2].findAll("segment"):
        count += 1

        s = float(seg.find("spd")['min'])
        pwr_min = POWER(s)
        if (pwr_min < sum_pwr_min): sum_pwr_min = pwr_min

        s = float(seg.find("spd")['max'])
        pwr_max = POWER(s)
        if (pwr_max > sum_pwr_max): sum_pwr_max = pwr_max

        s = float(seg.find("spd")['avg'])
        pwr_avg = POWER(s)
        sum_pwr_avg = sum_pwr_avg + pwr_avg

        seg.find("pwr")['min'] = unicode(pwr_min)
        seg.find("pwr")['max'] = unicode(pwr_max)
        seg.find("pwr")['avg'] = unicode(pwr_avg)

        #print "%s => Min: %s Max: %s Avg: %s" % (s*2.23694, pwr_min, pwr_max, pwr_avg)

    #summarydata: overall min/max/avg across segments
    # NOTE(review): raises ZeroDivisionError when the file has no <segment>s.
    sum_pwr_avg = sum_pwr_avg / count
    soup.contents[2].find("summarydata").find("pwr")['min'] = unicode(sum_pwr_min)
    soup.contents[2].find("summarydata").find("pwr")['max'] = unicode(sum_pwr_max)
    soup.contents[2].find("summarydata").find("pwr")['avg'] = unicode(sum_pwr_avg)

    #print "S Min: %s Max: %s Avg: %s", (sum_pwr_min, sum_pwr_max, sum_pwr_avg)

    # Normalized power: mean of 4th powers of the rolling averages, 4th-rooted.
    # NOTE(review): also divides by zero when there are fewer than 30 samples
    # (averages stays empty) — confirm inputs always have >= 30 samples.
    quad_averages = map(lambda x: math.pow(x, 4), averages)
    ave_quad = sum(quad_averages) / len(quad_averages)
    norm_pwr = math.pow(ave_quad, 1/4.0)
    # NOTE(review): attribute assignment on a Tag sets a Python attribute,
    # not an XML attribute or child element — this value likely never
    # reaches the serialized output; confirm intent.
    soup.contents[2].find("summarydata").normalizedpower = norm_pwr
    #print "ave pwr: %s" % (norm_pwr)

#    1) starting at the 30 s mark, calculate a rolling 30 s average (of the preceeding time points, obviously).
#    2) raise all the values obtained in step #1 to the 4th power.
#    3) take the average of all of the values obtained in step #2.
#    4) take the 4th root of the value obtained in step #3.
#    http://home.trainingpeaks.com/blog/article/normalized-power,-intensity-factor-training-stress
#    http://cyclingtips.com.au/2009/07/average-vs-normalized-power/
#    http://www.endurancecorner.com/wko_definitions

# AveragePower: http://www.slowtwitch.com/Training/General_Physiology/Measuring_Power_and_Using_the_Data_302.html

    """
<sample>
<timeoffset>3852</timeoffset>
<hr>133</hr>
<spd>4.906667</spd>
<cad>79</cad>
<dist>23015.523438</dist>
</sample>
"""
    return