コード例 #1
0
ファイル: views.py プロジェクト: fevell/library
def signin():
    """Render the sign-in page and handle sign-in submissions.

    GET: render the sign-in form.
    POST: validate the form; on success redirect to the index, otherwise
    re-render the form with its validation errors. All rendered HTML is
    minified before being returned.
    """
    search_form = SearchForm()
    form = SigninForm()

    def _render_page():
        # Shared rendering path for GET and for a failed-validation POST;
        # the original duplicated this block in both branches.
        rendered_html = render_template('signin.html',
                                        form=form,
                                        search_form=search_form,
                                        books_count=books_count,
                                        now_year=year,
                                        admin=admin)
        return minify(rendered_html,
                      remove_optional_attribute_quotes=False,
                      remove_empty_space=True)

    if request.method == 'POST':
        # 'not form.validate()' replaces the anti-idiom '== False'.
        if not form.validate():
            return _render_page()
        return redirect(url_for('index'))
    elif request.method == 'GET':
        return _render_page()
コード例 #2
0
 def join_mysql_google(self):
     """Merge WordPress descriptions into the spreadsheet rows.

     For each spreadsheet row whose SKU already exists in WordPress,
     split the stored description on the ``<!--more-->`` marker into a
     short description and a complementary description, minify both, and
     write them into the last two columns of the row (in place).
     """
     for g_line in self.body_value:
         # Check whether this spreadsheet SKU was created in WordPress.
         print(g_line)
         if g_line[0] in self.w_sku:
             index = self.w_sku.index(g_line[0])
             # NOTE(review): raises ValueError when the stored description
             # contains no '<!--more-->' marker -- confirm that is intended.
             r_desc, desc_comp = self.w_desc[index].split("<!--more-->")

             # Store the minified descriptions in the final columns.
             if desc_comp:
                 g_line[-2] = htmlmin.minify(desc_comp, remove_comments=True, remove_empty_space=True)
                 g_line[-1] = htmlmin.minify(r_desc, remove_comments=True, remove_empty_space=True)
             else:
                 # BUG FIX: the original assigned to the undefined name
                 # 'f_line' (NameError); the row being updated is 'g_line'.
                 g_line[-2] = r_desc
コード例 #3
0
ファイル: competition.py プロジェクト: cubells/SINERGIA
 def get_prices(self, cr, uid, ids, context=None):
     """Fetch competitor prices from the web and store them.

     For every product in *ids*, download each associated competitor
     page, minify the HTML, extract the price with the association's
     regex and write the first match as ``online_price``.
     (Removed the unused local ``res = {}`` from the original.)
     """
     asoc_obj = self.pool.get("competition.asoc")
     for product in self.browse(cr, uid, ids, context=context):
         for asoc in product.asoc_id:
             web = asoc.web_product
             regex = asoc.linked_to.regex
             htmlfile = urllib2.urlopen(web)
             html = htmlfile.read()
             # Competitor pages are served as latin-1; decode before minifying.
             htmlencoded = html.decode("iso8859-1")
             htmltext = htmlmin.minify(
                 htmlencoded,
                 remove_comments=True,
                 remove_empty_space=True,
                 remove_all_empty_space=False,
                 reduce_empty_attributes=True,
                 reduce_boolean_attributes=True,
                 remove_optional_attribute_quotes=False,
                 keep_pre=False,
                 pre_tags=(u"pre", u"textarea"),
                 pre_attr="pre",
             )
             pattern = re.compile(regex)
             price = re.findall(pattern, htmltext)
             # NOTE(review): price[0] raises IndexError when the regex
             # matches nothing on the page -- confirm that is acceptable.
             asoc_obj.write(cr, uid, asoc.id, {"online_price": price[0]}, context=context)
コード例 #4
0
ファイル: main.py プロジェクト: LAouini19/Website
def home():
    """Render the minified landing page with city and name data."""
    cities_path = '{}/cities.json'.format(informationFolder)
    names_path = '{}/names.json'.format(informationFolder)
    page = render_template('index.html',
                           title=gettext('Maps - OpenBikes'),
                           cities_file=tb.read_json(cities_path),
                           names_file=tb.read_json(names_path),
                           lang_code='en')
    return minify(page)
コード例 #5
0
ファイル: profile.py プロジェクト: Y-Lab/Y-System
def manage_assignment(id):
    '''profile.manage_assignment(id)'''
    user = User.query.get_or_404(id)
    if not user.created or user.deleted:
        abort(404)
    if not current_user.can_access_profile(user=user):
        abort(403)
    if not (user.is_staff or user.is_suspended_staff):
        return redirect(url_for('profile.overview', id=user.id))
    page_number = request.args.get('page', 1, type=int)
    # NOTE: '== True' / '== False' below are SQLAlchemy column
    # comparisons, not Python boolean tests; they must stay as written.
    pagination = (
        AssignmentScore.query
        .join(User, User.id == AssignmentScore.user_id)
        .filter(User.created == True)
        .filter(User.activated == True)
        .filter(User.deleted == False)
        .filter(AssignmentScore.modified_by_id == user.id)
        .order_by(AssignmentScore.modified_at.desc())
        .paginate(page_number, per_page=current_app.config['RECORD_PER_PAGE'], error_out=False)
    )
    return minify(render_template(
        'profile/manage/assignment.html',
        profile_tab='manage_assignment',
        user=user,
        pagination=pagination,
        scores=pagination.items
    ))
コード例 #6
0
ファイル: profile.py プロジェクト: Y-Lab/Y-System
def manage_scheme(id):
    '''profile.manage_scheme(id)'''
    user = User.query.get_or_404(id)
    if not user.created or user.deleted:
        abort(404)
    if not current_user.can_access_profile(user=user):
        abort(403)
    if not (user.is_staff or user.is_suspended_staff):
        return redirect(url_for('profile.overview', id=user.id))
    page_number = request.args.get('page', 1, type=int)
    # NOTE: '== True' / '== False' below are SQLAlchemy column
    # comparisons, not Python boolean tests; they must stay as written.
    pagination = (
        Supervision.query
        .join(User, User.id == Supervision.user_id)
        .filter(User.created == True)
        .filter(User.activated == True)
        .filter(User.deleted == False)
        .filter(Supervision.supervisor_id == user.id)
        .order_by(Supervision.timestamp.desc())
        .paginate(page_number, per_page=current_app.config['RECORD_PER_PAGE'], error_out=False)
    )
    return minify(render_template(
        'profile/manage/scheme.html',
        profile_tab='manage_scheme',
        user=user,
        pagination=pagination,
        supervisions=pagination.items
    ))
コード例 #7
0
ファイル: profile.py プロジェクト: Y-Lab/Y-System
def booking(id):
    '''profile.booking(id)'''
    user = User.query.get_or_404(id)
    if not user.created or user.deleted:
        abort(404)
    if not current_user.can_access_profile(user=user):
        abort(403)
    if not (user.is_student or user.is_suspended_student):
        return redirect(url_for('profile.overview', id=user.id))
    page_number = request.args.get('page', 1, type=int)
    # Newest date first; within a date, periods in ascending order.
    pagination = (
        Booking.query
        .join(Schedule, Schedule.id == Booking.schedule_id)
        .filter(Booking.user_id == user.id)
        .order_by(Schedule.date.desc())
        .order_by(Schedule.period_id.asc())
        .paginate(page_number, per_page=current_app.config['RECORD_PER_PAGE'], error_out=False)
    )
    return minify(render_template(
        'profile/booking.html',
        profile_tab='booking',
        user=user,
        pagination=pagination,
        bookings=pagination.items
    ))
コード例 #8
0
ファイル: run.py プロジェクト: cpprefjp/site_generator
def convert(path, template, context, hrefs, global_qualify_list):
    """Convert one markdown source file into a rendered HTML output file.

    Reads the markdown for *path*, converts it to HTML, fills *context*
    with page metadata and writes the (optionally minified) result under
    settings.OUTPUT_DIR.
    """
    # 'with' closes the source file promptly; the original leaked the handle.
    with open(make_md_path(path)) as md_file:
        md_data = unicode(md_file.read(), encoding='utf-8')
    body, info = md_to_html(md_data, path, hrefs, global_qualify_list)
    meta = info['meta_result']
    codes = info['example_codes']
    mdinfo = {
        'meta': meta,
        'sources': [{'id': code['id'], 'source': code['code']} for code in codes],
        'page_id': path.split('/'),
    }

    if 'class' in meta:
        context['title'] = meta['class'][0] + '::' + context['title']
    context['keywords'] += ',' + ','.join(value[0] for value in meta.values())
    context['mdinfo'] = json.dumps(mdinfo)

    if context['description'] is None:
        context['description'] = remove_tags(body)
    # Collapse newlines and cap the description at 200 characters.
    context['description'] = context['description'].replace('\n', ' ')[:200]

    context['mathjax'] = info['mathjax_enabled']
    dst_dir = os.path.dirname(os.path.join(settings.OUTPUT_DIR, path))
    if not os.path.exists(dst_dir):
        os.makedirs(dst_dir)
    html_data = template.render(body=body, **context)
    if settings.USE_MINIFY:
        import htmlmin
        html_data = htmlmin.minify(html_data)
    # 'with' guarantees the output handle is closed even if write() fails.
    with open(make_html_path(path), 'w') as html_file:
        html_file.write(html_data.encode('utf-8'))
コード例 #9
0
ファイル: status.py プロジェクト: Y-Lab/Y-System
def booking():
    '''status.booking()'''
    # Defaults for anonymous visitors: show only today's schedule.
    show_today_booking_status = True
    show_tomorrow_booking_status = False
    if current_user.is_authenticated:
        # Cookies are read as truthy strings: default '1' -> True, and an
        # empty default '' -> False.
        # NOTE(review): ANY non-empty cookie value (even '0') evaluates
        # True here -- confirm these cookies are only ever '1' or ''.
        show_today_booking_status = bool(request.cookies.get('show_today_booking_status', '1'))
        show_tomorrow_booking_status = bool(request.cookies.get('show_tomorrow_booking_status', ''))
    if show_today_booking_status:
        query = Schedule.query\
            .filter(Schedule.date == date_now(utc_offset=current_app.config['UTC_OFFSET']))\
            .order_by(Schedule.period_id.asc())
    if show_tomorrow_booking_status:
        # When both flags are set, tomorrow's query overwrites today's.
        query = Schedule.query\
            .filter(Schedule.date == date_next(days=1, utc_offset=current_app.config['UTC_OFFSET']))\
            .order_by(Schedule.period_id.asc())
    try:
        # 'query' is only bound when at least one flag was set; the
        # NameError fallback redirects when neither branch ran.
        schedules = query
    except NameError:
        return redirect(url_for('status.today_booking'))
    return minify(render_template(
        'status/booking.html',
        show_today_booking_status=show_today_booking_status,
        show_tomorrow_booking_status=show_tomorrow_booking_status,
        schedules=schedules,
    ))
コード例 #10
0
ファイル: __init__.py プロジェクト: blakev/sowing-seasons
    def render_template(self, name, **kwargs):
        """Render the Jinja2 template *name* with *kwargs*.

        Registers the project's custom filters, renders the template and,
        when the 'compress_response' setting is enabled, minifies the
        resulting HTML before returning it.
        """
        template_path = self.settings.get('template_path', DEFAULT_TEMPLATE_PATH)
        env = Environment(loader=FileSystemLoader([template_path]))

        # Install the custom template filters.
        env.filters.update({
            'calculate_pagination': calculate_pagination,
            'datetime_format': datetime_format,
            'highlight': code_highlighter,
            'markdown': fn_markdown,
            'slugify': document_slug,
            'split': fn_split,
            'strip': fn_strip,
        })

        try:
            template = env.get_template(name)
        except TemplateNotFound:
            logger.error('Could not find template, %s' % name)
            raise TemplateNotFound(name)
        else:
            logger.info('render_template(%s)' % name)

        page_html = template.render(kwargs)
        original_length = len(page_html)

        # Compress the HTML response when configured to do so.
        if self.settings.get('compress_response', False):
            page_html = htmlmin.minify(page_html, remove_comments=True, keep_pre=True)
            logger.info('compressed html %0.2f percent of original' % ((len(page_html) / float(original_length))*100))

        return page_html
コード例 #11
0
ファイル: main.py プロジェクト: Y-Lab/Y-System
def forbidden(e):
    '''main.forbidden(e)'''
    accepts = request.accept_mimetypes
    # API clients that accept JSON but not HTML get a JSON error body.
    if accepts.accept_json and not accepts.accept_html:
        payload = jsonify({'error': 'forbidden'})
        payload.status_code = 403
        return payload
    return minify(render_template('error/403.html')), 403
コード例 #12
0
ファイル: main.py プロジェクト: Y-Lab/Y-System
def page_not_found(e):
    '''main.page_not_found(e)'''
    accepts = request.accept_mimetypes
    # API clients that accept JSON but not HTML get a JSON error body.
    if accepts.accept_json and not accepts.accept_html:
        payload = jsonify({'error': 'not found'})
        payload.status_code = 404
        return payload
    return minify(render_template('error/404.html')), 404
コード例 #13
0
ファイル: main.py プロジェクト: Y-Lab/Y-System
def home():
    '''main.home()'''
    if not current_user.is_authenticated:
        return minify(render_template('home.html'))
    next_url = request.args.get('next')
    if current_user.can('管理'):
        return redirect(next_url or url_for('status.home'))
    return redirect(next_url or url_for('profile.overview', id=current_user.id))
コード例 #14
0
ファイル: main.py プロジェクト: Y-Lab/Y-System
def internal_server_error(e):
    '''main.internal_server_error(e)'''
    accepts = request.accept_mimetypes
    # API clients that accept JSON but not HTML get a JSON error body.
    if accepts.accept_json and not accepts.accept_html:
        payload = jsonify({'error': 'internal server error'})
        payload.status_code = 500
        return payload
    return minify(render_template('error/500.html')), 500
コード例 #15
0
ファイル: inbox.py プロジェクト: Y-Lab/Y-System
def message(id):
    '''inbox.message(id)'''
    message = Message.query.get_or_404(id)
    if message.deleted:
        abort(404)
    user_message = UserMessage.query.filter_by(user_id=current_user.id, message_id=message.id).first()
    if user_message is None:
        abort(404)
    if not user_message.read:
        user_message.mark_read()
        db.session.commit()
    # Base query: this user's inbox entries whose Message is not deleted.
    # ('== False' is a SQLAlchemy column comparison, not a boolean test.)
    inbox_query = (
        UserMessage.query
        .join(Message, Message.id == UserMessage.message_id)
        .filter(UserMessage.user_id == current_user.id)
        .filter(Message.deleted == False)
    )
    unread_inbox_messages_num = inbox_query.filter(UserMessage.read == False).count()
    all_inbox_messages_num = inbox_query.count()
    return minify(render_template(
        'inbox/message.html',
        unread_inbox_messages_num=unread_inbox_messages_num,
        all_inbox_messages_num=all_inbox_messages_num,
        message=message
    ))
コード例 #16
0
    def __init__(self, pelican):
        """
        Minify every generated file in the pelican output directory.

        HTML files are minified with htmlmin; CSS files are compressed
        with csscompressor.

        :param pelican: the pelican object
        :type pelican: pelican.Pelican
        """
        def _minify_html(content):
            # HTML minification settings used for every .html file.
            return htmlmin.minify(
                content,
                remove_comments=True,
                remove_empty_space=True,
                reduce_boolean_attributes=True,
                keep_pre=True,
            )

        for root, _subdirs, filenames in os.walk(pelican.output_path):
            for filename in filenames:
                full_path = os.path.join(root, filename)
                if fnmatch(filename, '*.html'):
                    self.write_to_file(full_path, _minify_html)
                elif fnmatch(filename, '*.css'):
                    self.write_to_file(full_path, csscompressor.compress)
コード例 #17
0
ファイル: auth.py プロジェクト: Y-Lab/Y-System
def change_email_request():
    '''auth.change_email_request()'''
    form = ChangeEmailForm()
    # Guard clause: not a valid submission -> just render the form.
    if not form.validate_on_submit():
        return minify(render_template(
            'auth/change_email.html',
            form=form
        ))
    # Guard clause: wrong password -> flash and retry.
    if not current_user.verify_password(form.password.data):
        flash('无效的用户名或密码', category='error')
        return redirect(url_for('auth.change_email_request'))
    new_email = form.email.data.strip().lower()
    token = current_user.generate_email_change_token(new_email)
    send_email(
        recipient=new_email,
        subject='确认您的邮箱账户',
        template='auth/mail/change_email',
        user=current_user._get_current_object(),
        token=token
    )
    flash('一封确认邮件已经发送至您的邮箱', category='info')
    add_user_log(
        user=current_user._get_current_object(),
        event='请求修改邮箱为:{}'.format(new_email),
        category='auth'
    )
    return redirect(url_for('auth.change_email_request'))
コード例 #18
0
ファイル: auth.py プロジェクト: Y-Lab/Y-System
def unconfirmed():
    '''auth.unconfirmed()'''
    if current_user.is_anonymous:
        return redirect(url_for('main.home'))
    if not current_user.confirmed:
        return minify(render_template('auth/unconfirmed.html'))
    return redirect(current_user.index_url)
コード例 #19
0
ファイル: profile.py プロジェクト: Y-Lab/Y-System
def rating(id):
    '''profile.rating(id)'''
    user = User.query.get_or_404(id)
    if not user.created or user.deleted:
        abort(404)
    if not current_user.can_access_profile(user=user):
        abort(403)
    form = UserRatingForm()
    # Only accept submissions whose body has visible (non-tag) content.
    if form.validate_on_submit() and strip_html_tags(form.body.data):
        new_rating = UserRating(
            body_html=sanitize_html(form.body.data),
            author_id=current_user.id,
            user_id=user.id
        )
        db.session.add(new_rating)
        db.session.commit()
        flash('已给“{}”添加用户评价'.format(user.name_email), category='success')
        add_user_log(
            user=current_user._get_current_object(),
            event='给“{}”添加用户评价:{}'.format(
                user.name_email,
                strip_html_tags(new_rating.body_html)
            ),
            category='manage'
        )
        return redirect(url_for('profile.rating', id=user.id))
    return minify(render_template(
        'profile/rating.html',
        profile_tab='rating',
        form=form,
        user=user
    ))
コード例 #20
0
 def minify(self):
     """Return the minified HTML of the rendered template."""
     rendered = self.render().decode('utf8')
     return htmlmin.minify(rendered,
                           remove_comments=True,
                           remove_empty_space=True)
コード例 #21
0
ファイル: views.py プロジェクト: fevell/library
def index(page=1):
    """Render a paginated, minified catalog of books."""
    search_form = SearchForm()
    per_page = BOOKS_PER_PAGE

    books = session.query(Book, Author).join(
        books_authors, Author).order_by(Author.name, Book.title).group_by(Book.id)

    # NOTE(review): with the default page=1 the slice starts at per_page,
    # so the first per_page rows are never shown -- confirm the caller
    # really passes 0-based page numbers.
    page_from = int(page * per_page)
    page_next = int(page_from + per_page)
    books_limit = abs(books_count / per_page)

    rendered_html = render_template('catalog_list.html',
                                    meta_title="The Library",
                                    books=books.slice(page_from, page_next),
                                    page=page,
                                    page_from=page_from,
                                    page_next=page_next,
                                    books_count=books_count,
                                    per_page=per_page,
                                    books_limit=books_limit,
                                    now_year=year,
                                    admin=admin,
                                    search_form=search_form)
    return minify(rendered_html,
                  remove_optional_attribute_quotes=False,
                  remove_empty_space=True)
コード例 #22
0
ファイル: bucket.py プロジェクト: vrypan/bucket3
 def util_write_html(self, file_dir, file_content, file_name='index.html'):
     """Write (optionally minified) HTML to file_dir/file_name as UTF-8.

     Creates file_dir when it does not exist. When self.minify_html is
     set, the content is minified first (comments stripped, empty
     whitespace kept).
     """
     if self.minify_html:
         file_content = minify(file_content, remove_comments=True, remove_empty_space=False)
     if not os.path.exists(file_dir):
         os.makedirs(file_dir)
     # 'with' guarantees the handle is closed even if write() fails (the
     # original leaked it); write bytes so the UTF-8 payload round-trips
     # on both Python 2 and 3.
     with open(os.path.join(file_dir, file_name), 'wb') as f:
         f.write(file_content.encode('utf8'))
コード例 #23
0
ファイル: map.py プロジェクト: LAouini19/Website
def map(city):
    names = tb.read_json('{}/names.json'.format(informationFolder))
    centers = tb.read_json('{}/centers.json'.format(informationFolder))
    predictions = tb.read_json('{}/predictions.json'.format(informationFolder))
    geojson = str(url_for('static', filename='geojson/{}.geojson'.format(city)))
    return minify(render_template('map.html', city=city, city_name=names[city],
                                  center=centers[city], geojson=geojson,
                                  predict=predictions[city]))
コード例 #24
0
 def decorated_function(*args, **kwargs):
     if current_app.config.get('DEBUG'):
         return func(*args, **kwargs)
     else:
         return minify(
             func(*args, **kwargs),
             remove_optional_attribute_quotes=False
         )
コード例 #25
0
ファイル: static_minify.py プロジェクト: yubang/modular_front
def handle_html(html_data):
    """Minify an HTML string.

    :param html_data: the HTML source text
    :return: the minified HTML
    """
    return htmlmin.minify(
        html_data,
        remove_comments=True,
        remove_empty_space=True,
        remove_all_empty_space=True,
    )
コード例 #26
0
def get_templates(dirname=None):
    """Load every template file under *dirname* into a dict.

    Keys are '/'-prefixed paths relative to *dirname* with its first
    path component dropped; values are the minified file contents.
    Defaults to BASE_TEMPLATE_DIR when *dirname* is not given.
    """
    rtn = {}
    dirname = dirname or BASE_TEMPLATE_DIR
    full_dirname = os.path.realpath(dirname)
    if os.path.exists(full_dirname):
        for entry in os.listdir(full_dirname):
            full_path = os.path.join(full_dirname, entry)
            if not os.path.isfile(full_path):
                continue
            # 'with' closes each handle; the original map/lambda leaked them.
            with open(full_path, 'r') as f:
                raw = f.read()
            rel_path = os.path.join(dirname, entry)
            key = '/' + '/'.join(rel_path.split('/')[1:])
            rtn[key] = minify(unicode(raw.decode('utf-8')), remove_empty_space=True)
    return rtn
コード例 #27
0
def beautify_page(html, lang):
    """Clean a page: drop empty tags and bad sections, then minify."""
    soup = BeautifulSoup(html, "html")
    # Remove tags whose text content is empty or whitespace-only.
    for tag in soup.find_all():
        if not tag.text.strip():
            tag.extract()
    soup = remove_bad_sections(soup, lang)
    pretty = str(soup.prettify())
    return htmlmin.minify(pretty, remove_empty_space=True)
コード例 #28
0
    def test_named_sheet(self):
        """excel_to_html on the named sheet 'Diablo' yields the expected table."""
        expected = (
            '<table class="dataframe"><thead><tr><th>One</th><th>Two</th><th>Three</th></tr></thead>'
            '<tbody><tr><td>1</td><td>2</td><td>3</td></tr><tr><td>4</td><td>5</td><td>6</td></tr><tr>'
            '<td>7</td><td>8</td><td>9</td></tr></tbody></table>'
        )

        actual = htmlmin.minify(
            fp.excel_to_html(EXCEL_DIR + 'test_named_sheet.xlsx', sheetname='Diablo'),
            remove_empty_space=True,
            remove_optional_attribute_quotes=False,
        )

        self.assertEqual(actual, expected)
コード例 #29
0
 def tween_view(request):
     """Pyramid tween: minify text/html responses, never failing the request."""
     response = handler(request)
     try:
         content_type = response.content_type
         if content_type and content_type.startswith('text/html'):
             response.text = minify(response.text, **htmlmin_opts)
     except Exception:
         # Minification is best-effort: log and return the original body.
         log.exception('Unexpected exception while minifying content')
     return response
コード例 #30
0
ファイル: models.py プロジェクト: andrewgodwin/thiuff
 def send_stream_thread(self):
     """
     Sends a notification of us to our thread's stream.
     """
     data = {
         "id": str(self.id),
         "thread_id": str(self.thread_id),
     }
     if self.parent:
         # Replies also carry the id of the discussion they belong to.
         data.update({
             "type": "reply",
             "discussion_id": str(self.parent_id),
             "html": htmlmin.minify(self.reply_html()),
         })
     else:
         data.update({
             "type": "discussion",
             "html": htmlmin.minify(self.discussion_html()),
         })
     channels.Group("stream-thread-%s" % self.thread.id).send({
         "content": json.dumps(data),
     })
コード例 #31
0
ファイル: render.py プロジェクト: qdqmedia/leela
def _minify_html(html):
    """Minify *html* conservatively and strip surrounding whitespace."""
    minified = htmlmin.minify(
        html,
        remove_comments=True,
        reduce_empty_attributes=False,
        reduce_boolean_attributes=False,
        remove_optional_attribute_quotes=False,
    )
    return minified.strip()
コード例 #32
0
 def read_task_from_hypertext_body(self, filename, file):
     """Build a single task whose data payload is the minified file body."""
     logger.debug('Read 1 task from hypertext file {}'.format(filename))
     raw = file.read()
     body = htmlmin.minify(raw.decode('utf8'), remove_all_empty_space=True)
     return [{'data': {settings.UPLOAD_DATA_UNDEFINED_NAME: body}}]
コード例 #33
0
ファイル: simplegen.py プロジェクト: rawteech/simplegen
 def wrapper(*args):
     """Call *func*, minifying its HTML output when MINIFY_HTML is set."""
     result = func(*args)
     if MINIFY_HTML: # noqa
         return htmlmin.minify(result)
     return result
コード例 #34
0
def process_word_entry(entry):
    """
    Processes a single row from www.mbdg.net and returns it in a dictionary

    Relies on module-level globals: ``args`` (provides ``api_address``),
    ``driver`` (presumably a selenium WebDriver -- TODO confirm), and
    ``htmlmin`` / ``requests`` / ``re`` / ``json`` imports.

    :param bs4.element.Tag entry: This is equivalent to one row in the results from www.mdbg.net
    :return: Returns a single dictionary containing the organized entry
    :rtype: dict
    """

    organized_entry = {"characters": []}  # type: dict

    # Traditional form comes from the "head" cell of the row.
    organized_entry. \
        update({"traditional": entry.find("td", {"class": "head"}).find("div", {"class": "hanzi"}).text})

    # I didn't investigate why, but for some reason the site was adding u200b so I just manually stripped that
    # whitespace out.
    organized_entry. \
        update({"pinyin": str(entry.find("div", {"class": "pinyin"}).text).lower().strip().replace(u'\u200b', "")})

    # The entries come separated by /'s which is why we have the split here
    # The map function here just gets rid of the extra whitespace on each word_to_process before assignment
    organized_entry. \
        update({"defs": list(map(str.strip, str(entry.find("div", {"class": "defs"}).text).split('/')))})

    tail = entry.find("td", {"class": "tail"})
    simplified = tail.find("div", {"class": "hanzi"})  # type: bs4.element.Tag
    hsk = tail.find("div", {"class": "hsk"})  # type: bs4.element.Tag

    character_string = ""

    # Interleave traditional and simplified characters where they differ;
    # assumes both strings are the same length -- TODO confirm.
    if simplified is not None:
        organized_entry.update({"simplified": simplified.text})

        for i, character in enumerate(organized_entry["traditional"], start=0):

            if character == organized_entry["simplified"][i]:
                character_string = character_string + character
            else:
                character_string = character_string + character + organized_entry["simplified"][i]
    else:
        organized_entry.update({"simplified": ""})
        character_string = organized_entry["traditional"]

    organized_entry["history"] = ""

    # Look up each unique traditional character's etymology via the API;
    # dict.fromkeys preserves order while de-duplicating.
    for i, character in enumerate("".join(dict.fromkeys(organized_entry["traditional"]))):
        r = requests.put("http://" + args.api_address + "/api/lookup",
                         data=json.dumps({"characters_to_lookup": character}),
                         headers={'Content-Type': 'application/json'})

        if r.status_code != 404:
            # Wrap any CJK character in the explanation with a link.
            history = re.sub("([\u4e00-\u9FFF])", "<a href=\"http://charserver.lan:4200/\\1\">\\1</a>",
                             r.json()[0]["explanation"])

            if i == 0:
                organized_entry["history"] = organized_entry["history"] + history
            else:
                organized_entry["history"] = organized_entry["history"] + "<br><br>" + history
        else:
            # NOTE(review): a 404 on a LATER character wipes the history
            # accumulated for earlier characters -- confirm intended.
            organized_entry["history"] = ""

    # Get words from hanzicraft
    # NOTE(review): 'character' here is the leftover loop variable from the
    # loop above (the LAST unique character), not the whole word -- this
    # looks like a bug; was organized_entry["traditional"] intended?
    url_string = "https://hanzicraft.com/character/" + quote("".join(dict.fromkeys(character)))  # type: str

    driver.get(url_string)  # type: str

    soup = BeautifulSoup(driver.page_source, 'html.parser').find(id="display")  # type: bs4.BeautifulSoup

    # Strip interactive chrome from the scraped page.
    for favorite_button in soup.find_all('button', id="addfav"):
        favorite_button.decompose()

    for character_nav in soup.find_all('div', {"class": "character-nav"}):
        character_nav.decompose()

    # Keep at most the first five example word blocks.
    for index, word_block in enumerate(soup.find_all('div', {"class": "wordblock"})):
        if index > 4:
            word_block.decompose()

    # If there are no examples then delete the example words block so it doesn't consume space
    if not soup.find_all('div', {"class": "wordblock"}):
        for example in soup.find_all('div', {"class": "examples"}):
            example.decompose()

    for a in soup.findAll('a'):

        if "href" in a.attrs:
            # If the character reference is actually a word_to_process, send us to mdbg instead
            if "character" in a['href'] and len(a['href'].replace('/character/', "")) > 1:
                a['href'] = "https://www.mdbg.net/chinese/dictionary?page=worddict&wdrst=1&wdqb=" + \
                            a['href'].split('/')[2]
            else:
                # The links start is relative. We want them to be FQDNs so they reach out to Hanzicraft
                a['href'] = "https://hanzicraft.com" + a['href']

            # Remove target because it causes the links to fail.
            if "target" in a.attrs:
                a.attrs.pop("target")

    organized_entry["characters"].append(str(soup))

    # Minify the scraped HTML snippets before returning them.
    for i, character in enumerate(organized_entry["characters"], start=0):
        organized_entry["characters"][i] = htmlmin.minify(character, remove_empty_space=True, remove_comments=True,
                                                          remove_optional_attribute_quotes=True)

    if hsk is not None:
        organized_entry.update({"hsk": hsk.text})
    else:
        organized_entry.update({"hsk": ""})

    # Fall back to machine conversion when no simplified form was scraped.
    if organized_entry["simplified"].strip() == "":
        organized_entry["simplified"] = HanziConv.toSimplified(organized_entry["traditional"])

    return organized_entry
コード例 #35
0
ファイル: pagetropes.py プロジェクト: flipperbw/tropes
    # if want to do fresh...
    # Skip pages already stored in the media table for this (type, title).
    cursor.execute("""select 1 from media where type = %s and title = %s;""", (group, title))
    if cursor.rowcount != 0:
        logger.warning('Skipping {}/{}'.format(group, title))
    else:
        link = '{}/{}/{}'.format(baseurl, group, title)
        print(link)

        try:
            htmlData = session.get(link).text
            soup = BeautifulSoup(htmlData, 'lxml')

            # Drop scripts, styles, images, meta and HTML comments before storing.
            for script in soup(["script", "link", "style", "noscript", "img", "meta"]):
                script.extract()
            for x in soup.find_all(text=lambda text: isinstance(text, Comment)):
                x.extract()

            htmlData = htmlmin.minify(str(soup), remove_empty_space=True)
        # NOTE(review): bare 'except' also swallows KeyboardInterrupt /
        # SystemExit -- consider 'except Exception'.
        except:
            logger.error('Error with fetching: %s' % link)
        else:
            try:
                cursor.execute("""INSERT INTO media (type, title, name, key, data) VALUES (%s, %s, %s, %s, %s);""", (group, title, name, key, htmlData))
                db.commit()
            # NOTE(review): bare 'except' here too; the failed transaction
            # is never rolled back.
            except:
                logger.error('Error with db: %s' % link)

        # Throttle requests to roughly three per second.
        time.sleep(0.34)

db.close()
コード例 #36
0
def minify_html(content):
    """Return *content* minified with htmlmin's default settings."""
    minified = htmlmin.minify(content)
    return minified
コード例 #37
0
def minify_html(s_html):
    """Return the minified form of *s_html*, dropping comments and empty space."""
    print("++++++++++\nNow in minify_html module ...")
    minified = htmlmin.minify(s_html,
                              remove_comments=True,
                              remove_empty_space=True)
    return minified
コード例 #38
0
def tasks_from_file(filename, file, project):
    """Parse an uploaded file into labeling tasks.

    Dispatches on the file extension (csv/tsv/txt/json/html/htm/xml);
    any other file is stored under the project's upload directory and
    referenced by URL. Returns ``(tasks, file_format)`` where
    *file_format* is the file's extension.

    Raises ValidationError for unparseable input or unsupported shapes.
    """
    file_format = None
    try:
        # One task per CSV/TSV row; NaNs become empty strings.
        if filename.endswith('.csv') and not is_time_series_only(project):
            tasks = pd.read_csv(file).fillna('').to_dict('records')
            tasks = [{'data': task} for task in tasks]
            file_format = os.path.splitext(filename)[-1]
        elif filename.endswith('.tsv') and not is_time_series_only(project):
            tasks = pd.read_csv(file, sep='\t').fillna('').to_dict('records')
            tasks = [{'data': task} for task in tasks]
            file_format = os.path.splitext(filename)[-1]
        # One task per line of text.
        elif filename.endswith('.txt'):
            lines = file.read().splitlines()
            tasks = [{
                'data': {
                    settings.UPLOAD_DATA_UNDEFINED_NAME: line.decode('utf-8')
                }
            } for line in lines]
            file_format = os.path.splitext(filename)[-1]
        elif filename.endswith('.json'):
            raw_data = file.read()
            # Python 3.5 compatibility fix https://docs.python.org/3/whatsnew/3.6.html#json
            try:
                tasks = json.loads(raw_data)
            except TypeError:
                tasks = json.loads(raw_data.decode('utf8'))
            file_format = os.path.splitext(filename)[-1]

        # no drag & drop support
        elif project is None:
            raise ValidationError('No tasks found in: ' + filename)

        # upload file via drag & drop
        elif len(project.data_types) > 1 and not is_time_series_only(project):
            raise ValidationError(
                'Your label config has more than one data keys, direct file upload supports only'
                ' one data key. To import data with multiple data keys use JSON or CSV'
            )
        # convert html file to json task
        elif filename.endswith('.html') or filename.endswith(
                '.htm') or filename.endswith('.xml'):
            data = file.read()
            body = htmlmin.minify(data.decode('utf8'),
                                  remove_all_empty_space=True)
            tasks = [{'data': {settings.UPLOAD_DATA_UNDEFINED_NAME: body}}]
            file_format = os.path.splitext(filename)[-1]
        # hosting for file
        else:
            # read as text or binary file
            data = open(filename, 'rb').read() if isinstance(
                file, io.TextIOWrapper) else file.read()

            # Content-addressed filename (md5 prefix) avoids collisions
            # between uploads with the same basename.
            upload_dir = os.path.join(project.path, 'upload')
            os.makedirs(upload_dir, exist_ok=True)
            filename = hashlib.md5(data).hexdigest() + '-' + os.path.basename(
                filename)
            path = os.path.join(upload_dir, filename)
            open(path, 'wb').write(data)
            # prepare task

            path = get_full_hostname() + '/data/upload/' + filename
            tasks = [{'data': {settings.UPLOAD_DATA_UNDEFINED_NAME: path}}]
            file_format = os.path.splitext(filename)[-1]

    # NOTE(review): this broad handler also catches and re-wraps the
    # ValidationErrors raised above, changing their message prefix.
    except Exception as exc:
        raise ValidationError('Failed to parse input file ' + filename + ': ' +
                              str(exc))

    # null in file
    if tasks is None:
        raise ValidationError('null in ' + filename + ' is not allowed')

    # one task as dict
    elif isinstance(tasks, dict):
        tasks = [tasks]

    # list
    elif isinstance(tasks, list):
        pass

    # something strange
    else:
        raise ValidationError('Incorrect task type in ' + filename + ': "' +
                              str(str(tasks)[0:100]) + '". '
                              'It is allowed "dict" or "list of dicts" only')

    return tasks, file_format
コード例 #39
0
ファイル: main.py プロジェクト: JefferyQ/c3nav-32c3
def main(origin=None, destination=None):
    """Serve the main routing page.

    On GET, render the origin/destination selection form; on POST, compute
    and render a route.  *origin* and *destination* act as defaults and may
    be overridden by the request's 'o'/'d' parameters.
    """
    # Kill switch: wifi-only deployments serve an empty page.
    if os.environ.get('WIFIONLY'):
        return ''

    # Parameters come from the query string on GET, the form body otherwise.
    src = request.args if request.method == 'GET' else request.form

    # These calls only mark the strings for gettext extraction; their return
    # values are intentionally discarded.
    _('Sorry, an error occured =(')
    _('select origin…')
    _('select destination…')
    _('Edit Settings')
    _('swap')
    _('close')
    _('share')
    _('create shortcut')
    _('wifi positioning is currently not (yet) available')
    _('determining your position…')

    # Base template context shared by the form view and the result view.
    ctx = {
        'location_select':
        sorted(graph.selectable_locations.values(),
               key=lambda l: (0 - l.priority, l.title)),
        'titles': {
            name: titles.get(get_locale(), name)
            for name, titles in graph.data['titles'].items()
        },
        'mobile_client':
        request.headers.get('User-Agent').startswith('c3navClient'),
        'fake_mobile_client':
        'fakemobile' in request.args,
        'graph':
        graph
    }

    # Select origins

    origin = graph.get_selectable_location(src.get('o', origin))
    destination = graph.get_selectable_location(src.get('d', destination))
    ctx.update({'origin': origin, 'destination': destination})
    if request.method == 'POST':
        if origin is None:
            return 'missing origin'

        if destination is None:
            return 'missing destination'

    # Load Settings
    settingscookie = request.cookies.get('settings')
    cookie_settings = {}
    if settingscookie is not None:
        try:
            cookie_settings = json.loads(settingscookie)
        except:
            # A malformed settings cookie is silently ignored.
            pass
        else:
            ctx['had_settings_cookie'] = True

    # Merge request parameters over cookie values, keeping only entries whose
    # type matches the router's default setting (list vs. scalar).
    setsettings = {}
    for name, default_value in Router.default_settings.items():
        if not isinstance(default_value, str) and isinstance(
                default_value, Iterable):
            value = src.getlist(name)
            cookie_value = cookie_settings.get(name)
            if value or ('force-' + name) in src:
                setsettings[name] = value
            elif isinstance(cookie_value, list):
                setsettings[name] = cookie_value
        elif name in src:
            setsettings[name] = src.get(name)
        elif name in cookie_settings:
            cookie_value = cookie_settings.get(name)
            if (isinstance(cookie_value, Iterable)
                    and isinstance(default_value, str)
                    or isinstance(cookie_value, int)
                    and isinstance(default_value, int)):
                setsettings[name] = cookie_value

    router = Router(graph, setsettings)
    ctx['settings'] = router.settings

    # Flatten settings into (name, value) pairs so that list-valued settings
    # can be rendered as repeated hidden form fields.
    settings_flat = sum(
        [(sum([[(n, vv)]
               for vv in v], []) if isinstance(v, Iterable) else [(n, v)])
         for n, v in router.settings.items()], [])
    ctx['settings_fields'] = [(n, v) for n, v in settings_flat if n in src]

    # parse what is avoided
    avoid = []
    for ctype in ('steps', 'stairs', 'escalators', 'elevators'):
        s = router.settings[ctype]
        if s == 'yes':
            continue
        else:
            # Arrow glyph encodes the direction that is avoided.
            avoid.append(ctype + {'no': '↕', 'up': '↓', 'down': '↑'}[s])
    for e in router.settings['e']:
        avoid.append(graph.titles.get(e, {}).get('en', e))
    ctx['avoid'] = avoid

    # GET requests only show the form; no routing happens.
    if request.method == 'GET':
        resp = make_response(minify(render_template('main.html', **ctx)))
        if 'lang' in request.cookies or 'lang' in request.args:
            resp.set_cookie('lang',
                            g.locale,
                            expires=datetime.now() + timedelta(days=30))
        return resp
    """
    Now lets route!
    """
    messages, route = router.get_route(origin, destination)
    if route is not None:
        route_description, has_avoided_ctypes = route.describe()
        if has_avoided_ctypes:
            messages.append(
                ('warn',
                 _('This route contains way types that you wanted to avoid '
                   'because otherwise no route would be possible.')))
        total_duration = sum(rp['duration'] for rp in route_description)

        ctx.update({
            'routeparts':
            route_description,
            'origin_title':
            None
            if isinstance(route.points[0], Node) else route.points[0].title,
            'destination_title':
            None
            if isinstance(route.points[-1], Node) else route.points[-1].title,
            'total_distance':
            round(sum(rp['distance'] for rp in route_description) / 100, 1),
            'total_duration':
            (int(total_duration / 60), int(total_duration % 60)),
            'jsonfoo':
            json.dumps(route_description, indent=4)
        })

    ctx.update({
        'messages': messages,
        'isresult': True,
        'resultsonly': src.get('ajax') == '1'
    })

    # Persist settings/language preferences in cookies when requested.
    resp = make_response(minify(render_template('main.html', **ctx)))
    if src.get('savesettings') == '1':
        resp.set_cookie('settings',
                        json.dumps(router.settings),
                        expires=datetime.now() + timedelta(days=30))
    if 'lang' in request.cookies or 'lang' in request.args:
        resp.set_cookie('lang',
                        g.locale,
                        expires=datetime.now() + timedelta(days=30))
    return resp
コード例 #40
0
import sass
from staticjinja import Site
import os
import htmlmin

if __name__ == "__main__":
    # Compile the SASS sources straight into the published assets directory.
    sass.compile(dirname=("sass/", "../assets/css/"), output_style='compressed')

    # Render every staticjinja template into the parent directory.
    site = Site.make_site(outpath="../")
    site.render()

    # Minify the rendered .jinja output.  (The previous bare call
    # ``htmlmin.minify('../index.html')`` minified the literal path string
    # and discarded the result — a dead, misleading statement — so it has
    # been removed.)
    with open('../index.jinja', 'r') as jinja_file:
        content = htmlmin.minify(jinja_file.read(),
                                 remove_empty_space=True,
                                 remove_comments=True)

    # Replace any stale index.html and drop the intermediate .jinja file.
    if os.path.exists('../index.html'):
        os.remove('../index.html')
    os.remove('../index.jinja')
    with open('../index.html', 'w') as html_file:
        html_file.write(content)

コード例 #41
0
 def wrapper(*args):
     """Delegate to the wrapped view and minify its HTML output."""
     result = func(*args)
     return minify(result)
コード例 #42
0
def minify_html(i):
    """Minify the HTML string *i*, preserving inter-tag whitespace."""
    compact = htmlmin.minify(i, remove_empty_space=False)
    return compact
コード例 #43
0
            examples_html += tutorials

        command_html = command_html.replace("{$examples}", examples_html)

        # Strip 'mml' namespace from MathML tags so that MathJax can find them
        command_html = re.sub(r'<(/?)mml:(.*?)>', r'<\1\2>', command_html)

        output_html = header_for_command + command_html + footer_for_command

        output = open(output_dir + version_dir + "/" + command, "w")
        output_string = output_html
        if args.buildmode == 'full':
            output_string = htmlmin.minify(
                output_html,
                remove_comments=True,
                reduce_boolean_attributes=True,
                remove_optional_attribute_quotes=False).encode(
                    'ascii', 'xmlcharrefreplace')
        output.write(output_string)
        output.close()

        written += 1

    if os.path.exists("html/404.html"):
        header_for_page = header_for_version
        footer_for_page = footer_for_version

        api_commands = ""
        for category in opengl.command_categories:
            api_commands += spew_category(category,
                                          opengl.command_categories[category],
コード例 #44
0
    items.append("""
<div class="item" qari="{qari_key}">
    <div class="speed"><span style="width: {speed_score}%">&nbsp;</span></div>
    <div class="controls">{name}<span class="control-button-group">
        <a class="fa fa-play-circle control-button audio-player" href="http://download.quranicaudio.com/quran/{rel_path}078.mp3"></a>
        <a class="fa fa-cloud-download control-button" href="https://quranicaudio.com/quran/{id}" target="_blank"></a>
    </span></div>
    <div class="register"><span style="width: {register_score}%">&nbsp;</span></div>
</div>""".format(
        qari_key=qari_key,
        speed_score=qstats["speed_score"],
        name=name + (" <span class=\"sub\">%s</span>" % sub if sub else ""),
        rel_path=qari_metadata[qari_key]["relative_path"],
        id=qari_metadata[qari_key]["id"],
        register_score=qstats["register_score"],
    ))

    qari_stats_lite[qari_key] = {
        "speed": qstats["speed_score"],
        "register": qstats["register_score"],
    }

# Fill the page template with the generated per-qari items and the compact
# per-qari statistics consumed by the front-end scripts.
rendered_site = template.format(items="".join(items),
                                qari_stats=json.dumps(qari_stats_lite))

# Shrink the final page; attribute quotes are deliberately kept.
minified_site = htmlmin.minify(rendered_site,
                               remove_empty_space=True,
                               remove_optional_attribute_quotes=False)

# NOTE(review): the file handle is never closed explicitly; a `with` block
# would be safer — confirm before relying on the flush behavior.
open("site/index.html", "w").write(minified_site)
コード例 #45
0
 def response_minify(response):
     """Compress outgoing HTML with htmlmin to reduce bandwidth usage."""
     # Only touch HTML responses, and only when minification is enabled.
     if not app.config['HTML_MINIFY']:
         return response
     if response.content_type != u'text/html; charset=utf-8':
         return response
     response.set_data(minify(response.get_data(as_text=True)))
     return response
コード例 #46
0
def minify_html(filename: str) -> str:
    """Read *filename*, minify the HTML, and return it with newlines stripped."""
    with open(filename) as handle:
        raw = handle.read()
    compact = htmlmin.minify(raw, remove_empty_space=True)
    return compact.replace('\n', '')
コード例 #47
0
import re
import subprocess
import htmlmin
# Load the default certificate theme and minify it before processing.
# NOTE(review): the file handle is never closed; a `with` block would be
# safer — left untouched here.
template = open("html/index.html", "r")
page_html = template.read()
page_html = htmlmin.minify(page_html, remove_empty_space=True)
print("Loaded Default Theme.")
# Regex matching {{placeholder}} attributes embedded in the template.
exp = r"{{\w+}}"
attributes = re.findall(exp, page_html)
# Keep a copy with the braces intact; it is needed for the replacement pass.
r_attributes = attributes[:]
print()
print("Detected following attributes")
# Strip the surrounding '{{' and '}}' to get the bare attribute names.
for i in range(0, len(attributes)):
    attributes[i] = attributes[i][2:-2]
    print(str(i + 1) + ". " + attributes[i])

# Prompt the user for one value per detected attribute, in order.
values = []
print("Input Values for new certificate")
for i in range(0, len(attributes)):
    inp = input(attributes[i] + " : ")
    values.append(inp)

# Substitute every {{placeholder}} occurrence with the supplied value.
for i in range(0, len(values)):
    page_html = page_html.replace(r_attributes[i], values[i])

print(len(page_html))

receiver = input("Enter Receiver's Address: ")
コード例 #48
0
    def minify_and_render(self, content: typing.Any) -> bytes:
        """Minify string content before rendering; pass other types through."""
        if not isinstance(content, str):
            return original_render(self, content)
        compact = htmlmin.minify(content, **minification_config)
        return original_render(self, compact)
コード例 #49
0
ファイル: command.py プロジェクト: vbstreetz/mythx-cli
def render(
    ctx,
    target: str,
    user_template: str,
    aesthetic: bool,
    markdown: bool,
    min_severity: Optional[str],
    swc_blacklist: Optional[List[str]],
    swc_whitelist: Optional[List[str]],
) -> None:
    """Render an analysis job or group report as HTML.

    \f
    :param ctx: Click context holding group-level parameters
    :param target: Group or analysis ID to fetch the data for
    :param user_template: User-defined template string
    :param aesthetic: DO NOT TOUCH IF YOU'RE BORING
    :param markdown: Flag to render a markdown report
    :param min_severity: Ignore SWC IDs below the designated level
    :param swc_blacklist: A comma-separated list of SWC IDs to ignore
    :param swc_whitelist: A comma-separated list of SWC IDs to include
    """

    client: Client = ctx["client"]
    # normalize target
    target = target.lower()
    default_template = DEFAULT_MD_TEMPLATE if markdown else DEFAULT_HTML_TEMPLATE
    # enables user to include library templates in their own
    template_dirs = [default_template.parent]

    if user_template:
        LOGGER.debug(f"Received user-defined template at {user_template}")
        user_template = Path(user_template)
        template_name = user_template.name
        template_dirs.append(user_template.parent)
    else:
        LOGGER.debug(f"Using default template {default_template.name}")
        template_name = default_template.name

    # Jinja whitespace-control options shared by HTML and markdown output.
    # (The original code rebuilt this identical dict a second time inside an
    # ``if not markdown:`` branch; that dead duplicate was removed.)
    env_kwargs = {
        "trim_blocks": True,
        "lstrip_blocks": True,
        "keep_trailing_newline": True,
    }
    # The "aesthetic" easter-egg template only applies to HTML reports.
    if not markdown and aesthetic:
        LOGGER.debug(f"Overwriting template to go A E S T H E T I C")
        template_name = "aesthetic.html"

    LOGGER.debug("Initializing Jinja environment")
    env = jinja2.Environment(
        loader=jinja2.FileSystemLoader(template_dirs), **env_kwargs
    )
    template = env.get_template(template_name)

    # Collected (status, detected issues, analysis input) triples, one per job.
    issues_list: List[
        Tuple[
            AnalysisStatusResponse,
            DetectedIssuesResponse,
            Optional[AnalysisInputResponse],
        ]
    ] = []
    if len(target) == 24:
        # 24-character IDs denote analysis groups: page through all members.
        LOGGER.debug(f"Identified group target {target}")
        list_resp = client.analysis_list(group_id=target)
        offset = 0

        LOGGER.debug(f"Fetching analyses in group {target}")
        while len(list_resp.analyses) < list_resp.total:
            offset += len(list_resp.analyses)
            list_resp.analyses.extend(
                client.analysis_list(group_id=target, offset=offset)
            )

        for analysis in list_resp.analyses:
            click.echo("Fetching report for analysis {}".format(analysis.uuid), err=True)
            status, resp, inp = get_analysis_info(
                client=client,
                uuid=analysis.uuid,
                min_severity=min_severity,
                swc_blacklist=swc_blacklist,
                swc_whitelist=swc_whitelist,
            )
            issues_list.append((status, resp, inp))
    elif len(target) == 36:
        # 36-character IDs are single analysis job UUIDs.
        LOGGER.debug(f"Identified analysis target {target}")
        click.echo("Fetching report for analysis {}".format(target), err=True)
        status, resp, inp = get_analysis_info(
            client=client,
            uuid=target,
            min_severity=min_severity,
            swc_blacklist=swc_blacklist,
            swc_whitelist=swc_whitelist,
        )
        issues_list.append((status, resp, inp))
    else:
        LOGGER.debug(f"Could not identify target with length {len(target)}")
        raise click.UsageError(
            "Invalid target. Please provide a valid group or analysis job ID."
        )

    LOGGER.debug(f"Rendering template for {len(issues_list)} issues")
    rendered = template.render(issues_list=issues_list, target=target)
    if not markdown:
        LOGGER.debug(f"Minifying HTML report")
        rendered = htmlmin.minify(rendered, remove_comments=True)

    write_or_print(rendered, mode="w+")
コード例 #50
0
def create_calculator_page(calculator_name):
    """Build the static calculator page for *calculator_name*.

    Reads resource_lists/<name>/resources.yaml, packs item images, renders
    the core/calculator.html template, and writes a minified index.html into
    output/<name>/.  Skips the build entirely when no input is newer than
    the existing output.
    """
    calculator_folder = os.path.join("output", calculator_name)
    source_folder = os.path.join("resource_lists", calculator_name)
    if not os.path.exists(calculator_folder):
        os.makedirs(calculator_folder)
    else:
        # Incremental build: rebuild only if any source (resources, core
        # library, or this build script) is newer than the oldest output.
        oldest_output = get_oldest_modified_time(calculator_folder)
        newest_resource = get_newest_modified_time(source_folder)
        newest_corelib = get_newest_modified_time("core")
        newest_build_script = os.path.getctime("build.py")
        if oldest_output > max(newest_resource, newest_corelib, newest_build_script):
            print("Skipping", calculator_name, "Nothing has changed since the last build")
            return

    print("Generating", calculator_name, "into", calculator_folder)

    # Create a packed image of all the item images
    image_width, image_height, resource_image_coordinates = create_packed_image(calculator_name)

    # Load in the yaml resources file
    with open(os.path.join("resource_lists", calculator_name, "resources.yaml"), 'r', encoding="utf_8") as f:
        yaml_data = ordered_load(f, yaml.SafeLoader)

    resources = yaml_data["resources"]

    authors = yaml_data["authors"]

    recipe_types = yaml_data["recipe_types"]

    # Stack size data is optional in the YAML file.
    stack_sizes = None
    if "stack_sizes" in yaml_data:
        stack_sizes = yaml_data["stack_sizes"]

    default_stack_size = None
    if "default_stack_size" in yaml_data:
        default_stack_size = yaml_data["default_stack_size"]

    # run some sanity checks on the resources
    lint_resources(calculator_name, resources, recipe_types)
    # TODO: Add linting for stack sizes here
    recipe_type_format_js = uglify_js_string(generate_recipe_type_format_js(calculator_name, recipe_types))

    recipe_js = mini_js_data(get_recipes_only(resources))

    html_resource_data = generate_resource_html_data(resources)

    item_styles = generate_resource_offset_classes(resources, resource_image_coordinates)

    # Generate some css to allow us to center the list
    content_width_css = generate_content_width_css(image_width, yaml_data)

    stack_sizes_json = json.dumps(stack_sizes)

    # Generate the calculator from a template
    env = Environment(loader=FileSystemLoader('core'))
    template = env.get_template("calculator.html")
    output_from_parsed_template = template.render(
        # A simplified list used for creating the item selector HTML
        resources=html_resource_data,
        # the javascript/json object used for calculations
        recipe_json=recipe_js,
        # The size and positions of the image
        item_width=image_width,
        item_height=image_height,
        item_styles=item_styles,
        # The name of the calculator
        resource_list=calculator_name,
        # Javascript formatting functions for recipe instructions # TODO this should be made into format strings to save space
        recipe_type_format_js=recipe_type_format_js,
        # The list of authors and emails to display in the authors sections
        authors=authors,
        # Additional CSS to center the list when resizing
        content_width_css=content_width_css,
        # Used to build the stack size selector UI
        stack_sizes=stack_sizes,
        default_stack_size=default_stack_size,
        # Used to do calculations to divide counts into stacks
        stack_sizes_json=stack_sizes_json)

    # Minify the rendered page before writing it out.
    minified = htmlmin.minify(output_from_parsed_template, remove_comments=True, remove_empty_space=True)

    with open(os.path.join(calculator_folder, "index.html"), "w", encoding="utf_8") as f:
        f.write(minified)

    # Sanity Check Warning, is there an image that does not have a recipe
    simple_resources = [x["simplename"] for x in html_resource_data]
    for simple_name in resource_image_coordinates:
        if simple_name not in simple_resources:
            print("WARNING:", simple_name, "has an image but no recipe and will not appear in the calculator")
コード例 #51
0
ファイル: html.py プロジェクト: Myzel394/django-common-utils
 def minify_html(html: str, opts: Optional[Kwargs] = None) -> str:
     """Minify *html*, falling back to the project-wide default options."""
     effective_opts = HTMLOptimizerDefault.minify_opts if opts is None else opts
     return htmlmin.minify(html, **effective_opts)
コード例 #52
0
ファイル: bs_Html2Json.py プロジェクト: gawema/bs-rgx-tests
        json[el.name] = {'attributes': el.attrs}
        json[el.name] = convetToJson(el.contents, json[el.name])
        return json[el.name]
    elif len(el):
        elements = []
        text = ''
        for el in el:
            if type(el) == Element.Tag:
                value = convetToJson(el, json)
                elements.append({el.name: value})
            else:
                text += el.strip()
        json['text'] = text
        return json
    else:
        return json


# Parse the minified page and convert each <editable-paragraph> element into
# a JSON-style dict via convetToJson (defined above; misspelled name kept
# as-is because it is referenced elsewhere).
with open("index.html", 'r') as file:
    html = file.read()
    minified_html = htmlmin.minify(html)
    soup = BeautifulSoup(minified_html, "html.parser")
    components = soup.findAll("editable-paragraph")
    jComponents = []
    jComponent = {}
    for component in components:
        jComponent[component.name] = convetToJson(component)
        # Copy so later iterations don't mutate already-appended entries.
        jComponents.append(jComponent.copy())

    pprint.pprint(jComponents)
コード例 #53
0
def loop_looko2(czas):
    """Collect map-marker data for every LookO2 station.

    For each station returned by all_data(), queries PM10/PM2.5/PM1 values
    at the station's coordinates, derives colours/icons and a popup HTML
    snippet, and returns a set of (lat, long, icon, icon_colour, html)
    tuples.  *czas* is presumably a timestamp rendered into the popup —
    TODO confirm against HtmlTemplates.airmonitor_sensors_html_out.
    """
    all_values = set()

    try:
        looko2_all_stations = all_data()

        # all_data() returns 0 (not an empty list) when no data is available.
        if looko2_all_stations != 0:
            result_ids_looko2 = [ids["Device"] for ids in looko2_all_stations]
            LOGGER.debug("result_ids_looko2 %s", result_ids_looko2)

            result_latitude_looko2 = [
                latitude["Lat"] for latitude in looko2_all_stations
            ]
            LOGGER.debug("result_latitude_looko2 %s", result_latitude_looko2)

            result_longitude_looko2 = [
                longitude["Lon"] for longitude in looko2_all_stations
            ]
            LOGGER.debug("result_longitude_looko2 %s", result_longitude_looko2)

            # Zip into (device_id, lat, lon) triples for iteration below.
            merged_ids_lat_long_looko2 = list(
                zip(result_ids_looko2, result_latitude_looko2,
                    result_longitude_looko2))
            LOGGER.debug("merged_ids_lat_long_looko2 %s",
                         merged_ids_lat_long_looko2)

            for row_looko2 in merged_ids_lat_long_looko2:
                lat = row_looko2[1]
                long = row_looko2[2]

                try:
                    # Query each particulate reading at this station's
                    # coordinates.  "Brak danych" is Polish for "no data".
                    pm10_points_value_looko2 = query("pm10", lat, long)
                    pm10_points_value_looko2 = points_value(
                        pm10_points_value_looko2)

                    if pm10_points_value_looko2 != 0 and pm10_points_value_looko2 != "Brak danych":
                        LOGGER.debug("Look02 PM10 %s",
                                     pm10_points_value_looko2)
                        pass

                    pm25_points_value_looko2 = query("pm25", lat, long)
                    pm25_points_value_looko2 = points_value(
                        pm25_points_value_looko2)
                    if pm25_points_value_looko2 != 0 and pm25_points_value_looko2 != "Brak danych":
                        LOGGER.debug("Look02 PM25 %s",
                                     pm25_points_value_looko2)
                        pass

                    pm1_points_value_looko2 = query("pm1", lat, long)
                    pm1_points_value_looko2 = points_value(
                        pm1_points_value_looko2)
                    if pm1_points_value_looko2 != 0 and pm1_points_value_looko2 != "Brak danych":
                        LOGGER.debug("Look02 PM1 %s", pm1_points_value_looko2)
                        pass

                    # Percentages use fixed multipliers — presumably scaling
                    # against the Polish norm limits (50 µg/m³ for PM10,
                    # 25 µg/m³ for PM2.5); TODO confirm.
                    returned_value_from_custom_sensors_pm10_looko2 = float(
                        pm10_points_value_looko2)
                    pm10_points_percentage_looko2 = float(
                        pm10_points_value_looko2) * 2
                    returned_value_from_custom_sensors_pm25_looko2 = float(
                        pm25_points_value_looko2)
                    pm25_points_percentage_looko2 = float(
                        pm25_points_value_looko2) * 4
                    returned_value_from_custom_sensors_pm1_looko2 = float(
                        pm1_points_value_looko2)

                    # Only emit a marker when at least one reading is nonzero.
                    if (returned_value_from_custom_sensors_pm10_looko2 != 0
                        ) or (returned_value_from_custom_sensors_pm25_looko2 !=
                              0):
                        LOGGER.debug(
                            "Look02 Calculated values %s %s %s %s ",
                            returned_value_from_custom_sensors_pm10_looko2,
                            pm10_points_percentage_looko2,
                            returned_value_from_custom_sensors_pm25_looko2,
                            pm25_points_percentage_looko2,
                        )

                        # Map percentages to display colours and marker icon.
                        font_colour_pm10 = pins(pm10_points_percentage_looko2)
                        font_colour_pm25 = pins(pm25_points_percentage_looko2)
                        map_icon_colour = map_pins(
                            pm10_points_percentage_looko2,
                            pm25_points_percentage_looko2)
                        icon = map_icon_colour[0]
                        icon_colour = map_icon_colour[1]

                        # Stringify everything for the HTML template.
                        font_colour_pm10 = str(font_colour_pm10[0])
                        font_colour_pm25 = str(font_colour_pm25[0])
                        lat = str(row_looko2[1])
                        long = str(row_looko2[2])
                        pm10_points_percentage = str(
                            int(pm10_points_percentage_looko2))
                        pm10_points = str(
                            int(returned_value_from_custom_sensors_pm10_looko2)
                        )
                        pm25_points_percentage = str(
                            int(pm25_points_percentage_looko2))
                        pm25_points = str(
                            int(returned_value_from_custom_sensors_pm25_looko2)
                        )
                        pm1_points = str(
                            int(returned_value_from_custom_sensors_pm1_looko2))

                        html_looko2 = HtmlTemplates.airmonitor_sensors_html_out(
                            CZAS=czas,
                            font_colour_pm10=font_colour_pm10,
                            font_colour_pm25=font_colour_pm25,
                            lat=lat,
                            long=long,
                            pm10_points_percentage=pm10_points_percentage,
                            returned_value_from_custom_sensors_pm10=pm10_points,
                            pm25_points_percentage=pm25_points_percentage,
                            returned_value_from_custom_sensors_pm25=pm25_points,
                            returned_value_from_custom_sensors_pm1=pm1_points,
                            particle_sensor="LOOK02",
                        )
                        # Minify the popup HTML to keep map payloads small.
                        html_looko2 = htmlmin.minify(html_looko2,
                                                     remove_comments=True,
                                                     remove_empty_space=True)

                        single_values = (lat, long, icon, icon_colour,
                                         html_looko2)
                        all_values.add(single_values)

                except ValueError:
                    # Non-numeric reading for this station: skip it.
                    pass
        else:
            pass
    except ValueError:
        # Best-effort: any parse failure yields whatever was collected so far.
        pass
    return all_values
コード例 #54
0
def minify_html(s_html):
    """Return *s_html* minified, with comments and redundant whitespace removed."""
    return htmlmin.minify(s_html, remove_comments=True, remove_empty_space=True)
コード例 #55
0
 def render(self, template, context):
     """Render *template* with *context* and return the minified markup."""
     rendered = self.environment.get_template(template).render(context)
     return minify(rendered)
コード例 #56
0
ファイル: models.py プロジェクト: songfj/label-studio
 def read_task_from_hypertext_body(self):
     """Wrap this file's minified hypertext content as a single task dict."""
     logger.debug('Read 1 task from hypertext file {}'.format(self.filepath))
     raw = self.content
     minified = htmlmin.minify(raw.decode('utf8'), remove_all_empty_space=True)
     return [{'data': {settings.DATA_UNDEFINED_NAME: minified}}]
コード例 #57
0
ファイル: views.py プロジェクト: vinokurov/salty_tickets
def register_checkout(event_key, validate='novalidate'):
    """Handle the registration checkout step for *event_key*.

    When *validate* is 'validate' the form is fully validated; otherwise
    only submission is checked.  Returns a JSON payload with rendered HTML
    fragments, Stripe properties, and error/checkout flags for the
    front-end.
    """
    event = Event.query.filter_by(event_key=event_key).first()
    # create_event_form returns a form *class*; the trailing () instantiates it.
    form = create_event_form(event)()

    return_dict = dict(errors={})

    if validate == 'validate':
        form_check = form.validate_on_submit
    else:
        form_check = form.is_submitted

    if form_check():
        # Mind The Shag 2018 has its own signup form with a custom controller.
        if event_key == 'mind_the_shag_2018':
            form_controller = MtsSignupFormController(form)
            return_dict['signup_form_html'] = minify(render_template(
                'events/mind_the_shag_2018/mts_signup_form.html',
                event=event,
                form=form,
                config=config,
                form_controller=form_controller),
                                                     remove_comments=True,
                                                     remove_empty_space=True)
        registration = get_registration_from_form(form)
        registration.event_id = event.id
        partner_registration = get_partner_registration_from_form(form)
        partner_registration.event_id = event.id
        if event_key == 'mind_the_shag_2018':
            user_order = mts_get_order_for_event(event, form, registration,
                                                 partner_registration)
        else:
            user_order = get_order_for_event(event, form, registration,
                                             partner_registration)
        return_dict['stripe'] = get_stripe_properties(event, user_order, form)
        # Secret passphrase in the comment field disables Stripe payment.
        if form.comment.data and form.comment.data.lower().strip() in [
                'sunny side of the street'
        ]:
            return_dict['stripe'] = 0
        order_summary_controller = OrderSummaryController(user_order)
        return_dict['order_summary_html'] = minify(render_template(
            'order_summary.html',
            order_summary_controller=order_summary_controller),
                                                   remove_comments=True,
                                                   remove_empty_space=True)
        return_dict['validated_partner_tokens'] = get_validated_partner_tokens(
            form)
        # Checkout is disabled when the order contains no products.
        return_dict['disable_checkout'] = user_order.order_products.count(
        ) == 0
        return_dict['order_summary_total'] = price_filter(
            order_summary_controller.total_to_pay)
        print(form.name.data, request.remote_addr,
              [(payment_item.product.name, price_filter(
                  payment_item.amount), payment_item.product.status)
               for payment_item in order_summary_controller.payment_items])
    else:
        # Validation failed: return the rendered error list instead.
        form_errors_controller = FormErrorController(form)
        return_dict['order_summary_html'] = render_template(
            'form_errors.html', form_errors=form_errors_controller)
        return_dict['errors'] = {
            v: k
            for v, k in form_errors_controller.errors
        }
        return_dict['disable_checkout'] = True
    return jsonify(return_dict)
コード例 #58
0
ファイル: views.py プロジェクト: vinokurov/salty_tickets
def register_checkout_vue(event_key, validate='novalidate'):
    """Checkout endpoint for the Vue front end.

    Validates (or merely acknowledges) the submitted signup form for the
    event identified by ``event_key`` and returns a JSON payload with the
    rendered order summary HTML, Stripe properties, validated partner
    tokens and any form errors.

    Args:
        event_key: URL key used to look up the Event record.
        validate: 'validate' to run full form validation (a real checkout
            attempt); any other value (default 'novalidate') only checks
            that the form was submitted, for live order-summary refreshes.

    Returns:
        A Flask JSON response serializing the assembled ``return_dict``.
    """
    event = Event.query.filter_by(event_key=event_key).first()
    form = create_event_form(event)()

    return_dict = dict(errors={})

    # Full validation only on an actual checkout request; otherwise just
    # check the form was submitted so the summary can refresh while editing.
    if validate == 'validate':
        form_check = form.validate_on_submit
    else:
        form_check = form.is_submitted

    if form_check():
        # Placeholder describing a pre-existing registration (token flow).
        reg_dict = {
            'token_valid': None,
            'registration_name': None,
            'registration_email': None,
            'partner_registration_name': None,
            'partner_registration_email': None,
            'order_products': [],
        }
        if form.registration_token.data:
            # Token-based re-registration is not implemented yet; the
            # intended logic is preserved below as commented-out code.
            raise NotImplementedError
            # try:
            #     reg_token_helper = RegistrationToken()
            #     registration = reg_token_helper.deserialize(form.registration_token.data)
            #     reg_dict['token_valid'] = True
            #     reg_dict['registration_name'] = registration.name
            #     reg_dict['registration_email'] = registration.email
            #     # reg_dict['order_products'] =
            #     partner_registration = MtsSignupFormController.get_regular_partner_registration(registration)
            #     if partner_registration:
            #         reg_dict['partner_registration_name'] = partner_registration.name
            #         reg_dict['partner_registration_email'] = partner_registration.email
            #
            # except:
            #     reg_dict['token_valid'] = False
            #     registration = get_registration_from_form(form)
            #     registration.event_id = event.id
            #     partner_registration = get_partner_registration_from_form(form)
            #     partner_registration.event_id = event.id

        else:
            registration = get_registration_from_form(form)
            registration.event_id = event.id
            partner_registration = get_partner_registration_from_form(form)
            partner_registration.event_id = event.id

        # NOTE(review): partner_registration is always bound at this point
        # (the token branch raises before reaching here), so this re-fetch
        # looks redundant — confirm before removing.
        if not partner_registration:
            partner_registration = get_partner_registration_from_form(form)
            partner_registration.event_id = event.id

        if event_key == 'mind_the_shag_2018':
            user_order = mts_get_order_for_event(event, form, registration,
                                                 partner_registration)

        return_dict['existing_registration'] = reg_dict
        # else:
        #     user_order = get_order_for_event(event, form, registration, partner_registration)
        # NOTE(review): user_order is only assigned when event_key is
        # 'mind_the_shag_2018'; any other event raises NameError on the
        # next line — the commented-out else above likely needs restoring.
        return_dict['stripe'] = get_stripe_properties(event, user_order, form)
        order_summary_controller = OrderSummaryController(user_order)
        return_dict['order_summary_html'] = minify(render_template(
            'order_summary.html',
            order_summary_controller=order_summary_controller),
                                                   remove_comments=True,
                                                   remove_empty_space=True)
        return_dict['validated_partner_tokens'] = get_validated_partner_tokens(
            form)
        return_dict['disable_checkout'] = user_order.order_products.count(
        ) == 0
        return_dict['order_summary_total'] = price_filter(
            order_summary_controller.total_to_pay)

        # adding validate form errors to the response to get the CSRF error early
        if not form.validate():
            form_errors_controller = FormErrorController(form)
            return_dict['errors'] = {
                v: k
                for v, k in form_errors_controller.errors
            }

        if event_key == 'mind_the_shag_2018':
            form_controller = MtsSignupFormController(form)
            return_dict['state_data'] = form_controller.get_state_dict(event)

            if return_dict['state_data']['group'].get('group_token_error'):
                return_dict['errors']['Group token'] = return_dict[
                    'state_data']['group'].get('group_token_error')

            if return_dict['state_data']['group'].get('group_new_error'):
                return_dict['errors']['New group'] = return_dict['state_data'][
                    'group'].get('group_new_error')

            # NOTE(review): '******' looks redacted in this source; '%'
            # formatting against a string with no conversion specifier
            # raises TypeError — the original message needs restoring.
            if form_controller.not_enough_stations_selected:
                return_dict['errors'][
                    'Full weekend pass'] = '******' % form_controller.selected_stations_count

        if validate == 'validate' and not return_dict['errors']:
            return_dict['checkout_success'] = True
        # Debug/audit trail: who checked out what, from which address.
        print(form.name.data, request.remote_addr,
              [(payment_item.product.name, price_filter(
                  payment_item.amount), payment_item.product.status)
               for payment_item in order_summary_controller.payment_items])
    else:
        # Form not submitted/invalid: render the error list instead of a summary.
        form_errors_controller = FormErrorController(form)
        return_dict['order_summary_html'] = render_template(
            'form_errors.html', form_errors=form_errors_controller)
        return_dict['errors'] = {
            v: k
            for v, k in form_errors_controller.errors
        }
        return_dict['disable_checkout'] = True
    return jsonify(return_dict)
Code example #59
0
File: main.py  Project: yiouyou/BaiduSpider
    def search_web(self, query: str, pn: int = 1) -> dict:
        r"""Baidu web search.

        - Simple search:
            >>> BaiduSpider().search_web('query')
            {
                'results': [
                    {
                        'result': int, total number of search results,
                        'type': 'total'  # 'type' distinguishes result categories
                    },
                    {
                        'results': [
                            'str, related search suggestion',
                            '...',
                            '...',
                            '...',
                            ...
                        ],
                        'type': 'related'
                    },
                    {
                        'process': 'str, arithmetic expression',
                        'result': 'str, computed value',
                        'type': 'calc'
                        # only present when the query involves arithmetic
                    },
                    {
                        'results': [
                            {
                                'author': 'str, news source',
                                'time': 'str, publication time',
                                'title': 'str, news title',
                                'url': 'str, news link'
                            },
                            { ... },
                            { ... },
                            { ... },
                            ...
                        ],
                        'type': 'news'
                        # only present when the query has related news
                    },
                    {
                        'results': [
                            {
                                'cover': 'str, video cover image URL',
                                'origin': 'str, video source',
                                'length': 'str, video duration',
                                'title': 'str, video title',
                                'url': 'str, video link'
                            },
                            { ... },
                            { ... },
                            { ... },
                            ...
                        ],
                        'type': 'video'
                        # only present when the query has related videos
                    },
                    {
                        'result': {
                                'cover': 'str, Baike cover image/video URL',
                                'cover-type': 'str, "image" or "video"',
                                'des': 'str, Baike summary',
                                'title': 'str, Baike title',
                                'url': 'str, Baike link'
                        },
                        'type': 'baike'
                        # only present when the query has a Baidu Baike entry
                    },
                    {
                        'des': 'str, result summary',
                        'origin': 'str, result source (domain or site name)',
                        'time': 'str, result publication time',
                        'title': 'str, result title',
                        'type': 'result',  # an ordinary search result
                        'url': 'str, result link'
                    },
                    { ... },
                    { ... },
                    { ... },
                    ...
                ],
                'total': int, total number of result pages; may change as the current page changes
            }

        - With a page number:
            >>> BaiduSpider().search_web('query', pn=2)
            {
                'results': [ ... ],
                'total': ...
            }

        Args:
            query (str): the query to crawl.
            pn (int, optional): page number to crawl. Defaults to 1.

        Returns:
            dict: crawl results and metadata.
        """
        text = quote(query, 'utf-8')
        url = 'https://www.baidu.com/s?&wd=%s&pn=%d' % (text, (pn - 1) * 10)
        # Fetch the response
        response = requests.get(url, headers=self.headers)
        text = bytes(response.text, response.encoding).decode('utf-8')
        soup = BeautifulSoup(self._minify(text), 'html.parser')
        # Try to read the total number of search results
        # NOTE(review): bare except silently maps any parse failure to 0.
        try:
            num = int(
                str(soup.find('span', class_='nums_text').text).strip(
                    '百度为您找到相关结果约').strip('个').replace(',', ''))
        except:
            num = 0
        # Look for the calculator widget
        calc = soup.find('div', class_='op_new_cal_screen')
        # Pre-results (calculator, related searches, news, videos, Baike)
        pre_results = []
        # Pre-process related searches
        try:
            _related = soup.find('div', id='rs').find('table').find_all('th')
        except:
            _related = []
        related = []
        # Pre-process news
        news = soup.find('div',
                         class_='result-op',
                         tpl='sp_realtime_bigpic5',
                         srcid='19')
        # Check whether a news block exists
        try:
            news_title = self._format(
                news.find('h3', class_='t').find('a').text)
        except:
            news_title = None
            news_detail = []
        else:
            news_rows = news.findAll('div', class_='c-row')
            news_detail = []
            for row in news_rows:
                # News rows may or may not carry the extra fields, so the
                # missing-anchor case is caught with try-except
                try:
                    row_title = self._format(row.find('a').text)
                except AttributeError:
                    continue
                else:
                    row_time = self._format(
                        row.find('span', style='color:#666;float:right').text)
                    row_author = self._format(
                        row.find('span', style='color:#008000').text)
                    row_url = self._format(row.find('a')['href'])
                    news_detail.append({
                        'title': row_title,
                        'time': row_time,
                        'author': row_author,
                        'url': row_url
                    })
        # Pre-process short videos
        video = soup.find('div', class_='op-short-video-pc')
        if video:
            video_rows = video.findAll('div', class_='c-row')
            video_results = []
            for row in video_rows:
                row_res = []
                videos = row.findAll('div', class_='c-span6')
                for v in videos:
                    v_link = v.find('a')
                    v_title = v_link['title']
                    v_url = self._format(v_link['href'])
                    v_img = v_link.find('img')['src']
                    v_len = self._format(
                        v.find('div',
                               class_='op-short-video-pc-duration-wrap').text)
                    v_from = self._format(
                        v.find('div', class_='op-short-video-pc-clamp1').text)
                    row_res.append({
                        'title': v_title,
                        'url': v_url,
                        'cover': v_img,
                        'length': v_len,
                        'origin': v_from
                    })
                video_results += row_res
        else:
            video_results = []
        # Append related searches one by one
        for _ in _related:
            if _.text:
                related.append(_.text)
        # Pre-process the Baike (encyclopedia) card
        baike = soup.find('div', class_='c-container', tpl='bk_polysemy')
        if baike:
            b_title = self._format(baike.find('h3').text)
            b_url = baike.find('a')['href']
            b_des = self._format(
                baike.find('div', class_='c-span-last').find('p').text)
            try:
                b_cover = baike.find('div',
                                     class_='c-span6').find('img')['src']
                b_cover_type = 'image'
            except (TypeError, AttributeError):
                try:
                    b_cover = baike.find(
                        'video', class_='op-bk-polysemy-video')['data-src']
                    b_cover_type = 'video'
                except TypeError:
                    b_cover = None
                    b_cover_type = None
            baike = {
                'title': b_title,
                'url': b_url,
                'des': b_des,
                'cover': b_cover,
                'cover-type': b_cover_type
            }
        # Load the total result count
        if num != 0:
            pre_results.append(dict(type='total', result=num))
        # Load the calculator result
        if calc:
            pre_results.append(
                dict(type='calc',
                     process=str(
                         calc.find('p',
                                   class_='op_new_val_screen_process').find(
                                       'span').text),
                     result=str(
                         calc.find('p',
                                   class_='op_new_val_screen_result').find(
                                       'span').text)))
        # Load related searches
        if related:
            pre_results.append(dict(type='related', results=related))
        # Load news
        if news_detail:
            pre_results.append(dict(type='news', results=news_detail))
        # Load short videos
        if video_results:
            pre_results.append(dict(type='video', results=video_results))
        # Load Baike
        if baike:
            pre_results.append(dict(type='baike', result=baike))
        # Narrow the parsed HTML to the main results column
        try:
            soup = BeautifulSoup(
                minify(soup.find_all(id='content_left')[0].prettify(),
                       remove_all_empty_space=True), 'html.parser')
        # Error handling: no results column at all
        except IndexError:
            return {'results': None, 'total': None}
        # NOTE(review): the narrowed soup above is immediately discarded —
        # this line re-parses the full response; confirm that is intended.
        results = BeautifulSoup(self._minify(response.text),
                                'html.parser').findAll(class_='c-container')
        res = []
        for result in results:
            des = None
            soup = BeautifulSoup(self._minify(result.prettify()),
                                 'html.parser')
            # Link
            href = soup.find_all('a', target='_blank')[0].get('href').strip()
            # Title
            title = self._format(soup.find_all('a', target='_blank')[0].text)
            # Time
            try:
                time = self._format(
                    soup.find_all('div', class_='c-abstract')[0].find(
                        'span', class_='c-color-gray2').text)
            except (AttributeError, IndexError):
                time = None
            try:
                # Summary
                des = soup.find_all('div', class_='c-abstract')[0].text
                soup = BeautifulSoup(result.prettify(), 'html.parser')
                # NOTE(review): str.lstrip strips a *character set*, not a
                # prefix — lstrip(str(time)) may over-strip the summary;
                # str.removeprefix would be safer.
                des = self._format(des).lstrip(str(time)).strip()
            except IndexError:
                try:
                    des = des.replace('\n', '')
                except (UnboundLocalError, AttributeError):
                    des = None
            # Baidu result links are encrypted, so resolving each one would
            # need an extra request per result.
            # Link analysis is skipped here for performance reasons.
            # if href is not None:
            #     try:
            #         # 1-second timeout here for performance reasons
            #         r = requests.get(href, timeout=1)
            #         href = r.url
            #     except:
            #         # Failed to fetch the page; fall back to the encrypted link
            #         href = href
            #     # Analyse the link
            #     if href:
            #         parse = urlparse(href)
            #         domain = parse.netloc
            #         prepath = parse.path.split('/')
            #         path = []
            #         for loc in prepath:
            #             if loc != '':
            #                 path.append(loc)
            #     else:
            #         domain = None
            #         path = None
            try:
                is_not_special = result['tpl'] not in [
                    'short_video_pc', 'sp_realtime_bigpic5', 'bk_polysemy'
                ]
            except KeyError:
                is_not_special = False
            if is_not_special:  # make sure this is not a special-type result
                # Extract the visible domain, trying several layouts
                try:
                    domain = self._format(
                        result.find('div', class_='c-row').find(
                            'div', class_='c-span-last').find(
                                'div', class_='se_st_footer').find(
                                    'a', class_='c-showurl').text)
                except Exception as error:
                    try:
                        domain = self._format(
                            result.find('div', class_='c-row').find(
                                'div', class_='c-span-last').find(
                                    'p', class_='op-bk-polysemy-move').find(
                                        'span', class_='c-showurl').text)
                    except Exception as error:
                        try:
                            domain = self._format(
                                result.find('div', class_='se_st_footer').find(
                                    'a', class_='c-showurl').text)
                        except:
                            domain = None
            else:
                domain = None
            # Append the result
            if title and href and is_not_special:
                res.append({
                    'title': title,
                    'des': des,
                    'origin': domain,
                    'url': href,
                    'time': time,
                    'type': 'result'
                })
        soup = BeautifulSoup(text, 'html.parser')
        soup = BeautifulSoup(
            soup.find_all('div', id='page')[0].prettify(), 'html.parser')
        # Pagination
        pages_ = soup.find_all('span', class_='pc')
        pages = []
        for _ in pages_:
            pages.append(int(_.text))
        # Assemble the final result
        result = pre_results
        result.extend(res)
        # NOTE(review): max(pages) raises ValueError when no pagination
        # links are found — confirm callers tolerate that.
        return {
            'results': result,
            # maximum page number
            'total': max(pages)
        }
Code example #60
0
def run(languages, styles=None) -> str:
    """
    Build the Nginx config snippet that serves highlighted source listings.

    :param languages: A dictionary with language-key -> nginx regex value
    :param styles: A list of styles, or None for all.
    :return A complete Nginx config segment. Use inside a server block or an include file.

    :type languages: Dict[str, str]
    :type styles: Optional[Iterable[str]]
    :rtype str
    """
    jquery_version, jquery_files = get_cdn_files('jquery')
    highlight_version, highlight_files = get_cdn_files('highlight.js')
    line_numbers_version, line_numbers_files = get_cdn_files('highlightjs-line-numbers.js')

    logging.info('jquery v%s: %r', jquery_version, jquery_files)
    logging.info('highlight v%s: %r', highlight_version, highlight_files)
    logging.info('line_numbers v%s: %r', line_numbers_version, line_numbers_files)

    # Cut off 'languages/' and '.min.js'
    possible_languages = {x[10:-7] for x in highlight_files if x.startswith('languages/')}
    # Cut off 'styles/' and '.min.css'
    possible_styles = {x[7:-8] for x in highlight_files if x.startswith('styles/')}

    logging.info('Possible languages: %r', possible_languages)
    logging.info('Possible styles: %r', possible_styles)

    if styles is None:
        styles = sorted(possible_styles)
        # Pin 'default' to the front so it becomes the initial stylesheet.
        # Guard the removal: if the CDN file listing ever lacks 'default',
        # an unguarded list.remove would raise ValueError.
        if 'default' in styles:
            styles.remove('default')
            styles.insert(0, 'default')

    missing_languages = set(languages.keys()) - possible_languages
    missing_styles = set(styles) - possible_styles

    if missing_languages:
        logging.warning('!!! Missing languages: %r', missing_languages)
    if missing_styles:
        logging.warning('!!! Missing styles: %r', missing_styles)

    # (library name, version, entry file) triples for the <script> tags.
    scripts = [
        ('jquery', jquery_version, 'jquery.min.js'),
        ('highlight.js', highlight_version, 'highlight.min.js'),
        ('highlightjs-line-numbers.js', line_numbers_version, 'highlightjs-line-numbers.min.js'),
    ]

    logging.info('Creating the HTML...')
    template = string.Template(MAGIC_HTML)
    html = template.substitute(
        css=MINIFIED_CSS,
        js=MINIFIED_JS,
        styles=json.dumps(styles, separators=(',', ':')),
        scripts='\n    '.join(create_script_tag(lib, v, file) for lib, v, file in scripts),
        highlight_version=highlight_version,
    )
    # logging.info('HTML output: \n%s', html)

    # The HTML ends up inside a single-quoted nginx `return 200 '...'`
    # directive, so a literal single quote would break the config.
    if '\'' in html:
        raise ValueError('Single quotes in the HTML. This is a problem!')

    # todo: Ideally add a check here for any $ that is not $uri or $url...

    old_size = len(html)
    html = htmlmin.minify(html, remove_comments=True, remove_empty_space=True, reduce_boolean_attributes=True, remove_optional_attribute_quotes=True)
    new_size = len(html)
    logging.info('Minified HTML: %d -> %d', old_size, new_size)
    # Nginx limits the size of a single configuration line.
    if new_size > 4096-20:  # -20 for the rest of the option line.
        raise ValueError('Minified HTML longer than 4096-20 characters. Nginx will not load it.')

    # One `location` block per requested language; `?raw=1` bypasses highlighting.
    location_gen = ('location ~* %s { if ($arg_raw) {break;} set $lang %s; try_files @highlight @highlight; }' % (regex, language)
                    for language, regex in languages.items() if language not in missing_languages)

    logging.info('Creating the config snippet...')
    return '\n'.join(filter(None, (
        '# NginxSourceViewer',
        '# -----------------',
        '# Requested languages: ' + ', '.join('%s: %s' % (k, v) for k, v in languages.items()),
        '# Requested styles: ' + ', '.join(styles),
        '# Missing languages: ' + (', '.join('%s: %s' % (k, v) for k, v in languages.items() if k in missing_languages) if missing_languages else 'None'),
        '# Missing styles: ' + (', '.join(missing_styles) if missing_styles else 'None'),
        *location_gen,
        'location @highlight {',
        '    if (!-f $request_filename) {',
        '        return 404;',
        '    }',
        '    charset UTF-8;',
        '    override_charset on;',
        '    source_charset UTF-8;',
        '    default_type text/html;',
        '    add_header Content-Type text/html;',
        '    return 200 \'%s\';' % html,
        '}',
    )))