Example #1
def update_num_comment(action, boker_key, current_user):
    """ Update number of comment per Boker """

    boker = Boker.get(boker_key)
    if boker:
        if action == 'inc_comment':
            boker.num_comment += 1
            boker.put()

            # Notify Boker owner
            notify_user = True
            if current_user:
                if current_user['id'] == boker.user.id:
                    notify_user = False

            if notify_user and not settings.DEBUG:
                try:
                    boker_url = encode_url("/boker/%s" % boker.key().id())
                    graph = facebook.GraphAPI(settings.FACEBOOK_APP_ACCESS_TOKEN)
                    graph.request('%s/notifications' % boker.user.id,
                                   post_args={
                                            'href': '?to=' + boker_url,
                                            'template': 'Ada komentar baru untuk boker anda.',  # "There is a new comment on your boker."
                                            })
                except Exception as e:
                    print(e)
            else:
                logging.info('Runtask: notify new comment...')

        if action == 'dec_comment':
            if boker.num_comment > 0:
                boker.num_comment -= 1
                boker.put()
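None of these listings include `encode_url` itself. Judging from the usage here (a path is encoded, carried in a `?to=` query parameter, and reversed with `decode_url` in Example #16), a minimal stand-in might look like the sketch below; the real helpers in the original projects may differ.

from urllib.parse import quote, unquote


def encode_url(path):
    # Percent-encode everything, including '/', so the value survives
    # being embedded inside another URL's query string.
    return quote(path, safe='')


def decode_url(encoded):
    return unquote(encoded)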
Example #2
def institution_detail(iid):
    institution_cursor = g.conn.execute(text("""
    SELECT *
    FROM Institutions I
    WHERE I.iid = :iid
    """), iid=iid)
    details = institution_cursor.fetchone()
    authors_cursor = g.conn.execute(text("""
        SELECT A.aid, A.first_name, A.last_name
        FROM Authors A
        INNER JOIN Works_At WA ON WA.aid = A.aid
        INNER JOIN Institutions I ON I.iid = WA.iid
        WHERE I.iid = :iid
        ORDER BY A.last_name
        """), iid=iid)
    authors = authors_cursor.fetchall()
    p_cursor = g.conn.execute(text("""
        SELECT DISTINCT P.title, P.purl, P.number_of_citations
        FROM Papers P
        INNER JOIN Published_By PB ON PB.purl = P.purl
        INNER JOIN Authors A ON PB.aid = A.aid
        INNER JOIN Works_At WA ON WA.aid = A.aid
        INNER JOIN Institutions I ON I.iid = WA.iid
        WHERE I.iid = :iid
        ORDER BY P.number_of_citations DESC
        """), iid=iid)
    papers = []
    for p in p_cursor:
        papers.append({'title': p.title, 'purl': utils.encode_url(p.purl), 'citations': p.number_of_citations})
    context = {'details': details, 'authors': authors, 'papers': papers}
    institution_cursor.close()
    authors_cursor.close()
    p_cursor.close()
    return render_template('institution_details.html', **context)
Example #3
def search_term(search):
    # Build a SQL LIKE pattern with wildcards around and between the search terms
    string_match = '%%' + search.upper().replace(' ', '%%') + '%%'
    cursor = g.conn.execute(text("""WITH FullTable AS
          (SELECT P.purl, P.title, P.model, K.keyword, A.first_name,
          A.last_name, I.type, I.name, I.country, I.city
          FROM Papers P
          LEFT OUTER JOIN Is_Related_To IRT ON P.purl = IRT.purl
          LEFT OUTER JOIN Keywords K ON IRT.keyword = K.keyword
          LEFT OUTER JOIN Published_By PB ON P.purl = PB.purl
          LEFT OUTER JOIN Authors A ON PB.aid = A.aid
          LEFT OUTER JOIN Works_At WA ON WA.aid = A.aid
          LEFT OUTER JOIN Institutions I ON I.iid = WA.iid)
          SELECT DISTINCT FT.title, FT.purl
          FROM FullTable FT
          WHERE
          upper(FT.title) LIKE :string_match OR
          upper(FT.model) LIKE :string_match  OR
          upper(FT.keyword) LIKE :string_match OR
          upper(FT.first_name) LIKE :string_match OR
          upper(FT.last_name) LIKE :string_match OR
          upper(FT.name) LIKE :string_match OR
          upper(FT.type) LIKE :string_match OR
          upper(FT.country) LIKE :string_match OR
          upper(FT.city) LIKE :string_match;"""), string_match=string_match)
    results = []
    for r in cursor:
        results.append({'title': r.title, 'purl': utils.encode_url(r.purl)})
    return results
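For illustration only (not part of the original code), here is how the LIKE pattern above expands for a sample query:

search = 'neural network'
string_match = '%%' + search.upper().replace(' ', '%%') + '%%'
print(string_match)  # %%NEURAL%%NETWORK%%
# Consecutive '%' wildcards in SQL LIKE match the same as a single '%',
# so this finds rows containing the terms, in order, anywhere in the column.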
Example #4
def advanced():
    search_form = AdvancedSearchForm()
    if search_form.validate_on_submit():
        if search_form.title.data is not None:  # check the submitted value; the field object itself is always truthy
            cursor = g.conn.execute(text("""
      WITH FullTable AS
      (SELECT P.purl, P.title, P.model, P.number_of_citations, R.programming_language, K.keyword, A.first_name,
      A.last_name, I.type, I.name, I.country, I.city, I.street, I.street_number, I.zip, R.rdate_published, P.date_published
      FROM Papers P
      LEFT OUTER JOIN Published_On PO ON P.purl = PO.purl
      LEFT OUTER JOIN Repositories R ON PO.url = R.url
      LEFT OUTER JOIN Is_Related_To IRT ON PO.purl = IRT.purl
      LEFT OUTER JOIN Keywords K ON IRT.keyword = K.keyword
      LEFT OUTER JOIN Published_By PB ON P.purl = PB.purl
      LEFT OUTER JOIN Authors A ON PB.aid = A.aid
      LEFT OUTER JOIN Works_At WA ON WA.aid = A.aid
      LEFT OUTER JOIN Institutions I ON I.iid = WA.iid)
      SELECT DISTINCT FT.title, FT.purl, FT.programming_language, FT.rdate_published
      FROM FullTable FT
      WHERE
      upper(FT.title) LIKE '%%' || :title || '%%' AND
      upper(FT.model) LIKE '%%' || :model || '%%' AND
      FT.date_published >= :pdate AND 
      FT.number_of_citations >= :citations AND
      upper(FT.first_name) LIKE '%%' || :first || '%%' AND
      upper(FT.last_name) LIKE '%%' || :last || '%%' AND
      upper(FT.name) LIKE '%%' || :institution || '%%' AND
      FT.type IN :insttype AND
      upper(FT.country) LIKE '%%' || :instcountry || '%%' AND
      upper(FT.city) LIKE '%%' || :instcity || '%%' AND
      upper(FT.zip) LIKE '%%' || :instzip || '%%' AND
      upper(FT.street) LIKE '%%' || :inststreet || '%%' AND
      upper(FT.street_number) LIKE '%%' || :instno || '%%';
      """), title=search_form.title.data.upper(), model=search_form.model.data.upper(),
            pdate=str(
                search_form.published_year.data if search_form.published_year.data else 1900) + '01' + '01',
            citations=search_form.minimum_citations.data if search_form.minimum_citations.data else 0,
            # prog and rdate are not referenced in the SQL; the repository filters are applied in Python below
            prog=search_form.repo_programming_language.data.upper(),
            rdate=str(
                search_form.repo_published_year.data if search_form.repo_published_year.data else 1900) + '01' + '01',
            first=search_form.author_first_name.data.upper(),
            last=search_form.author_last_name.data.upper(), institution=search_form.inst_name.data.upper(),
            insttype=tuple(search_form.inst_type.data.split(' ')),
            instcountry=search_form.inst_country.data.upper(), instcity=search_form.inst_city.data.upper(),
            instzip=search_form.inst_zip.data.upper(),
            inststreet=search_form.inst_street.data.upper(), instno=search_form.inst_street_no.data.upper())
        results = []
        for r in cursor:
            if (search_form.repo_programming_language.data != '' and
                    (not r.programming_language or
                    r.programming_language.upper() != search_form.repo_programming_language.data.upper())):
                continue
            if (search_form.repo_published_year.data is not None and
                    (not r.rdate_published or
                    r.rdate_published.year < search_form.repo_published_year.data)):
                continue
            results.append({'title': r.title, 'purl': utils.encode_url(r.purl)})
        return render_template('advancedsearch.html', results=results)
    return render_template('advanced.html', form=search_form)
Example #5
async def search(query, obj_type='track', limit=5):
    if not var.spotify_token or time() > var.spotify_token_expires:
        await authorize()
    data = {'type': obj_type, 'limit': limit, 'q': query}
    headers = {'Authorization': f'Bearer {var.spotify_token}'}
    r = await request_get(encode_url(
        'https://api.spotify.com/v1/search', data=data), headers=headers)
    json = await r.json()
    return [AttrDict(track) for track in json['tracks']['items']]
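Examples #5 and #8 pass a base URL plus a `data` dict to `encode_url`, so here it acts as a query-string builder rather than the percent-encoder sketched under Example #1. A minimal sketch under that assumption:

from urllib.parse import urlencode


def encode_url(base_url, data=None):
    # Append the parameters as a percent-encoded query string.
    if not data:
        return base_url
    return '%s?%s' % (base_url, urlencode(data))

# encode_url('https://api.spotify.com/v1/search',
#            data={'type': 'track', 'limit': 5, 'q': 'hello'})
# -> 'https://api.spotify.com/v1/search?type=track&limit=5&q=hello'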
Example #6
def index(request):
    context = RequestContext(request)

    category_list = Category.objects.order_by('-likes')[:5]
    pages_list = Page.objects.order_by('-views')[:5]
    context_dict = {'categories':category_list,
                    'pages':pages_list}

    for category in category_list:
        category.url = encode_url(category.name)

    return render_to_response('rango/index.html', context_dict, context)
Example #7
async def search(obj='track', q=''):
	encoded_url = utils.encode_url(f'{api_url}/search/{obj}/', {'q': q, 'limit': 50})
	results = await (await var.session.get(encoded_url)).json()
	try:
		if obj == 'artist':
			result = [Artist(result) for result in results['data']]
		elif obj == 'album':
			result = [Album(result) for result in results['data']]
		else:
			result = [AttrDict(result) for result in results['data']]
		return result or []
	except KeyError:
		print(obj)
		return []
Example #8
	async def search(self, query, obj_type='track', limit=5):
		if self.expires_in < time():
			self.restart()
		data = {'type': obj_type, 'limit': limit, 'q': query}
		headers = {'Authorization': f'Bearer {self.token}'}
		r = await var.session.get(encode_url(
			'https://api.spotify.com/v1/search', data=data), headers=headers)
		json = await r.json(content_type=None)
		result = []
		if json['tracks']['total'] != 0:
			for item in json['tracks']['items']:
				result.append(AttrDict(item))
		return result
Example #9
def create_html_attachment_index(attachments):
    """ Creates a HTML list for a list of attachments.

    :param attachments: List of attachments.
    :returns: Attachment list as HTML.
    """
    html_content = '\n\n<h2>Attachments</h2>'
    if len(attachments) > 0:
        html_content += '<ul>\n'
        for attachment in attachments:
            relative_file_path = '/'.join(attachment['file_path'].split('/')[2:])
            relative_file_path = utils.encode_url(relative_file_path)
            html_content += '\t<li><a href="%s">%s</a></li>\n' % (relative_file_path, attachment['file_name'])
        html_content += '</ul>\n'
    return html_content
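A hypothetical usage example, assuming `utils.encode_url` leaves this simple relative path unchanged (output shown approximately):

attachments = [{'file_path': 'export/space/attachments/report.pdf',
                'file_name': 'report.pdf'}]
print(create_html_attachment_index(attachments))
# <h2>Attachments</h2><ul>
#     <li><a href="attachments/report.pdf">report.pdf</a></li>
# </ul>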
Example #10
def create_html_index(index_content):
    """ Creates an HTML index (mainly to navigate through the exported pages).

    :param index_content: Dictionary which contains file paths, page titles and their children recursively.
    :returns: Content index as HTML.
    """
    file_path = utils.encode_url(index_content['file_path'])
    page_title = index_content['page_title']
    page_children = index_content['child_pages']

    html_content = '<a href="%s">%s</a>' % (utils.sanitize_for_filename(file_path), page_title)

    if len(page_children) > 0:
        html_content += '<ul>\n'
        for child in page_children:
            html_content += '\t<li>%s</li>\n' % create_html_index(child)
        html_content += '</ul>\n'

    return html_content
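The dictionary it consumes is nested recursively. An illustrative call, assuming `utils.encode_url` and `utils.sanitize_for_filename` leave these simple names unchanged (output shown approximately):

index_content = {
    'file_path': 'start.html',
    'page_title': 'Start',
    'child_pages': [
        {'file_path': 'child.html', 'page_title': 'Child', 'child_pages': []},
    ],
}
print(create_html_index(index_content))
# <a href="start.html">Start</a><ul>
#     <li><a href="child.html">Child</a></li>
# </ul>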
Example #11
def index():
    global USER
    search_form = SearchForm()
    recommendations = recommender()
    models = g.conn.execute("""
    SELECT DISTINCT P.model FROM Papers P;
    """).fetchall()
    applications = ['security', 'finance', 'medicine', 'recognition']
    context = {
        'title': 'Home',
        'search_form': search_form,
        'user': USER['first_name'] if USER else '',
        'recommendations': recommendations,
        'models': models,
        'applications': applications,
    }
    if search_form.validate_on_submit():
        return redirect('/results/' + utils.encode_url(search_form.searchTerms.data))
    return render_template('index.html', **context)
Example #12
def like_boker(user_key, boker_key, explicitly_shared=False):
    """Like a boker"""

    user = User.get_by_key_name(user_key)
    boker = Boker.get(boker_key)
    boker_owner = boker.user

    if user and boker and not Like.already_like(user, boker):
        # Create like
        like = Like(user=user, boker=boker)
        like.put()

        # Update like numbers
        boker.num_like += 1
        boker.put()

        if not settings.DEBUG:
            # Post to FB
            try:
                boker_url = "%s/boker/%s" % (settings.APP_DOMAIN, boker.key().id())
                graph = facebook.GraphAPI(user.access_token)
                graph.request('me/og.likes',
                        post_args={'object': boker_url, 'fb:explicitly_shared': str(explicitly_shared).lower()})
            except Exception as e:
                print(e)

            # Notify Boker owner
            if user != boker_owner:
                try:
                    boker_url = encode_url("/boker/%s" % boker.key().id())
                    graph = facebook.GraphAPI(settings.FACEBOOK_APP_ACCESS_TOKEN)
                    graph.request('%s/notifications' % boker_owner.id,
                                   post_args={
                                            'href': '?to=' + boker_url,
                                            'template': '@[%s] menyukai boker anda.' % user.id,  # "@[%s] liked your boker."
                                            })
                except Exception as e:
                    print(e)

        else:
            logging.info('Runtask: post_like_story...')
Example #13
def author_detail(aid):
    cursor = g.conn.execute(text("""
    SELECT A.first_name, A.last_name, I.name as inst_name, I.iid
    FROM Authors A INNER JOIN Works_At WA ON WA.aid = A.aid
    INNER JOIN Institutions I ON I.iid = WA.iid
    WHERE A.aid = :aid
    """), aid=aid)
    details = cursor.fetchone()
    cursor = g.conn.execute(text("""
    SELECT DISTINCT P.purl, P.title, P.number_of_citations
    FROM Authors A INNER JOIN Published_By PB ON A.aid = PB.aid
    INNER JOIN Papers P ON P.purl = PB.purl
    WHERE A.aid = :aid
    ORDER BY P.number_of_citations DESC
    """), aid=aid)
    papers = []
    for p in cursor:
        new_p = {'purl': utils.encode_url(p.purl), 'title': p.title, 'citations': p.number_of_citations}
        papers.append(new_p)
    context = {'author': details, 'papers': papers}
    return render_template('author_details.html', **context)
Example #14
def recommender():
    global USER
    if not USER:
        return []
    s = text("""
    SELECT P1.title, P1.purl
    FROM Papers P1 NATURAL JOIN Is_Related_To I 
    WHERE P1.purl = I.purl AND I.keyword IN (SELECT IR.keyword FROM Is_Related_To IR WHERE IR.purl IN (SELECT P.purl
    FROM Papers P NATURAL JOIN Have_Read HR
    WHERE P.purl = HR.purl AND HR.user_name = :user_name))
    EXCEPT 
    SELECT SUB.title, SUB.purl
    FROM (SELECT * FROM Papers P2 NATURAL JOIN Have_Read HR2 
    WHERE P2.purl = HR2.purl AND HR2.user_name = :user_name
    ORDER BY HR2.date) AS SUB
    LIMIT 3;
    """)
    cursor = g.conn.execute(s, user_name=USER['user_name'])
    recommendations = []
    for r in cursor:
        recommendations.append({'title': r.title, 'purl': utils.encode_url(r.purl)})
    return recommendations
Example #15
def my_account():
    global USER
    if not USER:
        return redirect('/login')
    user_cursor = g.conn.execute(text("""
    SELECT *
    FROM Users U
    WHERE U.user_name = :username;
    """), username=USER['user_name'])
    user = user_cursor.fetchone()
    history_cursor = g.conn.execute(text("""
    SELECT P.purl, P.title, HR.date
    FROM Have_Read HR INNER JOIN Papers P ON P.purl = HR.purl
    WHERE HR.user_name = :username
    ORDER BY HR.date DESC
    """), username=USER['user_name'])
    recommendations = recommender()
    history = []
    for h in history_cursor:
        h = {'title': h.title, 'purl': utils.encode_url(h.purl), 'date': h.date}
        history.append(h)
    context = {'user': user, 'history': history, 'recommendations': recommendations}
    return render_template("my_account.html", **context)
Example #16
    def post(self):
        to = self.request.get('to', encode_url('/'))
        # self.redirect(self.uri_for('onfb') + '?next=' + to)
        self.redirect(decode_url(to))
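Round-tripping through the helpers sketched under Example #1 illustrates what this handler expects to find in `to`:

to = encode_url('/boker/42')          # '%2Fboker%2F42'
assert decode_url(to) == '/boker/42'  # redirect target restored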
Example #17
def handle_html_references(html_content,
                           page_duplicate_file_names,
                           page_file_matching,
                           depth=0):
    """ Repairs links in the page contents with local links.

    :param html_content: Confluence HTML content.
    :param page_duplicate_file_names: A dict in the structure {'<sanitized filename>': amount of duplicates}
    :param page_file_matching: A dict in the structure {'<page title>': '<used offline filename>'}
    :param depth: (optional) Hierarchy depth of the handled Confluence page.
    :returns: Fixed HTML content.
    """
    try:
        html_tree = html.fromstring(html_content)
    except ParserError:
        print('page is empty')
        return html_content
    except XMLSyntaxError:
        print(
            '%sWARNING: Could not parse HTML content of last page. Original content will be downloaded as it is.'
            % ('\t' * (depth + 1)))
        return html_content

    # Fix links to other Confluence pages
    # Example: /display/TES/pictest1
    #       => pictest1.html
    # TODO: This code does not work for "Recent space activity" areas in space pages because of a different url format.
    xpath_expr = '//a[contains(@href, "/display/")]'
    for link_element in html_tree.xpath(xpath_expr):
        if not link_element.get('class'):
            page_title = link_element.attrib['href'].split('/')[3]
            page_title = page_title.replace('+', ' ')
            decoded_page_title = utils.decode_url(page_title)
            offline_link = provide_unique_file_name(
                page_duplicate_file_names,
                page_file_matching,
                decoded_page_title,
                explicit_file_extension='html')
            link_element.attrib['href'] = utils.encode_url(offline_link)

    # Fix links to other Confluence pages when page ids are used
    xpath_expr = '//a[contains(@href, "/pages/viewpage.action?pageId=")]'
    for link_element in html_tree.xpath(xpath_expr):
        if not link_element.get('class'):
            page_id = link_element.attrib['href'].split(
                '/pages/viewpage.action?pageId=')[1]
            offline_link = '%s.html' % utils.sanitize_for_filename(page_id)
            link_element.attrib['href'] = utils.encode_url(offline_link)

    # Fix attachment links
    xpath_expr = '//a[contains(@class, "confluence-embedded-file")]'
    for link_element in html_tree.xpath(xpath_expr):
        file_url = link_element.attrib['href']
        file_name = derive_downloaded_file_name(file_url)
        relative_file_path = '%s/%s' % (settings.DOWNLOAD_SUB_FOLDER,
                                        file_name)
        #link_element.attrib['href'] = utils.encode_url(relative_file_path)
        link_element.attrib['href'] = relative_file_path

    # Fix file paths for img tags
    # TODO: Handle non-<img> tags as well if necessary.
    # TODO: Support files with different versions as well if necessary.
    possible_image_xpaths = [
        '//img[contains(@src, "/download/")]',
        '//img[contains(@src, "/rest/documentConversion/latest/conversion/thumbnail/")]'
    ]
    xpath_expr = '|'.join(possible_image_xpaths)
    for img_element in html_tree.xpath(xpath_expr):
        # Replace file path
        file_url = img_element.attrib['src']
        file_name = derive_downloaded_file_name(file_url)
        relative_file_path = '%s/%s' % (settings.DOWNLOAD_SUB_FOLDER,
                                        file_name)
        img_element.attrib['src'] = relative_file_path

        # Add alt attribute if it does not exist yet
        if 'alt' not in img_element.attrib:
            img_element.attrib['alt'] = relative_file_path

    return html.tostring(html_tree)
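A stripped-down, illustrative version of the first link-rewriting loop above, without the de-duplication helpers:

from lxml import html

snippet = '<p><a href="/display/TES/pictest1">pictest1</a></p>'
tree = html.fromstring(snippet)
for a in tree.xpath('//a[contains(@href, "/display/")]'):
    page_title = a.attrib['href'].split('/')[3].replace('+', ' ')
    a.attrib['href'] = '%s.html' % page_title
print(html.tostring(tree))  # b'<p><a href="pictest1.html">pictest1</a></p>'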
Example #18
def fetch_page_recursively(page_id,
                           folder_path,
                           download_folder,
                           html_template,
                           depth=0,
                           page_duplicate_file_names=None,
                           page_file_matching=None,
                           attachment_duplicate_file_names=None,
                           attachment_file_matching=None):
    """ Fetches a Confluence page and its child pages (with referenced downloads).

    :param page_id: Confluence page id.
    :param folder_path: Folder to place downloaded pages in.
    :param download_folder: Folder to place downloaded files in.
    :param html_template: HTML template used to export Confluence pages.
    :param depth: (optional) Hierarchy depth of the handled Confluence page.
    :param page_duplicate_file_names: A dict in the structure {'<sanitized page filename>': amount of duplicates}
    :param page_file_matching: A dict in the structure {'<page title>': '<used offline filename>'}
    :param attachment_duplicate_file_names: A dict in the structure {'<sanitized attachment filename>': amount of \
                                            duplicates}
    :param attachment_file_matching: A dict in the structure {'<attachment title>': '<used offline filename>'}
    :returns: Information about downloaded files (pages, attachments, images, ...) as a dict (None for exceptions)
    """
    if not page_duplicate_file_names:
        page_duplicate_file_names = {}
    if not page_file_matching:
        page_file_matching = {}
    if not attachment_duplicate_file_names:
        attachment_duplicate_file_names = {}
    if not attachment_file_matching:
        attachment_file_matching = {}

    page_url = '%s/rest/api/content/%s?expand=children.page,children.attachment,body.view.value' \
               % (settings.CONFLUENCE_BASE_URL, page_id)
    try:
        response = utils.http_get(
            page_url,
            auth=settings.HTTP_AUTHENTICATION,
            headers=settings.HTTP_CUSTOM_HEADERS,
            verify_peer_certificate=settings.VERIFY_PEER_CERTIFICATE,
            proxies=settings.HTTP_PROXIES)
        page_content = response['body']['view']['value']

        page_title = response['title']
        print('%sPAGE: %s (%s)' % ('\t' * (depth + 1), page_title, page_id))

        # Construct unique file name
        file_name = provide_unique_file_name(page_duplicate_file_names,
                                             page_file_matching,
                                             str(page_id),
                                             explicit_file_extension='html')

        # Remember this file and all children
        path_collection = {
            'file_path': file_name,
            'page_title': page_title,
            'child_pages': [],
            'child_attachments': []
        }

        # Download attachments of this page
        # TODO: Outsource/Abstract the following two while loops because of much duplicate code.
        page_url = '%s/rest/api/content/%s/child/attachment?limit=25' % (
            settings.CONFLUENCE_BASE_URL, page_id)
        counter = 0
        while page_url:
            response = utils.http_get(
                page_url,
                auth=settings.HTTP_AUTHENTICATION,
                headers=settings.HTTP_CUSTOM_HEADERS,
                verify_peer_certificate=settings.VERIFY_PEER_CERTIFICATE,
                proxies=settings.HTTP_PROXIES)
            counter += len(response['results'])
            for attachment in response['results']:
                download_url = attachment['_links']['download']
                attachment_id = attachment['id'][3:]
                attachment_info = download_attachment(
                    download_url,
                    download_folder,
                    attachment_id,
                    attachment_duplicate_file_names,
                    attachment_file_matching,
                    depth=depth + 1)
                path_collection['child_attachments'].append(attachment_info)

            if 'next' in response['_links'].keys():
                page_url = response['_links']['next']
                page_url = '%s%s' % (settings.CONFLUENCE_BASE_URL, page_url)
            else:
                page_url = None

        # Export HTML file
        page_content = handle_html_references(page_content,
                                              page_duplicate_file_names,
                                              page_file_matching,
                                              depth=depth + 1)
        file_path = '%s/%s' % (folder_path, file_name)
        page_content += create_html_attachment_index(
            path_collection['child_attachments'])
        utils.write_html_2_file(file_path, page_title, page_content,
                                html_template)

        # Save another file with page id which forwards to the original one
        id_file_path = '%s/%s.html' % (folder_path, page_id)
        id_file_page_title = 'Forward to page %s' % page_title
        original_file_link = utils.encode_url(
            utils.sanitize_for_filename(file_name))
        #  id_file_page_content = settings.HTML_FORWARD_MESSAGE % (original_file_link, page_title)
        #  id_file_forward_header = '<meta http-equiv="refresh" content="0; url=%s" />' % original_file_link
        #  utils.write_html_2_file(id_file_path, id_file_page_title, id_file_page_content, html_template,
        #  additional_headers=[id_file_forward_header])

        # Iterate through all child pages
        page_url = '%s/rest/api/content/%s/child/page?limit=25' % (
            settings.CONFLUENCE_BASE_URL, page_id)
        counter = 0
        while page_url:
            response = utils.http_get(
                page_url,
                auth=settings.HTTP_AUTHENTICATION,
                headers=settings.HTTP_CUSTOM_HEADERS,
                verify_peer_certificate=settings.VERIFY_PEER_CERTIFICATE,
                proxies=settings.HTTP_PROXIES)
            counter += len(response['results'])
            for child_page in response['results']:
                paths = fetch_page_recursively(
                    child_page['id'],
                    folder_path,
                    download_folder,
                    html_template,
                    depth=depth + 1,
                    page_duplicate_file_names=page_duplicate_file_names,
                    page_file_matching=page_file_matching,
                    # Pass the attachment de-duplication state down as well so
                    # attachment file names stay unique across the page tree.
                    attachment_duplicate_file_names=attachment_duplicate_file_names,
                    attachment_file_matching=attachment_file_matching)
                if paths:
                    path_collection['child_pages'].append(paths)

            if 'next' in response['_links'].keys():
                page_url = response['_links']['next']
                page_url = '%s%s' % (settings.CONFLUENCE_BASE_URL, page_url)
            else:
                page_url = None
        return path_collection

    except utils.ConfluenceException as e:
        error_print('%sERROR: %s' % ('\t' * (depth + 1), e))
        return None