Пример #1
0
def get_results(project_name, page=1):
    """Return a JSON string with one page of results for *project_name*.

    Args:
        project_name: suffix of the Mongo collection ('result_<project_name>').
        page: 1-based page number (default 1).

    Returns:
        JSON string of the form {'result': [...], 'render_json': ...};
        'render_json' is None when pagination is unavailable.
    """
    res = []
    pagination = paginate(
        Mongo.get()['result_' + project_name].find().sort('_id', -1), page, 20)
    if pagination:
        for doc in pagination.result():
            # drop the Mongo ObjectId, which json.dumps cannot serialize
            del doc['_id']
            res.append(doc)
    # Bug fix: the original called pagination.render_json(10) unconditionally,
    # which raises when paginate() returns a falsy value (the `if pagination:`
    # guard above shows that case is expected).
    render = pagination.render_json(10) if pagination else None
    return json.dumps({'result': res, 'render_json': render})
Пример #2
0
def get_results(project_name, page=1):
    """Serialize one page of documents from the 'result_<project_name>' collection.

    Documents are paginated 20 per page, newest first, with their Mongo
    '_id' removed before JSON encoding.
    """
    collection = Mongo.get()["result_" + project_name]
    pagination = paginate(collection.find().sort("_id", -1), page, 20)
    docs = []
    if pagination:
        for document in pagination.result():
            # ObjectId is not JSON-serializable; strip it first
            document.pop("_id")
            docs.append(document)
    payload = {"result": docs, "render_json": pagination.render_json(10)}
    return json.dumps(payload)
Пример #3
0
def get_results(project_name, page=1):
    """Return a JSON page of results for *project_name* (20 items, newest first)."""
    cursor = Mongo.get()['result_' + project_name].find().sort('_id', -1)
    pagination = paginate(cursor, page, 20)
    rows = []
    if pagination:
        for row in pagination.result():
            # remove the non-JSON-serializable ObjectId
            del row['_id']
            rows.append(row)
    return json.dumps({'result': rows,
                       'render_json': pagination.render_json(10)})
Пример #4
0
def search():
    """Search artists by name and return one page of ranked suggestions.

    Reads 'artist' and optional 'page' from the query string, queries the
    trie rooted at `root`, sorts suggestions by rank (descending), and
    returns a JSON response with a pager object and the page of items.
    """
    artist = request.args.get('artist').title()
    # page from query params; `or` keeps the original behavior of also
    # defaulting when the parameter is present but empty
    page = request.args.get('page') or 1

    suggestions = list(root.query(artist))

    # (removed dead locals from the original: split_words / last_word /
    # prefix were computed but never used)
    full_suggestions = [
        {
            "id": index + 1,
            "artistName": '{}'.format(name).strip(),
            "rank": rank,
        }
        for index, (name, rank) in enumerate(suggestions)
    ]

    sorted_suggestions = sorted(full_suggestions,
                                key=lambda item: item['rank'],
                                reverse=True)

    # get pager object for specified page
    pager = json.loads(paginate(len(sorted_suggestions), int(page)))

    # slice out this page's items (endIndex is inclusive)
    page_of_items = sorted_suggestions[pager['startIndex']:pager['endIndex'] + 1]

    resp = jsonify({'pager': pager, 'pageOfItems': page_of_items})
    resp.headers.add("Access-Control-Allow-Headers", "*")
    resp.status_code = 200
    return resp
Пример #5
0
def api_test():
    try:
        client = MongoClient('localhost', 27017)
        res = client['pyfetch']['result_cnbeta'].find().limit(10)

        # print GlobalHelper.get('salve_record')
        # time.sleep(10)
        pagination = paginate(Mongo.get()['result_cnbeta'].find(), 1, 30)
        if pagination:
            for row in pagination.current_page():
                print row

                # print pagination.next()
                # print pagination.prev()

    except:
        print traceback.format_exc()
    return jsonify({'fd': 1})
Пример #6
0
def api_test():
    """Smoke-test endpoint: dump the first page of 'result_cnbeta' and
    answer with a constant JSON payload (Python 2)."""
    try:
        mongo_client = MongoClient('localhost', 27017)
        sample = mongo_client['pyfetch']['result_cnbeta'].find().limit(10)

        # print GlobalHelper.get('salve_record')
        # time.sleep(10)
        pager = paginate(Mongo.get()['result_cnbeta'].find(), 1, 30)
        if pager:
            for record in pager.current_page():
                print(record)

                # print pager.next()
                # print pager.prev()

    except:
        print(traceback.format_exc())
    return jsonify({'fd': 1})
Пример #7
0
def main():
    """CLI entry point: print one sorted, paginated page of a CSV file.

    Reads the whole input file, delegates sorting/slicing to paginate(),
    and writes the resulting page to stdout.  Exits via argparse on
    invalid arguments.
    """

    def positive_int(value):
        # argparse `type` callable enforcing the documented "minimum 1";
        # the original help text promised it but nothing checked it
        number = int(value)
        if number < 1:
            raise argparse.ArgumentTypeError(
                "%s is not a positive integer" % value)
        return number

    parser = argparse.ArgumentParser()
    parser.add_argument('file_path', help='path to the CSV input file')
    parser.add_argument('--sort-by',
                        help="name of column to sort by",
                        required=True)
    parser.add_argument('--page-size',
                        type=positive_int,
                        help="number of records per page (minimum 1)",
                        required=True)
    parser.add_argument('--page-num',
                        type=positive_int,
                        help="page number to show (minimum 1)",
                        required=True)
    args = parser.parse_args()

    with open(args.file_path) as f:
        file_text = f.read()
    output_text = paginate(file_text,
                           sort_by=args.sort_by,
                           page_size=args.page_size,
                           page_num=args.page_num)
    print(output_text)
Пример #8
0
def search():
    """Render the voter search-results page for the current query string."""
    form = Search(request.args)
    matches = Voter.search(form.query.data, form.type.data)
    # pagination entries are merged on top of the form, as in the template's
    # expectations
    context = {"form": form}
    context.update(paginate(matches))
    return render_template("voter_list.search.html", **context)
Пример #9
0
def voters_by_precinct(precinct):
    """Render the voter list for a single *precinct*."""
    # Bug fix: peewee's .where() takes expression objects, not keyword
    # arguments — .where(registeredprecinct=precinct) raises TypeError.
    # (If Voter is not a peewee model, confirm its .where() signature.)
    voters = Voter.select().where(Voter.registeredprecinct == precinct)
    context = {"precinct": precinct}
    context.update(paginate(voters))
    return render_template("voter_list.precinct.html", **context)
Пример #10
0
    def _get_tree_lines(self):
        """Yield (text, color, is_bold) tuples that render the tree view.

        Walks the tree depth-first from self._root, draws box-drawing
        prefixes (UTF-8 byte escapes — this is Python 2 str code, note the
        use of xrange and cmp-style list.sort), colors the selected and
        picked nodes, marks subtrees cut off at self._max_allowed_depth
        with "(...)", and paginates the siblings of the selected node so
        the listing fits in self._max_nr_lines, collapsing hidden runs
        into "... (N more)" placeholder lines.
        """
        tree = self._tree
        depth = 0
        max_depth = depth
        is_last_child = True
        # DFS stack of (node, depth, is_last_child), seeded with the root
        dfs_stack = [(tree.get_node(self._root), depth, is_last_child)]
        lines = []
        siblings_of_selected = None
        children_of_root = self._children(self._tree, self._tree.root)
        children_of_root.sort(self._compare_nodes)
        children_of_root.reverse()
        while dfs_stack:
            node, depth, is_last_child = dfs_stack.pop()
            max_depth = max(depth, max_depth)
            lines.append((node, depth, is_last_child))
            if depth < self._max_allowed_depth:
                children = self._children(tree, node.identifier)
                # capture the selected node's sibling list for pagination
                if node.identifier == self._node_shown_as_selected.bpointer:
                    siblings_of_selected = children
                if children:
                    children.sort(self._compare_nodes)
                    # the sort-first child is pushed deepest (popped last)
                    # and flagged is_last_child, so siblings render in
                    # reverse sorted order with it drawn last
                    dfs_stack.append((children[0], depth + 1, True))
                    dfs_stack.extend([(child, depth + 1, False)
                                      for child in children[1:]])
        # per-depth flag: ancestor at that depth still has siblings below,
        # so a vertical continuation bar must be drawn at that column
        does_parent_in_height_n_has_more_nodes = [False] * (max_depth + 1)
        if siblings_of_selected is None:
            # the selected node's parent was never expanded in the DFS
            # (presumably deeper than _max_allowed_depth); fetch directly
            siblings_of_selected = self._get_siblings_of_node(
                tree, self._node_shown_as_selected)
            siblings_of_selected.sort(self._compare_nodes)
        siblings_of_selected.reverse()
        index_of_selected = siblings_of_selected.index(
            self._node_shown_as_selected)
        # how many siblings to hide before/after the selected node so the
        # sibling run fits within _max_nr_lines
        nr_items_to_remove_at_beginning, nr_items_to_remove_at_end = (
            pagination.paginate(len(siblings_of_selected), index_of_selected,
                                self._max_nr_lines))
        index_in_siblings_of_selected = 0
        # NOTE: the body below rebinds the name `lines` to a per-node list;
        # the for statement already holds an iterator over the original
        # list object, so iteration is unaffected (confusing but correct).
        for node, depth, is_last_child in lines:
            if not self._including_root and node.identifier == self._tree.root:
                continue
            # coloring: green = selected, red = picked, blue = both
            if node.identifier == self._node_shown_as_selected.identifier:
                color = "blue" if node.identifier in self._picked_nodes else "green"
            elif node.identifier in self._picked_nodes:
                color = "red"
            else:
                color = None
            # gutter: selection cursor, spacer, pick mark, search-match mark
            pre_line = ">" if node.identifier == self._node_shown_as_selected.identifier else " "
            pre_line += " "
            pre_line += "X" if node.identifier in self._picked_nodes else " "
            if hasattr(node, 'original_matching'
                       ) and node.original_matching and self._search_pattern:
                if color is None:
                    color = "yellow"
                pre_line += "~"
            else:
                pre_line += " "
            does_parent_in_height_n_has_more_nodes[depth] = not is_last_child
            lower_depth_marks = ""
            current_depth_marks = ""
            if node.identifier != self._root:
                # vertical bars for ancestors with more siblings below
                for lower_depth in xrange(1, depth):
                    if does_parent_in_height_n_has_more_nodes[lower_depth]:
                        lower_depth_marks += "\xe2\x94\x82   "  # UTF-8 '│'
                    else:
                        lower_depth_marks += "    "
                if is_last_child:
                    current_depth_marks += '\xe2\x94\x94'  # '└'
                else:
                    if not self._including_root and node == children_of_root[0]:
                        current_depth_marks += '\xe2\x94\x8c'  # '┌'
                    else:
                        current_depth_marks += '\xe2\x94\x9c'  # '├'
                current_depth_marks += '\xe2\x94\x80' * 2  # '──'
                current_depth_marks += " "
            prefix = pre_line + lower_depth_marks + current_depth_marks
            # node.tag is either a plain string (split into display lines)
            # or already a list of line segments [(text, color, bold), ...]
            if isinstance(node.tag, str):
                tag_lines = [[[node_line, color, False]]
                             for node_line in node.tag.splitlines()]
            else:
                tag_lines = list(node.tag)

            lines = list()
            lines.append((prefix, color, False))
            lines.extend(tag_lines[0])
            lines.append(('\n', None, False))
            # continuation lines of a multi-line tag get a bar or blanks
            # in place of the branch mark
            non_first_lines_addition = ' ' * len(pre_line) + lower_depth_marks
            if is_last_child:
                non_first_lines_addition += '   '
            else:
                non_first_lines_addition += '\xe2\x94\x82' + '  '  # '│  '
            for tag_line in tag_lines[1:]:
                lines.append((non_first_lines_addition, None, False))
                lines.extend(tag_line)
                lines.append(('\n', None, False))

            # sibling pagination: emit a placeholder for the first/last
            # hidden run, skip the rest of the hidden siblings entirely
            if node.bpointer == self._node_shown_as_selected.bpointer:
                index_in_siblings_of_selected += 1
                if nr_items_to_remove_at_beginning and index_in_siblings_of_selected == 1:
                    tag = prefix + "... (%d more)\n" % (
                        nr_items_to_remove_at_beginning)
                    yield tag, None, False
                    continue
                elif index_in_siblings_of_selected <= nr_items_to_remove_at_beginning:
                    continue
                elif (nr_items_to_remove_at_end
                      and index_in_siblings_of_selected
                      == len(siblings_of_selected)):
                    tag = prefix + "... (%d more)\n" % (
                        nr_items_to_remove_at_end)
                    yield tag, None, False
                    continue
                elif index_in_siblings_of_selected > len(
                        siblings_of_selected) - nr_items_to_remove_at_end:
                    continue

            # append "(...)" to nodes whose children were cut off by the
            # depth limit
            if self._children(
                    tree,
                    node.identifier) and depth == self._max_allowed_depth:
                if len(tag_lines) > 1:
                    addition = '\n' + non_first_lines_addition
                else:
                    addition = ' '
                lines[-1] = (
                    lines[-1][0][:-1] + addition + "(...)" + "\n",
                    lines[-1][1],
                    lines[-1][2],
                )

            for line, color, is_bold in lines:
                yield line, color, is_bold
Пример #11
0
 def process_list(self, data):
     """Process a whole errata_list/errata_search request and return info.

     Reads from *data*: "modified_since", "errata_list" (explicit names) or
     "errata_search" (a search expression expanded via self._fill_errata),
     and optional "page"/"page_size" for pagination.

     Returns a dict with an "errata_list" mapping of errata name ->
     details, plus "modified_since" (when given) and the pagination
     fields produced by paginate().
     """
     modified_since = data.get("modified_since", "")
     errata_to_process = data.get("errata_list", None)
     errata_to_search = data.get("errata_search", None)
     page = data.get("page", None)
     page_size = data.get("page_size", None)
     response = {"errata_list": {}}
     if modified_since:
         response["modified_since"] = modified_since
     # the search expression is only consulted when no explicit list was
     # supplied; with neither present there is nothing to do
     if not errata_to_process and errata_to_search:
         errata_to_process = self._fill_errata(errata_to_search)
     elif not errata_to_process and not errata_to_search:
         return response
     result = {}
     # restrict the requested errata names to the requested page
     errata_page_to_process, pagination_response = paginate(
         errata_to_process, page, page_size)
     db_connection = self.db_pool.get_connection()
     with db_connection.get_cursor() as cursor:
         # NOTE(review): an empty errata_page_to_process would render as
         # "in ()", which is invalid SQL — confirm paginate() never
         # returns an empty page when input is non-empty.
         cursor.execute(
             """select distinct e.updated, es.name as severity,
                               e.issued, e.description,
                               e.solution, e.summary, e.name as url, e.synopsis,
                               et.name as type
                               from errata e
                               left join errata_severity es on e.severity_id = es.id
                               left join errata_type et on e.errata_type_id = et.id
                               where e.name in %s
                            """, [tuple(errata_page_to_process, )])
         # NOTE(review): rows are matched back to errata_page_to_process by
         # position (enumerate index); SQL gives no ordering guarantee
         # without an ORDER BY, so this assumes the driver returns rows in
         # input-list order — verify, or key by errata[ERRATA_URL] (the
         # selected e.name) instead.
         for i, errata in enumerate(cursor):
             result[errata_page_to_process[i]] = {}
             result[errata_page_to_process[i]] = {
                 "updated":
                 errata[ERRATA_UPDATED],
                 "severity":
                 errata[ERRATA_SEVERITY],
                 "reference_list":
                 self._build_references(errata_page_to_process),
                 "issued":
                 errata[ERRATA_ISSUED],
                 "description":
                 errata[ERRATA_DESCRIPTION],
                 "solution":
                 errata[ERRATA_SOLUTION],
                 "summary":
                 errata[ERRATA_SUMMARY],
                 "url":
                 "https://access.redhat.com/errata/%s" %
                 str(errata[ERRATA_URL]),
                 "synopsis":
                 errata[ERRATA_SYNOPSIS],
                 "cve_list":
                 self._build_cve_list(errata_page_to_process),
                 "bugzilla_list":
                 self._build_references(errata_page_to_process,
                                        bugzilla=True),
                 "package_list":
                 self._build_package_list(errata_page_to_process),
                 "source_package_list":
                 self._build_package_list(errata_page_to_process,
                                          source=True),
                 "type":
                 errata[ERRATA_TYPE]
             }
         response["errata_list"].update(result)
     response.update(pagination_response)
     self.db_pool.return_connection(db_connection)
     return response