Example #1
File: test.py Project: jeaf/idxbeast
def test_search(expected_nb_results, conn, query):
    """
    Execute a search on the given connection and check the results against the
    expected data.
    """
    nb_results, cur = core.search(conn, query, expected_nb_results, 0)
    assert_eq(expected_nb_results, nb_results)
    for relev, freq, avg_idx, id, type_, loc, title in cur:
        break
    else:
        assert_fail("At least one result should be available")
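A side note on the for/else idiom used above: the else branch runs only when the loop finishes without hitting break, so here it fires exactly when the cursor yields no rows. A minimal self-contained sketch of the same pattern:

def first_or_fail(iterable):
    # Same for/else shape as the test above: the else clause runs only if
    # the loop body never executed, i.e. the iterable was empty.
    for item in iterable:
        break
    else:
        raise AssertionError("At least one result should be available")
    return item

first_or_fail([42])   # -> 42
# first_or_fail([])   # raises AssertionError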
Example #2
File: to_dont.py Project: fhightower/tasks
def search(query: str, ignore_done_results=True):
    """."""
    results = core.search(query)
    # remove tasks on the 'deleted' list
    updated_results = [
        result for result in results
        if result['metadata'][TO_DONT_METADTA_KEY]['list'] != 'deleted'
    ]
    if ignore_done_results:
        # remove tasks on the 'done' list
        updated_results = [
            result for result in updated_results
            if result['metadata'][TO_DONT_METADTA_KEY]['list'] != 'done'
        ]
    return updated_results
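For reference, a rough usage sketch with fake data standing in for what core.search returns; the key name here is a hypothetical stand-in for TO_DONT_METADTA_KEY:

KEY = 'to_dont'  # hypothetical stand-in for TO_DONT_METADTA_KEY
results = [
    {'title': 'a', 'metadata': {KEY: {'list': 'inbox'}}},
    {'title': 'b', 'metadata': {KEY: {'list': 'done'}}},
    {'title': 'c', 'metadata': {KEY: {'list': 'deleted'}}},
]

# The two comprehensions above amount to a single pass with a skip set
# ('done' is only skipped when ignore_done_results is True):
skip = {'deleted', 'done'}
kept = [r for r in results if r['metadata'][KEY]['list'] not in skip]
# kept -> [{'title': 'a', ...}]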
Example #3
    def search_weather(self):

        city = self.CityEdit.text()
        if not city:
            self.statusBar().setStyleSheet("color: red;")
            self.statusBar().showMessage("请输入想要查询的城市名称!")
            return False

        city_code = get_city_code(city)
        if not city_code:
            self.statusBar().setStyleSheet("color: red;")
            self.statusBar().showMessage("没有找到您想要查询的城市名称,请检查名称是否错误!")
            return False

        try:
            response = search(city_code)
            data = parse(response)
            self.WeatherResult.setText("\n".join(data))
        except Exception as error:
            self.statusBar().setStyleSheet("color: red;")
            self.statusBar().showMessage(str(error))
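The search and parse helpers used here are not shown. A minimal sketch of what they might look like against a JSON weather endpoint; the URL and field names are assumptions, not the project's actual API:

import requests

def search(city_code):
    # Fetch raw weather JSON for a city code (URL is illustrative only).
    response = requests.get(
        "http://example.com/weather/{}.json".format(city_code), timeout=5)
    response.raise_for_status()
    return response.json()

def parse(response):
    # Flatten the payload into display lines (the field name is an assumption).
    info = response.get("weatherinfo", {})
    return ["{}: {}".format(key, value) for key, value in info.items()]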
Example #4
import itertools

def coauthors(name, depth=1, author_repr=None):
    """Build a network of co-authors based on the name provided,
    to a level depth as provided.

    Inputs
    ------
    name : str
        The author name to focus on.

    depth : int
        The number of depth levels to progress from `name`.

    author_repr : callable, optional
        A function that formats an author name (generally "LastName, Firstname I.").
    """

    try:
        depth = int(depth)
    except (TypeError, ValueError):
        raise TypeError("depth must be an integer-like type")

    if depth < 1:
        raise ValueError("depth must be a positive integer")

    if author_repr is None:
        author_repr = lambda x: x

    all_articles = []
    level_authors = [name]
    for _ in range(depth):

        next_level_authors = []
        for author in level_authors:

            # If this is a "Collaboration" or "Team",
            # we should ignore it and just deal with
            # *real* people.

            if "collaboration" in author.lower() \
            or " team" in author.lower():
                continue

            # Get this author's refereed articles, ranked by citation count
            articles = search(u'"{author}"'.format(author=author),
                fl="author,citation_count", filter="property:refereed", rows="all",
                order="desc", sort="citations")

            # Add these articles to the list
            all_articles.extend(articles)

            # Queue these articles' authors for the next level
            for article in articles:
                next_level_authors.extend(article.author)

        level_authors = next_level_authors

    # Initialise a group with the name input
    links = []
    groups = [0]
    values = []
    nodes = [author_repr(name)]

    # Remove article double-ups?

    # Go through all articles
    for group_number, article in enumerate(all_articles, start=1):

        # Make sure each co-author has a node
        for co_author in article.author:

            co_author = author_repr(co_author)
            if co_author not in nodes:

                # Create the node for this author
                groups.append(group_number)
                nodes.append(co_author)

        # Links should be drawn between all of this article's authors,
        # since they all published together.
        for (author_one, author_two) in itertools.combinations(article.author, 2):
            if author_repr(author_one) == author_repr(author_two):
                continue

            source = nodes.index(author_repr(author_one))
            target = nodes.index(author_repr(author_two))

            link = (source, target)
            knil = (target, source)
            if link not in links and knil not in links:
                links.append(link)
                values.append(1)

            else:
                try:
                    index = links.index(link)
                except ValueError:
                    index = links.index(knil)

                values[index] += 1

    # Build formatted nodes and links
    formatted_nodes = []
    for author, group in zip(nodes, groups):
        formatted_nodes.append({"name": author, "group": group})

    formatted_links = []
    for (source, target), value in zip(links, values):
        formatted_links.append({
            "source": source,
            "target": target,
            "value": value
            })

    output = {
        "nodes": formatted_nodes,
        "links": formatted_links
    }

    return output
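The nodes/links dictionary returned above matches the JSON shape commonly fed to D3 force-directed graph layouts. A quick serialization sketch; the author name is made up, and coauthors assumes a working search backend:

import json

graph = coauthors("LastName, F. I.", depth=1)
with open("coauthors.json", "w") as fh:
    json.dump(graph, fh, indent=2)
# Each link looks like {"source": 0, "target": 3, "value": 2}, where
# source and target index into graph["nodes"].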
Example #5
File: druscan.py Project: JulienD/DruScan
        exit()
    elif o.update_themes:
        update("themes", o.limit)
        message("Update themes. Done.", core.M_UPDATE, "OK")
        exit()

    if o.url is not None:

        site_version = detect_version(o.url)
        message("Possible version " + str(site_version), core.M_VERSION, "WARNING")
        message("\t" + o.url + "/CHANGELOG.txt", core.M_FILE, "WARNING")

        if o.modules:
            search(o.url, "modules", version=site_version)

        if o.themes:
            search(o.url, "themes", version=site_version)

        if o.users:
            # Use the supplied limit if it was changed from the default (3);
            # otherwise fall back to 20 for user enumeration.
            if o.limit != 3:
                ulimit = o.limit
            else:
                ulimit = 20

            search_users(o.url, ulimit)

        if o.paths:
            search_urls(o.url)
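The snippet relies on an options object o produced earlier in the script. A plausible sketch of how those options might be declared with optparse; the flag letters and defaults are assumptions that mirror the attributes used above:

from optparse import OptionParser

parser = OptionParser()
parser.add_option("-u", "--url", dest="url", help="target Drupal site")
parser.add_option("-m", "--modules", action="store_true", default=False)
parser.add_option("-t", "--themes", action="store_true", default=False)
parser.add_option("--update-themes", dest="update_themes", action="store_true", default=False)
parser.add_option("--users", action="store_true", default=False)
parser.add_option("--paths", action="store_true", default=False)
parser.add_option("-l", "--limit", type="int", default=3)
o, args = parser.parse_args()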
Example #6
def __twistdl__():

    session = requests.Session()

    parser = argparse.ArgumentParser(
        description="twistdl is a downloader for anime from https://twist.moe."
    )

    parser.add_argument('-s',
                        '--search',
                        required=False,
                        default='',
                        help="Query for searching from Twist APIs.")
    parser.add_argument(
        '--iafl',
        action="store_true",
        help="'I am feeling lucky' mode: if --search is used, the first "
             "result is enqueued for download.")

    parser.add_argument('-dl',
                        '--download',
                        required=False,
                        default='',
                        help="Download an anime using slug name.")
    parser.add_argument('-r',
                        '--range',
                        required=False,
                        default='1',
                        help="Range for downloading episodes from.")

    parser.add_argument('-g',
                        '--grab',
                        required=False,
                        default='',
                        help="Grab the stream link(s) without downloading.")
    parser.add_argument('-q',
                        '--quiet',
                        action="store_true",
                        help="Quiet mode, basically for disabling tqdm.")

    parsed = parser.parse_args()
    dl_check = range_check_parser(parsed.range)

    if parsed.search:
        print("Searching for '%s' on Twist:" % parsed.search)

        results = [*core.search(session, parsed.search)]

        if not results:
            print("Could not find anything from that identifier.")

        for index, content in enumerate(results, 1):

            print(("{0:02d}: {title}" + (" ({alt_title})" if content.get('alt_title') else "") + \
                " \x1b[33m{slug[slug]}\x1b[0m").format(index, **content))

        if not parsed.iafl:
            return

        if not results:
            return print(
                "'I am feeling lucky' cannot continue since there are no results."
            )

        first_result = results.pop(0)
        slug = first_result.get('slug', {}).get('slug', '')

        for episode, content in enumerate(get_episodes(session, slug), 1):
            if dl_check(episode):
                cli_download(session, content.get('source'), episode, slug,
                             parsed.quiet)

    if parsed.download:
        slug = slug_converter(parsed.download)
        for episode, content in enumerate(get_episodes(session, slug), 1):
            if dl_check(episode):
                cli_download(session, content.get('source'), episode, slug,
                             parsed.quiet)

    if parsed.grab:
        slug = slug_converter(parsed.grab)
        for episode, content in enumerate(get_episodes(session, slug), 1):
            if dl_check(episode):
                print("Episode {0:02d}: {1}".format(episode,
                                                    content.get('source')))
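range_check_parser is referenced but not shown. A plausible sketch that accepts a single number ('3', meaning that episode onward, consistent with the default of '1') or a span ('2-5') and returns a predicate; the accepted syntax is an assumption:

def range_check_parser(spec):
    # Return a predicate telling whether an episode number is in range.
    if '-' in spec:
        low, high = (int(part) for part in spec.split('-', 1))
        return lambda episode: low <= episode <= high
    start = int(spec)
    return lambda episode: episode >= start

check = range_check_parser('2-4')
# check(1) -> False; check(3) -> True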