Example #1
def test_authors_in_cache():
    create_cache(drop=True, file=test_cache)
    # Variables
    expected_auth = ["53164702100", "57197093438"]
    search_auth = ["55317901900"]
    # Test empty cache
    df1 = pd.DataFrame(expected_auth, columns=["auth_id"], dtype="int64")
    incache, tosearch = authors_in_cache(df1, file=test_cache)
    expected_cols = ['auth_id', 'eid', 'surname', 'initials', 'givenname',
                     'affiliation', 'documents', 'affiliation_id', 'city',
                     'country', 'areas']
    expected_auth = [int(au) for au in expected_auth]
    assert_equal(tosearch, expected_auth)
    assert_equal(len(incache), 0)
    assert_equal(incache.columns.tolist(), expected_cols)
    # Test partial retrieval
    q = "AU-ID({})".format(') OR AU-ID('.join([str(a) for a in expected_auth]))
    res = pd.DataFrame(AuthorSearch(q).authors, dtype="int64")
    res["auth_id"] = res["eid"].str.split("-").str[-1]
    res = res[expected_cols]
    cache_insert(res, table="authors", file=test_cache)
    df2 = pd.DataFrame(expected_auth + search_auth, columns=["auth_id"],
                       dtype="int64")
    incache, tosearch = authors_in_cache(df2, file=test_cache)
    assert_equal(tosearch, [55317901900])
    assert_equal(len(incache), 2)
    # Test full retrieval
    incache, tosearch = authors_in_cache(df1, file=test_cache)
    assert_equal(tosearch, [])
    assert_equal(len(incache), 2)
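
For context, a minimal usage sketch of the round trip this test exercises. The import path and cache file name are assumptions for illustration, not taken from the test module:

# Sketch: split requested author IDs into cached rows and IDs still to search.
import pandas as pd
from sosia.cache import create_cache, authors_in_cache  # assumed import path

my_cache = "./authors.sqlite"  # hypothetical cache file
create_cache(drop=True, file=my_cache)
queries = pd.DataFrame([53164702100, 57197093438],
                       columns=["auth_id"], dtype="int64")
incache, tosearch = authors_in_cache(queries, file=my_cache)
# On a freshly created cache, everything falls into `tosearch`
assert incache.empty and tosearch == [53164702100, 57197093438]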
Example #2
def test_sources_afids_in_sources_cache():
    create_cache(drop=True, file=test_cache)
    # Variables
    expected_sources = [22900]
    expected_years = [2010, 2005]
    df = pd.DataFrame(list(product(expected_sources, expected_years)),
                      columns=["source_id", "year"], dtype="int64")
    # Populate cache
    res = query_year(expected_years[0], expected_sources, False, False, afid=True)
    cache_insert(res, table="sources", file=test_cache)
    # Retrieve from cache
    sources_ys_incache, sources_ys_search = sources_in_cache(df, file=test_cache)
    expected_sources = [int(s) for s in expected_sources]
    assert_equal(sources_ys_incache.source_id.tolist(), expected_sources)
    assert_equal(sources_ys_incache.year.tolist(), [expected_years[0]])
    assert_equal(sources_ys_search.source_id.tolist(), expected_sources)
    assert_equal(sources_ys_search.year.tolist(), [expected_years[1]])
Example #3
def test_sources_afids_in_cache_partial():
    create_cache(drop=True, file=test_cache)
    # Variables
    expected_sources = [22900]
    expected_years = [2010, 2005]
    df = pd.DataFrame(list(product(expected_sources, expected_years)),
                      columns=["source_id", "year"], dtype="int64")
    sa_incache, sa_search = sources_in_cache(df, file=test_cache, afid=True)
    # Populate cache
    res = query_year(expected_years[0], expected_sources, False, False, afid=True)
    cache_insert(res, table="sources_afids", file=test_cache)
    # Retrieve from cache
    sa_incache, sa_search = sources_in_cache(df, file=test_cache, afid=True)
    expected_sources = {int(s) for s in expected_sources}
    assert_equal(set(sa_incache.source_id.tolist()), expected_sources)
    assert_equal(set(sa_incache.year.tolist()), {expected_years[0]})
    assert_equal(set(sa_search.source_id.tolist()), expected_sources)
    assert_equal(set(sa_search.year.tolist()), {expected_years[1]})
    expected = range(182-5, 182+5)
    assert_true(len(sa_incache) in expected)
    assert_true(len(sa_incache.afid.drop_duplicates()) in expected)
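
Examples #2 and #3 follow the same lookup pattern; below is a hedged sketch of the call. The import path and cache file name are assumptions for illustration:

# Sketch: look up (source_id, year) pairs; split into cached vs. missing.
from itertools import product
import pandas as pd
from sosia.cache import sources_in_cache  # assumed import path

test_cache = "./test.sqlite"  # hypothetical cache file
pairs = pd.DataFrame(list(product([22900], [2005, 2010])),
                     columns=["source_id", "year"], dtype="int64")
incache, tosearch = sources_in_cache(pairs, file=test_cache, afid=True)
# With afid=True, `incache` holds one row per (source, year, affiliation),
# which is why the test above checks len(sa_incache) against a range.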
Example #4
File: queries.py Project: fagan2888/sosia
def query_author_data(authors_list, refresh=False, verbose=False):
    """Wrapper function to search author data for a list of authors, searching
    first in cache and then via stacked search.

    Parameters
    ----------
    authors_list : list
        List of Scopus Author IDs to search.

    refresh : bool (optional, default=False)
        Whether to refresh cached Scopus files if they exist.

    verbose : bool (optional, default=False)
        Whether to print information on the search progress.

    Returns
    -------
    authors_data : DataFrame
        A dataframe with authors data from AuthorSearch for the list provided.
    """
    authors = pd.DataFrame(authors_list, columns=["auth_id"], dtype="int64")
    # merge existing data in cache and separate missing records
    auth_done, auth_missing = authors_in_cache(authors)
    if auth_missing:
        params = {
            "group": auth_missing,
            "res": [],
            "refresh": refresh,
            "joiner": ") OR AU-ID(",
            "q_type": "author",
            "template": Template("AU-ID($fill)")
        }
        if verbose:
            print("Pre-filtering...")
            params.update({"total": len(auth_missing)})
        res, _ = stacked_query(**params)
        res = pd.DataFrame(res)
        cache_insert(res, table="authors")
        auth_done, _ = authors_in_cache(authors)
    return auth_done
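
A hedged usage sketch of this wrapper; the IDs are the placeholders used in the tests above:

# Sketch: fetch author metadata, mixing cache hits with stacked searches.
ids = [53164702100, 57197093438]  # placeholder Scopus Author IDs
authors_data = query_author_data(ids, refresh=False, verbose=True)
print(authors_data[["auth_id", "surname", "documents"]].head())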
Example #5
def test_author_year_in_cache():
    create_cache(drop=True, file=test_cache)
    # Variables
    expected_auth = ["53164702100", "57197093438"]
    search_auth = ["55317901900"]
    year = 2016
    # Test empty cache
    df1 = pd.DataFrame(expected_auth, columns=["auth_id"],
                       dtype="int64")
    df1["year"] = year
    auth_y_incache, auth_y_search = author_year_in_cache(df1, file=test_cache)
    assert_frame_equal(auth_y_search, df1)
    assert_equal(len(auth_y_incache), 0)
    # Test partial retrieval
    fill = ') OR AU-ID('.join([str(a) for a in expected_auth])
    q = "(AU-ID({})) AND PUBYEAR BEF {}".format(fill, year+1)
    res = build_dict(ScopusSearch(q).results, expected_auth)
    res = pd.DataFrame.from_dict(res, orient="index", dtype="int64")
    res["year"] = year
    cols = ["year", "first_year", "n_pubs", "n_coauth"]
    res = res[cols].reset_index().rename(columns={"index": "auth_id"})
    cache_insert(res, table="author_year", file=test_cache)
    df2 = pd.DataFrame(expected_auth + search_auth,
                       columns=["auth_id"], dtype="int64")
    df2["year"] = year
    auth_y_incache, auth_y_search = author_year_in_cache(df2, file=test_cache)
    expected_auth = [int(au) for au in expected_auth]
    search_auth = [int(au) for au in search_auth]
    assert_equal(sorted(auth_y_incache.auth_id.tolist()), expected_auth)
    assert_equal(auth_y_incache.year.tolist(), [year, year])
    assert_equal(auth_y_search.auth_id.tolist(), search_auth)
    assert_equal(auth_y_search.year.tolist(), [year])
    # Test full retrieval
    auth_year_incache, auth_year_search = author_year_in_cache(df1, file=test_cache)
    assert_equal(sorted(auth_year_incache.auth_id.tolist()), expected_auth)
    assert_equal(auth_year_incache.year.tolist(), [year, year])
    assert_true(auth_year_search.empty)
Example #6
def test_author_size_in_cache():
    create_cache(drop=True, file=test_cache)
    # Variables
    expected_auth = 53164702100
    expected_years = [2010, 2017]
    pubs1 = 0
    pubs2 = 6
    cols = ["auth_id", "year"]
    df = pd.DataFrame(list(product([expected_auth], expected_years)),
                      columns=cols, dtype="int64")
    # Test empty cache
    size = author_size_in_cache(df, file=test_cache)
    assert_equal(len(size), 0)
    assert_true(isinstance(size, pd.DataFrame))
    # Test adding to and retrieving from cache
    tp1 = (expected_auth, expected_years[0], pubs1)
    cache_insert(tp1, table="author_size", file=test_cache)
    tp2 = (expected_auth, expected_years[1], pubs2)
    cache_insert(tp2, table="author_size", file=test_cache)
    size = author_size_in_cache(df, file=test_cache)
    assert_equal(len(size), 2)
    assert_frame_equal(size[cols], df)
    assert_equal(size[size.year == expected_years[0]]["n_pubs"][0], pubs1)
    assert_equal(size[size.year == expected_years[1]]["n_pubs"][1], pubs2)
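
A short sketch of the insert-then-lookup cycle this test covers; the tuple layout (auth_id, year, n_pubs) follows the test, while the import path and file name are assumptions:

# Sketch: cache a yearly publication count and read it back.
import pandas as pd
from sosia.cache import (create_cache, cache_insert,
                         author_size_in_cache)  # assumed import path

test_cache = "./test.sqlite"  # hypothetical cache file
create_cache(drop=True, file=test_cache)
cache_insert((53164702100, 2010, 0), table="author_size", file=test_cache)
lookup = pd.DataFrame([(53164702100, 2010)],
                      columns=["auth_id", "year"], dtype="int64")
size = author_size_in_cache(lookup, file=test_cache)
print(size)  # one row whose n_pubs column equals 0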
Example #7
def search_group_from_sources(self, stacked, verbose, refresh=False):
    """Define groups of authors based on publications from a set of sources.

    Parameters
    ----------
    self : sosia.Original
        The object of the Scientist to search information for.

    stacked : bool
        Whether to combine searches in few queries or not.  Cached
        files will most likely not be reusable.

    verbose : bool (optional, default=False)
        Whether to report on the progress of the process.

    refresh : bool (optional, default=False)
        Whether to refresh cached search files.

    Returns
    -------
    today, then, negative : set
        Sets of authors publishing in three periods: during the year of
        treatment, during the years to match on, and during the years
        before the first publication.
    """
    # Filtering variables
    min_year = self.first_year - self.year_margin
    max_year = self.first_year + self.year_margin
    if self.period:
        _margin_setter = self.publications_period
    else:
        _margin_setter = self.publications
    max_pubs = max(margin_range(len(_margin_setter), self.pub_margin))
    years = list(range(min_year, max_year + 1))
    search_years = [min_year - 1]
    if not self._ignore_first_id:
        search_years.extend(range(min_year, max_year + 1))
    search_sources, _ = zip(*self.search_sources)

    # Verbose variables
    n = len(search_sources)
    text = "Searching authors for search_group in {} sources...".format(n)
    custom_print(text, verbose)
    today = set()
    then = set()
    negative = set()

    if stacked:  # Make use of SQL cache
        # Year provided (select also based on location)
        # Get already cached sources from cache
        sources_ay = DataFrame(list(product(search_sources,
                                            [self.active_year])),
                               columns=["source_id", "year"])
        _, _search = sources_in_cache(sources_ay, refresh=refresh, afid=True)
        res = query_year(self.active_year,
                         _search.source_id.tolist(),
                         refresh,
                         verbose,
                         afid=True)
        cache_insert(res, table="sources_afids")
        sources_ay, _ = sources_in_cache(sources_ay,
                                         refresh=refresh,
                                         afid=True)
        # Authors publishing in provided year and locations
        mask = None
        if self.search_affiliations:
            mask = sources_ay.afid.isin(self.search_affiliations)
        today = flat_set_from_df(sources_ay, "auids", mask)
        # Years before active year
        # Get already cached sources from cache
        sources_ys = DataFrame(list(product(search_sources, search_years)),
                               columns=["source_id", "year"])
        _, sources_ys_search = sources_in_cache(sources_ys, refresh=refresh)
        missing_years = set(sources_ys_search.year.tolist())
        # Add information for any missing years to the cache
        for y in missing_years:
            mask = sources_ys_search.year == y
            _sources_search = sources_ys_search[mask].source_id.tolist()
            res = query_year(y, _sources_search, refresh, verbose)
            cache_insert(res, table="sources")
        # Get full cache
        sources_ys, _ = sources_in_cache(sources_ys, refresh=False)
        # Authors publishing in year(s) of first publication
        if not self._ignore_first_id:
            mask = sources_ys.year.between(min_year, max_year, inclusive=True)
            then = flat_set_from_df(sources_ys, "auids", mask)
        # Authors with publications before
        mask = sources_ys.year < min_year
        negative = flat_set_from_df(sources_ys, "auids", mask)
    else:
        auth_count = []
        print_progress(0, n, verbose)
        for i, source_id in enumerate(search_sources):
            info = query_journal(source_id, [self.active_year] + years,
                                 refresh)
            today.update(info[str(self.active_year)])
            if not self._ignore_first_id:
                for y in years:
                    then.update(info[str(y)])
            for y in range(int(min(info.keys())), min_year):
                negative.update(info[str(y)])
            for y in info:
                if int(y) <= self.active_year:
                    auth_count.extend(info[str(y)])
            print_progress(i + 1, n, verbose)
        c = Counter(auth_count)
        negative.update({a for a, npub in c.items() if npub > max_pubs})

    return today, then, negative
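
A hedged sketch of how a caller might combine the three returned sets. This mirrors the docstring's description, not necessarily sosia's exact downstream logic, and `scientist` is a placeholder sosia.Original instance:

# Sketch only: derive a candidate group from the three author sets.
today, then, negative = search_group_from_sources(scientist, stacked=True,
                                                  verbose=True)
group = today & then   # active in the treatment year AND around the first year
group -= negative      # drop authors who published before the match window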
Example #8
def filter_pub_counts(group,
                      ybefore,
                      yupto,
                      npapers,
                      yfrom=None,
                      verbose=False):
    """Filter authors based on restrictions in the number of
    publications in different periods, searched by query_size.

    Parameters
    ----------
    group : list of str
        Scopus IDs of authors to be filtered.

    ybefore : int
        Year to be used as the first year. The publication count in this
        year and earlier must be 0.

    yupto : int
        Year up to which to count publications.

    npapers : list
        List with the minimum and maximum number of publications.

    yfrom : int (optional, default=None)
        If provided, publications are counted only after this year.
        The publication count before ybefore must still be 0.

    Returns
    -------
    group : list of str
        Scopus IDs of authors remaining after filtering.

    pubs_counts : list of int
        Publication counts within the provided period for the authors
        in group.

    older_authors : list of str
        Scopus IDs filtered out because they have publications before
        ybefore.

    Notes
    -----
    It uses cached values first, and searches for more data if needed.
    """
    group = [int(x) for x in group]
    years_check = [ybefore, yupto]
    if yfrom:
        years_check.extend([yfrom - 1])
    authors = DataFrame(list(product(group, years_check)),
                        columns=["auth_id", "year"],
                        dtype="int64")
    authors_size = author_size_in_cache(authors)
    au_skip = []
    group_tocheck = [x for x in group]
    older_authors = []
    pubs_counts = []
    # use information in cache
    if not authors_size.empty:
        # Authors that can already be removed because they have
        # publications up to ybefore
        mask = ((authors_size.year <= ybefore) & (authors_size.n_pubs > 0))
        remove = (authors_size[mask]["auth_id"].drop_duplicates().tolist())
        older_authors.extend(remove)
        au_remove = [x for x in remove]
        # remove if number of pubs in year is in any case too small
        mask = ((authors_size.year >= yupto) &
                (authors_size.n_pubs < min(npapers)))
        remove = (authors_size[mask]["auth_id"].drop_duplicates().tolist())
        au_remove.extend(remove)
        # authors with no pubs before min year
        mask = (((authors_size.year == ybefore) & (authors_size.n_pubs == 0)))
        au_ok_miny = (authors_size[mask]["auth_id"].drop_duplicates().tolist())
        # check publications in range
        if yfrom:
            # Adjust count by subtracting the count before the period;
            # keep only authors for which this is possible
            mask = (authors_size.year == yfrom - 1)
            authors_size_bef = authors_size[mask]
            authors_size_bef["year"] = yupto
            authors_size_bef.columns = ["auth_id", "year", "n_pubs_bef"]
            bef_auth = set(authors_size_bef["auth_id"])
            mask = ((authors_size["auth_id"].isin(bef_auth)) &
                    (authors_size["year"] == yupto))
            authors_size = authors_size[mask]
            authors_size = authors_size.merge(authors_size_bef,
                                              "left",
                                              on=["auth_id", "year"])
            authors_size = authors_size.fillna(0)
            authors_size["n_pubs"] -= authors_size["n_pubs_bef"]
        # authors that can be already removed because of pubs count
        mask = (((authors_size.year >= yupto) &
                 (authors_size.n_pubs < min(npapers))) |
                ((authors_size.year <= yupto) &
                 (authors_size.n_pubs > max(npapers))))
        remove = (authors_size[mask]["auth_id"].drop_duplicates().tolist())
        au_remove.extend(remove)
        # authors with pubs count within the range before the given year
        mask = (((authors_size.year == yupto) &
                 (authors_size.n_pubs >= min(npapers))) &
                (authors_size.n_pubs <= max(npapers)))
        au_ok_year = authors_size[mask][["auth_id",
                                         "n_pubs"]].drop_duplicates()
        # authors ok (match both conditions)
        au_ok = list(set(au_ok_miny).intersection(set(au_ok_year["auth_id"])))
        mask = au_ok_year["auth_id"].isin(au_ok)
        pubs_counts = au_ok_year[mask]["n_pubs"].tolist()
        # Authors that match only the first condition, while the second
        # is unknown, can skip the first condition check.
        au_skip = [x for x in au_ok_miny if x not in au_remove + au_ok]
        group = [x for x in group if x not in au_remove]
        group_tocheck = [x for x in group if x not in au_skip + au_ok]
    text = "Left with {} authors based on size information already in "\
           "cache.\n{} to check\n".format(len(group), len(group_tocheck))
    custom_print(text, verbose)
    # Verify that publications before minimum year are 0
    if group_tocheck:
        text = "Searching through characteristics of {:,} authors...".format(
            len(group_tocheck))
        custom_print(text, verbose)
        print_progress(0, len(group_tocheck), verbose)
        to_loop = [x for x in group_tocheck]  # Temporary copy
        for i, au in enumerate(to_loop):
            q = "AU-ID({}) AND PUBYEAR BEF {}".format(au, ybefore + 1)
            size = base_query("docs", q, size_only=True)
            tp = (au, ybefore, size)
            cache_insert(tp, table="author_size")
            print_progress(i + 1, len(to_loop), verbose)
            if not size == 0:
                group.remove(au)
                group_tocheck.remove(au)
                older_authors.append(au)
        text = "Left with {} authors based on size information before "\
               "minium year\n Filtering based on size query before "\
               "provided year\n".format(len(group))
        custom_print(text, verbose)
    # Verify that the publication count before the given year falls in range
    group_tocheck.extend(au_skip)
    n = len(group_tocheck)
    if group_tocheck:
        text = "Searching through characteristics of {:,} authors".format(n)
        custom_print(text, verbose)
        print_progress(0, n, verbose)
        for i, au in enumerate(group_tocheck):
            q = "AU-ID({}) AND PUBYEAR BEF {}".format(au, yupto + 1)
            n_pubs_yupto = base_query("docs", q, size_only=True)
            tp = (au, yupto, n_pubs_yupto)
            cache_insert(tp, table="author_size")
            # If necessary, decrease the publication count
            if yfrom and n_pubs_yupto >= min(npapers):
                q = "AU-ID({}) AND PUBYEAR BEF {}".format(au, yfrom)
                n_pubs_yfrom = base_query("docs", q, size_only=True)
                tp = (au, yfrom - 1, n_pubs_yfrom)
                cache_insert(tp, table="author_size")
                n_pubs_yupto -= n_pubs_yfrom
            if n_pubs_yupto < min(npapers) or n_pubs_yupto > max(npapers):
                group.remove(au)
            else:
                pubs_counts.append(n_pubs_yupto)
            print_progress(i + 1, n, verbose)
    return group, pubs_counts, older_authors
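
A hedged usage sketch with placeholder years and bounds; the signature follows the function above:

# Sketch: keep authors with zero pubs up to 2004 and 2-5 pubs by 2010.
candidates = ["53164702100", "57197093438"]  # placeholder IDs
group, pubs_counts, older = filter_pub_counts(candidates, ybefore=2004,
                                              yupto=2010, npapers=[2, 5],
                                              verbose=True)
# `older` lists IDs dropped for publishing in or before 2004;
# `pubs_counts` aligns element-wise with the surviving `group`.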
Example #9
File: sosia.py Project: fagan2888/sosia
    def find_matches(self,
                     stacked=False,
                     verbose=False,
                     stop_words=STOPWORDS,
                     information=True,
                     refresh=False,
                     **tfidf_kwds):
        """Find matches within search_group based on four criteria:
        1. Started publishing in about the same year
        2. Has about the same number of publications in the year of treatment
        3. Has about the same number of coauthors in the year of treatment
        4. Has about the same number of citations in the year of treatment
        5. Works in the same field as the scientist's main field

        Parameters
        ----------
        stacked : bool (optional, default=False)
            Whether to combine searches in few queries or not.  Cached
            files will most likely not be reusable.  Set to True if you
            query in distinct fields or you want to minimize API key usage.

        verbose : bool (optional, default=False)
            Whether to report on the progress of the process.

        stop_words : list (optional, default=STOPWORDS)
            A list of words that should be filtered in the analysis of
            abstracts.  The default is the list of English stopwords
            from nltk, augmented with numbers and punctuation.

        information : bool or iterable (optional, default=True)
            Whether to return additional information on the matches that may
            help in the selection process.  If an iterable of keywords is
            provided, only return information for these keywords.  Allowed
            values are "first_year", "num_coauthors", "num_publications",
            "num_citations", "country", "language",
            "reference_sim", "abstract_sim".

        refresh : bool (optional, default=False)
            Whether to refresh cached search files.

        tfidf_kwds : keywords
            Parameters to pass to TfidfVectorizer from the sklearn package
            for abstract vectorization.  Not used when `information=False`
            or when "abstract_sim" is not in `information`.  See
            https://scikit-learn.org/stable/modules/generated/sklearn.feature_extraction.text.TfidfVectorizer.html
            for possible values.

        Returns
        -------
        matches : list
            A list of Scopus IDs of scientists matching all the criteria (if
            information is False) or a list of namedtuples with the Scopus ID
            and additional information (if information is True).

        Raises
        ------
        ValueError
            If information is not bool and contains invalid keywords.
        """
        # Checks
        info_keys = [
            "first_name", "surname", "first_year", "num_coauthors",
            "num_publications", "num_citations", "num_coauthors_period",
            "num_publications_period", "num_citations_period", "subjects",
            "country", "affiliation_id", "affiliation", "language",
            "reference_sim", "abstract_sim"
        ]
        if isinstance(information, bool):
            if information:
                keywords = info_keys
            elif self.search_affiliations:
                information = True
                keywords = ["affiliation_id"]
            else:
                keywords = None
        else:
            keywords = information
            invalid = [x for x in keywords if x not in info_keys]
            if invalid:
                text = ("Parameter information contains invalid keywords: ",
                        ", ".join(invalid))
                raise ValueError(text)
            if self.search_affiliations and "affiliation_id" not in keywords:
                keywords.append("affiliation_id")
        # Variables
        _years = range(self.first_year - self.year_margin,
                       self.first_year + self.year_margin + 1)
        if self.period:
            _npapers = margin_range(len(self.publications_period),
                                    self.pub_margin)
            _ncits = margin_range(self.citations_period, self.cits_margin)
            _ncoauth = margin_range(len(self.coauthors_period),
                                    self.coauth_margin)
            _npapers_full = margin_range(len(self.publications),
                                         self.pub_margin)
            _ncits_full = margin_range(self.citations, self.cits_margin)
            _ncoauth_full = margin_range(len(self.coauthors),
                                         self.coauth_margin)
        else:
            _npapers = margin_range(len(self.publications), self.pub_margin)
            _ncits = margin_range(self.citations, self.cits_margin)
            _ncoauth = margin_range(len(self.coauthors), self.coauth_margin)
        n = len(self.search_group)
        text = "Searching through characteristics of {:,} authors".format(n)
        custom_print(text, verbose)

        # First round of filtering: minimum publications and main field
        # create df of authors
        authors = query_author_data(self.search_group, verbose=verbose)
        same_field = (authors.areas.str.startswith(self.main_field[1]))
        enough_pubs = (authors.documents.astype(int) >= int(min(_npapers)))
        group = authors[same_field & enough_pubs]["auth_id"].tolist()
        group.sort()
        n = len(group)
        text = "Left with {} authors\nFiltering based on provided "\
               "conditions...".format(n)
        custom_print(text, verbose)

        # Second round of filtering:
        # Check having no publications before minimum year, and if 0, the
        # number of publications in the relevant period.
        params = {
            "group": group,
            "ybefore": min(_years) - 1,
            "yupto": self.year,
            "npapers": _npapers,
            "yfrom": self.year_period,
            "verbose": verbose
        }
        group, _, _ = filter_pub_counts(**params)
        # Also screen out ids with too many publications over the full period
        if self.period:
            params.update({
                "npapers": [1, max(_npapers_full)],
                "yfrom": None,
                "group": group
            })
            group, _, _ = filter_pub_counts(**params)

        # Third round of filtering: citations (in the FULL period).
        authors = pd.DataFrame({"auth_id": group, "year": self.year})
        _, authors_cits_search = author_cits_in_cache(authors)
        text = "Search and filter based on count of citations\n{} to search "\
               "out of {}\n".format(len(authors_cits_search), len(group))
        custom_print(text, verbose)
        if not authors_cits_search.empty:
            authors_cits_search['n_cits'] = 0
            print_progress(0, len(authors_cits_search), verbose)
            for i, au in authors_cits_search.iterrows():
                q = "REF({}) AND PUBYEAR BEF {} AND NOT AU-ID({})".format(
                    au['auth_id'], self.year + 1, au['auth_id'])
                n = base_query("docs", q, size_only=True)
                authors_cits_search.at[i, 'n_cits'] = n
                print_progress(i + 1, len(authors_cits_search), verbose)
            cache_insert(authors_cits_search, table="author_cits_size")
        auth_cits_incache, _ = author_cits_in_cache(
            authors[["auth_id", "year"]])
        # keep if citations are in range
        mask = ((auth_cits_incache.n_cits <= max(_ncits)) &
                (auth_cits_incache.n_cits >= min(_ncits)))
        if self.period:
            mask = ((auth_cits_incache.n_cits >= min(_ncits)) &
                    (auth_cits_incache.n_cits <= max(_ncits_full)))
        group = (auth_cits_incache[mask]['auth_id'].tolist())

        # Fourth round of filtering: Download publications, verify coauthors
        # (in the FULL period) and first year.
        n = len(group)
        text = "Left with {} authors\nFiltering based on coauthors "\
               "number...".format(n)
        custom_print(text, verbose)
        authors = pd.DataFrame({"auth_id": group, "year": self.year},
                               dtype="uint64")
        _, author_year_search = author_year_in_cache(authors)
        matches = []
        if stacked:  # Combine searches
            if not author_year_search.empty:
                q = Template(
                    "AU-ID($fill) AND PUBYEAR BEF {}".format(self.year + 1))
                auth_year_group = author_year_search.auth_id.tolist()
                params = {
                    "group": auth_year_group,
                    "res": [],
                    "template": q,
                    "refresh": refresh,
                    "joiner": ") OR AU-ID(",
                    "q_type": "docs"
                }
                if verbose:
                    params.update({"total": len(auth_year_group)})
                res, _ = stacked_query(**params)
                res = build_dict(res, auth_year_group)
                if res:
                    # res can become empty after build_dict if an auth_id is old
                    res = pd.DataFrame.from_dict(res, orient="index")
                    res["year"] = self.year
                    res = res[["year", "first_year", "n_pubs", "n_coauth"]]
                    res.index.name = "auth_id"
                    res = res.reset_index()
                    cache_insert(res, table="author_year")
            author_year_cache, _ = author_year_in_cache(authors)
            if self._ignore_first_id:
                # only number of coauthors should be big enough
                enough = (author_year_cache.n_coauth >= min(_ncoauth))
                notoomany = (author_year_cache.n_coauth <= max(_ncoauth_full))
                mask = enough & notoomany
            elif self.period:
                # number of coauthors should be "big enough" and first year in
                # window
                same_start = (author_year_cache.first_year.between(
                    min(_years), max(_years)))
                enough = (author_year_cache.n_coauth >= min(_ncoauth))
                notoomany = (author_year_cache.n_coauth <= max(_ncoauth_full))
                mask = same_start & enough & notoomany
            else:
                # all restrictions apply
                same_start = (author_year_cache.first_year.between(
                    min(_years), max(_years)))
                same_coauths = (author_year_cache.n_coauth.between(
                    min(_ncoauth), max(_ncoauth)))
                mask = same_start & same_coauths
            matches = author_year_cache[mask]["auth_id"].tolist()
        else:  # Query each author individually
            for i, au in enumerate(group):
                print_progress(i + 1, len(group), verbose)
                res = base_query("docs",
                                 "AU-ID({})".format(au),
                                 refresh=refresh)
                res = [
                    p for p in res
                    if p.coverDate and int(p.coverDate[:4]) <= self.year
                ]
                # Filter
                min_year = int(min([p.coverDate[:4] for p in res]))
                authids = [p.author_ids for p in res if p.author_ids]
                authors = set([a for p in authids for a in p.split(";")])
                n_coauth = len(authors) - 1  # Subtract 1 for focal author
                if self._ignore_first_id and (n_coauth < max(_ncoauth)):
                    # only number of coauthors should be big enough
                    continue
                elif (self.period and ((n_coauth < max(_ncoauth)) or
                                       (min_year not in _years))):
                    # number of coauthors should be "big enough" and first year
                    # in window
                    continue
                elif ((len(res) not in _npapers) or (min_year not in _years)
                      or (n_coauth not in _ncoauth)):
                    continue
                matches.append(au)

        if self.period:
            text = "Left with {} authors\nFiltering based on exact period "\
                   "citations and coauthors...".format(len(matches))
            custom_print(text, verbose)
            # Further screen matches based on period cits and coauths
            to_loop = [m for m in matches]  # temporary copy
            for m in to_loop:
                q = "AU-ID({})".format(m)
                res = base_query("docs",
                                 "AU-ID({})".format(m),
                                 refresh=refresh,
                                 fields=["eid", "author_ids", "coverDate"])
                pubs = [
                    p for p in res if int(p.coverDate[:4]) <= self.year
                    and int(p.coverDate[:4]) >= self.year_period
                ]
                coauths = set(get_authors(pubs)) - {str(m)}
                if not (min(_ncoauth) <= len(coauths) <= max(_ncoauth)):
                    matches.remove(m)
                    continue
                eids_period = [p.eid for p in pubs]
                cits = count_citations(search_ids=eids_period,
                                       pubyear=self.year + 1,
                                       exclusion_key="AU-ID",
                                       exclusion_ids=[str(m)])
                if not (min(_ncits) <= cits <= max(_ncits)):
                    matches.remove(m)
        text = "Found {:,} author(s) matching all criteria".format(
            len(matches))
        custom_print(text, verbose)

        # Possibly add information to matches
        if keywords and len(matches) > 0:
            custom_print("Providing additional information...", verbose)
            profiles = [
                Scientist([str(a)],
                          self.year,
                          period=self.period,
                          refresh=refresh) for a in matches
            ]
            matches = inform_matches(profiles, self, keywords, stop_words,
                                     verbose, refresh, **tfidf_kwds)
        if self.search_affiliations:
            matches = [
                m for m in matches if len(
                    set(m.affiliation_id.replace(" ", "").split(";")).
                    intersection([str(a) for a in self.search_affiliations]))
            ]
        return matches
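
To situate find_matches() in a typical workflow, a hedged end-to-end sketch follows. The surrounding calls (Original, define_search_sources, define_search_group) exist in sosia, but the exact arguments here are placeholders:

# Sketch: end-to-end matching with sosia (placeholder ID and year).
import sosia

scientist = sosia.Original(55208373700, 2017)  # author ID, treatment year
scientist.define_search_sources(verbose=True)
scientist.define_search_group(verbose=True, stacked=True)
matches = scientist.find_matches(stacked=True, information=False,
                                 verbose=True)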