Example #1
0
 def test_citation_matching_issue621(self) -> None:
     """Make sure that a citation like 1 Wheat 9 doesn't match 9 Wheat 1"""
     # The fixture only contains a reference to 9 F. 1, so resolving
     # 1 F. 9 must fail: volume and page must not match in reverse order.
     extracted = get_citations("1 F. 9 (1795)")
     resolution = resolve_fullcase_citation(extracted[0])
     self.assertEqual(NO_MATCH_RESOURCE, resolution)
Example #2
0
    def test_make_html_from_html(self) -> None:
        """Can we convert the HTML of an opinion into modified HTML?"""
        # fmt: off

        # Each pair is (input opinion HTML, expected annotated HTML).
        # Citations are resolved to NO_MATCH_RESOURCE below, so matched
        # spans get the "citation no-link" class rather than a link.
        test_pairs = [
            # Id. citation with HTML tags
            ('<div><p>the improper views of the Legislature.\" 2 <i>id., at '
             '73.</i></p>\n<p>Nathaniel Gorham of Massachusetts</p></div>',
             '<div><p>the improper views of the Legislature." 2 <i><span '
             'class="citation no-link">id., at 73</span>.</i></p>\n<p>'
             'Nathaniel Gorham of Massachusetts</p></div>'),

            # Id. citation with an intervening HTML tag
            #  (We expect the HTML to be unchanged, since it's too risky to
            #   modify with another tag in the way)
            ('<div><p>the improper views of the Legislature.\" 2 <i>id.,</i> '
             'at <b>73, bolded</b>.</p>\n<p>Nathaniel Gorham of Massachusetts'
             '</p></div>',
             '<div><p>the improper views of the Legislature.\" 2 <i>id.,</i> '
             'at <b>73, bolded</b>.</p>\n<p>Nathaniel Gorham of Massachusetts'
             '</p></div>'),

            # Ibid. citation with HTML tags
            ('<div><p>possess any peculiar knowledge of the mere policy of '
             'public measures.\" <i>Ibid.</i> Gerry of Massachusetts '
             'like</p></div>',
             '<div><p>possess any peculiar knowledge of the mere policy of '
             'public measures." <i><span class="citation no-link">Ibid.'
             '</span></i> Gerry of Massachusetts like</p></div>'
            ),
        ]

        # fmt: on
        for s, expected_html in test_pairs:
            with self.subTest(
                "Testing html to html conversion for %s..." % s,
                s=s,
                expected_html=expected_html,
            ):
                opinion = Opinion(html=s)
                get_and_clean_opinion_text(opinion)
                citations = get_citations(opinion.cleaned_text)

                # Stub out fake output from do_resolve_citations(), since the
                # purpose of this test is not to test that. We just need
                # something that looks like what create_cited_html() expects
                # to receive.
                citation_resolutions = {NO_MATCH_RESOURCE: citations}

                created_html = create_cited_html(opinion, citation_resolutions)
                self.assertEqual(
                    created_html,
                    expected_html,
                    msg="\n%s\n\n    !=\n\n%s" % (created_html, expected_html),
                )
Example #3
0
    def test_make_html_from_matched_citation_objects(self) -> None:
        """Can we render matched citation objects as HTML?"""
        # This test case is similar to the two above, except it allows us to
        # test the rendering of citation objects that we assert are correctly
        # matched. (No matching is performed in the previous cases.)
        # fmt: off

        # Each pair is (plain text input, expected HTML). The matched
        # opinion is mocked below, so annotated spans render with
        # data-id="MATCH_ID" and an <a href="MATCH_URL"> link.
        test_pairs = [
            # Id. citation with page number ("Id., at 123, 124")
            ('asdf, Id., at 123, 124. Lorem ipsum dolor sit amet',
             '<pre class="inline">asdf, </pre><span class="citation" data-id="'
             'MATCH_ID"><a href="MATCH_URL">Id., at 123, 124</a></span><pre '
             'class="inline">. Lorem ipsum dolor sit amet</pre>'),

            # Id. citation with complex page number ("Id. @ 123:1, ¶¶ 124")
            ('asdf, Id. @ 123:1, ¶¶ 124. Lorem ipsum dolor sit amet',
             '<pre class="inline">asdf, </pre><span class="citation" data-id='
             '"MATCH_ID"><a href="MATCH_URL">Id.</a></span><pre class='
             '"inline"> @ 123:1, ¶¶ 124. Lorem ipsum dolor sit amet</pre>'),

            # Id. citation without page number ("Id. Something else")
            ('asdf, Id. Lorem ipsum dolor sit amet',
             '<pre class="inline">asdf, </pre><span class="citation" data-id="'
             'MATCH_ID"><a href="MATCH_URL">Id.</a></span><pre class="inline">'
             ' Lorem ipsum dolor sit amet</pre>'),
        ]

        # fmt: on
        for s, expected_html in test_pairs:
            with self.subTest(
                    f"Testing object to HTML rendering for {s}...",
                    s=s,
                    expected_html=expected_html,
            ):
                opinion = Opinion(plain_text=s)
                get_and_clean_opinion_text(opinion)
                citations = get_citations(opinion.cleaned_text)

                # Stub out fake output from do_resolve_citations(), since the
                # purpose of this test is not to test that. We just need
                # something that looks like what create_cited_html() expects
                # to receive. Also make sure that the "matched" opinion is
                # mocked appropriately.
                opinion.pk = "MATCH_ID"
                opinion.cluster = Mock(OpinionCluster(id=24601))
                opinion.cluster.get_absolute_url.return_value = "MATCH_URL"
                citation_resolutions = {opinion: citations}

                created_html = create_cited_html(opinion, citation_resolutions)

                self.assertEqual(
                    created_html,
                    expected_html,
                    msg=f"\n{created_html}\n\n    !=\n\n{expected_html}",
                )
Example #4
0
 def __populate_db_contexts_for_opinion(
         self,
         session: Session,
         opinion: Opinion,
         reporter_resource_dict: dict,
         context_slice: slice = slice(-128, 128),
 ) -> None:
     """Extract citations from an opinion's HTML and, for each citation
     that resolves to a known opinion, store any descriptive
     parenthetical plus the surrounding textual context.

     :param session: Open DB session used for the inserts; committing is
         left to the caller.
     :param opinion: Opinion whose ``html_text`` is parsed.
     :param reporter_resource_dict: Maps a formatted reporter string
         (volume/reporter/page) to the cited opinion's resource id.
     :param context_slice: Token window, relative to the citation's
         token index, captured as context (default: 128 tokens before
         and after).
     :raises ValueError: If the opinion has no HTML text.
     """
     unstructured_html = opinion.html_text
     if not unstructured_html:
         raise ValueError(f"No HTML for case {opinion.resource_id}")
     # Strip markup; we only operate on the visible text.
     unstructured_text = BeautifulSoup(unstructured_html,
                                       features="lxml").text
     # Normalize "U. S." so the reporter appears in its canonical form
     # before citation extraction.
     clean_text = unstructured_text.replace("U. S.", "U.S.")
     tokenizer = OneTimeTokenizer(self.eyecite_tokenizer)
     citations = list(eyecite.get_citations(clean_text,
                                            tokenizer=tokenizer))
     cited_resources = eyecite.resolve_citations(citations)
     for resource, citation_list in cited_resources.items():
         cited_opinion_res_id = reporter_resource_dict.get(
             format_reporter(
                 resource.citation.groups.get("volume"),
                 resource.citation.groups.get("reporter"),
                 resource.citation.groups.get("page"),
             ))
         if cited_opinion_res_id is None:
             # Cited case isn't in our corpus; nothing to record.
             continue
         for citation in citation_list:
             if not isinstance(citation, CaseCitation):
                 continue
             if (citation.metadata.parenthetical is not None
                     and ParentheticalProcessor.is_descriptive(
                         citation.metadata.parenthetical)):
                 session.add(
                     OpinionParenthetical(
                         citing_opinion_id=opinion.resource_id,
                         cited_opinion_id=cited_opinion_res_id,
                         text=ParentheticalProcessor.prepare_text(
                             citation.metadata.parenthetical),
                     ))
             # Clamp the context window to the bounds of the token list.
             start = max(0, citation.index + context_slice.start)
             stop = min(len(tokenizer.words),
                        citation.index + context_slice.stop)
             session.add(
                 CitationContext(
                     citing_opinion_id=opinion.resource_id,
                     cited_opinion_id=cited_opinion_res_id,
                     # Keep only plain strings; tokenizer.words may also
                     # hold non-string token objects — hence the filter.
                     text=" ".join([
                         s for s in tokenizer.words[start:stop]
                         if isinstance(s, str)
                     ]),
                 ))
Example #5
0
 def test_identifying_parallel_citations(self) -> None:
     """Given a string, can we identify parallel citations"""
     tests = (
         # Each entry: (query string, number of citation groups expected,
         # number of parallel citations expected in the first group).
         # Simple case
         ("1 U.S. 1 (22 U.S. 33)", 1, 2),
         # Too far apart
         ("1 U.S. 1 too many words 22 U.S. 33", 0, 0),
         # Three citations
         # ("1 U.S. 1, (44 U.S. 33, 99 U.S. 100)", 1, 3),
         # Parallel citation after a valid citation too early on
         ("1 U.S. 1 too many words, then 22 U.S. 33, 13 WL 33223", 1, 2),
     )
     for q, citation_group_count, expected_num_parallel_citations in tests:
         subtest_msg = f"Testing parallel citation identification for: {q}..."
         with self.subTest(
                 subtest_msg,
                 q=q,
                 citation_group_count=citation_group_count,
                 expected_num_parallel_citations=expected_num_parallel_citations,
         ):
             found_groups = identify_parallel_citations(get_citations(q))
             group_total = len(found_groups)
             self.assertEqual(
                 group_total,
                 citation_group_count,
                 msg=f"Did not have correct number of citation groups. "
                     f"Got {group_total}, not {citation_group_count}.",
             )
             # Substitute an empty inner list so the length check below
             # works even when nothing was grouped.
             first_group = list(found_groups)[0] if found_groups else []
             parallel_total = len(first_group)
             self.assertEqual(
                 parallel_total,
                 expected_num_parallel_citations,
                 msg=f"Did not identify correct number of parallel citations "
                     f"in the group. Got {parallel_total}, not "
                     f"{expected_num_parallel_citations}",
             )
Example #6
0
def get_query_citation(cd: CleanData) -> Optional[List[FullCaseCitation]]:
    """Extract citations from the query string and return them, or return
    None

    :param cd: Cleaned search form data; only the "q" field is read.
    :return: None when the query is empty; otherwise see the NOTE below.
    """
    # No query text: nothing to extract.
    if not cd.get("q"):
        return None
    citations = get_citations(cd["q"],
                              do_post_citation=False,
                              do_defendant=False)

    # Only full case citations are usable for a database lookup.
    citations = [c for c in citations if isinstance(c, FullCaseCitation)]

    matches = None
    if len(citations) == 1:
        # Exactly one citation in the query: look it up. (Queries with
        # zero or several citations get no special help.)
        matches = search_db_for_fullcitation(citations[0])
        if len(matches) == 1:
            # Exactly one DB hit: return its first result document.
            return matches.result.docs[0]

    # NOTE(review): with zero or multiple DB hits this returns the raw
    # search-result object (or None), not a List[FullCaseCitation] as
    # the annotation suggests — confirm what callers expect here.
    return matches
Example #7
0
    def run_test_pairs(self, test_pairs, message, tokenizers=None):
        """Run (query, expected_cites[, kwargs]) triples against each
        tokenizer and compare the extracted citations' salient attributes
        with the expected ones."""

        def comparable(cite):
            # Collapse a citation down to just the attributes we compare.
            attrs = {
                "groups": cite.groups,
                "metadata": cite.metadata,
            }
            if isinstance(cite, ResourceCitation):
                attrs["year"] = cite.year
                attrs["corrected_reporter"] = cite.corrected_reporter()
            return attrs

        active_tokenizers = (
            tested_tokenizers if tokenizers is None else tokenizers
        )
        for q, expected_cites, *extra in test_pairs:
            call_kwargs = extra[0] if extra else {}
            clean_steps = call_kwargs.pop("clean", [])
            clean_q = clean_text(q, clean_steps)
            for tokenizer in active_tokenizers:
                with self.subTest(message,
                                  tokenizer=type(tokenizer).__name__,
                                  q=q):
                    cites_found = get_citations(clean_q,
                                                tokenizer=tokenizer,
                                                **call_kwargs)
                    # First check we found the right kinds of cites...
                    self.assertEqual(
                        [type(c) for c in cites_found],
                        [type(c) for c in expected_cites],
                        f"Extracted cite count doesn't match for {repr(q)}",
                    )
                    # ...then check each cite's attributes in detail.
                    for found, expected in zip(cites_found, expected_cites):
                        self.assertEqual(
                            comparable(found),
                            comparable(expected),
                            f"Extracted cite attrs don't match for {repr(q)}",
                        )
Example #8
0
    def test_make_html_from_plain_text(self) -> None:
        """Can we convert the plain text of an opinion into HTML?"""
        # fmt: off

        # Each pair is (plain text input, expected HTML). Citations are
        # resolved to NO_MATCH_RESOURCE below, so every matched span
        # carries the "citation no-link" class and no hyperlink.
        test_pairs = [
            # Simple example for full citations
            ('asdf 22 U.S. 33 asdf',
             '<pre class="inline">asdf </pre><span class="'
             'citation no-link">22 U.S. 33</span><pre class="'
             'inline"> asdf</pre>'),

            # Using a variant format for U.S. (Issue #409)
            ('asdf 22 U. S. 33 asdf',
             '<pre class="inline">asdf </pre><span class="'
             'citation no-link">22 U. S. 33</span><pre class="'
             'inline"> asdf</pre>'),

            # Full citation across line break
            ('asdf John v. Doe, 123\nU.S. 456, upholding foo bar',
             '<pre class="inline">asdf John v. Doe, </pre><span class="'
             'citation no-link">123\nU.S. 456</span><pre class="inline">, '
             'upholding foo bar</pre>'),

            # Basic short form citation
            ('existing text asdf, 515 U.S., at 240. foobar',
             '<pre class="inline">existing text asdf, </pre><span class="'
             'citation no-link">515 U.S., at 240</span><pre class="inline">. '
             'foobar</pre>'),

            # Short form citation with no comma after reporter in original
            ('existing text asdf, 1 U. S. at 2. foobar',
             '<pre class="inline">existing text asdf, </pre><span class="'
             'citation no-link">1 U. S. at 2</span><pre class="inline">. '
             'foobar</pre>'),

            # Short form citation across line break
            ('asdf.’ ” 123 \n U.S., at 456. Foo bar foobar',
             '<pre class="inline">asdf.’ ” </pre><span class="citation '
             'no-link">123 \n U.S., at 456</span><pre class="inline">. Foo '
             'bar foobar</pre>'),

            # First kind of supra citation (standard kind)
            ('existing text asdf, supra, at 2. foobar',
             '<pre class="inline">existing text asdf, </pre><span class="'
             'citation no-link">supra, at 2</span><pre class="inline">. '
             'foobar</pre>'),

            # Second kind of supra citation (with volume)
            ('existing text asdf, 123 supra, at 2. foo bar',
             '<pre class="inline">existing text asdf, 123 </pre><span class="'
             'citation no-link">supra, at 2</span><pre class="inline">. foo '
             'bar</pre>'),

            # Third kind of supra citation (sans page)
            ('existing text asdf, supra, foo bar',
             '<pre class="inline">existing text asdf, </pre><span class="'
             'citation no-link">supra,</span><pre class="inline"> foo bar'
             '</pre>'),

            # Fourth kind of supra citation (with period)
            ('existing text asdf, supra. foo bar',
             '<pre class="inline">existing text asdf, </pre><span class="'
             'citation no-link">supra.</span><pre class="inline"> foo bar'
             '</pre>'),

            # Supra citation across line break
            ('existing text asdf, supra, at\n99 (quoting foo)',
             '<pre class="inline">existing text asdf, </pre><span class="'
             'citation no-link">supra, at\n99</span><pre class="inline"> '
             '(quoting foo)</pre>'),

            # Id. citation ("Id., at 123")
            ('asdf, id., at 123. Lorem ipsum dolor sit amet',
             '<pre class="inline">asdf, </pre><span class="citation no-link">'
             'id., at 123</span><pre class="inline">. Lorem ipsum dolor sit '
             'amet</pre>'),

            # Duplicate Id. citation
            ('asd, id., at 123. Lo rem ip sum. asdf, id., at 123. Lo rem ip.',
             '<pre class="inline">asd, </pre><span class="citation no-link">'
             'id., at 123</span><pre class="inline">. Lo rem ip sum. asdf, '
             '</pre><span class="citation no-link">id., at 123</span><pre '
             'class="inline">. Lo rem ip.</pre>'),

            # Id. citation across line break
            ('asdf." Id., at 315.\n       Lorem ipsum dolor sit amet',
             '<pre class="inline">asdf." </pre><span class="citation '
             'no-link">Id., at 315</span><pre class="inline">.\n       Lorem '
             'ipsum dolor sit amet</pre>'),

            # Ibid. citation ("... Ibid.")
            ('asdf, Ibid. Lorem ipsum dolor sit amet',
             '<pre class="inline">asdf, </pre><span class="citation no-link">'
             'Ibid.</span><pre class="inline"> Lorem ipsum dolor sit amet'
             '</pre>'),

            # NonopinionCitation
            ('Lorem ipsum dolor sit amet. U.S. Code §3617. Foo bar.',
             '<pre class="inline">Lorem ipsum dolor sit amet. U.S. Code </pre>'
             '<span class="citation no-link">§3617.</span><pre class="inline">'
             ' Foo bar.</pre>'),
        ]

        # fmt: on
        for s, expected_html in test_pairs:
            with self.subTest(
                    f"Testing plain text to html conversion for {s}...",
                    s=s,
                    expected_html=expected_html,
            ):
                opinion = Opinion(plain_text=s)
                get_and_clean_opinion_text(opinion)
                citations = get_citations(opinion.cleaned_text)

                # Stub out fake output from do_resolve_citations(), since the
                # purpose of this test is not to test that. We just need
                # something that looks like what create_cited_html() expects
                # to receive.
                citation_resolutions = {NO_MATCH_RESOURCE: citations}

                created_html = create_cited_html(opinion, citation_resolutions)
                self.assertEqual(
                    created_html,
                    expected_html,
                    msg=f"\n{created_html}\n\n    !=\n\n{expected_html}",
                )
Example #9
0
    def test_annotate(self):
        """Does annotate() insert the expected markers around each
        extracted citation span, across cleaning steps, unbalanced-tag
        handling modes, and custom annotators?"""

        def straighten_quotes(text):
            # Cleaner used below: normalizes curly apostrophes to
            # straight ones.
            return text.replace("’", "'")

        def lower_annotator(before, text, after):
            # Custom annotator used below: lower-cases the cite text
            # between the markers.
            return before + text.lower() + after

        # Each entry: (source_text, expected annotated text, clean_steps
        # [, annotate_kwargs]). Markers <i>/</i> are numbered by the
        # cite's position in get_citations() output.
        test_pairs = (
            # single cite
            ("1 U.S. 1", "<0>1 U.S. 1</0>", []),
            # cite with extra text
            ("foo 1 U.S. 1 bar", "foo <0>1 U.S. 1</0> bar", []),
            # cite with punctuation
            ("foo '1 U.S. 1' bar", "foo '<0>1 U.S. 1</0>' bar", []),
            # law cite
            (
                "foo. Mass. Gen. Laws ch. 1, § 2. bar",
                "foo. <0>Mass. Gen. Laws ch. 1, § 2</0>. bar",
                [],
            ),
            # journal cite
            (
                "foo. 1 Minn. L. Rev. 2. bar",
                "foo. <0>1 Minn. L. Rev. 2</0>. bar",
                [],
            ),
            # Id. cite
            (
                "1 U.S. 1. Foo. Id. Bar. Id. at 2.",
                "<0>1 U.S. 1</0>. Foo. <1>Id.</1> Bar. <2>Id. at 2</2>.",
                [],
            ),
            # Supra cite
            (
                "1 U.S. 1. Foo v. Bar, supra at 2.",
                "<0>1 U.S. 1</0>. Foo v. Bar, <1>supra at 2</1>.",
                [],
            ),
            # whitespace and html -- no unbalanced tag check
            (
                "<body>foo  <i>1   <b>U.S.</b></i>   1 bar</body>",
                "<body>foo  <i><0>1   <b>U.S.</b></i>   1</0> bar</body>",
                ["html", "inline_whitespace"],
            ),
            # whitespace and html -- skip unbalanced tags
            (
                "foo  <i>1 U.S.</i> 1; 2 <i>U.S.</i> 2",
                "foo  <i>1 U.S.</i> 1; <1>2 <i>U.S.</i> 2</1>",
                ["html", "inline_whitespace"],
                {
                    "unbalanced_tags": "skip"
                },
            ),
            # whitespace and html -- wrap unbalanced tags
            (
                "<i>1 U.S.</i> 1; 2 <i>U.S.</i> 2",
                "<i><0>1 U.S.</0></i><0> 1</0>; <1>2 <i>U.S.</i> 2</1>",
                ["html", "inline_whitespace"],
                {
                    "unbalanced_tags": "wrap"
                },
            ),
            # tighly-wrapped html -- skip unbalanced tags (issue #54)
            (
                "foo <i>Ibid.</i> bar",
                "foo <i><0>Ibid.</0></i> bar",
                ["html", "inline_whitespace"],
                {
                    "unbalanced_tags": "skip"
                },
            ),
            # whitespace containing linebreaks
            ("1\nU.S. 1", "<0>1\nU.S. 1</0>", ["all_whitespace"]),
            # multiple Id. tags
            (
                "1 U.S. 1. Id. 2 U.S. 2. Id.",
                "<0>1 U.S. 1</0>. <1>Id.</1> <2>2 U.S. 2</2>. <3>Id.</3>",
                [],
            ),
            # replacement in cleaners
            (
                "1 Abbott’s Pr.Rep. 1",
                "<0>1 Abbott’s Pr.Rep. 1</0>",
                [straighten_quotes],
            ),
            # custom annotator
            (
                "1 U.S. 1",
                "<0>1 u.s. 1</0>",
                [],
                {
                    "annotator": lower_annotator
                },
            ),
        )
        for source_text, expected, clean_steps, *annotate_kwargs in test_pairs:
            annotate_kwargs = annotate_kwargs[0] if annotate_kwargs else {}
            with self.subTest(
                    source_text,
                    clean_steps=clean_steps,
                    annotate_args=annotate_kwargs,
            ):
                plain_text = clean_text(source_text, clean_steps)
                cites = get_citations(plain_text)
                # Build (span, open_marker, close_marker) triples, one
                # per extracted cite, numbered in extraction order.
                annotations = [(c.span(), f"<{i}>", f"</{i}>")
                               for i, c in enumerate(cites)]
                annotated = annotate(
                    plain_text,
                    annotations,
                    source_text=source_text,
                    **annotate_kwargs,
                )
                self.assertEqual(annotated, expected)
Example #10
0
def find_citations_for_opinion_by_pks(
    self,
    opinion_pks: List[int],
    index: bool = True,
) -> None:
    """Find citations for search.Opinion objects.

    NOTE(review): `self.retry` / `.delay` calls suggest this is a bound
    Celery task — confirm the decorator outside this view of the file.

    :param opinion_pks: An iterable of search.Opinion PKs
    :param index: Whether to add the item to Solr
    :return: None
    """
    opinions: List[Opinion] = Opinion.objects.filter(pk__in=opinion_pks)
    for opinion in opinions:
        # Memoize parsed versions of the opinion's text
        get_and_clean_opinion_text(opinion)

        # Extract the citations from the opinion's text
        citations: List[CitationBase] = get_citations(opinion.cleaned_text)

        # If no citations are found, continue
        if not citations:
            continue

        # Resolve all those different citation objects to Opinion objects,
        # using a variety of heuristics.
        try:
            citation_resolutions: Dict[
                MatchedResourceType,
                List[SupportedCitationType]] = do_resolve_citations(
                    citations, opinion)
        except ResponseNotReady as e:
            # Threading problem in httplib, which is used in the Solr query.
            raise self.retry(exc=e, countdown=2)

        # Generate the citing opinion's new HTML with inline citation links
        opinion.html_with_citations = create_cited_html(
            opinion, citation_resolutions)

        # Delete the unmatched citations
        citation_resolutions.pop(NO_MATCH_RESOURCE, None)

        # Increase the citation count for the cluster of each matched opinion
        # if that cluster has not already been cited by this opinion. First,
        # calculate a list of the IDs of every opinion whose cluster will need
        # updating. (The count goes up by exactly 1 per citing opinion,
        # regardless of how many times the opinion is cited within it.)
        all_cited_opinions = opinion.opinions_cited.all().values_list(
            "pk", flat=True)
        opinion_ids_to_update = set()
        for _opinion in citation_resolutions.keys():
            if _opinion.pk not in all_cited_opinions:
                opinion_ids_to_update.add(_opinion.pk)

        # Finally, commit these changes to the database in a single
        # transaction block. Trigger a single Solr update as well, if
        # required.
        with transaction.atomic():
            opinion_clusters_to_update = OpinionCluster.objects.filter(
                sub_opinions__pk__in=opinion_ids_to_update)
            opinion_clusters_to_update.update(
                citation_count=F("citation_count") + 1)
            if index:
                add_items_to_solr.delay(
                    opinion_clusters_to_update.values_list("pk", flat=True),
                    "search.OpinionCluster",
                )

            # Nuke existing citations
            OpinionsCited.objects.filter(citing_opinion_id=opinion.pk).delete()

            # Create the new ones. Depth records how many times this
            # opinion cites the other one.
            OpinionsCited.objects.bulk_create([
                OpinionsCited(
                    citing_opinion_id=opinion.pk,
                    cited_opinion_id=_opinion.pk,
                    depth=len(_citations),
                ) for _opinion, _citations in citation_resolutions.items()
            ])

            # Save all the changes to the citing opinion (send to solr later)
            opinion.save(index=False)

    # If a Solr update was requested, do a single one at the end with all the
    # pks of the passed opinions
    if index:
        add_items_to_solr.delay(opinion_pks, "search.Opinion")
Example #11
0
    def checkResolution(self, *expected_resolutions: Tuple[Optional[int],
                                                           str]):
        """Check how a list of citation strings is resolved by
        resolve_citations().

        Each argument is a ``(resource_index, citation_text)`` pair.
        Pairs sharing a resource index must resolve to the same
        Resource; an index of ``None`` means the citation should remain
        unresolved. Indexes must be introduced consecutively, and the
        first citation for each new index must be a full citation.

        For example, to check resolutions for
        "1 U.S. 1. 1 U.S., at 2. 1 F.2d 1. 2 U.S., at 2.":
            >>> self.checkResolution(
            ...     (0, "1 U.S. 1."),
            ...     (0, "1 U.S., at 2."),
            ...     (1, "1 F.2d 1."),
            ...     (None, "2 U.S., at 2."),
            ... )

        This builds the expected resolution dict:
            {
                Resource(citation=<1 U.S. 1>): [<1 U.S. 1>, <1 U.S., at 2>],
                Resource(citation=<1 F.2d 1>): [<1 F.2d 1>],
            }

        and delegates to:
            self.assertResolution(
                [<1 U.S. 1>, <1 U.S., at 2>, <1 F.2d 1>, <2 U.S., at 2>],
                expected_resolution_dict
            )
        """
        # Accumulators for the eventual assertResolution() call.
        expected_resolution_dict = defaultdict(list)
        all_cites = []

        # Resources discovered so far, in index order.
        known_resources: List[Resource] = []

        for i, cite_text in expected_resolutions:
            # Each row must contain exactly one extractable cite.
            extracted = get_citations(cite_text)
            self.assertEqual(
                len(extracted),
                1,
                f"Failed to find exactly one cite in {repr(cite_text)}",
            )
            cite = extracted[0]
            all_cites.append(cite)

            # Rows marked None are expected to stay unresolved.
            if i is None:
                continue

            # Guard: indexes must be introduced consecutively.
            if i > len(known_resources):
                self.fail(
                    f"Invalid row {repr((i, cite_text))}: target index {i} is too high."
                )

            # First sighting of an index opens a new resource, which
            # must be anchored by a full citation.
            if i == len(known_resources):
                if not isinstance(cite, FullCitation):
                    self.fail(
                        f"Invalid row {repr((i, cite_text))}: first instance of {i} must be a full cite."
                    )
                known_resources.append(Resource(citation=cite))

            # Record the cite under its target resource.
            expected_resolution_dict[known_resources[i]].append(cite)

        self.assertResolution(all_cites, expected_resolution_dict)
Example #12
0
 def test_read_case_list_from_eyecite_case_citation(self):
     """A decision list can be fetched using an eyecite CaseCitation."""
     cite = eyecite.get_citations("9 F. Cas. 50")[0]
     decisions = self.client.read_decision_list_by_cite(cite=cite)
     assert decisions[0].name_abbreviation == "Fikes v. Bentley"