Example No. 1
def import_law_box_case(case_path):
    """Open the file, get its contents, convert to XML and extract the meta data.

    Return a document object for saving in the database
    """
    raw_text = open(case_path).read()
    clean_html_tree, complete_html_tree, clean_html_str, body_text = get_html_from_raw_text(
        raw_text)

    sha1 = hashlib.sha1(clean_html_str).hexdigest()
    citations = get_citations_from_tree(complete_html_tree, case_path)
    judges = get_judge(clean_html_tree, case_path)
    court = get_court_object(clean_html_tree, citations, case_path, judges)

    doc = Document(
        source='L',
        sha1=sha1,
        html=clean_html_str,
        # we clear this field later, putting the value into html_lawbox.
        date_filed=get_date_filed(clean_html_tree,
                                  citations=citations,
                                  case_path=case_path,
                                  court=court),
        precedential_status=get_precedential_status(),
        judges=judges,
        download_url=case_path,
    )

    docket = Docket(
        docket_number=get_docket_number(clean_html_tree,
                                        case_path=case_path,
                                        court=court),
        case_name=get_case_name(complete_html_tree, case_path),
        court=court,
    )

    # Necessary for dup_finder.
    path = '//p/text()'
    doc.body_text = ' '.join(clean_html_tree.xpath(path))

    # Add the dict of citations to the object as its attributes.
    citations_as_dict = map_citations_to_models(citations)
    for k, v in citations_as_dict.items():
        setattr(doc, k, v)

    doc.docket = docket

    return doc
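
The function fingerprints the cleaned HTML with SHA-1 before anything else, so re-importing the same Lawbox file yields the same `sha1` value for deduplication. A minimal sketch of that step, standard library only (the helper name is illustrative, not part of the project):

import hashlib

def fingerprint(clean_html_str):
    # hashlib.sha1 expects bytes; encode if we were handed text.
    if isinstance(clean_html_str, str):
        clean_html_str = clean_html_str.encode('utf-8')
    return hashlib.sha1(clean_html_str).hexdigest()

print(fingerprint('<p>Some opinion text</p>'))  # stable 40-char hex digest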
Example No. 2
def import_law_box_case(case_path):
    """Open the file, get its contents, convert to XML and extract the meta data.

    Return a document object for saving in the database
    """
    raw_text = open(case_path).read()
    clean_html_tree, complete_html_tree, clean_html_str, body_text = get_html_from_raw_text(
        raw_text)

    sha1 = hashlib.sha1(clean_html_str).hexdigest()
    citations = get_citations_from_tree(complete_html_tree, case_path)
    judges = get_judge(clean_html_tree, case_path)
    court = get_court_object(clean_html_tree, citations, case_path, judges)

    doc = Document(
        source='L',
        sha1=sha1,
        html=clean_html_str,
        # we clear this field later, putting the value into html_lawbox.
        date_filed=get_date_filed(clean_html_tree, citations=citations,
                                  case_path=case_path, court=court),
        precedential_status=get_precedential_status(),
        judges=judges,
        download_url=case_path,
    )

    docket = Docket(
        docket_number=get_docket_number(
            clean_html_tree,
            case_path=case_path,
            court=court
        ),
        case_name=get_case_name(complete_html_tree, case_path),
        court=court,
    )

    # Necessary for dup_finder.
    path = '//p/text()'
    doc.body_text = ' '.join(clean_html_tree.xpath(path))

    # Add the dict of citations to the object as its attributes.
    citations_as_dict = map_citations_to_models(citations)
    for k, v in citations_as_dict.items():
        setattr(doc, k, v)

    doc.docket = docket

    return doc
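
The `body_text` that `dup_finder` relies on is just the joined text of every `<p>` node in the cleaned tree. A small, self-contained sketch of that XPath extraction, assuming lxml is installed (the input HTML here is made up for illustration):

from lxml import html

raw = '<html><body><p>First paragraph.</p><p>Second one.</p></body></html>'
tree = html.fromstring(raw)
body_text = ' '.join(tree.xpath('//p/text()'))
print(body_text)  # First paragraph. Second one.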
Example No. 3
def citation_redirector(request, reporter=None, volume=None, page=None):
    """Take a citation URL and use it to redirect the user to the canonical page
    for that citation.

    This uses the same infrastructure as the machinery that identifies
    citations in the text of opinions.
    """
    if request.method == 'POST':
        form = CitationRedirectorForm(request.POST)
        if form.is_valid():
            # Redirect to the page with the right values
            cd = form.cleaned_data
            return HttpResponseRedirect(
                reverse('citation_redirector', kwargs=cd))
        else:
            # Error in form, somehow.
            return render(request, 'citation_redirect_info_page.html', {
                'show_homepage': True,
                'form': form,
                'private': True
            })
    else:
        if all(_ is None for _ in (reporter, volume, page)):
            # No parameters. Show the standard page.
            form = CitationRedirectorForm()
            return render(request, 'citation_redirect_info_page.html', {
                'show_homepage': True,
                'form': form,
                'private': False,
            })

        else:
            # We have a citation. Look it up, redirect the user or show
            # disambiguation.
            citation_str = " ".join([volume, reporter, page])
            try:
                citation = get_citations(citation_str)[0]
                # Corrects typos/variations.
                citation_str = citation.base_citation()
                lookup_fields = [map_citations_to_models([citation]).keys()[0]]
            except IndexError:
                # Unable to disambiguate the citation. Try looking in *all*
                # citation fields.
                lookup_fields = OpinionCluster().citation_fields

            # We were able to get a match, expand it if it's a federal/state
            # match.
            if (len(lookup_fields) == 1
                    and lookup_fields[0] == 'federal_cite_one'):
                lookup_fields = [
                    'federal_cite_one', 'federal_cite_two',
                    'federal_cite_three'
                ]
            elif (len(lookup_fields) == 1
                  and lookup_fields[0] == 'state_cite_one'):
                lookup_fields = [
                    'state_cite_one', 'state_cite_two', 'state_cite_three'
                ]
            q = Q()
            for lookup_field in lookup_fields:
                q |= Q(**{lookup_field: citation_str})
            clusters = OpinionCluster.objects.filter(q)

            # Show the correct page....
            if clusters.count() == 0:
                # No results for an otherwise valid citation.
                return render(
                    request, 'citation_redirect_info_page.html', {
                        'none_found': True,
                        'citation_str': citation_str,
                        'private': True,
                    })

            elif clusters.count() == 1:
                # Total success. Redirect to correct location.
                return HttpResponseRedirect(clusters[0].get_absolute_url())

            elif clusters.count() > 1:
                # Multiple results. Show them.
                return render(
                    request, 'citation_redirect_info_page.html', {
                        'too_many': True,
                        'citation_str': citation_str,
                        'clusters': clusters,
                        'private': True,
                    })
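
When a citation could live in more than one citation field, the view ORs one exact-match `Q` filter per candidate field before running a single query. That pattern in isolation, assuming Django is installed (the function name and example field names are placeholders):

from django.db.models import Q

def build_citation_query(lookup_fields, citation_str):
    # Start from an empty Q and OR in one exact-match filter per field.
    q = Q()
    for field in lookup_fields:
        q |= Q(**{field: citation_str})
    return q

# e.g. OpinionCluster.objects.filter(
#     build_citation_query(['federal_cite_one', 'federal_cite_two'], '1 U.S. 1'))

Note that `map_citations_to_models([citation]).keys()[0]` a few lines earlier only works on Python 2; on Python 3, dict views aren't subscriptable and the equivalent would be `next(iter(...))`.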
Example No. 4
def citation_redirector(request, reporter=None, volume=None, page=None):
    """Take a citation URL and use it to redirect the user to the canonical page
    for that citation.

    This uses the same infrastructure as the machinery that identifies
    citations in the text of opinions.
    """
    if request.method == 'POST':
        form = CitationRedirectorForm(request.POST)
        if form.is_valid():
            # Redirect to the page with the right values
            cd = form.cleaned_data
            return HttpResponseRedirect(
                reverse('citation_redirector', kwargs=cd)
            )
        else:
            # Error in form, somehow.
            return render_to_response(
                'citation_redirect_info_page.html',
                {'show_homepage': True,
                 'form': form,
                 'private': True},
                RequestContext(request),
            )
    else:
        if all(_ is None for _ in (reporter, volume, page)):
            # Show the most basic page
            form = CitationRedirectorForm()
            return render_to_response(
                'citation_redirect_info_page.html',
                {
                    'show_homepage': True,
                    'form': form,
                    'private': False,
                },
                RequestContext(request),
            )

        else:
            # Look up the citation, redirect the user or show disambiguation.
            citation_str = " ".join([volume, reporter, page])
            try:
                citation = get_citations(citation_str)[0]
                citation_str = citation.base_citation()  # Corrects typos/variations.
                lookup_fields = [map_citations_to_models([citation]).keys()[0]]
            except IndexError:
                # Unable to disambiguate the citation. Try looking in *all*
                # citation fields.
                lookup_fields = OpinionCluster().citation_fields

            # We were able to get a match, expand it if it's a federal/state
            # match.
            if (len(lookup_fields) == 1 and
                    lookup_fields[0] == 'federal_cite_one'):
                lookup_fields = ['federal_cite_one', 'federal_cite_two',
                                 'federal_cite_three']
            elif (len(lookup_fields) == 1 and
                    lookup_fields[0] == 'state_cite_one'):
                lookup_fields = ['state_cite_one', 'state_cite_two',
                                 'state_cite_three']
            q = Q()
            for lookup_field in lookup_fields:
                q |= Q(**{lookup_field: citation_str})
            clusters = OpinionCluster.objects.filter(q)

            # Show the correct page....
            if clusters.count() == 0:
                # No results for an otherwise valid citation.
                return render_to_response(
                    'citation_redirect_info_page.html',
                    {
                        'none_found': True,
                        'citation_str': citation_str,
                        'private': True,
                    },
                    RequestContext(request),
                    status=404,
                )

            elif clusters.count() == 1:
                # Total success. Redirect to correct location.
                return HttpResponseRedirect(
                    clusters[0].get_absolute_url()
                )

            elif clusters.count() > 1:
                # Multiple results. Show them.
                return render_to_response(
                    'citation_redirect_info_page.html',
                    {
                        'too_many': True,
                        'citation_str': citation_str,
                        'clusters': clusters,
                        'private': True,
                    },
                    RequestContext(request),
                    status=300,
                )
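
Both versions of the view take `reporter`, `volume`, and `page` as named URL kwargs and are reversed by the name 'citation_redirector'. A hypothetical urlconf entry consistent with that signature; the real path prefix, regexes, and module path in the project may differ:

from django.conf.urls import url

from myproject.views import citation_redirector  # placeholder module path

urlpatterns = [
    # Permissive named groups; they only need to match the view's kwargs.
    url(r'^c/(?P<reporter>[^/]+)/(?P<volume>[^/]+)/(?P<page>[^/]+)/$',
        citation_redirector,
        name='citation_redirector'),
]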
Example No. 5
def make_and_save(item, skipdupes=False, min_dates=None, testing=True):
    """Associates case data from `parse_opinions` with objects. Saves these
    objects.

    min_dates: if not None, a dict of per-court dates; any case whose main
    date falls on or after the date for its court is skipped.
    """
    date_filed = date_argued = date_reargued = date_reargument_denied = date_cert_granted = date_cert_denied = None
    unknown_date = None
    for date_cluster in item['dates']:
        for date_info in date_cluster:
            # check for any dates that clearly aren't dates
            if date_info[1].year < 1600 or date_info[1].year > 2020:
                continue
            # check for untagged dates that will be assigned to date_filed
            if date_info[0] is None:
                date_filed = date_info[1]
                continue
            # try to figure out what type of date it is based on its tag string
            if date_info[0] in FILED_TAGS:
                date_filed = date_info[1]
            elif date_info[0] in DECIDED_TAGS:
                if not date_filed:
                    date_filed = date_info[1]
            elif date_info[0] in ARGUED_TAGS:
                date_argued = date_info[1]
            elif date_info[0] in REARGUE_TAGS:
                date_reargued = date_info[1]
            elif date_info[0] in REARGUE_DENIED_TAGS:
                date_reargument_denied = date_info[1]
            elif date_info[0] in CERT_GRANTED_TAGS:
                date_cert_granted = date_info[1]
            elif date_info[0] in CERT_DENIED_TAGS:
                date_cert_denied = date_info[1]
            else:
                unknown_date = date_info[1]
                if date_info[0] not in UNKNOWN_TAGS:
                    print("\nFound unknown date tag '%s' with date '%s'.\n" %
                          date_info)

    # the main date (used for date_filed in OpinionCluster) and the panel date
    # (used for finding judges) are each chosen from the available dates in
    # order of which type best reflects them
    main_date = (date_filed or date_argued or date_reargued or
                 date_reargument_denied or unknown_date)
    panel_date = (date_argued or date_reargued or date_reargument_denied or
                  date_filed or unknown_date)

    if main_date is None:
        raise Exception("Failed to get a date for " + item['file'])

    if min_dates is not None:
        if min_dates.get(item['court_id']) is not None:
            if main_date >= min_dates[item['court_id']]:
                print(main_date, 'after', min_dates[item['court_id']],
                      ' -- skipping.')
                return

    docket = Docket(
        source=Docket.COLUMBIA,
        date_argued=date_argued,
        date_reargued=date_reargued,
        date_cert_granted=date_cert_granted,
        date_cert_denied=date_cert_denied,
        date_reargument_denied=date_reargument_denied,
        court_id=item['court_id'],
        case_name_short=item['case_name_short'] or '',
        case_name=item['case_name'] or '',
        case_name_full=item['case_name_full'] or '',
        docket_number=item['docket'] or ''
    )

    # get citations in the form of, e.g. {'federal_cite_one': '1 U.S. 1', ...}
    found_citations = []
    for c in item['citations']:
        found = get_citations(c)
        if not found:
            # if the docket number --is-- the citation string, we're likely
            # dealing with a somewhat common triplet of (docket number, date,
            # jurisdiction), which isn't a citation at all (so there's no
            # problem)
            if item['docket']:
                docket_no = item['docket'].lower()
                if 'claim no.' in docket_no:
                    docket_no = docket_no.split('claim no.')[0]
                for junk in DOCKET_JUNK:
                    docket_no = docket_no.replace(junk, '')
                docket_no = docket_no.strip('.').strip()
                if docket_no and docket_no in c.lower():
                    continue

            # if there are only a trivial number of letters (excluding months
            # and a few other trivial words) in the citation, then it's not a
            # citation at all
            non_trivial = c.lower()
            for trivial in TRIVIAL_CITE_WORDS:
                non_trivial = non_trivial.replace(trivial, '')
            num_letters = sum(non_trivial.count(letter) for letter in string.lowercase)
            if num_letters < 3:
                continue

            # if there is a string that's known to indicate a bad citation, then
            # it's not a citation
            if any(bad in c for bad in BAD_CITES):
                continue
            # otherwise, this is a problem
            raise Exception("Failed to get a citation from the string '%s' in "
                            "court '%s' with docket '%s'." % (
                                c, item['court_id'], item['docket']
                            ))
        else:
            found_citations.extend(found)
    citations_map = map_citations_to_models(found_citations)

    cluster = OpinionCluster(
        judges=item.get('judges', '') or "",
        precedential_status=('Unpublished' if item['unpublished'] else 'Published'),
        date_filed=main_date,
        case_name_short=item['case_name_short'] or '',
        case_name=item['case_name'] or '',
        case_name_full=item['case_name_full'] or '',
        source='Z',
        attorneys=item['attorneys'] or '',
        posture=item['posture'] or '',
        **citations_map
    )
    panel = [find_person(n, item['court_id'], case_date=panel_date) for n in
             item['panel']]
    panel = [x for x in panel if x is not None]

    opinions = []
    for i, opinion_info in enumerate(item['opinions']):
        if opinion_info['author'] is None:
            author = None
        else:
            author = find_person(opinion_info['author'], item['court_id'],
                                 case_date=panel_date)
        converted_text = convert_columbia_html(opinion_info['opinion'])
        opinion_type = OPINION_TYPE_MAPPING[opinion_info['type']]
        if opinion_type == '020lead' and i > 0:
            opinion_type = '050addendum'

        opinion = Opinion(
            author=author,
            per_curiam=opinion_info['per_curiam'],
            type=opinion_type,
            # type=OPINION_TYPE_MAPPING[opinion_info['type']],
            html_columbia=converted_text,
            sha1=opinion_info['sha1'],
            local_path=opinion_info['local_path'],
        )
        joined_by = [find_person(n, item['court_id'], case_date=panel_date) for n in opinion_info['joining']]
        joined_by = [x for x in joined_by if x is not None]
        opinions.append((opinion, joined_by))

    if min_dates is None:
        # check to see if this is a duplicate
        dups = find_dups(docket, cluster, panel, opinions)
        if dups:
            if skipdupes:
                print('Duplicate. skipping.')
            else:
                raise Exception("Found %s duplicate(s)." % len(dups))

    # save all the objects
    if not testing:
        try:
            docket.save()
            cluster.docket = docket
            cluster.save(index=False)
            for member in panel:
                cluster.panel.add(member)
            for opinion, joined_by in opinions:
                opinion.cluster = cluster
                opinion.save(index=False)
                for joiner in joined_by:
                    opinion.joined_by.add(joiner)
            if settings.DEBUG:
                domain = "http://127.0.0.1:8000"
            else:
                domain = "https://www.courtlistener.com"
            print("Created item at: %s%s" % (domain, cluster.get_absolute_url()))
        except:
            # if anything goes wrong, try to delete everything
            try:
                docket.delete()
            except:
                pass
            raise
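
The function reads a fixed set of keys from `item`. A hypothetical minimal `item` inferred from those lookups; the date tag and the opinion `type` value must match entries in FILED_TAGS and OPINION_TYPE_MAPPING, whose actual contents aren't shown here:

from datetime import date

item = {
    'file': 'example_case.xml',
    'court_id': 'scotus',
    'docket': '12-345',
    'case_name_short': 'Foo v. Bar',
    'case_name': 'Foo v. Bar',
    'case_name_full': 'Foo v. Bar, Warden',
    'citations': ['1 U.S. 1'],
    'judges': 'Smith',
    'unpublished': False,
    'attorneys': 'Jane Doe for appellant.',
    'posture': '',
    'panel': ['Smith'],
    # clusters of (tag, datetime.date) pairs; the tag decides which
    # Docket/OpinionCluster date field the value lands in
    'dates': [[('filed', date(1990, 1, 2))]],
    'opinions': [{
        'author': 'Smith',
        'per_curiam': False,
        'type': 'majority',      # must be a key of OPINION_TYPE_MAPPING
        'opinion': '<p>...</p>',
        'sha1': 'deadbeef',
        'local_path': 'columbia/example_case.xml',
        'joining': [],
    }],
}

# make_and_save(item)  # testing=True by default, so nothing is written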
Example No. 6
def make_and_save(item):
    """Associates case data from `parse_opinions` with objects. Saves these objects."""
    date_filed = date_argued = date_reargued = date_reargument_denied = date_cert_granted = date_cert_denied = None
    for date_cluster in item['dates']:
        for date_info in date_cluster:
            # check for any dates that clearly aren't dates
            if date_info[1].year < 1600 or date_info[1].year > 2020:
                continue
            # check for untagged dates that will be assigned to date_filed
            if date_info[0] is None:
                date_filed = date_info[1]
                continue
            # try to figure out what type of date it is based on its tag string
            if date_info[0] in FILED_TAGS:
                date_filed = date_info[1]
            elif date_info[0] in DECIDED_TAGS:
                if not date_filed:
                    date_filed = date_info[1]
            elif date_info[0] in ARGUED_TAGS:
                date_argued = date_info[1]
            elif date_info[0] in REARGUE_TAGS:
                date_reargued = date_info[1]
            elif date_info[0] in REARGUE_DENIED_TAGS:
                date_reargument_denied = date_info[1]
            elif date_info[0] in CERT_GRANTED_TAGS:
                date_cert_granted = date_info[1]
            elif date_info[0] in CERT_DENIED_TAGS:
                date_cert_denied = date_info[1]
            else:
                print("Found unknown date tag '%s' with date '%s'." % date_info)

    docket = Docket(
        date_argued=date_argued,
        date_reargued=date_reargued,
        date_cert_granted=date_cert_granted,
        date_cert_denied=date_cert_denied,
        date_reargument_denied=date_reargument_denied,
        court_id=item['court_id'],
        case_name_short=item['case_name_short'] or '',
        case_name=item['case_name'] or '',
        case_name_full=item['case_name_full'] or '',
        docket_number=item['docket'] or '',
    )
    docket.save()

    # get citations in the form of, e.g. {'federal_cite_one': '1 U.S. 1', ...}
    found_citations = []
    for c in item['citations']:
        found = get_citations(c)
        if not found:
            raise Exception("Failed to get a citation from the string '%s'." % c)
        elif len(found) > 1:
            raise Exception("Got multiple citations from string '%s' when there should have been one." % c)
        found_citations.append(found[0])
    citations_map = map_citations_to_models(found_citations)

    cluster = OpinionCluster(
        docket=docket,
        precedential_status=('Unpublished' if item['unpublished'] else 'Published'),
        date_filed=date_filed,
        case_name_short=item['case_name_short'] or '',
        case_name=item['case_name'] or '',
        case_name_full=item['case_name_full'] or '',
        source='Z',
        attorneys=item['attorneys'] or '',
        posture=item['posture'] or '',
        **citations_map
    )
    cluster.save()
    
    if date_argued is not None:
        paneldate = date_argued
    else:
        paneldate = date_filed
    panel = [find_person(n, item['court_id'], paneldate) for n in item['panel']]
    panel = [x for x in panel if x is not None]
    for member in panel:
        cluster.panel.add(member)

    for opinion_info in item['opinions']:
        if opinion_info['author'] is None:
            author = None
        else:
            author = find_person(opinion_info['author'], item['court_id'], date_filed or date_argued)
        opinion = Opinion(
            cluster=cluster,
            author=author,
            type=OPINION_TYPE_MAPPING[opinion_info['type']],
            html_columbia=opinion_info['opinion'],
        )
        opinion.save()
        joined_by = [find_person(n, item['court_id'], paneldate) for n in opinion_info['joining']]
        joined_by = [x for x in joined_by if x is not None]
        for joiner in joined_by:
            opinion.joined_by.add(joiner)
Example No. 7
def make_and_save(item,
                  skipdupes=False,
                  min_dates=None,
                  start_dates=None,
                  testing=True):
    """Associates case data from `parse_opinions` with objects. Saves these
    objects.

    min_dates: if not None, a dict of per-court dates; any case whose main
    date falls on or after the date for its court is skipped.
    start_dates: if not None, a dict of per-court founding dates; any case
    whose main date falls on or before the date for its court is skipped.
    """
    date_filed = date_argued = date_reargued = date_reargument_denied = date_cert_granted = date_cert_denied = None
    unknown_date = None
    for date_cluster in item['dates']:
        for date_info in date_cluster:
            # check for any dates that clearly aren't dates
            if date_info[1].year < 1600 or date_info[1].year > 2020:
                continue
            # check for untagged dates that will be assigned to date_filed
            if date_info[0] is None:
                date_filed = date_info[1]
                continue
            # try to figure out what type of date it is based on its tag string
            if date_info[0] in FILED_TAGS:
                date_filed = date_info[1]
            elif date_info[0] in DECIDED_TAGS:
                if not date_filed:
                    date_filed = date_info[1]
            elif date_info[0] in ARGUED_TAGS:
                date_argued = date_info[1]
            elif date_info[0] in REARGUE_TAGS:
                date_reargued = date_info[1]
            elif date_info[0] in REARGUE_DENIED_TAGS:
                date_reargument_denied = date_info[1]
            elif date_info[0] in CERT_GRANTED_TAGS:
                date_cert_granted = date_info[1]
            elif date_info[0] in CERT_DENIED_TAGS:
                date_cert_denied = date_info[1]
            else:
                unknown_date = date_info[1]
                if date_info[0] not in UNKNOWN_TAGS:
                    print("\nFound unknown date tag '%s' with date '%s'.\n" %
                          date_info)

    # the main date (used for date_filed in OpinionCluster) and the panel date
    # (used for finding judges) are each chosen from the available dates in
    # order of which type best reflects them
    main_date = (date_filed or date_argued or date_reargued
                 or date_reargument_denied or unknown_date)
    panel_date = (date_argued or date_reargued or date_reargument_denied
                  or date_filed or unknown_date)

    if main_date is None:
        raise Exception("Failed to get a date for " + item['file'])

    # special rule for Kentucky
    if item['court_id'] == 'kycourtapp' and main_date <= date(1975, 12, 31):
        item['court_id'] = 'kycourtapphigh'

    if min_dates is not None:
        if min_dates.get(item['court_id']) is not None:
            if main_date >= min_dates[item['court_id']]:
                print(main_date, 'after', min_dates[item['court_id']],
                      ' -- skipping.')
                return
    if start_dates is not None:
        if start_dates.get(item['court_id']) is not None:
            if main_date <= start_dates[item['court_id']]:
                print(main_date, 'before court founding:',
                      start_dates[item['court_id']], ' -- skipping.')
                return

    docket = Docket(source=Docket.COLUMBIA,
                    date_argued=date_argued,
                    date_reargued=date_reargued,
                    date_cert_granted=date_cert_granted,
                    date_cert_denied=date_cert_denied,
                    date_reargument_denied=date_reargument_denied,
                    court_id=item['court_id'],
                    case_name_short=item['case_name_short'] or '',
                    case_name=item['case_name'] or '',
                    case_name_full=item['case_name_full'] or '',
                    docket_number=item['docket'] or '')

    # get citations in the form of, e.g. {'federal_cite_one': '1 U.S. 1', ...}
    found_citations = []
    for c in item['citations']:
        found = get_citations(c)
        if not found:
            # if the docket number --is-- the citation string, we're likely
            # dealing with a somewhat common triplet of (docket number, date,
            # jurisdiction), which isn't a citation at all (so there's no
            # problem)
            if item['docket']:
                docket_no = item['docket'].lower()
                if 'claim no.' in docket_no:
                    docket_no = docket_no.split('claim no.')[0]
                for junk in DOCKET_JUNK:
                    docket_no = docket_no.replace(junk, '')
                docket_no = docket_no.strip('.').strip()
                if docket_no and docket_no in c.lower():
                    continue

            # if there are only a trivial number of letters (excluding months
            # and a few other trivial words) in the citation, then it's not a
            # citation at all
            non_trivial = c.lower()
            for trivial in TRIVIAL_CITE_WORDS:
                non_trivial = non_trivial.replace(trivial, '')
            num_letters = sum(
                non_trivial.count(letter) for letter in string.lowercase)
            if num_letters < 3:
                continue

            # if there is a string that's known to indicate a bad citation, then
            # it's not a citation
            if any(bad in c for bad in BAD_CITES):
                continue
            # otherwise, this is a problem
            raise Exception("Failed to get a citation from the string '%s' in "
                            "court '%s' with docket '%s'." %
                            (c, item['court_id'], item['docket']))
        else:
            found_citations.extend(found)
    citations_map = map_citations_to_models(found_citations)

    cluster = OpinionCluster(
        judges=item.get('judges', '') or "",
        precedential_status=('Unpublished'
                             if item['unpublished'] else 'Published'),
        date_filed=main_date,
        case_name_short=item['case_name_short'] or '',
        case_name=item['case_name'] or '',
        case_name_full=item['case_name_full'] or '',
        source='Z',
        attorneys=item['attorneys'] or '',
        posture=item['posture'] or '',
        **citations_map)
    panel = [
        find_person(n, item['court_id'], case_date=panel_date)
        for n in item['panel']
    ]
    panel = [x for x in panel if x is not None]

    opinions = []
    for i, opinion_info in enumerate(item['opinions']):
        if opinion_info['author'] is None:
            author = None
        else:
            author = find_person(opinion_info['author'],
                                 item['court_id'],
                                 case_date=panel_date)
        converted_text = convert_columbia_html(opinion_info['opinion'])
        opinion_type = OPINION_TYPE_MAPPING[opinion_info['type']]
        if opinion_type == '020lead' and i > 0:
            opinion_type = '050addendum'

        opinion = Opinion(
            author=author,
            per_curiam=opinion_info['per_curiam'],
            type=opinion_type,
            # type=OPINION_TYPE_MAPPING[opinion_info['type']],
            html_columbia=converted_text,
            sha1=opinion_info['sha1'],
            local_path=opinion_info['local_path'],
        )
        joined_by = [
            find_person(n, item['court_id'], case_date=panel_date)
            for n in opinion_info['joining']
        ]
        joined_by = [x for x in joined_by if x is not None]
        opinions.append((opinion, joined_by))

    if min_dates is None:
        # check to see if this is a duplicate
        dups = find_dups(docket, cluster)
        if dups:
            if skipdupes:
                print('Duplicate. skipping.')
            else:
                raise Exception("Found %s duplicate(s)." % len(dups))

    # save all the objects
    if not testing:
        try:
            docket.save()
            cluster.docket = docket
            cluster.save(index=False)
            for member in panel:
                cluster.panel.add(member)
            for opinion, joined_by in opinions:
                opinion.cluster = cluster
                opinion.save(index=False)
                for joiner in joined_by:
                    opinion.joined_by.add(joiner)
            if settings.DEBUG:
                domain = "http://127.0.0.1:8000"
            else:
                domain = "https://www.courtlistener.com"
            print("Created item at: %s%s" %
                  (domain, cluster.get_absolute_url()))
        except:
            # if anything goes wrong, try to delete everything
            try:
                docket.delete()
            except:
                pass
            raise
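
The letter-count heuristic above, which decides whether an unparseable citation string is harmless noise (a docket-number/date triplet rather than a real citation), uses `string.lowercase`, which only exists on Python 2. A rough standalone port using the Python 3 spelling; the trivial-word list here is illustrative, the real one lives in TRIVIAL_CITE_WORDS:

import string

TRIVIAL_WORDS = ('january', 'february', 'term', 'of')  # stand-in for TRIVIAL_CITE_WORDS

def is_probably_just_noise(citation_str):
    # Strip trivial words, then count the remaining letters; fewer than three
    # means the string is mostly numbers/dates and can safely be skipped.
    s = citation_str.lower()
    for word in TRIVIAL_WORDS:
        s = s.replace(word, '')
    letters = sum(s.count(ch) for ch in string.ascii_lowercase)
    return letters < 3

print(is_probably_just_noise('12-345, 3 January 1990'))  # True
print(is_probably_just_noise('1 U.S. 1 something odd'))  # False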