Example #1
def parse_bill_number(q, congress=None, not_exist_ok=False):
    m = bill_number_re.match(
        q.replace(" ", "").replace(".", "").replace("-", ""))
    if m is None: return None
    search_type_flag = None
    if m.group(3) is not None:
        cn = int(m.group(4))
        search_type_flag = "bill-with-congress"
    elif congress is not None:
        try:
            cn = int(congress)
        except (ValueError, TypeError):
            cn = CURRENT_CONGRESS
        search_type_flag = "bill-default-congress"
    else:
        cn = CURRENT_CONGRESS
        search_type_flag = "bill-guessed-congress"
    try:
        b = Bill.objects.get(congress=cn,
                             bill_type=BillType.by_slug(m.group(1).lower()),
                             number=int(m.group(2)))
        b.search_type_flag = search_type_flag
        return b
    except Bill.DoesNotExist:
        if not_exist_ok:
            # Return a dummy bill indicating that the string matched the regex.
            b = Bill(congress=cn,
                     bill_type=BillType.by_slug(m.group(1).lower()),
                     number=int(m.group(2)))
            b.search_type_flag = search_type_flag
            return b
        return None
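
A hedged usage sketch for the parser above. bill_number_re and CURRENT_CONGRESS are module-level names that are not shown in this example, so the accepted query formats below are assumptions based on how the match groups are used.

# Hedged usage sketch; the exact accepted formats depend on bill_number_re (not shown).
b = parse_bill_number("H.R. 1234")                # spaces, dots, and dashes are stripped before matching
b = parse_bill_number("hr1234", congress="113")   # explicit congress; falls back to CURRENT_CONGRESS if it is not an int
if b is not None:
    print(b.search_type_flag)                     # e.g. "bill-default-congress"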
Example #2
def was_bill_enacted(b, startdate, enddate, recurse=True):
	# Our status code is currently tied to the assignment of a slip
	# law number, which isn't what we mean exactly.
	#
	# (Additionally, we should count a bill as enacted if any identified companion
	# bill is enacted.)

	# If it *was* assigned a slip law number, which in the future might
	# be useful for veto overrides, then OK.
	if b.current_status in BillStatus.final_status_passed_bill and \
		startdate <= b.current_status_date <= enddate:
		return True

	# Otherwise, check the actions for a <signed> action.
	fn = "data/congress/%s/bills/%s/%s%d/data.json" % (
    	b.congress,
        BillType.by_value(b.bill_type).slug,
        BillType.by_value(b.bill_type).slug,
        b.number)
	bj = json.load(open(fn))
	for axn in bj["actions"]:
		if axn["type"] == "signed" and startdate.isoformat() <= axn["acted_at"] <= enddate.isoformat():
			return True

	# Otherwise check companion bills.
	#if recurse:
	#	for rb in RelatedBill.objects.filter(bill=b, relation="identical").select_related("related_bill"):
	#		if was_bill_enacted(rb.related_bill, startdate, enddate, recurse=False):
	#			return True
			
	return False
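
The function reads the congress project's per-bill data.json and only looks at a couple of fields. A minimal sketch of the assumed shape, for orientation only (the real files contain many more fields):

# Illustrative shape of the fields was_bill_enacted() relies on.
example_data = {
    "actions": [
        {"type": "signed", "acted_at": "2013-01-14"},  # compared lexicographically against ISO dates
        {"type": "vote", "acted_at": "2012-12-30"},
    ]
}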
Example #3
def was_bill_enacted_2013(b, startdate, enddate):
    # Our status code is currently tied to the assignment of a slip
    # law number, which isn't what we mean exactly.
    #
    # (Additionally, we should count a bill as enacted if any identified companion
    # bill is enacted.)

    # TODO: See new function in the Bill model.

    # If it *was* assigned a slip law number, which in the future might
    # be useful for veto overrides, then OK.
    if b.current_status in BillStatus.final_status_passed_bill and \
     startdate <= b.current_status_date <= enddate:
        return True

    # Otherwise, check the actions for a <signed> action.
    fn = "data/congress/%s/bills/%s/%s%d/data.json" % (
        b.congress,
        BillType.by_value(b.bill_type).slug,
        BillType.by_value(b.bill_type).slug,
        b.number)
    bj = json.load(open(fn))
    for axn in bj["actions"]:
        if axn["type"] == "signed" and startdate.isoformat() <= axn["acted_at"] <= enddate.isoformat():
            return True

    return False
Example #4
def was_bill_enacted_2013(b, startdate, enddate):
	# Our status code is currently tied to the assignment of a slip
	# law number, which isn't what we mean exactly.
	#
	# (Additionally, we should count a bill as enacted if any identified companion
	# bill is enacted.)

	# TODO: See new function in the Bill model.

	# If it *was* assigned a slip law number, which in the future might
	# be useful for veto overrides, then OK.
	if b.current_status in BillStatus.final_status_passed_bill and \
		startdate <= b.current_status_date <= enddate:
		return True

	# Otherwise, check the actions for a <signed> action.
	fn = "data/congress/%s/bills/%s/%s%d/data.json" % (
    	b.congress,
        BillType.by_value(b.bill_type).slug,
        BillType.by_value(b.bill_type).slug,
        b.number)
	bj = json.load(open(fn))
	for axn in bj["actions"]:
		if axn["type"] == "signed" and startdate.isoformat() <= axn["acted_at"] <= enddate.isoformat():
			return True
		
	return False
Example #5
def get_bill_text_metadata(bill, version):
    from bill.models import BillType # has to be here and not module-level to avoid cyclic dependency
    import glob, json

    bt = BillType.by_value(bill.bill_type).slug
    basename = "data/congress/%d/bills/%s/%s%d/text-versions" % (bill.congress, bt, bt, bill.number)
    
    if version is None:
        # Cycle through files to find most recent version by date.
        dat = None
        for versionfile in glob.glob(basename + "/*/data.json"):
            d = json.load(open(versionfile))
            if not dat or d["issued_on"] > dat["issued_on"]:
                dat = d
        if not dat: return None
    else:
        dat = json.load(open(basename + "/%s/data.json" % version))
        
    basename += "/" + dat["version_code"]

    bt2 = BillType.by_value(bill.bill_type).xml_code
    html_fn = "data/us/bills.text/%s/%s/%s%d%s.html" % (bill.congress, bt2, bt2, bill.number, dat["version_code"])

    if os.path.exists(basename + "/mods.xml"):
        dat["mods_file"] = basename + "/mods.xml"

    # get a plain text file if one exists
    if os.path.exists(basename + "/document.txt"):
        dat["text_file"] = basename + "/document.txt"
        dat["has_displayable_text"] = True

        for source in dat.get("sources", []):
            if source["source"] == "statutes":
                dat["text_file_source"] = "statutes"

    # get an HTML file if one exists
    if os.path.exists(html_fn):
        dat["html_file"] = html_fn
        dat["has_displayable_text"] = True

    # get a PDF file if one exists
    pdf_fn = "data/us/bills.text/%s/%s/%s%d%s.pdf" % (bill.congress, bt2, bt2, bill.number, dat["version_code"])
    if os.path.exists(pdf_fn):
        dat["pdf_file"] = pdf_fn
        dat["has_thumbnail"] = True
        dat["thumbnail_path"] = bill.get_absolute_url() + "/_text_image"

    # get an XML file if one exists
    if os.path.exists(basename + "/catoxml.xml"):
        dat["xml_file"] = basename + "/catoxml.xml"
        dat["has_displayable_text"] = True
        dat["xml_file_source"] = "cato-deepbills"
    elif os.path.exists(basename + "/document.xml"):
        dat["xml_file"] = basename + "/document.xml"
        dat["has_displayable_text"] = True

    return dat
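
A short consumer sketch for the metadata dict returned above, assuming a bill object like the ones in the other examples; which keys are present depends on which files exist on disk.

# Hedged usage sketch: read whichever rendition the metadata reports as available.
meta = get_bill_text_metadata(bill, None)  # None selects the most recently issued version
if meta and meta.get("has_displayable_text") and "text_file" in meta:
    text = open(meta["text_file"]).read()
else:
    text = None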
Example #6
def get_bill_text_metadata(bill, version):
    from bill.models import BillType  # has to be here and not module-level to avoid cyclic dependency
    import glob, json

    bt = BillType.by_value(bill.bill_type).slug
    basename = "data/congress/%d/bills/%s/%s%d/text-versions" % (
        bill.congress, bt, bt, bill.number)

    if version is None:
        # Cycle through files to find most recent version by date.
        dat = None
        for versionfile in glob.glob(basename + "/*/data.json"):
            d = json.load(open(versionfile))
            if not dat or d["issued_on"] > dat["issued_on"]:
                dat = d
        if not dat: return None
    else:
        dat = json.load(open(basename + "/%s/data.json" % version))

    basename += "/" + dat["version_code"]

    bt2 = BillType.by_value(bill.bill_type).xml_code
    html_fn = "data/us/bills.text/%s/%s/%s%d%s.html" % (
        bill.congress, bt2, bt2, bill.number, dat["version_code"])

    if os.path.exists(basename + "/mods.xml"):
        dat["mods_file"] = basename + "/mods.xml"

    # get a plain text file if one exists
    if os.path.exists(basename + "/document.txt"):
        dat["text_file"] = basename + "/document.txt"
        dat["has_displayable_text"] = True

        for source in dat.get("sources", []):
            if source["source"] == "statutes":
                dat["text_file_source"] = "statutes"

    # get an HTML file if one exists
    if os.path.exists(html_fn):
        dat["html_file"] = html_fn
        dat["has_displayable_text"] = True

    # get an XML file if one exists
    if os.path.exists(basename + "/catoxml.xml"):
        dat["xml_file"] = basename + "/catoxml.xml"
        dat["has_displayable_text"] = True
        dat["xml_file_source"] = "cato-deepbills"
    elif os.path.exists(basename + "/document.xml"):
        dat["xml_file"] = basename + "/document.xml"
        dat["has_displayable_text"] = True

    thumb_fn = "data/us/bills.text/%s/%s/%s%d%s-thumb200.png" % (
        bill.congress, bt2, bt2, bill.number, dat["version_code"])
    if os.path.exists(thumb_fn):
        dat["thumbnail_path"] = thumb_fn

    return dat
Example #7
def bill_text(request, congress, type_slug, number, version=None):
    if version == "":
        version = None

    try:
        bill_type = BillType.by_slug(type_slug)
    except BillType.NotFound:
        raise Http404("Invalid bill type: " + type_slug)
    bill = get_object_or_404(Bill, congress=congress, bill_type=bill_type, number=number)

    from billtext import load_bill_text, bill_gpo_status_codes
    try:
        textdata = load_bill_text(bill, version)
    except IOError:
        textdata = None

    # Get a list of the alternate versions of this bill.
    alternates = None
    if textdata:
        alternates = []
        for v in bill_gpo_status_codes:
            fn = "data/us/bills.text/%s/%s/%s%d%s.mods.xml" % (bill.congress, BillType.by_value(bill.bill_type).xml_code, BillType.by_value(bill.bill_type).xml_code, bill.number, v)
            if os.path.exists(fn):
                alternates.append(load_bill_text(bill, v, mods_only=True))
        alternates.sort(key = lambda mods : mods["docdate"])

    # Get a list of related bills.
    from billtext import get_current_version
    related_bills = []
    for rb in list(bill.find_reintroductions()) + [r.related_bill for r in bill.get_related_bills()]:
        try:
            rbv = get_current_version(rb)
            if not (rb, rbv) in related_bills: related_bills.append((rb, rbv))
        except IOError:
            pass # text not available
    for btc in BillTextComparison.objects.filter(bill1=bill).exclude(bill2=bill):
        if not (btc.bill2, btc.ver2) in related_bills: related_bills.append((btc.bill2, btc.ver2))
    for btc in BillTextComparison.objects.filter(bill2=bill).exclude(bill1=bill):
        if not (btc.bill1, btc.ver1) in related_bills: related_bills.append((btc.bill1, btc.ver1))

    return {
        'bill': bill,
        "congressdates": get_congress_dates(bill.congress),
        "textdata": textdata,
        "version": version,
        "alternates": alternates,
        "related_bills": related_bills,
    }
Example #8
def bill_text(request, congress, type_slug, number, version=None):
    if version == "":
        version = None
    
    try:
        bill_type = BillType.by_slug(type_slug)
    except BillType.NotFound:
        raise Http404("Invalid bill type: " + type_slug)
    bill = get_object_or_404(Bill, congress=congress, bill_type=bill_type, number=number)
    
    from billtext import load_bill_text, bill_gpo_status_codes
    try:
        textdata = load_bill_text(bill, version)
    except IOError:
        textdata = None

    # Get a list of the alternate versions of this bill.
    alternates = None
    if textdata:
        alternates = []
        for v in bill_gpo_status_codes:
            fn = "data/us/bills.text/%s/%s/%s%d%s.mods.xml" % (bill.congress, BillType.by_value(bill.bill_type).xml_code, BillType.by_value(bill.bill_type).xml_code, bill.number, v)
            if os.path.exists(fn):
                alternates.append(load_bill_text(bill, v, mods_only=True))
        alternates.sort(key = lambda mods : mods["docdate"])

    # Get a list of related bills.
    from billtext import get_current_version
    related_bills = []
    for rb in list(bill.find_reintroductions()) + [r.related_bill for r in bill.get_related_bills()]:
        try:
            rbv = get_current_version(rb)
            if not (rb, rbv) in related_bills: related_bills.append((rb, rbv))
        except IOError:
            pass # text not available
    for btc in BillTextComparison.objects.filter(bill1=bill).exclude(bill2=bill):
        if not (btc.bill2, btc.ver2) in related_bills: related_bills.append((btc.bill2, btc.ver2))
    for btc in BillTextComparison.objects.filter(bill2=bill).exclude(bill1=bill):
        if not (btc.bill1, btc.ver1) in related_bills: related_bills.append((btc.bill1, btc.ver1))

    return {
        'bill': bill,
        "congressdates": get_congress_dates(bill.congress),
        "textdata": textdata,
        "version": version,
        "alternates": alternates,
        "related_bills": related_bills,
    }
Example #9
    def load_bill_from_url(congress, type_slug, number):
        try:
            bill_type = BillType.by_slug(type_slug)
        except BillType.NotFound:
            raise Http404("Invalid bill type: " + type_slug)

        return get_object_or_404(Bill, congress=congress, bill_type=bill_type, number=number)
Example #10
def get_transparency_stats(person, role, stats, congress, startdate, enddate):
	global transparency_bills
	if not transparency_bills:
		transparency_bills = []
		for line in open("analysis/transparency-bills.txt"):
			bill = Bill.from_congressproject_id(re.split("\s", line)[0])
			if bill.congress != congress: continue
			transparency_bills.append(bill)

	# which bills are in the right chamber?
	plausible_bills = []
	for bill in transparency_bills:
		if BillType.by_value(bill.bill_type).chamber == RoleType.by_value(role.role_type).congress_chamber:
			plausible_bills.append(bill)

	# did person sponsor any of these within this session?
	sponsored = []
	for bill in transparency_bills:
		if startdate <= bill.introduced_date <= enddate and bill.sponsor == person:
			sponsored.append(bill)

	# did person cosponsor any of these within this session?
	cosponsored = []
	for cosp in Cosponsor.objects.filter(person=person, bill__in=transparency_bills, joined__gte=startdate, joined__lte=enddate):
		cosponsored.append(cosp.bill)

	stats["transparency-bills"] = {
		"value": len(sponsored)*3 + len(cosponsored),
		"sponsored": make_bill_entries(sponsored),
		"cosponsored": make_bill_entries(cosponsored),
		"num_bills": len(plausible_bills),
		"chamber": RoleType.by_value(role.role_type).congress_chamber,
	}
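
The loader above only uses the first whitespace-separated token of each line of analysis/transparency-bills.txt as a congress-project bill id. A sketch of the assumed file layout; the id and title below are illustrative only.

# Assumed analysis/transparency-bills.txt layout; everything after the first token is ignored.
sample_line = "hr1234-113  Transparency in Government Act"
bill_id = sample_line.split()[0]  # -> "hr1234-113", passed to Bill.from_congressproject_id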
Example #11
def bill_advocacy_tips(request, congress, type_slug, number):
    try:
        bill_type = BillType.by_slug(type_slug)
    except BillType.NotFound:
        raise Http404("Invalid bill type: " + type_slug)
    bill = get_object_or_404(Bill, congress=congress, bill_type=bill_type, number=number)
    return { "bill": bill }
Example #12
def load_docs_house_gov(options, bill_index):
    # Look at the three most recent JSON files by looking at the lexicographically last ones,
    # which possibly cover the current week, the next week, and the week after that.
    for fn in sorted(os.listdir("data/congress/upcoming_house_floor"))[-3:]:
        data = json.load(open("data/congress/upcoming_house_floor/" + fn))
        for billinfo in data.get("upcoming", []):
            if "bill_id" not in billinfo: continue

            m = re.match(r"([hrsjconres]+)(\d+)-(\d+)", billinfo["bill_id"])
            if not m:
                log.error('Could not parse bill_id "%s" in docs.house.gov.' %
                          billinfo["bill_id"])
                continue

            bt = BillType.by_slug(m.group(1))
            try:
                bill = Bill.objects.get(congress=int(m.group(3)),
                                        bill_type=bt,
                                        number=int(m.group(2)))
            except Exception as e:
                log.error('Could not get bill "%s" in docs.house.gov: %s.' %
                          (billinfo["bill_id"], str(e)))
                continue

            bill.docs_house_gov_postdate = BillProcessor.parse_datetime(
                billinfo["published_at"])
            if bill.senate_floor_schedule_postdate is None or bill.docs_house_gov_postdate > bill.senate_floor_schedule_postdate:
                bill.scheduled_consideration_date = BillProcessor.parse_datetime(
                    data["week_of"])
            bill.save()
            if bill_index: bill.update_index(bill_index)
            if not options.disable_events: bill.create_events()
Example #13
def bill_details_user_view(request, congress, type_slug, number):
    try:
        bill_type = BillType.by_slug(type_slug)
    except BillType.NotFound:
        raise Http404("Invalid bill type: " + type_slug)
    bill = get_object_or_404(Bill, congress=congress, bill_type=bill_type, number=number)
    
    ret = { }
    if request.user.is_staff:
        admin_panel = """
            {% load humanize %}
            <div class="clear"> </div>
            <div style="margin-top: 1.5em; padding: .5em; background-color: #EEE; ">
                <b>ADMIN</b> - <a href="{% url "bill_go_to_summary_admin" %}?bill={{bill.id}}">Edit Summary</a>
                <br/>Tracked by {{feed.tracked_in_lists.count|intcomma}} users
                ({{feed.tracked_in_lists_with_email.count|intcomma}} w/ email).
            </div>
            """
        from django.template import Template, Context, RequestContext, loader
        ret["admin_panel"] = Template(admin_panel).render(RequestContext(request, {
            'bill': bill,
            "feed": Feed.BillFeed(bill),
            }))
    
    from person.views import render_subscribe_inline
    ret.update(render_subscribe_inline(request, Feed.BillFeed(bill)))
    
    return ret
Example #14
def load_docs_house_gov(options, bill_index):
    # Get most recent JSON file by looking at the lexicographically last one.
    fn = sorted(os.listdir("data/congress/upcoming_house_floor"))[-1]
    data = json.load(open("data/congress/upcoming_house_floor/" + fn))
    for billinfo in data.get("upcoming", []):
        if "bill_id" not in billinfo: continue

        m = re.match(r"([hrsjconres]+)(\d+)-(\d+)", billinfo["bill_id"])
        if not m:
            log.error('Could not parse bill_id "%s" in docs.house.gov.' %
                      billinfo["bill_id"])
            continue

        bt = BillType.by_slug(m.group(1))
        try:
            bill = Bill.objects.get(congress=int(m.group(3)),
                                    bill_type=bt,
                                    number=int(m.group(2)))
        except Exception as e:
            log.error('Could not get bill "%s" in docs.house.gov: %s.' %
                      (billinfo["bill_id"], str(e)))
            continue

        bill.docs_house_gov_postdate = BillProcessor.parse_datetime(
            billinfo["published_at"])
        bill.save()
        if bill_index: bill.update_index(bill_index)
        if not options.disable_events: bill.create_events()
Example #15
 def dereference(ref):
     m = re.match(r"([a-z]+)(\d+)-(\d+)", ref)
     bill_type = BillType.by_xml_code(m.group(1))
     bill = Bill.objects.get(congress=m.group(2),
                             bill_type=bill_type,
                             number=m.group(3))
     return bill
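
A usage sketch for dereference(). From the regex and the query above, refs look like "<xml type code><congress>-<number>"; the exact codes accepted by BillType.by_xml_code are not shown here, so the value below is illustrative.

# Illustrative only: type code "h", congress 113, bill number 1234.
bill = dereference("h113-1234")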
Example #16
def bill_advocacy_tips(request, congress, type_slug, number):
    try:
        bill_type = BillType.by_slug(type_slug)
    except BillType.NotFound:
        raise Http404("Invalid bill type: " + type_slug)
    bill = get_object_or_404(Bill, congress=congress, bill_type=bill_type, number=number)
    return { "bill": bill }
Example #17
def load_docs_house_gov(options, bill_index):
    # Look at the three most recent JSON files by looking at the lexicographically last ones,
    # which possibly cover the current week, the next week, and the week after that.
    if not os.path.exists(settings.CONGRESS_DATA_PATH + "/upcoming_house_floor"):
        print("No upcoming_house_floor data.")
        return
    for fn in sorted(os.listdir(settings.CONGRESS_DATA_PATH + "/upcoming_house_floor"))[-3:]:
        data = json.load(open(settings.CONGRESS_DATA_PATH + "/upcoming_house_floor/" + fn))
        for billinfo in data.get("upcoming", []):
            if "bill_id" not in billinfo: continue
    
            m = re.match(r"([hrsjconres]+)(\d+)-(\d+)", billinfo["bill_id"])
            if not m:
                log.error('Could not parse bill_id "%s" in docs.house.gov.' % billinfo["bill_id"])
                continue
 
            bt = BillType.by_slug(m.group(1))
            try:
                bill = Bill.objects.get(congress=int(m.group(3)), bill_type=bt, number=int(m.group(2)))
            except Exception as e:
                log.error('Could not get bill "%s" in docs.house.gov: %s.' % (billinfo["bill_id"], str(e)))
                continue

            bill.docs_house_gov_postdate = BillProcessor.parse_datetime(billinfo["published_at"])
            if bill.senate_floor_schedule_postdate is None or bill.docs_house_gov_postdate > bill.senate_floor_schedule_postdate: bill.scheduled_consideration_date = BillProcessor.parse_datetime(data["week_of"])
            bill.save()
            if bill_index: bill.update_index(bill_index)
            if not options.disable_events: bill.create_events()
Example #18
def get_bill_number(bill, show_congress_number="ARCHIVAL"):
    "Compute display form of bill number"

    from bill.models import BillType
    ret = '%s %s' % (BillType.by_value(bill.bill_type).label, bill.number)
    if (bill.congress != settings.CURRENT_CONGRESS and show_congress_number == "ARCHIVAL") or show_congress_number == "ALL":
        ret += ' (%s)' % ordinal(bill.congress)
    return ret
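
A hedged sketch of the display strings produced above; the exact label text comes from BillType.label, which is not shown here, so the outputs are examples only.

# Hedged examples of the return value.
get_bill_number(bill)                               # e.g. "H.R. 1234" for a current-congress bill
get_bill_number(bill, show_congress_number="ALL")   # e.g. "H.R. 1234 (113th)"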
Example #19
def bill_text(request, congress, type_slug, number, version=None):
    if version == "":
        version = None

    try:
        bill_type = BillType.by_slug(type_slug)
    except BillType.NotFound:
        raise Http404("Invalid bill type: " + type_slug)
    bill = get_object_or_404(Bill, congress=congress, bill_type=bill_type, number=number)

    from .billtext import load_bill_text, get_bill_text_versions
    try:
        textdata = load_bill_text(bill, version)
    except IOError:
        textdata = None

    # Get a list of the alternate versions of this bill.
    alternates = None
    is_latest = True
    if textdata:
        alternates = []
        for v in get_bill_text_versions(bill):
            try:
                alternates.append(load_bill_text(bill, v, mods_only=True))
            except IOError:
                pass
        alternates.sort(key = lambda mods : mods["docdate"])
        if len(alternates) > 0:
            is_latest = False
            if textdata["doc_version"] == alternates[-1]["doc_version"]:
                is_latest = True

    # Get a list of related bills.
    from .billtext import get_current_version
    related_bills = []
    for rb in list(bill.find_reintroductions()) + [r.related_bill for r in bill.get_related_bills()]:
        try:
            rbv = get_current_version(rb)
            if not (rb, rbv) in related_bills: related_bills.append((rb, rbv))
        except IOError:
            pass # text not available
    for btc in BillTextComparison.objects.filter(bill1=bill).exclude(bill2=bill):
        if not (btc.bill2, btc.ver2) in related_bills: related_bills.append((btc.bill2, btc.ver2))
    for btc in BillTextComparison.objects.filter(bill2=bill).exclude(bill1=bill):
        if not (btc.bill1, btc.ver1) in related_bills: related_bills.append((btc.bill1, btc.ver1))

    return {
        "bill_subpage": "Text",
        'bill': bill,
        "congressdates": get_congress_dates(bill.congress),
        "textdata": textdata,
        "version": version,
        "is_latest": is_latest,
        "alternates": alternates,
        "related_bills": related_bills,
        "days_old": (datetime.datetime.now().date() - bill.current_status_date).days,
        "is_on_bill_text_page": True, # for the header tabs
    }
Example #20
def bill_text(request, congress, type_slug, number, version=None):
    if version == "":
        version = None

    try:
        bill_type = BillType.by_slug(type_slug)
    except BillType.NotFound:
        raise Http404("Invalid bill type: " + type_slug)
    bill = get_object_or_404(Bill, congress=congress, bill_type=bill_type, number=number)

    from .billtext import load_bill_text, get_bill_text_versions
    try:
        textdata = load_bill_text(bill, version)
    except IOError:
        textdata = None

    # Get a list of the alternate versions of this bill.
    alternates = None
    is_latest = True
    if textdata:
        alternates = []
        for v in get_bill_text_versions(bill):
            try:
                alternates.append(load_bill_text(bill, v, mods_only=True))
            except IOError:
                pass
        alternates.sort(key = lambda mods : mods["docdate"])
        if len(alternates) > 0:
            is_latest = False
            if textdata["doc_version"] == alternates[-1]["doc_version"]:
                is_latest = True

    # Get a list of related bills.
    from .billtext import get_current_version
    related_bills = []
    for rb in list(bill.find_reintroductions()) + [r.related_bill for r in bill.get_related_bills()]:
        try:
            rbv = get_current_version(rb)
            if not (rb, rbv) in related_bills: related_bills.append((rb, rbv))
        except IOError:
            pass # text not available
    for btc in BillTextComparison.objects.filter(bill1=bill).exclude(bill2=bill):
        if not (btc.bill2, btc.ver2) in related_bills: related_bills.append((btc.bill2, btc.ver2))
    for btc in BillTextComparison.objects.filter(bill2=bill).exclude(bill1=bill):
        if not (btc.bill1, btc.ver1) in related_bills: related_bills.append((btc.bill1, btc.ver1))

    return {
        "bill_subpage": "Text",
        'bill': bill,
        "congressdates": get_congress_dates(bill.congress),
        "textdata": textdata,
        "version": version,
        "is_latest": is_latest,
        "alternates": alternates,
        "related_bills": related_bills,
        "days_old": (datetime.datetime.now().date() - bill.current_status_date).days,
        "is_on_bill_text_page": True, # for the header tabs
    }
Example #21
def load_bill_text(bill, version, plain_text=False, mods_only=False):
    if bill.congress < 103 or plain_text:
        return load_bill_text_alt(bill, version, plain_text=plain_text, mods_only=mods_only)
    
    from bill.models import BillType # has to be here and not module-level to avoid cyclic dependency

    bt = BillType.by_value(bill.bill_type).xml_code
    basename = "data/us/bills.text/%s/%s/%s%d%s" % (bill.congress, bt, bt, bill.number, version if version is not None else "")
    
    if mods_only:
        bill_text_content = None
    else:
        bill_text_content = open(basename + ".html").read()
    
    mods = lxml.etree.parse(basename + ".mods.xml")
    ns = { "mods": "http://www.loc.gov/mods/v3" }
    
    docdate = mods.xpath("string(mods:originInfo/mods:dateIssued)", namespaces=ns)
    gpo_url = "http://www.gpo.gov/fdsys/search/pagedetails.action?packageId=" + mods.xpath("string(mods:recordInfo/mods:recordIdentifier[@source='DGPO'])", namespaces=ns)
    #gpo_url = mods.xpath("string(mods:identifier[@type='uri'])", namespaces=ns)
    gpo_pdf_url = mods.xpath("string(mods:location/mods:url[@displayLabel='PDF rendition'])", namespaces=ns)
    doc_version = mods.xpath("string(mods:extension/mods:billVersion)", namespaces=ns)
    numpages = mods.xpath("string(mods:physicalDescription/mods:extent)", namespaces=ns)
    if numpages: numpages = re.sub(r" p\.$", " pages", numpages)
    
    docdate = datetime.date(*(int(d) for d in docdate.split("-")))
    
    doc_version_name = bill_gpo_status_codes[doc_version]

    # load a list of citations as marked up by GPO
    citations = []
    for cite in mods.xpath("//mods:identifier", namespaces=ns):
        if cite.get("type") == "USC citation":
            citations.append( parse_usc_citation(cite) )
        elif cite.get("type") == "Statute citation":
            citations.append({ "type": "statutes_at_large", "text": cite.text })
        elif cite.get("type") == "public law citation":
            try:
                congress_cite, slip_law_num = re.match(r"Public Law (\d+)-(\d+)$", cite.text).groups()
                citations.append({ "type": "slip_law", "text": cite.text, "congress": int(congress_cite), "number": int(slip_law_num) })
            except (AttributeError, ValueError):
                citations.append({ "type": "unknown", "text": cite.text })
            
    return {
        "bill_id": bill.id,
        "bill_name": bill.title,
        "basename": basename,
        "text_html": bill_text_content,
        "docdate": docdate,
        "gpo_url": gpo_url,
        "gpo_pdf_url": gpo_pdf_url,
        "doc_version": doc_version,
        "doc_version_name": doc_version_name,
        "numpages": numpages,
        "has_html_text": True,
        "citations": citations,
    }
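
The citations list built above mixes a few dict shapes. A sketch of the entries as inferred from the branches in the code (USC entries come from parse_usc_citation, which is not shown, so that shape is omitted; the text values are illustrative):

# Citation dict shapes appended by load_bill_text(), inferred from the code above.
citations = [
    {"type": "statutes_at_large", "text": "126 Stat. 1234"},
    {"type": "slip_law", "text": "Public Law 112-99", "congress": 112, "number": 99},
    {"type": "unknown", "text": "a public law citation that failed to parse"},
]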
Example #22
 def process_bill(self, obj, node):
     amends_type = BillType.by_xml_code(node.xpath('string(amends/@type)'))
     amends_number = int(node.xpath('string(amends/@number)'))
     try:
         amends_seq = int(node.xpath('string(amends/@sequence)'))
     except ValueError:
         amends_seq = None
     obj.bill = Bill.objects.get(congress=obj.congress, bill_type=amends_type, number=amends_number)
     obj.sequence = amends_seq
Example #23
def get_bill_text_version_regular(bill, version):
    basename = bill.data_dir_path + "/text-versions"
    dat = json.load(open(basename + "/%s/data.json" % version))

    dat["status_name"] = get_gpo_status_code_name(dat["version_code"])
    dat["corresponding_status_codes"] = get_gpo_status_code_corresponding_status(
        dat["version_code"])
    dat["issued_on"] = datetime.date(*(int(d)
                                       for d in dat["issued_on"].split("-")))

    # find content files

    basename += "/" + dat["version_code"]

    from bill.models import BillType  # has to be here and not module-level to avoid cyclic dependency
    bt = BillType.by_value(bill.bill_type).slug
    html_fn = "data/congress-bill-text-legacy/%d/%s/%s%d/%s.html" % (
        bill.congress, bt, bt, bill.number, dat["version_code"])

    if os.path.exists(basename + "/mods.xml"):
        dat["mods_file"] = basename + "/mods.xml"

    # get a plain text file if one exists
    if os.path.exists(basename + "/document.txt"):
        dat["text_file"] = basename + "/document.txt"
        dat["has_displayable_text"] = True

        for source in dat.get("sources", []):
            if source["source"] == "statutes":
                dat["text_file_source"] = "statutes"

    # get an HTML file if one exists
    if os.path.exists(html_fn):
        dat["html_file"] = html_fn
        dat["has_displayable_text"] = True

    # get a PDF file if one exists
    pdf_fn = basename + "/document.pdf"
    if os.path.exists(pdf_fn):
        dat["pdf_file"] = pdf_fn
        dat["has_thumbnail"] = True
        dat["thumbnail_path"] = bill.get_absolute_url() + "/_text_image"

    # get an XML file if one exists
    if os.path.exists(basename + "/catoxml.xml"):
        dat["xml_file"] = basename + "/catoxml.xml"
        dat["has_displayable_text"] = True
        dat["xml_file_source"] = "cato-deepbills"
    elif os.path.exists(basename + "/document.xml"):
        dat["xml_file"] = basename + "/document.xml"
        dat["has_displayable_text"] = True

    if settings.DEBUG:
        dat["has_thumbnail"] = True

    return dat
Example #24
def load_bill_from_url(congress, type_slug, number):
    # not sure why we were trying this
    #if type_slug.isdigit():
    #    bill_type = type_slug
    try:
        bill_type = BillType.by_slug(type_slug)
    except BillType.NotFound:
        raise Http404("Invalid bill type: " + type_slug)

    return get_object_or_404(Bill, congress=congress, bill_type=bill_type, number=number)
Example #25
 def bill(self):
     if not hasattr(self, "_ref"):
         if ":" in self.feedname and self.feedname.split(":")[0] in ("bill",):
             from bill.models import Bill, BillType
             m = re.match(r"([a-z]+)(\d+)-(\d+)", self.feedname.split(":")[1])
             bill_type = BillType.by_xml_code(m.group(1))
             return Bill.objects.get(congress=m.group(2), bill_type=bill_type, number=m.group(3))
         else:
             self._ref = None
     return self._ref
Example #26
def load_bill_from_url(congress, type_slug, number):
    # not sure why we were trying this
    #if type_slug.isdigit():
    #    bill_type = type_slug
    try:
        bill_type = BillType.by_slug(type_slug)
    except BillType.NotFound:
        raise Http404("Invalid bill type: " + type_slug)

    return get_object_or_404(Bill, congress=congress, bill_type=bill_type, number=number)
Example #27
 def process_relatedbills(self, obj, node):
     RelatedBill.objects.filter(bill=obj).delete()
     for subnode in node.xpath("./relatedbills/bill"):
         try:
             related_bill = Bill.objects.get(
                 congress=subnode.get("session"),
                 bill_type=BillType.by_xml_code(subnode.get("type")),
                 number=int(subnode.get("number")),
             )
         except Bill.DoesNotExist:
             continue
         RelatedBill.objects.create(bill=obj, related_bill=related_bill, relation=subnode.get("relation"))
Example #28
def get_bill_number(bill, show_congress_number="ARCHIVAL"):
    "Compute display form of bill number"
    
    if bill.congress <= 42:
        # This is an American Memory bill. Its number is stored in the title.
        ret = bill.title.split(":")[0]
    else:
        from bill.models import BillType
        ret = '%s %s' % (BillType.by_value(bill.bill_type).label, bill.number)
    if (bill.congress != settings.CURRENT_CONGRESS and show_congress_number == "ARCHIVAL") or show_congress_number == "ALL":
        ret += ' (%s)' % ordinal(bill.congress)
    return ret
Example #29
 def process_relatedbills(self, obj, node):
     RelatedBill.objects.filter(bill=obj).delete()
     for subnode in node.xpath('./relatedbills/bill'):
         try:
             related_bill = Bill.objects.get(
                 congress=subnode.get("session"),
                 bill_type=BillType.by_xml_code(subnode.get("type")),
                 number=int(subnode.get("number")))
         except Bill.DoesNotExist:
             continue
         RelatedBill.objects.create(bill=obj,
                                    related_bill=related_bill,
                                    relation=subnode.get("relation")[0:16])
Example #30
    def process_bill(self, obj, node):
        if node.xpath('string(amends/@type)') == "treaty":
           # Cannot handle this.
           return False

        amends_type = BillType.by_xml_code(node.xpath('string(amends/@type)'))
        amends_number = int(node.xpath('string(amends/@number)'))
        try:
            amends_seq = int(node.xpath('string(amends/@sequence)'))
        except ValueError:
            amends_seq = None
        obj.bill = Bill.objects.get(congress=obj.congress, bill_type=amends_type, number=amends_number)
        obj.sequence = amends_seq
        return True
Example #31
 def bill(self):
     if not hasattr(self, "_ref"):
         if ":" in self.feedname and self.feedname.split(":")[0] in (
                 "bill", ):
             from bill.models import Bill, BillType
             m = re.match(r"([a-z]+)(\d+)-(\d+)",
                          self.feedname.split(":")[1])
             bill_type = BillType.by_xml_code(m.group(1))
             return Bill.objects.get(congress=m.group(2),
                                     bill_type=bill_type,
                                     number=m.group(3))
         else:
             self._ref = None
     return self._ref
Example #32
    def process_bill(self, obj, node):
        if node.xpath('string(amends/@type)') == "treaty":
           # Cannot handle this.
           return False

        amends_type = BillType.by_xml_code(node.xpath('string(amends/@type)'))
        amends_number = int(node.xpath('string(amends/@number)'))
        try:
            amends_seq = int(node.xpath('string(amends/@sequence)'))
        except ValueError:
            amends_seq = None
        obj.bill = Bill.objects.get(congress=obj.congress, bill_type=amends_type, number=amends_number)
        obj.sequence = amends_seq
        return True
Example #33
def parse_bill_number(q, congress=None):
    m = bill_number_re.match(q.replace(" ", "").replace(".", "").replace("-", ""))
    if m is None: return None
    if m.group(3) is not None:
        cn = int(m.group(4))
    elif congress is not None:
        try:
            cn = int(congress)
        except (ValueError, TypeError):
            cn = CURRENT_CONGRESS
    else:
        cn = CURRENT_CONGRESS
    try:
        return Bill.objects.get(congress=cn, bill_type=BillType.by_slug(m.group(1).lower()), number=int(m.group(2)))
    except Bill.DoesNotExist:
        return None
Example #34
def parse_bill_number(q, congress=None):
    m = bill_number_re.match(q.replace(" ", "").replace(".", "").replace("-", ""))
    if m is None: return None
    if m.group(3) is not None:
        cn = int(m.group(4))
    elif congress is not None:
        try:
            cn = int(congress)
        except (ValueError, TypeError):
            cn = CURRENT_CONGRESS
    else:
        cn = CURRENT_CONGRESS
    try:
        return Bill.objects.get(congress=cn, bill_type=BillType.by_slug(m.group(1).lower()), number=int(m.group(2)))
    except Bill.DoesNotExist:
        return None
Example #35
def get_bill_for_page(page):
    for template in mwparserfromhell.parse(page["text"]).filter_templates():
        if template.name.strip() == "Infobox U.S. legislation":
            # print page["title"].encode("utf8")
            billref = get_bill_from_infobox(template)
            if billref:
                try:
                    if billref[0] == "PL":
                        # Get by public law number.
                        return Bill.objects.get(congress=billref[1], sliplawpubpriv="PUB", sliplawnum=billref[2])
                    elif billref[0] == "BILL":
                        # It's a bill number.
                        return Bill.objects.get(
                            congress=billref[1], bill_type=BillType.by_slug(billref[2]), number=billref[3]
                        )
                except Bill.DoesNotExist:
                    return None
    return None
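
get_bill_from_infobox() is not shown here, so the two billref tuple shapes below are inferred from the branches above rather than from its actual implementation; the values are illustrative.

# Inferred billref shapes handled by get_bill_for_page().
billref = ("PL", 113, 22)            # public law: (kind, congress, slip law number)
billref = ("BILL", 113, "hr", 1234)  # bill number: (kind, congress, bill type slug, number)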
Example #36
def bill_redirect(request, istext=None):
    """
    Redirect requests to obsolete bill urls which look like:

        /congress/bill.xpd?bill=[type_code][congress_number]-[bill_num]
    """

    token = request.GET.get('bill', '')
    match = BILL_TOKEN_REGEXP.search(token)
    if not match:
        raise Http404()
    type_code, congress, number = match.groups()
    try:
        bill_type = BillType.by_xml_code(type_code)
    except BillType.NotFound:
        raise Http404()
    bill = get_object_or_404(Bill, bill_type=bill_type, congress=congress,
                             number=number)
    return redirect(bill.get_absolute_url() + ("" if not istext else "/text"), permanent=True)
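
BILL_TOKEN_REGEXP is defined elsewhere in the module; a hypothetical pattern consistent with the docstring and the three groups unpacked above would be:

# Hypothetical only; the real BILL_TOKEN_REGEXP may differ.
import re
BILL_TOKEN_REGEXP = re.compile(r"([a-z]+)(\d+)-(\d+)")
# e.g. "h110-3" -> type_code "h", congress "110", number "3"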
Example #37
def get_bill_for_page(page):
	for template in mwparserfromhell.parse(page["text"]).filter_templates():
		if template.name.strip() == "Infobox U.S. legislation":
			#print page["title"].encode("utf8")
			try:
				billref = get_bill_from_infobox(template)
			except Exception as e:
				print(page["pageid"], e)
				billref = None
			if billref:
				try:
					if billref[0] == "PL":
						# Get by public law number.
						return Bill.objects.get(congress=billref[1], sliplawpubpriv="PUB", sliplawnum=billref[2])
					elif billref[0] == "BILL":
						# It's a bill number.
						return Bill.objects.get(congress=billref[1], bill_type=BillType.by_slug(billref[2]), number=billref[3])
				except Bill.DoesNotExist:
					return None
	return None
Example #38
def bill_details(request, congress, type_slug, number):
    if type_slug.isdigit():
        bill_type = type_slug
    else:
        try:
            bill_type = BillType.by_slug(type_slug)
        except BillType.NotFound:
            raise Http404("Invalid bill type: " + type_slug)
    
    bill = get_object_or_404(Bill, congress=congress, bill_type=bill_type, number=number)
    
    from person.name import get_person_name
    sponsor_name = None if not bill.sponsor else \
        get_person_name(bill.sponsor, role_date=bill.introduced_date, firstname_position='before', show_suffix=True)
    
    def get_reintroductions():
        reintro_prev = None
        reintro_next = None
        for reintro in bill.find_reintroductions():
            if reintro.congress < bill.congress: reintro_prev = reintro
            if reintro.congress > bill.congress and not reintro_next: reintro_next = reintro
        return reintro_prev, reintro_next
        
    def get_text_info():
        from billtext import load_bill_text
        try:
            return load_bill_text(bill, None, mods_only=True)
        except IOError:
            return None

    return {
        'bill': bill,
        "congressdates": get_congress_dates(bill.congress),
        "subtitle": get_secondary_bill_title(bill, bill.titles),
        "sponsor_name": sponsor_name,
        "reintros": get_reintroductions, # defer so we can use template caching
        "current": bill.congress == CURRENT_CONGRESS,
        "dead": bill.congress != CURRENT_CONGRESS and bill.current_status not in BillStatus.final_status_obvious,
        "feed": Feed.BillFeed(bill),
        "text": get_text_info,
    }
Example #39
def get_bill_text_metadata(bill, version):
    from bill.models import BillType # has to be here and not module-level to avoid cyclic dependency
    import glob, json

    bt = BillType.by_value(bill.bill_type).slug
    basename = "data/congress/%d/bills/%s/%s%d/text-versions" % (bill.congress, bt, bt, bill.number)
    
    if version is None:
        # Cycle through files to find most recent version by date.
        dat = None
        for versionfile in glob.glob(basename + "/*/data.json"):
            d = json.load(open(versionfile))
            if not dat or d["issued_on"] > dat["issued_on"]:
                dat = d
        if not dat: return None
    else:
        dat = json.load(open(basename + "/%s/data.json" % version))
        
    dat["plain_text_file"] = basename + "/" + dat["version_code"] + "/document.txt"
    
    return dat
Example #40
def load_docs_house_gov(options, bill_index):
    # Get most recent JSON file by looking at the lexicographically last one.
    fn = sorted(os.listdir("data/congress/upcoming_house_floor"))[-1]
    data = json.load(open("data/congress/upcoming_house_floor/" + fn))
    for billinfo in data.get("upcoming", []):
        m = re.match(r"([hrsjconres]+)(\d+)-(\d+)", billinfo["bill_id"])
        if not m:
            log.error('Could not parse bill_id "%s" in docs.house.gov.' % billinfo["bill_id"])
            continue

        bt = BillType.by_slug(m.group(1))
        try:
            bill = Bill.objects.get(congress=int(m.group(3)), bill_type=bt, number=int(m.group(2)))
        except Exception as e:
            log.error('Could not get bill "%s" in docs.house.gov: %s.' % (billinfo["bill_id"], str(e)))
            continue

        bill.docs_house_gov_postdate = BillProcessor.parse_datetime(billinfo["published_at"])
        bill.save()
        if bill_index: bill.update_index(bill_index)
        if not options.disable_events: bill.create_events()
Example #41
def load_bill_text_alt(bill, version, plain_text=False, mods_only=False):
    # Load bill text info from the Congress project JSON files.
    
    from bill.models import BillType # has to be here and not module-level to avoid cyclic dependency
    import glob, json

    bt = BillType.by_value(bill.bill_type).slug
    basename = "data/congress/%d/bills/%s/%s%d/text-versions" % (bill.congress, bt, bt, bill.number)
    
    if version is None:
        # Cycle through files to find most recent version by date.
        dat = None
        for versionfile in glob.glob(basename + "/*.json"):
            d = json.load(open(versionfile))
            if not dat or d["issued_on"] > dat["issued_on"]:
                dat = d
    else:
        dat = json.load(open(basename + "/%s.json" % version))
            
    if not mods_only:
        raise Exception("Bill text not available.")
            
    gpo_url = dat["urls"]["pdf"]
    m = re.match(r"http://www.gpo.gov/fdsys/pkg/(STATUTE-\d+)/pdf/(STATUTE-\d+-.*).pdf", gpo_url)
    if m:
        gpo_url = "http://www.gpo.gov/fdsys/granule/%s/%s/content-detail.html" % m.groups()
            
    return {
        "bill_id": bill.id,
        "bill_name": bill.title,
        "basename": basename,
        "docdate": datetime.date(*(int(d) for d in dat["issued_on"].split("-"))),
        "gpo_url": gpo_url,
        "gpo_pdf_url": dat["urls"]["pdf"],
        "doc_version": dat["version_code"],
        "doc_version_name": bill_gpo_status_codes[dat["version_code"]],
        "has_html_text": False,
    }
Example #42
def get_bill_text_metadata(bill, version):
    from bill.models import BillType  # has to be here and not module-level to avoid cyclic dependency
    import glob, json

    bt = BillType.by_value(bill.bill_type).slug
    basename = "data/congress/%d/bills/%s/%s%d/text-versions" % (
        bill.congress, bt, bt, bill.number)

    if version is None:
        # Cycle through files to find most recent version by date.
        dat = None
        for versionfile in glob.glob(basename + "/*/data.json"):
            d = json.load(open(versionfile))
            if not dat or d["issued_on"] > dat["issued_on"]:
                dat = d
        if not dat: return None
    else:
        dat = json.load(open(basename + "/%s/data.json" % version))

    dat["plain_text_file"] = basename + "/" + dat[
        "version_code"] + "/document.txt"

    return dat
Example #43
def get_bill_text_metadata(bill, version):
    from bill.models import BillType  # has to be here and not module-level to avoid cyclic dependency
    import glob, json

    bt = BillType.by_value(bill.bill_type).slug
    basename = "data/congress/%d/bills/%s/%s%d/text-versions" % (
        bill.congress, bt, bt, bill.number)

    if version is None:
        # Cycle through files to find most recent version by date.
        dat = None
        for versionfile in glob.glob(basename + "/*/data.json"):
            d = json.load(open(versionfile))
            if not dat or d["issued_on"] > dat["issued_on"]:
                dat = d
        if not dat: return None
    else:
        dat = json.load(open(basename + "/%s/data.json" % version))

    # human readable status name

    dat["status_name"] = get_gpo_status_code_name(dat["version_code"])
    dat["corresponding_status_codes"] = get_gpo_status_code_corresponding_status(
        dat["version_code"])

    # parse date

    dat["issued_on"] = datetime.date(*(int(d)
                                       for d in dat["issued_on"].split("-")))

    # find content files

    basename += "/" + dat["version_code"]

    bt2 = BillType.by_value(bill.bill_type).xml_code
    html_fn = "data/congress-bill-text-legacy/%s/%s/%s%d%s.html" % (
        bill.congress, bt2, bt2, bill.number, dat["version_code"])

    if os.path.exists(basename + "/mods.xml"):
        dat["mods_file"] = basename + "/mods.xml"

    # get a plain text file if one exists
    if os.path.exists(basename + "/document.txt"):
        dat["text_file"] = basename + "/document.txt"
        dat["has_displayable_text"] = True

        for source in dat.get("sources", []):
            if source["source"] == "statutes":
                dat["text_file_source"] = "statutes"

    # get an HTML file if one exists
    if os.path.exists(html_fn):
        dat["html_file"] = html_fn
        dat["has_displayable_text"] = True

    # get a PDF file if one exists
    pdf_fn = basename + "/document.pdf"
    if os.path.exists(pdf_fn):
        dat["pdf_file"] = pdf_fn
        dat["has_thumbnail"] = True
        dat["thumbnail_path"] = bill.get_absolute_url() + "/_text_image"

    # get an XML file if one exists
    if os.path.exists(basename + "/catoxml.xml"):
        dat["xml_file"] = basename + "/catoxml.xml"
        dat["has_displayable_text"] = True
        dat["xml_file_source"] = "cato-deepbills"
    elif os.path.exists(basename + "/document.xml"):
        dat["xml_file"] = basename + "/document.xml"
        dat["has_displayable_text"] = True

    if settings.DEBUG:
        dat["has_thumbnail"] = True

    return dat
Example #44
def load_bill_text(bill, version, plain_text=False, mods_only=False):
    if bill.congress < 103:
        return load_bill_text_alt(bill, version, plain_text=plain_text, mods_only=mods_only)
    
    from bill.models import BillType # has to be here and not module-level to avoid cyclic dependency

    bt = BillType.by_value(bill.bill_type).xml_code
    basename = "data/us/bills.text/%s/%s/%s%d%s" % (bill.congress, bt, bt, bill.number, version if version is not None else "")
    
    if mods_only:
        bill_text_content = None
    else:
        if plain_text:
            try:
                return open(basename + ".txt").read().decode("utf8", "ignore") # otherwise we get 'Chuck failed' in the xapian_backend apparently due to decoding issue.
            except IOError:
                return ""
        elif os.path.exists(basename + ".xml") and False:
            dom = lxml.etree.parse(basename + ".xml")
            transform = lxml.etree.parse(os.path.join(os.path.dirname(os.path.realpath(__file__)), "textxsl/billres.xsl"))
            transform = lxml.etree.XSLT(transform)
            result = transform(dom)
            
            # empty nodes cause HTML parsing problems, so remove them.
            # iterate in reverse document order so that we hit parents after
            # their children, since if we remove all of the children then we may
            # want to remove the parent too.
            for node in reversed(list(result.getiterator())):
                if node.xpath("string(.)") == "":
                    node.getparent().remove(node)
                    
            bill_text_content = lxml.etree.tostring(result.xpath("head/style")[0]) + lxml.etree.tostring(result.xpath("body")[0])
        else:
            bill_text_content = open(basename + ".html").read()
    
    mods = lxml.etree.parse(basename + ".mods.xml")
    ns = { "mods": "http://www.loc.gov/mods/v3" }
    docdate = mods.xpath("string(mods:originInfo/mods:dateIssued)", namespaces=ns)
    gpo_url = "http://www.gpo.gov/fdsys/search/pagedetails.action?packageId=" + mods.xpath("string(mods:recordInfo/mods:recordIdentifier[@source='DGPO'])", namespaces=ns)
    #gpo_url = mods.xpath("string(mods:identifier[@type='uri'])", namespaces=ns)
    gpo_pdf_url = mods.xpath("string(mods:location/mods:url[@displayLabel='PDF rendition'])", namespaces=ns)
    doc_version = mods.xpath("string(mods:extension/mods:billVersion)", namespaces=ns)
    numpages = mods.xpath("string(mods:physicalDescription/mods:extent)", namespaces=ns)
    if numpages: numpages = re.sub(r" p\.$", " pages", numpages)
    
    docdate = datetime.date(*(int(d) for d in docdate.split("-")))
    
    doc_version_name = bill_gpo_status_codes[doc_version]
    
    return {
        "bill_id": bill.id,
        "bill_name": bill.title,
        "basename": basename,
        "text_html": bill_text_content,
        "docdate": docdate,
        "gpo_url": gpo_url,
        "gpo_pdf_url": gpo_pdf_url,
        "doc_version": doc_version,
        "doc_version_name": doc_version_name,
        "numpages": numpages,
        "has_html_text": True,
    }
Example #45
 def type_handler(self, value):
     return BillType.by_xml_code(value)
Example #46
def main(options):
    """
    Process bill terms and bills
    """

    # Terms

    term_processor = TermProcessor()
    terms_parsed = set()
    
    # Cache existing terms. There aren't so many.
    existing_terms = { }
    for term in BillTerm.objects.all():
        existing_terms[(int(term.term_type), term.name)] = term

    log.info('Processing old bill terms')
    TERMS_FILE = 'data/us/liv.xml'
    tree = etree.parse(TERMS_FILE)
    for node in tree.xpath('/liv/top-term'):
        term = term_processor.process(BillTerm(), node)
        term.term_type = TermType.old
        try:
            # No need to update an existing term because there are no other attributes.
            term = existing_terms[(int(term.term_type), term.name)]
            terms_parsed.add(term.id)
        except:
            log.debug("Created %s" % term)
            term.save()
            term.subterms.clear()
            
        for subnode in node.xpath('./term'):
            subterm = term_processor.process(BillTerm(), subnode)
            subterm.term_type = TermType.old
            try:
                # No need to update an existing term because there are no other attributes.
                subterm = existing_terms[(int(subterm.term_type), subterm.name)]
                term.subterms.add(subterm) 
                terms_parsed.add(subterm.id)
            except:
                try:
                    log.debug("Created %s" % subterm)
                    subterm.save()
                    term.subterms.add(subterm)
                    
                    existing_terms[(int(subterm.term_type), subterm.name)] = subterm
                    terms_parsed.add(subterm.id)
                except IntegrityError:
                    log.error('Duplicated term %s' % term_processor.display_node(subnode))

    log.info('Processing new bill terms')
    for FILE in ('data/us/liv111.xml', 'data/us/crsnet.xml'):
        tree = etree.parse(FILE)
        for node in tree.xpath('/liv/top-term'):
            term = term_processor.process(BillTerm(), node)
            term.term_type = TermType.new
            try:
                # No need to update an existing term because there are no other attributes.
                term = existing_terms[(int(term.term_type), term.name)]
                terms_parsed.add(term.id)
            except:
                log.debug("Created %s" % term)
                term.save()
                term.subterms.clear()

            for subnode in node.xpath('./term'):
                subterm = term_processor.process(BillTerm(), subnode)
                subterm.term_type = TermType.new
                try:
                    # No need to update an existing term because there are no other attributes.
                    subterm = existing_terms[(int(subterm.term_type), subterm.name)]
                    terms_parsed.add(subterm.id)
                    term.subterms.add(subterm)
                except:
                    try:
                        log.debug("Created %s" % term)
                        subterm.save()
                        term.subterms.add(subterm)
                        
                        existing_terms[(int(subterm.term_type), subterm.name)] = subterm
                        terms_parsed.add(subterm.id)
                    except IntegrityError:
                        log.error('Duplicated term %s' % term_processor.display_node(subnode))

    for term in existing_terms.values():
        if not term.id in terms_parsed:
            log.debug("Deleted %s" % term)
            term.delete()

    # Bills
    
    bill_index = None
    if not options.disable_indexing:
        from bill.search_indexes import BillIndex
        bill_index = BillIndex()

    if options.congress:
        files = glob.glob('data/us/%s/bills/*.xml' % options.congress)
        log.info('Parsing bills of only congress#%s' % options.congress)
    else:
        files = glob.glob('data/us/*/bills/*.xml')
        
    if options.filter:
        files = [f for f in files if re.match(options.filter, f)]
        
    log.info('Processing bills: %d files' % len(files))
    total = len(files)
    progress = Progress(total=total, name='files', step=100)

    bill_processor = BillProcessor()
    seen_bill_ids = []
    for fname in files:
        progress.tick()
        
        if not File.objects.is_changed(fname) and not options.force:
            m = re.search(r"/(\d+)/bills/([a-z]+)(\d+)\.xml$", fname)

            try:
                b = Bill.objects.get(congress=m.group(1), bill_type=BillType.by_xml_code(m.group(2)), number=m.group(3))
                seen_bill_ids.append(b.id)
                
                # Update the index/events for any bill with recently changed text
                textfile = "data/us/bills.text/%s/%s/%s%s.txt" % (m.group(1), m.group(2), m.group(2), m.group(3))
                if (bill_index and not options.disable_events) and os.path.exists(textfile) and File.objects.is_changed(textfile):
                    bill_index.update_object(b, using="bill") # index the full text
                    b.create_events() # events for new bill text documents
                    File.objects.save_file(textfile)
                    
                continue
            except Bill.DoesNotExist:
                pass # just parse as normal
            
        if options.slow:
            time.sleep(1)
            
        skip_stuff = False
            
        tree = etree.parse(fname)
        for node in tree.xpath('/bill'):
            if not skip_stuff:
                try:
                    bill = bill_processor.process(Bill(), node)
                except:
                    print fname
                    raise
            else:
                m = re.search(r"/(\d+)/bills/([a-z]+)(\d+)\.xml$", fname)
                bill = Bill.objects.get(congress=m.group(1), bill_type=BillType.by_xml_code(m.group(2)), number=m.group(3))
           
            seen_bill_ids.append(bill.id) # don't delete me later
            
            actions = []
            bill.sliplawpubpriv = None
            bill.sliplawnum = None
            for axn in tree.xpath("actions/*[@state]"):
                actions.append( (repr(bill_processor.parse_datetime(axn.xpath("string(@datetime)"))), BillStatus.by_xml_code(axn.xpath("string(@state)")), axn.xpath("string(text)")) )
                
                if actions[-1][1] in (BillStatus.enacted_signed, BillStatus.enacted_veto_override):
                    bill.sliplawpubpriv = "PUB" if axn.get("type") == "public" else "PRI"
                    bill.sliplawnum = int(axn.get("number").split("-")[1])
                    
            bill.major_actions = actions
            try:
                bill.save()
            except:
                print bill
                raise
            if bill_index: bill_index.update_object(bill, using="bill")
            
            if not options.disable_events:
                bill.create_events()

        if not skip_stuff:
            File.objects.save_file(fname)
        
    # delete bill objects that are no longer represented on disk.... this is too dangerous.
    if options.congress and not options.filter and False:
        # this doesn't work because seen_bill_ids is too big for sqlite!
        Bill.objects.filter(congress=options.congress).exclude(id__in = seen_bill_ids).delete()
        
    # Parse docs.house.gov for what might be coming up this week.
    import iso8601
    dhg_html = urllib.urlopen("http://docs.house.gov/floor/").read()
    m = re.search(r"class=\"downloadXML\" href=\"(Download.aspx\?file=.*?)\"", dhg_html)
    if not m:
        log.error('No docs.house.gov download link found at http://docs.house.gov.')
    else:
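        # Build a regex for a bill type abbreviation in which the periods and
        # spacing are optional, so e.g. "H.R." also matches "HR" or "H. R.".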
        def bt_re(bt): return re.escape(bt[1]).replace(r"\.", r"\.?\s*")
        try:
            dhg = etree.parse(urllib.urlopen("http://docs.house.gov/floor/" + m.group(1))).getroot()
        except:
            print "http://docs.house.gov/" + m.group(1)
            raise
        # iso8601.parse_date(dhg.get("week-date")+"T00:00:00").date()
        for item in dhg.xpath("category/floor-items/floor-item"):
            billname = item.xpath("legis-num")[0].text
            m = re.match(r"\s*(?:Concur in the Senate Amendment to |Senate Amendment to )?("
                + "|".join(bt_re(bt) for bt in BillType)
                + r")(\d+)\s*(\[Conference Report\]\s*)?$", billname, re.I)
            if not m:
                if billname.strip() != "H.R. __":
                    log.error('Could not parse legis-num "%s" in docs.house.gov.' % billname)
            else:
                for bt in BillType:
                    if re.match(bt_re(bt) + "$", m.group(1), re.I):
                        try:
                            bill = Bill.objects.get(congress=CURRENT_CONGRESS, bill_type=bt[0], number=m.group(2))
                            bill.docs_house_gov_postdate = iso8601.parse_date(item.get("add-date")).replace(tzinfo=None)
                            bill.save()
                            if bill_index: bill_index.update_object(bill, using="bill")
                            
                            if not options.disable_events:
                                bill.create_events()
                        except Bill.DoesNotExist:
                            log.error('Could not find bill "%s" in docs.house.gov.' % billname)
                        break
                else:
                    log.error('Could not parse legis-num bill type "%s" in docs.house.gov.' % billname)

    # Parse Senate.gov's "Floor Schedule" blurb for coming up tomorrow.
    now = datetime.now()
    sfs = urllib.urlopen("http://www.senate.gov/pagelayout/legislative/d_three_sections_with_teasers/calendars.htm").read()
    try:
        sfs = re.search(r"Floor Schedule([\w\W]*)Previous Meeting", sfs).group(1)
        for congress, bill_type, number in re.findall(r"http://hdl.loc.gov/loc.uscongress/legislation.(\d+)([a-z]+)(\d+)", sfs):
            bill_type = BillType.by_slug(bill_type)
            bill = Bill.objects.get(congress=congress, bill_type=bill_type, number=number)
            if bill.senate_floor_schedule_postdate == None or now - bill.senate_floor_schedule_postdate > timedelta(days=7):
                bill.senate_floor_schedule_postdate = now
                bill.save()
                if bill_index: bill_index.update_object(bill, using="bill")
                if not options.disable_events:
                    bill.create_events()
    except Exception as e:
        log.error('Could not parse Senate Floor Schedule: ' + repr(e))
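
# Fragment of a processor class: type_handler maps an XML bill-type code to a BillType enum value.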
def type_handler(self, value):
    return BillType.by_xml_code(value)
def main(options):
    """
    Process bill terms and bills
    """

    # Terms

    term_processor = TermProcessor()
    terms_parsed = set()

    # Cache existing terms. There aren't so many.
    existing_terms = {}
    for term in BillTerm.objects.all():
        existing_terms[(int(term.term_type), term.name)] = term

    log.info('Processing old bill terms')
    TERMS_FILE = 'bill/liv.xml'
    tree = etree.parse(TERMS_FILE)
    for node in tree.xpath('/liv/top-term'):
        term = term_processor.process(BillTerm(), node)
        term.term_type = TermType.old
        try:
            # No need to update an existing term because there are no other attributes.
            term = existing_terms[(int(term.term_type), term.name)]
            terms_parsed.add(term.id)
        except:
            log.debug("Created %s" % term)
            term.save()
            term.subterms.clear()

        for subnode in node.xpath('./term'):
            subterm = term_processor.process(BillTerm(), subnode)
            subterm.term_type = TermType.old
            try:
                # No need to update an existing term because there are no other attributes.
                subterm = existing_terms[(int(subterm.term_type),
                                          subterm.name)]
                term.subterms.add(subterm)
                terms_parsed.add(subterm.id)
            except:
                try:
                    log.debug("Created %s" % subterm)
                    subterm.save()
                    term.subterms.add(subterm)

                    existing_terms[(int(subterm.term_type),
                                    subterm.name)] = subterm
                    terms_parsed.add(subterm.id)
                except IntegrityError:
                    log.error('Duplicated term %s' %
                              term_processor.display_node(subnode))

    log.info('Processing new bill terms')
    for FILE in ('bill/liv111.xml', 'bill/crsnet.xml'):
        tree = etree.parse(FILE)
        for node in tree.xpath('/liv/top-term'):
            term = term_processor.process(BillTerm(), node)
            term.term_type = TermType.new
            try:
                # No need to update an existing term because there are no other attributes.
                term = existing_terms[(int(term.term_type), term.name)]
                terms_parsed.add(term.id)
            except:
                log.debug("Created %s" % term)
                term.save()
                term.subterms.clear()

            for subnode in node.xpath('./term'):
                subterm = term_processor.process(BillTerm(), subnode)
                subterm.term_type = TermType.new
                try:
                    # No need to update an existing term because there are no other attributes.
                    subterm = existing_terms[(int(subterm.term_type),
                                              subterm.name)]
                    terms_parsed.add(subterm.id)
                    term.subterms.add(subterm)
                except:
                    try:
                        log.debug("Created %s" % subterm)
                        subterm.save()
                        term.subterms.add(subterm)

                        existing_terms[(int(subterm.term_type),
                                        subterm.name)] = subterm
                        terms_parsed.add(subterm.id)
                    except IntegrityError:
                        log.error('Duplicated term %s' %
                                  term_processor.display_node(subnode))

    for term in existing_terms.values():
        if not term.id in terms_parsed:
            log.debug("Deleted %s" % term)
            term.delete()

    # Bills

    bill_index = None
    if not options.disable_indexing:
        from bill.search_indexes import BillIndex
        bill_index = BillIndex()

    if options.congress:
        files = glob.glob(settings.CONGRESS_DATA_PATH +
                          '/%s/bills/*/*/data.xml' % options.congress)
        log.info('Parsing unitedstates/congress bills of only congress#%s' %
                 options.congress)
    else:
        files = glob.glob(settings.CONGRESS_DATA_PATH +
                          '/*/bills/*/*/data.xml')

    if options.filter:
        files = [f for f in files if re.match(options.filter, f)]

    log.info('Processing bills: %d files' % len(files))
    total = len(files)
    progress = Progress(total=total, name='files', step=100)

    bill_processor = BillProcessor()
    seen_bill_ids = []
    for fname in files:
        progress.tick()

        # With indexing or events enabled, if the bill metadata file hasn't changed check
        # the bill's latest text file for changes so we can create a text-is-available
        # event and so we can index the bill's text.
        if (not options.congress or int(options.congress) > 42) and (
                bill_index and not options.disable_events
        ) and not File.objects.is_changed(fname) and not options.force:
            m = re.match(
                re.escape(settings.CONGRESS_DATA_PATH) +
                r'/(?P<congress>\d+)/bills/(?P<bill_type>[a-z]+)/(?P<bill_type_2>[a-z]+)(?P<number>\d+)/data.xml',
                fname)

            try:
                b = Bill.objects.get(congress=int(m.group("congress")),
                                     bill_type=BillType.by_slug(
                                         m.group("bill_type")),
                                     number=m.group("number"))
                seen_bill_ids.append(b.id)

                # Update the index/events for any bill with recently changed text
                textfile = get_bill_text_metadata(b, None)
                if not textfile:
                    if b.congress >= 103 and b.introduced_date < (
                            datetime.now() - timedelta(days=14)).date():
                        print("No bill text?", fname, b.introduced_date)
                    continue
                textfile = textfile["text_file"]
                if os.path.exists(textfile) and File.objects.is_changed(
                        textfile):
                    b.update_index(bill_index)  # index the full text
                    b.create_events()  # events for new bill text documents
                    File.objects.save_file(textfile)

                continue
            except Bill.DoesNotExist:
                print("Unchanged metadata file but bill doesn't exist:", fname)
                pass  # just parse as normal

        if options.slow:
            time.sleep(1)

        tree = etree.parse(fname)
        for node in tree.xpath('/bill'):
            try:
                bill = bill_processor.process(Bill(), node)
            except:
                print(fname)
                raise

            seen_bill_ids.append(bill.id)  # don't delete me later

            # So far this is just for American Memory bills.
            if node.xpath("string(source/@url)"):
                bill.source_link = str(node.xpath("string(source/@url)"))
            else:
                bill.source_link = None

            actions = []
            for axn in tree.xpath("actions/*[@state]"):
                if axn.xpath("string(@state)") == "REFERRED":
                    continue  # we don't track this state
                actions.append((
                    repr(
                        bill_processor.parse_datetime(
                            axn.xpath("string(@datetime)"))),
                    BillStatus.by_xml_code(axn.xpath("string(@state)")),
                    axn.xpath("string(text)"),
                    etree.tostring(axn, encoding=str),
                ))

            bill.sliplawpubpriv = None
            bill.sliplawnum = None
            for axn in tree.xpath("actions/enacted"):
                bill.sliplawpubpriv = "PUB" if axn.get(
                    "type") == "public" else "PRI"
                bill.sliplawnum = int(axn.get("number").split("-")[1])

            bill.major_actions = actions
            try:
                bill.save()
            except:
                print(bill)
                raise

            if bill_index:
                bill.update_index(bill_index)

            if not options.disable_events:
                bill.create_events()

        File.objects.save_file(fname)

    # delete bill objects that are no longer represented on disk.... this is too dangerous.
    if options.congress and not options.filter:
        # this doesn't work because seen_bill_ids is too big for sqlite!
        for b in Bill.objects.filter(congress=options.congress).exclude(
                id__in=seen_bill_ids):
            print("Bill is no longer on disk: ", b.id, b)

    # The rest is for current only...

    if options.congress and int(options.congress) != settings.CURRENT_CONGRESS:
        return

    # Find what might be coming up this week.
    load_docs_house_gov(options, bill_index)
    load_senate_floor_schedule(options, bill_index)
#!script

from bill.models import BillType

import glob, re, os.path

for congress in range(103, 112 + 1):
    for fn in glob.glob("data/us/bills.text/%d/*/*.xml" % congress):
        bill_type, bill_number, print_code, stuff = re.search(
            r"/([a-z]+)(\d+)([a-z][a-z0-9]*)?(\.gen|\.mods)?\.xml$",
            fn).groups()
        if not print_code: continue  # my symbolic links to latest version
        if stuff: continue  # skip ".gen.xml"/".mods.xml" variants
        bill_type = BillType.by_xml_code(bill_type).slug
        fn2 = "data/congress/%d/bills/%s/%s%s/text-versions/%s/mods.xml" % (
            congress, bill_type, bill_type, bill_number, print_code)
        if not os.path.exists(fn2):
            print(fn2)
Example #50
0
def bill_details(request, congress, type_slug, number):
    if type_slug.isdigit():
        bill_type = type_slug
    else:
        try:
            bill_type = BillType.by_slug(type_slug)
        except BillType.NotFound:
            raise Http404("Invalid bill type: " + type_slug)
    
    bill = get_object_or_404(Bill, congress=congress, bill_type=bill_type, number=number)
    
    from person.name import get_person_name
    sponsor_name = None if not bill.sponsor else \
        get_person_name(bill.sponsor, role_date=bill.introduced_date, firstname_position='before', show_suffix=True)
    
    def get_reintroductions():
        reintro_prev = None
        reintro_next = None
        for reintro in bill.find_reintroductions():
            if reintro.congress < bill.congress: reintro_prev = reintro
            if reintro.congress > bill.congress and not reintro_next: reintro_next = reintro
        return reintro_prev, reintro_next
        
    def get_text_info():
        from models import USCSection
        from billtext import load_bill_text
        from search import parse_slip_law_number
        import re
        try:
            metadata = load_bill_text(bill, None, mods_only=True)
            
            # do interesting stuff with citations
            if "citations" in metadata:
                slip_laws = []
                statutes = []
                usc = { }
                other = []
                usc_other = USCSection(name="Other Citations", ordering=99999)
                for cite in metadata["citations"]:
                    if cite["type"] == "slip_law":
                        slip_laws.append(cite)
                        cite["bill"] = parse_slip_law_number(cite["text"])
                    elif cite["type"] == "statutes_at_large":
                        statutes.append(cite)
                    elif cite["type"] == "usc":
                        # build a normalized citation and a link to LII
                        cite_norm = "usc/" + cite["title"]
                        cite_link = "http://www.law.cornell.edu/uscode/text/" + cite["title"]
                        if cite["section"]:
                            cite_link += "/" + cite["section"]
                            cite_norm += "/" + cite["section"]
                        if cite["paragraph"]: cite_link += "#" + "_".join(re.findall(r"\(([^)]+)\)", cite["paragraph"]))
                        
                        # Build a tree of title-chapter-...-section nodes so we can
                        # display the citations in context.
                        try:
                            sec_obj = USCSection.objects.get(citation=cite_norm)
                        except: # USCSection.DoesNotExist and MultipleObjectsReturned both possible
                            # the 'id' field is set to make these objects properly hashable
                            sec_obj = USCSection(id=cite["text"], name=cite["text"], parent_section=usc_other)
                        
                        sec_obj.link = cite_link
                        
                        if "range_to_section" in cite:
                            sec_obj.range_to_section = cite["range_to_section"]
                        
                        # recursively go up to the title
                        path = [sec_obj]
                        while sec_obj.parent_section:
                            sec_obj = sec_obj.parent_section
                            path.append(sec_obj)
                            
                        # now pop off from the path to put the node at the right point in a tree
                        container = usc
                        while path:
                            p = path.pop(-1)
                            if p not in container: container[p] = { }
                            container = container[p]
                        
                    else:
                        other.append(cite)
                        
                slip_laws.sort(key = lambda x : (x["congress"], x["number"]))
                
                # restructure data format
                def ucfirst(s): return s[0].upper() + s[1:]
                def rebuild_usc_sec(seclist, indent=0):
                    ret = []
                    seclist = sorted(seclist.items(), key=lambda x : x[0].ordering)
                    for sec, subparts in seclist:
                        ret.append({
                            "text": (ucfirst(sec.level_type + ((" " + sec.number) if sec.number else "") + (": " if sec.name else "")) if sec.level_type else "") + (sec.name if sec.name else ""),
                            "link": getattr(sec, "link", None),
                            "range_to_section": getattr(sec, "range_to_section", None),
                            "indent": indent,
                        })
                        ret.extend(rebuild_usc_sec(subparts, indent=indent+1))
                    return ret
                usc = rebuild_usc_sec(usc)
                
                metadata["citations"] = {
                    "slip_laws": slip_laws, "statutes": statutes, "usc": usc, "other": other,
                    "count": len(slip_laws)+len(statutes)+len(usc)+len(other) }
            return metadata
        except IOError:
            return None

    return {
        'bill': bill,
        "congressdates": get_congress_dates(bill.congress),
        "subtitle": get_secondary_bill_title(bill, bill.titles),
        "sponsor_name": sponsor_name,
        "reintros": get_reintroductions, # defer so we can use template caching
        "current": bill.congress == CURRENT_CONGRESS,
        "dead": bill.congress != CURRENT_CONGRESS and bill.current_status not in BillStatus.final_status_obvious,
        "feed": Feed.BillFeed(bill),
        "text": get_text_info,
    }
Example #51
0
def main(options):
    """
    Process bill terms and bills
    """

    # Terms

    term_processor = TermProcessor()
    terms_parsed = set()
    
    # Cache existing terms. There aren't so many.
    existing_terms = { }
    for term in BillTerm.objects.all():
        existing_terms[(int(term.term_type), term.name)] = term

    log.info('Processing old bill terms')
    TERMS_FILE = 'data/us/liv.xml'
    tree = etree.parse(TERMS_FILE)
    for node in tree.xpath('/liv/top-term'):
        term = term_processor.process(BillTerm(), node)
        term.term_type = TermType.old
        try:
            # No need to update an existing term because there are no other attributes.
            term = existing_terms[(int(term.term_type), term.name)]
            terms_parsed.add(term.id)
        except:
            log.debug("Created %s" % term)
            term.save()
            term.subterms.clear()
            
        for subnode in node.xpath('./term'):
            subterm = term_processor.process(BillTerm(), subnode)
            subterm.term_type = TermType.old
            try:
                # No need to update an existing term because there are no other attributes.
                subterm = existing_terms[(int(subterm.term_type), subterm.name)]
                term.subterms.add(subterm) 
                terms_parsed.add(subterm.id)
            except:
                try:
                    log.debug("Created %s" % subterm)
                    subterm.save()
                    term.subterms.add(subterm)
                    
                    existing_terms[(int(subterm.term_type), subterm.name)] = subterm
                    terms_parsed.add(subterm.id)
                except IntegrityError:
                    log.error('Duplicated term %s' % term_processor.display_node(subnode))

    log.info('Processing new bill terms')
    for FILE in ('data/us/liv111.xml', 'data/us/crsnet.xml'):
        tree = etree.parse(FILE)
        for node in tree.xpath('/liv/top-term'):
            term = term_processor.process(BillTerm(), node)
            term.term_type = TermType.new
            try:
                # No need to update an existing term because there are no other attributes.
                term = existing_terms[(int(term.term_type), term.name)]
                terms_parsed.add(term.id)
            except:
                log.debug("Created %s" % term)
                term.save()
                term.subterms.clear()

            for subnode in node.xpath('./term'):
                subterm = term_processor.process(BillTerm(), subnode)
                subterm.term_type = TermType.new
                try:
                    # No need to update an existing term because there are no other attributes.
                    subterm = existing_terms[(int(subterm.term_type), subterm.name)]
                    terms_parsed.add(subterm.id)
                    term.subterms.add(subterm)
                except:
                    try:
                        log.debug("Created %s" % subterm)
                        subterm.save()
                        term.subterms.add(subterm)
                        
                        existing_terms[(int(subterm.term_type), subterm.name)] = subterm
                        terms_parsed.add(subterm.id)
                    except IntegrityError:
                        log.error('Duplicated term %s' % term_processor.display_node(subnode))

    for term in existing_terms.values():
        if not term.id in terms_parsed:
            log.debug("Deleted %s" % term)
            term.delete()

    # Bills
    
    bill_index = None
    if not options.disable_indexing:
        from bill.search_indexes import BillIndex
        bill_index = BillIndex()

    if options.congress and int(options.congress) <= 42:
        files = glob.glob('data/congress/%s/bills/*/*/*.xml' % options.congress)
        log.info('Parsing unitedstates/congress bills of only congress#%s' % options.congress)
    elif options.congress:
        files = glob.glob('data/us/%s/bills/*.xml' % options.congress)
        log.info('Parsing bills of only congress#%s' % options.congress)
    else:
        files = glob.glob('data/us/*/bills/*.xml')
        
    if options.filter:
        files = [f for f in files if re.match(options.filter, f)]
        
    log.info('Processing bills: %d files' % len(files))
    total = len(files)
    progress = Progress(total=total, name='files', step=100)

    bill_processor = BillProcessor()
    seen_bill_ids = []
    for fname in files:
        progress.tick()
        
        # With indexing or events enabled, if the bill metadata file hasn't changed check
        # the bill's latest text file for changes so we can create a text-is-available
        # event and so we can index the bill's text.
        if (not options.congress or int(options.congress) > 42) and (bill_index and not options.disable_events) and not File.objects.is_changed(fname) and not options.force:
            m = re.search(r"/(\d+)/bills/([a-z]+)(\d+)\.xml$", fname)

            try:
                b = Bill.objects.get(congress=m.group(1), bill_type=BillType.by_xml_code(m.group(2)), number=m.group(3))
                seen_bill_ids.append(b.id)
                
                # Update the index/events for any bill with recently changed text
                textfile = get_bill_text_metadata(b, None)
                if not textfile:
                    if b.congress >= 103 and b.introduced_date < (datetime.now()-timedelta(days=14)).date():
                        print "No bill text?", fname, b.introduced_date
                    continue
                textfile = textfile["text_file"]
                if os.path.exists(textfile) and File.objects.is_changed(textfile):
                    b.update_index(bill_index) # index the full text
                    b.create_events() # events for new bill text documents
                    File.objects.save_file(textfile)
                    
                continue
            except Bill.DoesNotExist:
                print "Unchanged metadata file but bill doesn't exist:", fname
                pass # just parse as normal
            
        if options.slow:
            time.sleep(1)
            
        tree = etree.parse(fname)
        for node in tree.xpath('/bill'):
            try:
                bill = bill_processor.process(Bill(), node)
            except:
                print fname
                raise
           
            seen_bill_ids.append(bill.id) # don't delete me later
            
            # So far this is just for American Memory bills.
            if node.xpath("string(source/@url)"):
                bill.source_link = unicode(node.xpath("string(source/@url)"))
            else:
                bill.source_link = None

            actions = []
            for axn in tree.xpath("actions/*[@state]"):
                actions.append( (
                    repr(bill_processor.parse_datetime(axn.xpath("string(@datetime)"))),
                    BillStatus.by_xml_code(axn.xpath("string(@state)")),
                    axn.xpath("string(text)"),
                    etree.tostring(axn),
                    ) )
                
            bill.sliplawpubpriv = None
            bill.sliplawnum = None
            for axn in tree.xpath("actions/enacted"):
                bill.sliplawpubpriv = "PUB" if axn.get("type") == "public" else "PRI"
                bill.sliplawnum = int(axn.get("number").split("-")[1])
                    
            bill.major_actions = actions
            try:
                bill.save()
            except:
                print bill
                raise

            if bill_index:
                bill.update_index(bill_index)

            if not options.disable_events:
                bill.create_events()
                
        File.objects.save_file(fname)
        
    # delete bill objects that are no longer represented on disk.... this is too dangerous.
    if options.congress and not options.filter:
        # this doesn't work because seen_bill_ids is too big for sqlite!
        for b in Bill.objects.filter(congress=options.congress).exclude(id__in = seen_bill_ids):
            print "Bill is no longer on disk: ", b.id, b
        
    # The rest is for current only...
    
    if options.congress and int(options.congress) != CURRENT_CONGRESS:
        return
        
    # Find what might be coming up this week.
    load_docs_house_gov(options, bill_index)
    load_senate_floor_schedule(options, bill_index)
Example #52
0
def main(options):
    """
    Process bill terms and bills
    """

    # Terms

    term_processor = TermProcessor()
    terms_parsed = set()

    # Cache existing terms. There aren't so many.
    existing_terms = {}
    for term in BillTerm.objects.all():
        existing_terms[(int(term.term_type), term.name)] = term

    log.info('Processing old bill terms')
    TERMS_FILE = 'data/us/liv.xml'
    tree = etree.parse(TERMS_FILE)
    for node in tree.xpath('/liv/top-term'):
        term = term_processor.process(BillTerm(), node)
        term.term_type = TermType.old
        try:
            # No need to update an existing term because there are no other attributes.
            term = existing_terms[(int(term.term_type), term.name)]
            terms_parsed.add(term.id)
        except:
            log.debug("Created %s" % term)
            term.save()
            term.subterms.clear()

        for subnode in node.xpath('./term'):
            subterm = term_processor.process(BillTerm(), subnode)
            subterm.term_type = TermType.old
            try:
                # No need to update an existing term because there are no other attributes.
                subterm = existing_terms[(int(subterm.term_type),
                                          subterm.name)]
                term.subterms.add(subterm)
                terms_parsed.add(subterm.id)
            except:
                try:
                    log.debug("Created %s" % subterm)
                    subterm.save()
                    term.subterms.add(subterm)

                    existing_terms[(int(subterm.term_type),
                                    subterm.name)] = subterm
                    terms_parsed.add(subterm.id)
                except IntegrityError:
                    log.error('Duplicated term %s' %
                              term_processor.display_node(subnode))

    log.info('Processing new bill terms')
    for FILE in ('data/us/liv111.xml', 'data/us/crsnet.xml'):
        tree = etree.parse(FILE)
        for node in tree.xpath('/liv/top-term'):
            term = term_processor.process(BillTerm(), node)
            term.term_type = TermType.new
            try:
                # No need to update an existing term because there are no other attributes.
                term = existing_terms[(int(term.term_type), term.name)]
                terms_parsed.add(term.id)
            except:
                log.debug("Created %s" % term)
                term.save()
                term.subterms.clear()

            for subnode in node.xpath('./term'):
                subterm = term_processor.process(BillTerm(), subnode)
                subterm.term_type = TermType.new
                try:
                    # No need to update an existing term because there are no other attributes.
                    subterm = existing_terms[(int(subterm.term_type),
                                              subterm.name)]
                    terms_parsed.add(subterm.id)
                    term.subterms.add(subterm)
                except:
                    try:
                        log.debug("Created %s" % subterm)
                        subterm.save()
                        term.subterms.add(subterm)

                        existing_terms[(int(subterm.term_type),
                                        subterm.name)] = subterm
                        terms_parsed.add(subterm.id)
                    except IntegrityError:
                        log.error('Duplicated term %s' %
                                  term_processor.display_node(subnode))

    for term in existing_terms.values():
        if not term.id in terms_parsed:
            log.debug("Deleted %s" % term)
            term.delete()

    # Bills

    bill_index = None
    if not options.disable_indexing:
        from bill.search_indexes import BillIndex
        bill_index = BillIndex()

    if options.congress and int(options.congress) <= 42:
        files = glob.glob('data/congress/%s/bills/*/*/*.xml' %
                          options.congress)
        log.info('Parsing unitedstates/congress bills of only congress#%s' %
                 options.congress)
    elif options.congress:
        files = glob.glob('data/us/%s/bills/*.xml' % options.congress)
        log.info('Parsing bills of only congress#%s' % options.congress)
    else:
        files = glob.glob('data/us/*/bills/*.xml')

    if options.filter:
        files = [f for f in files if re.match(options.filter, f)]

    log.info('Processing bills: %d files' % len(files))
    total = len(files)
    progress = Progress(total=total, name='files', step=100)

    bill_processor = BillProcessor()
    seen_bill_ids = []
    for fname in files:
        progress.tick()

        # With indexing or events enabled, if the bill metadata file hasn't changed check
        # the bill's latest text file for changes so we can create a text-is-available
        # event and so we can index the bill's text.
        if (not options.congress or int(options.congress) > 42) and (
                bill_index and not options.disable_events
        ) and not File.objects.is_changed(fname) and not options.force:
            m = re.search(r"/(\d+)/bills/([a-z]+)(\d+)\.xml$", fname)

            try:
                b = Bill.objects.get(congress=m.group(1),
                                     bill_type=BillType.by_xml_code(
                                         m.group(2)),
                                     number=m.group(3))
                seen_bill_ids.append(b.id)

                # Update the index/events for any bill with recently changed text
                textfile = get_bill_text_metadata(b, None)
                if not textfile:
                    if b.congress >= 103 and b.introduced_date < (
                            datetime.now() - timedelta(days=14)).date():
                        print "No bill text?", fname, b.introduced_date
                    continue
                textfile = textfile["text_file"]
                if os.path.exists(textfile) and File.objects.is_changed(
                        textfile):
                    bill_index.update_object(
                        b, using="bill")  # index the full text
                    b.create_events()  # events for new bill text documents
                    File.objects.save_file(textfile)

                continue
            except Bill.DoesNotExist:
                print "Unchanged metadata file but bill doesn't exist:", fname
                pass  # just parse as normal

        if options.slow:
            time.sleep(1)

        tree = etree.parse(fname)
        for node in tree.xpath('/bill'):
            try:
                bill = bill_processor.process(Bill(), node)
            except:
                print fname
                raise

            seen_bill_ids.append(bill.id)  # don't delete me later

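            # Data provenance by congress range: >=93 from THOMAS / the congress
            # project, 82-92 from the Statutes at Large, <=42 from American Memory.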
            if bill.congress >= 93:
                bill.source = "thomas-congproj"
            elif bill.congress >= 82:
                bill.source = "statutesatlarge"
                if bill.current_status == BillStatus.enacted_signed:
                    bill.current_status = BillStatus.enacted_unknown
            elif bill.congress <= 42:
                bill.source = "americanmemory"
            else:
                raise ValueError()

            # So far this is just for American Memory bills.
            if node.xpath("string(source/@url)"):
                bill.source_link = unicode(node.xpath("string(source/@url)"))
            else:
                bill.source_link = None

            actions = []
            for axn in tree.xpath("actions/*[@state]"):
                actions.append((
                    repr(
                        bill_processor.parse_datetime(
                            axn.xpath("string(@datetime)"))),
                    BillStatus.by_xml_code(axn.xpath("string(@state)")),
                    axn.xpath("string(text)"),
                    etree.tostring(axn),
                ))

            bill.sliplawpubpriv = None
            bill.sliplawnum = None
            for axn in tree.xpath("actions/enacted"):
                bill.sliplawpubpriv = "PUB" if axn.get(
                    "type") == "public" else "PRI"
                bill.sliplawnum = int(axn.get("number").split("-")[1])

            bill.major_actions = actions
            try:
                bill.save()
            except:
                print bill
                raise
            if bill_index: bill_index.update_object(bill, using="bill")

            if not options.disable_events:
                bill.create_events()

        File.objects.save_file(fname)

    # delete bill objects that are no longer represented on disk.... this is too dangerous.
    if options.congress and not options.filter:
        # this doesn't work because seen_bill_ids is too big for sqlite!
        for b in Bill.objects.filter(congress=options.congress).exclude(
                id__in=seen_bill_ids):
            print "Bill is no longer on disk: ", b.id, b

    # The rest is for current only...

    if options.congress and int(options.congress) != CURRENT_CONGRESS:
        return

    # Parse docs.house.gov for what might be coming up this week.
    import iso8601
    dhg_html = urllib.urlopen("http://docs.house.gov/floor/").read()
    m = re.search(r"class=\"downloadXML\" href=\"(Download.aspx\?file=.*?)\"",
                  dhg_html)
    if not m:
        log.error(
            'No docs.house.gov download link found at http://docs.house.gov.')
    else:

        def bt_re(bt):
            return re.escape(bt[1]).replace(r"\.", r"\.?\s*")

        try:
            dhg = etree.parse(
                urllib.urlopen("http://docs.house.gov/floor/" +
                               m.group(1))).getroot()
        except:
            print "http://docs.house.gov/floor/" + m.group(1)
            raise
        # iso8601.parse_date(dhg.get("week-date")+"T00:00:00").date()
        for item in dhg.xpath("category/floor-items/floor-item"):
            billname = item.xpath("legis-num")[0].text
            if billname is None: continue  # weird but OK
            m = re.match(
                r"\s*(?:Concur in the Senate Amendment to |Senate Amendment to )?("
                + "|".join(bt_re(bt) for bt in BillType) +
                r")(\d+)\s*(\[Conference Report\]\s*)?$", billname, re.I)
            if not m:
                if not billname.strip().endswith(" __"):
                    log.error(
                        'Could not parse legis-num "%s" in docs.house.gov.' %
                        billname)
            else:
                for bt in BillType:
                    if re.match(bt_re(bt) + "$", m.group(1), re.I):
                        try:
                            bill = Bill.objects.get(congress=CURRENT_CONGRESS,
                                                    bill_type=bt[0],
                                                    number=m.group(2))
                            bill.docs_house_gov_postdate = iso8601.parse_date(
                                item.get("add-date")).replace(tzinfo=None)
                            bill.save()
                            if bill_index:
                                bill_index.update_object(bill, using="bill")

                            if not options.disable_events:
                                bill.create_events()
                        except Bill.DoesNotExist:
                            log.error(
                                'Could not find bill "%s" in docs.house.gov.' %
                                billname)
                        break
                else:
                    log.error(
                        'Could not parse legis-num bill type "%s" in docs.house.gov.'
                        % m.group(1))

    # Parse Senate.gov's "Floor Schedule" blurb for coming up tomorrow.
    now = datetime.now()
    sfs = urllib.urlopen(
        "http://www.senate.gov/pagelayout/legislative/d_three_sections_with_teasers/calendars.htm"
    ).read()
    try:
        sfs = re.search(r"Floor Schedule([\w\W]*)Previous Meeting",
                        sfs).group(1)
        for congress, bill_type, number in re.findall(
                r"http://hdl.loc.gov/loc.uscongress/legislation.(\d+)([a-z]+)(\d+)",
                sfs):
            bill_type = BillType.by_slug(bill_type)
            bill = Bill.objects.get(congress=congress,
                                    bill_type=bill_type,
                                    number=number)
            if bill.senate_floor_schedule_postdate == None or now - bill.senate_floor_schedule_postdate > timedelta(
                    days=7):
                bill.senate_floor_schedule_postdate = now
                bill.save()
                if bill_index: bill_index.update_object(bill, using="bill")
                if not options.disable_events:
                    bill.create_events()
    except Exception as e:
        log.error('Could not parse Senate Floor Schedule: ' + repr(e))
Example #53
0
def main(options):
    """
    Process committees, subcommittees and
    members of current congress committees.
    """

    BASE_PATH = settings.CONGRESS_LEGISLATORS_PATH

    meeting_processor = CommitteeMeetingProcessor()

    log.info('Processing committees')
    COMMITTEES_FILE = BASE_PATH + 'committees-current.yaml'

    if not File.objects.is_changed(COMMITTEES_FILE) and not options.force:
        log.info('File %s was not changed' % COMMITTEES_FILE)
    else:
        tree = yaml_load(COMMITTEES_FILE)
        total = len(tree)
        progress = Progress(total=total)
        seen_committees = set()
        for committee in tree:
            try:
                cobj = Committee.objects.get(code=committee["thomas_id"])
            except Committee.DoesNotExist:
                print "New committee:", committee["thomas_id"]
                cobj = Committee(code=committee["thomas_id"])

            cobj.committee_type = TYPE_MAPPING[committee["type"]]
            cobj.name = committee["name"]
            cobj.url = committee.get("url", None)
            cobj.obsolete = False
            cobj.committee = None
            cobj.jurisdiction = committee.get("jurisdiction")
            cobj.jurisdiction_link = committee.get("jurisdiction_source")
            cobj.save()
            seen_committees.add(cobj.id)

            for subcom in committee.get('subcommittees', []):
                code = committee["thomas_id"] + subcom["thomas_id"]
                try:
                    sobj = Committee.objects.get(code=code)
                except Committee.DoesNotExist:
                    print "New subcommittee:", code
                    sobj = Committee(code=code)

                sobj.name = subcom["name"]
                sobj.url = subcom.get("url", None)
                sobj.type = None
                sobj.committee = cobj
                sobj.obsolete = False
                sobj.save()
                seen_committees.add(sobj.id)

            progress.tick()

        # Check for non-obsolete committees in the database that aren't in our
        # file.
        other_committees = Committee.objects.filter(obsolete=False).exclude(
            id__in=seen_committees)
        if len(other_committees) > 0:
            print "Marking obsolete:", ", ".join(c.code
                                                 for c in other_committees)
            other_committees.update(obsolete=True)

        File.objects.save_file(COMMITTEES_FILE)

    log.info('Processing committee members')
    MEMBERS_FILE = BASE_PATH + 'committee-membership-current.yaml'
    file_changed = File.objects.is_changed(MEMBERS_FILE)

    if not file_changed and not options.force:
        log.info('File %s was not changed' % MEMBERS_FILE)
    else:
        # map THOMAS IDs to GovTrack IDs
        y = yaml_load(BASE_PATH + "legislators-current.yaml")
        person_id_map = {}
        for m in y:
            if "id" in m and "govtrack" in m["id"] and "thomas" in m["id"]:
                person_id_map[m["id"]["thomas"]] = m["id"]["govtrack"]

        # load committee members
        tree = yaml_load(MEMBERS_FILE)
        total = len(tree)
        progress = Progress(total=total, name='committees')

        # We can delete CommitteeMember objects because we don't have
        # any foreign keys to them.
        CommitteeMember.objects.all().delete()

        # Process committee nodes
        for committee, members in tree.items():
            try:
                cobj = Committee.objects.get(code=committee)
            except Committee.DoesNotExist:
                print "Committee not found:", committee
                continue

            # Process members of current committee node
            for member in members:
                mobj = CommitteeMember()
                mobj.person = Person.objects.get(
                    id=person_id_map[member["thomas"]])
                mobj.committee = cobj
                if "title" in member:
                    mobj.role = ROLE_MAPPING[member["title"]]
                mobj.save()

            progress.tick()

        File.objects.save_file(MEMBERS_FILE)

    log.info('Processing committee schedule')
    for chamber in ("house", "senate"):
        meetings_file = 'data/congress/committee_meetings_%s.json' % chamber
        file_changed = File.objects.is_changed(meetings_file)

        if not file_changed and not options.force:
            log.info('File %s was not changed' % meetings_file)
        else:
            meetings = json.load(open(meetings_file))

            # Process committee event nodes
            for meeting in meetings:
                try:
                    # Associate it with an existing meeting object if GUID is already known.
                    # Must get it like this, vs just assigning the ID as we do in other parsers,
                    # because of the auto_now_add created field, which otherwise misbehaves.
                    try:
                        mobj = CommitteeMeeting.objects.get(
                            guid=meeting['guid'])
                    except CommitteeMeeting.DoesNotExist:
                        mobj = CommitteeMeeting()

                    # Parse.
                    mobj = meeting_processor.process(mobj, meeting)

                    # Attach the meeting to the subcommittee if set.
                    if mobj.subcommittee:
                        mobj.committee = Committee.objects.get(
                            code=mobj.committee.code + mobj.subcommittee)

                    mobj.save()

                    mobj.bills.clear()
                    for bill in meeting["bill_ids"]:
                        try:
                            bill_type, bill_num, bill_cong = re.match(
                                r"([a-z]+)(\d+)-(\d+)$", bill).groups()
                            bill = Bill.objects.get(
                                congress=bill_cong,
                                bill_type=BillType.by_slug(bill_type),
                                number=int(bill_num))
                            mobj.bills.add(bill)
                        except AttributeError:
                            pass  # regex failed
                        except common.enum.NotFound:
                            pass  # invalid bill type code in source data
                        except Bill.DoesNotExist:
                            pass  # we don't know about bill yet
                except Committee.DoesNotExist:
                    log.error(
                        'Could not load Committee object for meeting %s' %
                        meeting_processor.display_node(meeting))

            for committee in Committee.objects.all():
                if not options.disable_events:
                    committee.create_events()

            File.objects.save_file(meetings_file)
#!script

import os.path
from bill.models import Bill, BillType

all_bill_ids = list(Bill.objects.all().values_list('id', flat=True))


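# Yield successive n-sized slices of a sequence so the bills below can be fetched in batches.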
def batch(iterable, n=1):
    l = len(iterable)
    for ndx in range(0, l, n):
        yield iterable[ndx:min(ndx + n, l)]


for idset in batch(all_bill_ids, n=2000):
    print("...")
    for bill in Bill.objects.only('congress', 'bill_type',
                                  'number').in_bulk(idset).values():
        fn = "data/congress/%s/bills/%s/%s%d/data.json" % (
            bill.congress, BillType.by_value(bill.bill_type).slug,
            BillType.by_value(bill.bill_type).slug, bill.number)
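        # e.g. data/congress/113/bills/hr/hr1234/data.json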

        if not os.path.exists(fn):
            print(bill.id, bill)
Example #55
0
def main(options):
    """
    Process committees, subcommittees and
    members of current congress committees.
    """

    BASE_PATH = settings.CONGRESS_LEGISLATORS_PATH
    
    meeting_processor = CommitteeMeetingProcessor()

    log.info('Processing committees')
    COMMITTEES_FILE = BASE_PATH + 'committees-current.yaml'

    if not File.objects.is_changed(COMMITTEES_FILE) and not options.force:
        log.info('File %s was not changed' % COMMITTEES_FILE)
    else:
        tree = yaml_load(COMMITTEES_FILE)
        total = len(tree)
        progress = Progress(total=total)
        seen_committees = set()
        for committee in tree:
            try:
                cobj = Committee.objects.get(code=committee["thomas_id"])
            except Committee.DoesNotExist:
                print "New committee:", committee["thomas_id"]
                cobj = Committee(code=committee["thomas_id"])
               
            cobj.committee_type = TYPE_MAPPING[committee["type"]]
            cobj.name = committee["name"]
            cobj.url = committee.get("url", None)
            cobj.obsolete = False
            cobj.committee = None
            cobj.save()
            seen_committees.add(cobj.id)

            for subcom in committee.get('subcommittees', []):
                code = committee["thomas_id"] + subcom["thomas_id"]
                try:
                    sobj = Committee.objects.get(code=code)
                except Committee.DoesNotExist:
                    print "New subcommittee:", code
                    sobj = Committee(code=code)
                
                sobj.name = subcom["name"]
                sobj.url = subcom.get("url", None)
                sobj.type = None
                sobj.committee = cobj
                sobj.obsolete = False
                sobj.save()
                seen_committees.add(sobj.id)
                
            progress.tick()
            
        # Check for non-obsolete committees in the database that aren't in our
        # file.
        other_committees = Committee.objects.filter(obsolete=False).exclude(id__in=seen_committees)
        if len(other_committees) > 0:
            print "Marking obsolete:", ", ".join(c.code for c in other_committees)
            other_committees.update(obsolete=True)

        File.objects.save_file(COMMITTEES_FILE)
        
    log.info('Processing committee members')
    MEMBERS_FILE = BASE_PATH + 'committee-membership-current.yaml'
    file_changed = File.objects.is_changed(MEMBERS_FILE)

    if not file_changed and not options.force:
        log.info('File %s was not changed' % MEMBERS_FILE)
    else:
        # map THOMAS IDs to GovTrack IDs
        y = yaml_load(BASE_PATH + "legislators-current.yaml")
        person_id_map = { }
        for m in y:
            if "id" in m and "govtrack" in m["id"] and "thomas" in m["id"]:
                person_id_map[m["id"]["thomas"]] = m["id"]["govtrack"]
        
        # load committee members
        tree = yaml_load(MEMBERS_FILE)
        total = len(tree)
        progress = Progress(total=total, name='committees')
        
        # We can delete CommitteeMember objects because we don't have
        # any foreign keys to them.
        CommitteeMember.objects.all().delete()

        # Process committee nodes
        for committee, members in tree.items():
            if committee[0] == "H": continue # House data is out of date
            
            try:
                cobj = Committee.objects.get(code=committee)
            except Committee.DoesNotExist:
                print "Committee not found:", committee
                continue

            # Process members of current committee node
            for member in members:
                mobj = CommitteeMember()
                mobj.person = Person.objects.get(id=person_id_map[member["thomas"]])
                mobj.committee = cobj
                if "title" in member:
                    mobj.role = ROLE_MAPPING[member["title"]]
                mobj.save()
            
            progress.tick()

        File.objects.save_file(MEMBERS_FILE)
        
    return
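    # Nothing below this early return executes; the legacy XML committee-schedule
    # import that follows is effectively disabled.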

    log.info('Processing committee schedule')
    SCHEDULE_FILE = 'data/us/112/committeeschedule.xml'
    file_changed = File.objects.is_changed(SCHEDULE_FILE)

    if not file_changed and not options.force:
        log.info('File %s was not changed' % SCHEDULE_FILE)
    else:
        tree = etree.parse(SCHEDULE_FILE)
        
        # We have to clear out all CommitteeMeeting objects when we refresh because
        # we have no unique identifier in the upstream data for a meeting. We might use
        # the meeting's committee & date as an identifier, but since meeting times can
        # change this might have awkward consequences for the end user if we even
        # attempted to track that.

        CommitteeMeeting.objects.all().delete()

        # Process committee event nodes
        for meeting in tree.xpath('/committee-schedule/meeting'):
            try:
                mobj = meeting_processor.process(CommitteeMeeting(), meeting)
                mobj.save()
                
                mobj.bills.clear()
                for bill in meeting.xpath('bill'):
                    bill = Bill.objects.get(congress=bill.get("session"), bill_type=BillType.by_xml_code(bill.get("type")), number=int(bill.get("number")))
                    mobj.bills.add(bill)
            except Committee.DoesNotExist:
                log.error('Could not load Committee object for meeting %s' % meeting_processor.display_node(meeting))

        for committee in Committee.objects.all():
            if not options.disable_events:
                committee.create_events()
            
        File.objects.save_file(SCHEDULE_FILE)
Example #56
0
def main(options):
    """
    Parse rolls.
    """

    # Setup XML processors
    vote_processor = VoteProcessor()
    option_processor = VoteOptionProcessor()
    voter_processor = VoterProcessor()
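    # Preload every Person into the voter processor's cache so individual voters
    # can be resolved without a per-row database query.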
    voter_processor.PERSON_CACHE = dict(
        (x.pk, x) for x in Person.objects.all())

    # The pattern which the roll file matches
    # Filename contains info which should be placed to DB
    # along with info extracted from the XML file
    re_path = re.compile(r'data/us/(\d+)/rolls/([hs])(\w+)-(\d+)\.xml')

    chamber_mapping = {'s': CongressChamber.senate, 'h': CongressChamber.house}

    if options.filter:
        files = glob.glob(options.filter)
        log.info('Parsing rolls matching %s' % options.filter)
    elif options.congress:
        files = glob.glob('data/us/%s/rolls/*.xml' % options.congress)
        log.info('Parsing rolls of only congress#%s' % options.congress)
    else:
        files = glob.glob('data/us/*/rolls/*.xml')
    log.info('Processing votes: %d files' % len(files))
    total = len(files)
    progress = Progress(total=total, name='files', step=10)

    def log_delete_qs(qs):
        if qs.count() == 0: return
        print "Deleting obsoleted records: ", qs
        #if qs.count() > 3:
        #    print "Delete skipped..."
        #    return
        qs.delete()

    seen_obj_ids = set()
    had_error = False

    for fname in files:
        progress.tick()

        match = re_path.search(fname)

        try:
            existing_vote = Vote.objects.get(
                congress=match.group(1),
                chamber=chamber_mapping[match.group(2)],
                session=match.group(3),
                number=match.group(4))
        except Vote.DoesNotExist:
            existing_vote = None

        if not File.objects.is_changed(fname) and not options.force \
                and existing_vote is not None and not existing_vote.missing_data:
            seen_obj_ids.add(existing_vote.id)
            continue

        try:
            tree = etree.parse(fname)

            ## Look for votes with VP tie breakers.
            #if len(tree.xpath("/roll/voter[@VP='1']")) == 0:
            #    had_error = True # prevent delete at the end
            #    continue

            # Process the roll node
            roll_node = tree.xpath('/roll')[0]

            # Sqlite is much faster when lots of saves are wrapped in a transaction,
            # and we do a lot of saves because it's a lot of voters.
            from django.db import transaction
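            # (transaction.atomic can also be applied as a decorator; the
            # context-manager form is used here so that only this one vote's
            # saves are batched into a single transaction.)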
            with transaction.atomic():

                vote = vote_processor.process(Vote(), roll_node)
                if existing_vote: vote.id = existing_vote.id
                match = re_path.search(fname)
                vote.congress = int(match.group(1))
                vote.chamber = chamber_mapping[match.group(2)]
                vote.session = match.group(3)
                vote.number = int(match.group(4))

                # Get related bill & amendment.

                for bill_node in roll_node.xpath("bill"):
                    related_bill_num = bill_node.get("number")
                    if 9 <= vote.congress <= 42 and vote.session in ('1', '2'):
                        # Bill numbering from the American Memory collection is different. The number
                        # combines the session, the bill number, and a trailing 0 or 5 for regular or
                        # 'half' numbering. Prior to the 9th Congress, numbering seems to be wholly
                        # assigned by us and not related to actual numbering, so we skip matching those bills.
                        related_bill_num = "%d%04d%d" % (
                            int(vote.session), int(bill_node.get("number")), 0)
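                        # Worked example (values illustrative): session "1", bill 123
                        # becomes "%d%04d%d" % (1, 123, 0) -> "101230".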
                    try:
                        vote.related_bill = Bill.objects.get(
                            congress=bill_node.get("session"),
                            bill_type=BillType.by_xml_code(
                                bill_node.get("type")),
                            number=related_bill_num)
                    except Bill.DoesNotExist:
                        if vote.congress >= 93:
                            vote.missing_data = True

                for amdt_node in roll_node.xpath("amendment"):
                    if amdt_node.get("ref") == "regular" and vote.related_bill is not None:
                        try:
                            vote.related_amendment = Amendment.objects.get(
                                congress=vote.related_bill.congress,
                                amendment_type=AmendmentType.by_slug(
                                    amdt_node.get("number")[0]),
                                number=amdt_node.get("number")[1:])
                        except Amendment.DoesNotExist:
                            if vote.congress >= 93:
                                print "Missing amendment", fname
                                vote.missing_data = True
                    elif amdt_node.get("ref") == "bill-serial":
                        # It is impossible to associate House votes with amendments just from the House
                        # vote XML because the amendment-num might correspond either with the A___ number
                        # or with the "An amendment, numbered ____" number from the amendment purpose,
                        # and there's really no way to figure out which. Maybe we can use the amendment
                        # sponsor instead?
                        #vote.related_amendment = Amendment.objects.get(bill=vote.related_bill, sequence=amdt_node.get("number"))
                        # Instead, we set related_amendment from the amendment parser. Here, we have to
                        # preserve the related_amendment if it is set.
                        if existing_vote:
                            vote.related_amendment = existing_vote.related_amendment

                # clean up some question text and use the question_details field

                if vote.category in (
                        VoteCategory.passage, VoteCategory.passage_suspension,
                        VoteCategory.veto_override) and vote.related_bill:
                    # For passage votes, set the question to the bill title and put the question
                    # details in the details field.
                    vote.question = vote.related_bill.title
                    vote.question_details = vote.vote_type + " in the " + vote.get_chamber_display()

                elif vote.category == VoteCategory.amendment and vote.related_amendment:
                    # For votes on amendments, make a better title/explanation.
                    vote.question = vote.related_amendment.title
                    vote.question_details = vote.vote_type + " in the " + vote.get_chamber_display()

                elif vote.related_bill and vote.question.startswith(
                        "On the Cloture Motion " +
                        vote.related_bill.display_number):
                    vote.question = "Cloture on " + vote.related_bill.title
                elif vote.related_bill and vote.question.startswith(
                        "On Cloture on the Motion to Proceed " +
                        vote.related_bill.display_number):
                    vote.question = "Cloture on " + vote.related_bill.title
                    vote.question_details = "On Cloture on the Motion to Proceed in the " + vote.get_chamber_display()
                elif vote.related_bill and vote.question.startswith(
                        "On the Motion to Proceed " +
                        vote.related_bill.display_number):
                    vote.question = "Motion to Proceed on " + vote.related_bill.title

                elif vote.related_amendment and vote.question.startswith(
                        "On the Cloture Motion " +
                        vote.related_amendment.get_amendment_type_display() +
                        " " + str(vote.related_amendment.number)):
                    vote.question = "Cloture on " + vote.related_amendment.title
                    vote.question_details = vote.vote_type + " in the " + vote.get_chamber_display()

                # weird House formatting of bill numbers ("H RES 123 Blah blah")
                if vote.related_bill:
                    vote.question = re.sub(
                        "(On [^:]+): " +
                        vote.related_bill.display_number.replace(
                            ". ", " ").replace(".", " ").upper() + " .*",
                        r"\1: " + truncatewords(vote.related_bill.title, 15),
                        vote.question)
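                    # Illustrative only: a question like
                    #   "On Passage: H RES 123 Establishing the Select Committee on ..."
                    # becomes "On Passage: " followed by the first 15 words of the bill's
                    # title (truncatewords here is presumably Django's template filter).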

                vote.save()

                seen_obj_ids.add(vote.id)  # don't delete me later

                # Process roll options, overwrite existing options where possible.
                seen_option_ids = set()
                roll_options = {}
                for option_node in roll_node.xpath('./option'):
                    option = option_processor.process(VoteOption(),
                                                      option_node)
                    option.vote = vote
                    if existing_vote:
                        try:
                            option.id = VoteOption.objects.filter(
                                vote=vote, key=option.key)[0].id  # .get() would be better, but we hit a database corruption problem
                        except IndexError:
                            pass
                    option.save()
                    roll_options[option.key] = option
                    seen_option_ids.add(option.id)
                log_delete_qs(
                    VoteOption.objects.filter(vote=vote).exclude(
                        id__in=seen_option_ids)
                )  # may cascade and delete the Voters too?

                # Process roll voters, overwriting existing voters where possible.
                if existing_vote:
                    existing_voters = dict(
                        Voter.objects.filter(vote=vote).values_list(
                            "person", "id"))
                seen_voter_ids = set()
                voters = list()
                for voter_node in roll_node.xpath('./voter'):
                    voter = voter_processor.process(roll_options, Voter(),
                                                    voter_node)
                    voter.vote = vote
                    voter.created = vote.created

                    # for VP votes, load the actual person & role...
                    if voter.voter_type == VoterType.vice_president:
                        try:
                            r = PersonRole.objects.get(
                                role_type=RoleType.vicepresident,
                                startdate__lte=vote.created,
                                enddate__gte=vote.created)
                            voter.person_role = r
                            voter.person = r.person
                        except PersonRole.DoesNotExist:
                            # overlapping roles? missing data?
                            log.error(
                                'Could not resolve vice president in %s' %
                                fname)

                    if existing_vote and voter.person:
                        try:
                            voter.id = existing_voters[voter.person.id]
                        except KeyError:
                            pass

                    voters.append(voter)

                    if voter.voter_type == VoterType.unknown and not vote.missing_data:
                        vote.missing_data = True
                        vote.save()

                # pre-fetch the role of each voter
                load_roles_at_date(
                    [x.person for x in voters if x.person != None],
                    vote.created, vote.congress)
                for voter in list(voters):
                    if voter.voter_type != VoterType.vice_president:
                        voter.person_role = voter.person.role
                    # If we couldn't match a role for this person on the date of the vote, and if the voter was Not Voting,
                    # and we're looking at historical data, then this is probably a data error --- the voter wasn't even in office.
                    if voter.person_role is None:
                        if vote.source == VoteSource.keithpoole and voter.option.key == "0":
                            # Drop this record.
                            voters.remove(voter)
                        else:
                            log.error("%s: Could not find role for %s on %s." %
                                      (fname, voter.person, vote.created))
                            vote.missing_data = True
                            vote.save()

                # save all of the records (inserting/updating)
                for voter in voters:
                    voter.save()
                    seen_voter_ids.add(voter.id)

                # remove obsolete voter records
                log_delete_qs(
                    Voter.objects.filter(vote=vote).exclude(
                        id__in=seen_voter_ids)
                )  # possibly already deleted by cascade above

                # pre-calculate totals
                vote.calculate_totals()

                if not options.disable_events:
                    vote.create_event()

            File.objects.save_file(fname)

        except Exception as ex:
            log.error('Error in processing %s' % fname, exc_info=ex)
            had_error = True