def fetch_grouped_history(part):
    client = api_reader.ApiReader()
    versions = [v for v in client.regversions(part)['versions']
                if 'by_date' in v]
    for version in versions:
        version['notices'] = []
    versions = sorted(convert_to_python(versions), reverse=True,
                      key=lambda v: v['by_date'])

    today = datetime.today()
    seen_current = False
    for version in versions:
        if version['by_date'] > today:
            version['timeline'] = 'future'
        elif not seen_current:
            seen_current = True
            version['timeline'] = 'current'
        else:
            version['timeline'] = 'past'

    for notice in client.notices(part)['results']:
        notice = convert_to_python(notice)
        for v in (v for v in versions
                  if v['by_date'] == notice['effective_on']):
            # Skip notices whose fr_url is already present in this version
            if not any(n['fr_url'] == notice['fr_url'] for n in v['notices']):
                v['notices'].append(notice)

    for version in versions:
        version['notices'] = sorted(version['notices'], reverse=True,
                                    key=lambda n: n['publication_date'])
    return versions
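# Illustrative shape of the value fetch_grouped_history returns, as implied
# by the code above (dates and URLs here are made-up examples, not real
# data):
#
# [
#     {'by_date': datetime(2020, 1, 1), 'timeline': 'future', 'notices': []},
#     {'by_date': datetime(2014, 1, 1), 'timeline': 'current',
#      'notices': [{'fr_url': 'https://example.com/fr/1',
#                   'publication_date': datetime(2013, 6, 1)}]},
#     {'by_date': datetime(2012, 1, 1), 'timeline': 'past', 'notices': []},
# ]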
def fetch_grouped_history(part):
    client = api_reader.ApiReader()
    versions = [v for v in client.regversions(part)['versions']
                if 'by_date' in v]
    for version in versions:
        version['notices'] = []
    versions = sorted(convert_to_python(versions), reverse=True,
                      key=lambda v: v['by_date'])

    today = datetime.today()
    seen_current = False
    for version in versions:
        if version['by_date'] > today:
            version['timeline'] = 'future'
        elif not seen_current:
            seen_current = True
            version['timeline'] = 'current'
        else:
            version['timeline'] = 'past'

    for notice in client.notices(part)['results']:
        notice = convert_to_python(notice)
        for v in (v for v in versions
                  if v['by_date'] == notice['effective_on']):
            v['notices'].append(notice)

    for version in versions:
        version['notices'] = sorted(version['notices'], reverse=True,
                                    key=lambda n: n['publication_date'])
    return versions
def test_further_analyses(self, api_reader):
    doc1 = {'publication_date': '2009-04-05', 'fr_volume': 21,
            'fr_page': 98989, 'reference': ['doc1', '1212-31']}
    doc2 = {'publication_date': '2010-03-03', 'fr_volume': 22,
            'fr_page': 87655, 'reference': ['doc2', '1212-31']}
    doc3 = {'publication_date': '2010-10-12', 'fr_volume': 22,
            'fr_page': 90123, 'reference': ['doc3', '1212-31']}
    doc4 = {'publication_date': '2009-03-07', 'fr_volume': 21,
            'fr_page': 98888, 'reference': ['doc4', '1212-31-b']}
    api_reader.ApiReader.return_value.layer.return_value = {
        '1212-31': [doc1, doc2, doc3],
        '1212-31-b': [doc4]
    }
    psv = ParagraphSXSView()
    self.assertEqual(psv.further_analyses('1212-31', 'doc1', 'v1'),
                     convert_to_python([doc3, doc2]))
    self.assertEqual(psv.further_analyses('1212-31', 'doc5', 'v1'),
                     convert_to_python([doc3, doc2, doc1]))
    self.assertEqual(psv.further_analyses('1212-31', 'doc3', 'v1'),
                     convert_to_python([doc2, doc1]))
    self.assertEqual(psv.further_analyses('1212-31-b', 'doc3', 'v1'),
                     convert_to_python([doc4]))
    self.assertEqual(psv.further_analyses('1212-31-b', 'doc4', 'v1'), [])
    self.assertEqual(psv.further_analyses('1212-31-c', 'doc1', 'v1'), [])
def fetch_grouped_history(part):
    client = api_reader.ApiReader()
    versions = [
        version for version in client.regversions(part)['versions']
        if 'by_date' in version
    ]
    for version in versions:
        version['notices'] = []
    versions = sorted(convert_to_python(versions), reverse=True,
                      key=lambda v: v['by_date'])

    today = datetime.today()
    seen_present = False
    for version in versions:
        if version['by_date'] > today:
            version['timeline'] = Timeline.future
        elif not seen_present:
            seen_present = True
            version['timeline'] = Timeline.present
        else:
            version['timeline'] = Timeline.past

    for notice in client.notices(part)['results']:
        notice = convert_to_python(notice)
        for version in versions:
            if version['by_date'] == notice.get('effective_on'):
                version['notices'].append(notice)

    for version in versions:
        version['notices'] = sorted(version['notices'], reverse=True,
                                    key=lambda n: n['publication_date'])
    return versions
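# `Timeline` is not defined in this excerpt. A minimal sketch of what the
# enum-based variant above appears to assume -- members named future,
# present, and past (the exact definition and backing values are an
# assumption):
from enum import Enum


class Timeline(Enum):
    future = 'future'
    present = 'present'
    past = 'past'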
def notice_data(doc_number):
    preamble = ApiReader().preamble(doc_number.replace('-', '_'))
    if preamble is None:
        raise Http404

    notice = ApiReader().notice(doc_number.replace('_', '-')) or {}
    fields = ("amendments", "cfr_parts", "cfr_title", "comment_doc_id",
              "comments_close", "dockets", "document_number", "effective_on",
              "footnotes", "fr_citation", "fr_url", "fr_volume", "meta",
              "primary_agency", "primary_docket", "publication_date",
              "regulation_id_numbers", "section_by_section",
              "supporting_documents", "title", "versions")
    meta = {}
    for field in fields:
        if field in notice:
            meta[field] = convert_to_python(deepcopy(notice[field]))

    # If there's no metadata, fall back to getting it from settings:
    if not meta:
        meta = getattr(settings, 'PREAMBLE_INTRO', {}).get(
            doc_number, {}).get('meta', {})
        meta = convert_to_python(deepcopy(meta))

    today = date.today()
    if 'comments_close' in meta and 'publication_date' in meta:
        close_date = meta['comments_close'].date()
        publish_date = meta['publication_date'].date()
        if today < publish_date:
            meta['comment_state'] = CommentState.PREPUB
        elif today <= close_date:
            meta['comment_state'] = CommentState.OPEN
            meta['days_remaining'] = 1 + (close_date - today).days
        else:
            meta['comment_state'] = CommentState.CLOSED
    else:
        meta['comment_state'] = CommentState.NO_COMMENT

    # We don't pass along cfr_ref information in a super useful format, yet.
    # Construct one here:
    if 'cfr_refs' not in meta and 'cfr_title' in meta and 'cfr_parts' in meta:
        meta['cfr_refs'] = [{"title": meta['cfr_title'],
                             "parts": meta['cfr_parts']}]

    return preamble, meta, notice
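# `CommentState` is likewise not defined in this excerpt. A minimal sketch
# of the states notice_data relies on; the member names come from the usage
# above, the backing values are an assumption:
from enum import Enum


class CommentState(Enum):
    NO_COMMENT = 0
    PREPUB = 1
    OPEN = 2
    CLOSED = 3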
def get_context_data(self, **kwargs):
    context = super(ParagraphSXSView, self).get_context_data(**kwargs)

    label_id = context['label_id']
    notice_id = context['notice_id']
    fr_page = context.get('fr_page')

    notice = generator.get_notice(notice_id)
    if not notice:
        raise error_handling.MissingContentException()
    notice = convert_to_python(notice)

    paragraph_sxs = generator.get_sxs(label_id, notice, fr_page)
    if paragraph_sxs is None:
        raise error_handling.MissingContentException()

    notices.add_depths(paragraph_sxs, 3)
    paragraph_sxs['children'] = \
        notices.filter_labeled_children(paragraph_sxs)
    self.footnote_refs(paragraph_sxs)

    context['sxs'] = paragraph_sxs
    # Template assumes a single label
    context['sxs']['label'] = context['label_id']
    context['sxs']['header'] = label_to_text(label_id.split('-'),
                                             include_marker=True)
    context['sxs']['all_footnotes'] = self.footnotes(notice, paragraph_sxs)
    context['notice'] = notice
    context['further_analyses'] = self.further_analyses(
        label_id, notice_id, paragraph_sxs['page'], context['version'])
    return context
def common_context(doc_number):
    """All of the "preamble" views share common context, such as preamble
    data, toc info, etc. This function retrieves that data and returns the
    results as a dict. This may raise a 404"""
    preamble = ApiReader().preamble(doc_number)
    if preamble is None:
        raise Http404

    # @todo - right now we're shimming in fake data; eventually this data
    # should come from the API
    intro = getattr(settings, 'PREAMBLE_INTRO', {}).get(doc_number, {})
    intro = deepcopy(intro)
    if intro.get('tree'):
        preamble['children'].insert(0, intro['tree'])
    intro['meta'] = convert_to_python(intro.get('meta', {}))
    if 'comments_close' in intro['meta']:
        intro['meta']['days_remaining'] = 1 + (
            intro['meta']['comments_close'].date() - date.today()).days

    return {
        'cfr_change_toc': CFRChangeToC.for_doc_number(doc_number),
        'doc_number': doc_number,
        'meta': intro['meta'],
        'preamble': preamble,
        'preamble_toc': make_preamble_toc(preamble['children']),
    }
def test_further_analyses(self, api_reader):
    doc1 = {'publication_date': '2009-04-05', 'fr_volume': 21,
            'fr_page': 98989, 'reference': ['doc1', '1212-31']}
    doc2 = {'publication_date': '2010-03-03', 'fr_volume': 22,
            'fr_page': 87655, 'reference': ['doc2', '1212-31']}
    doc3 = {'publication_date': '2010-10-12', 'fr_volume': 22,
            'fr_page': 90123, 'reference': ['doc3', '1212-31']}
    doc4 = {'publication_date': '2009-03-07', 'fr_volume': 21,
            'fr_page': 98888, 'reference': ['doc4', '1212-31-b']}
    api_reader.ApiReader.return_value.layer.return_value = {
        '1212-31': [doc1, doc2, doc3],
        '1212-31-b': [doc4]
    }
    psv = ParagraphSXSView()
    self.assertEqual(psv.further_analyses('1212-31', 'doc1', 'v1', 98989),
                     convert_to_python([doc3, doc2]))
    self.assertEqual(psv.further_analyses('1212-31', 'doc5', 'v1', 0),
                     convert_to_python([doc3, doc2, doc1]))
    self.assertEqual(psv.further_analyses('1212-31', 'doc3', 'v1', 90123),
                     convert_to_python([doc2, doc1]))
    self.assertEqual(psv.further_analyses('1212-31-b', 'doc3', 'v1', 90123),
                     convert_to_python([doc4]))
    self.assertEqual(psv.further_analyses('1212-31-b', 'doc4', 'v1', 98888),
                     [])
    self.assertEqual(psv.further_analyses('1212-31-c', 'doc1', 'v1', 98989),
                     [])

    # Same notice + label. Different page
    doc5 = {'publication_date': '2009-04-05', 'fr_volume': 21,
            'fr_page': 10101, 'reference': ['doc1', '1212-31']}
    api_reader.ApiReader.return_value.layer.return_value = {
        '1212-31': [doc1, doc2, doc3, doc5],
        '1212-31-b': [doc4]
    }
    self.assertEqual(psv.further_analyses('1212-31', 'doc1', 'v1', 98989),
                     convert_to_python([doc5, doc3, doc2]))
    self.assertEqual(psv.further_analyses('1212-31', 'doc1', 'v1', 10101),
                     convert_to_python([doc3, doc2, doc1]))
    self.assertEqual(psv.further_analyses('1212-31', 'doc3', 'v1', 90123),
                     convert_to_python([doc5, doc2, doc1]))
def further_analyses(self, label_id, notice_id, version):
    """Grab other analyses for this same paragraph (limited to those
    visible from this regulation version), in descending order"""
    sxs_layer_data = api_reader.ApiReader().layer('analyses', label_id,
                                                  version)
    if label_id not in sxs_layer_data:
        return []
    else:
        return [convert_to_python(a)
                for a in reversed(sxs_layer_data[label_id])
                if a['reference'] != [notice_id, label_id]]
def check_version(label_id, version):
    """Check whether this version of the regulation exists and the user is
    merely referencing a section that does not. Returns the matching
    version's metadata, or None (implicitly) when the version is
    unknown."""
    reg_part = label_id.split('-')[0]
    client = api_reader.ApiReader()
    vr = client.regversions(reg_part)
    requested_version = [v for v in vr['versions']
                         if v['version'] == version]
    if len(requested_version) > 0:
        requested_version = convert_to_python(requested_version)
        return requested_version[0]
def further_analyses(self, label_id, notice_id, fr_page, version):
    """Grab other analyses for this same paragraph (limited to those
    visible from this regulation version), in descending order"""
    sxs_layer_data = api_reader.ApiReader().layer(
        'analyses', 'cfr', label_id, version)
    if label_id not in sxs_layer_data:
        return []
    else:
        return [convert_to_python(a)
                for a in reversed(sxs_layer_data[label_id])
                if (a['reference'] != [notice_id, label_id]
                    or a['fr_page'] != fr_page)]
def regulation_meta(cfr_part, version):
    """Return the contents of the meta layer, without using a tree."""
    layer_data = api_reader.ApiReader().layer('meta', 'cfr', cfr_part,
                                              version)
    if layer_data is None:
        logger.warning("404 when fetching Meta for %s@%s",
                       cfr_part, version)
        layer_data = {}
    if not layer_data.get(cfr_part):
        logger.warning("Empty meta data for %s@%s. Misparsed?",
                       cfr_part, version)

    meta = {}
    for data in layer_data.get(cfr_part, []):
        meta.update(data)

    return convert_to_python(meta)
def fetch_regulations_and_future_versions():
    """Returns a dict for all the regulations in the API. The dict
    includes lists of future versions for each regulation."""
    client = api_reader.ApiReader()
    all_versions = client.all_regulations_versions()
    all_versions = convert_to_python(all_versions)

    regulations_future = {}
    # We're only interested in future endpoint versions
    for v in all_versions['versions']:
        if v['regulation'] not in regulations_future:
            regulations_future[v['regulation']] = []
        if 'by_date' in v:
            regulations_future[v['regulation']].append(v)

    return regulations_future
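# Illustrative shape of the dict fetch_regulations_and_future_versions
# returns (regulation ids, versions, and dates are made-up examples):
#
# {
#     '1005': [{'regulation': '1005', 'version': 'v2',
#               'by_date': datetime(2021, 3, 1)}],
#     '1010': [],   # present in the API, but no versions carry a by_date
# }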
def test_convert_to_python(self):
    self.assertEqual("example", utils.convert_to_python("example"))
    self.assertEqual(1, utils.convert_to_python(1))
    self.assertEqual((1, 2.0, 8), utils.convert_to_python((1, 2.0, 8)))
    self.assertEqual(datetime(2001, 10, 11),
                     utils.convert_to_python('2001-10-11'))
    self.assertEqual(["test", "20020304", datetime(2008, 7, 20)],
                     utils.convert_to_python(['test', '20020304',
                                              '2008-07-20']))
    self.assertEqual({'some': 3, 'then': datetime(1999, 10, 21)},
                     utils.convert_to_python({'some': 3,
                                              'then': '1999-10-21'}))
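# convert_to_python itself is not shown in this excerpt. A minimal sketch
# consistent with the test above: strings of the form YYYY-MM-DD become
# datetimes, lists/tuples/dicts are converted recursively, and everything
# else passes through unchanged. The real implementation may differ.
import re
from datetime import datetime

ISO_DATE = re.compile(r'^\d{4}-\d{2}-\d{2}$')


def convert_to_python(value):
    """Recursively convert ISO-8601 date strings into datetimes."""
    if isinstance(value, str) and ISO_DATE.match(value):
        return datetime.strptime(value, '%Y-%m-%d')
    if isinstance(value, list):
        return [convert_to_python(v) for v in value]
    if isinstance(value, tuple):
        return tuple(convert_to_python(v) for v in value)
    if isinstance(value, dict):
        return dict((k, convert_to_python(v)) for k, v in value.items())
    return value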
def __init__(self, layer_data):
    self.layer_data = convert_to_python(layer_data)