def test_get_stream_data_revision(self):
    """get_stream_data should read stream_data from a revision's page object."""
    data = get_stream_data(self.revision.as_page_object(), 'body')
    first_block = data[0]
    self.assertEqual(first_block['type'], 'text')
    self.assertEqual(first_block['value'], 'some text')
def migrate_streamfield_forward(page_or_revision, streamfield_name, new_type):
    """Convert image/text group blocks on a StreamField to *new_type* blocks.

    Reads the named StreamField from the page or revision, rewrites any
    image_text group blocks via image_text_group_to_info_unit_group, and
    writes the field back only if at least one block was converted.
    """
    # Maps each convertible block type to its target layout string.
    layout_by_type = {
        'image_text_25_75_group': '25-75',
        'image_text_50_50_group': '50-50',
    }
    stream_data = get_stream_data(page_or_revision, streamfield_name)
    converted = []
    any_converted = False
    for block in stream_data:
        layout = layout_by_type.get(block['type'])
        if layout is None:
            converted.append(block)
            continue
        any_converted = True
        converted.append({
            'type': new_type,
            'value': image_text_group_to_info_unit_group(
                block['value'], layout
            )
        })
    # Avoid a needless write (and revision churn) when nothing changed.
    if any_converted:
        set_stream_data(
            page_or_revision,
            streamfield_name,
            converted
        )
def test_get_stream_data_revision(self):
    """get_stream_data should read stream_data directly from a revision."""
    data = get_stream_data(self.revision, 'body')
    self.assertEqual(data[0]['type'], 'text')
    self.assertEqual(data[0]['value'], 'some text')
def update_sidefoot():
    """Rename the 'File number' sidefoot heading to 'Docket number' on live
    enforcement-action pages, skipping (and reporting) pages with drafts.
    """
    draft_pages = []
    for page in DocumentDetailPage.objects.all():
        url = page.get_url()
        if not page.live:
            continue
        if 'policy-compliance/enforcement/actions' not in url:
            continue
        if page.has_unpublished_changes:
            # Don't clobber in-progress edits; collect the URL to report later.
            draft_pages.append(url)
            continue
        stream_data = get_stream_data(page, 'sidefoot')
        for field in stream_data:
            if field['type'] == 'related_metadata':
                for block in field['value']['content']:
                    # Switch "File number" to "Docket number"
                    if block['value'].get('heading', '') == 'File number':
                        block['value']['heading'] = 'Docket number'
                        break
        set_stream_data(page.specific, 'sidefoot', stream_data)
    if draft_pages:
        print('Skipped the following draft pages:', ' '.join(draft_pages))
    else:
        print('No draft pages found, all valid enforcement pages updated')
def test_get_stream_data_page(self):
    """get_stream_data should read stream_data directly from a page."""
    body = get_stream_data(self.page, 'body')
    self.assertEqual(body[0]['type'], 'text')
    self.assertEqual(body[0]['value'], 'some text')
def migrate_streamfield_forward(page_or_revision, streamfield_name, new_type):
    """Rewrite image/text group blocks in the named StreamField as *new_type*.

    The field is saved back via set_stream_data only when a conversion
    actually happened.
    """
    conversions = {
        'image_text_25_75_group': '25-75',
        'image_text_50_50_group': '50-50',
    }
    original = get_stream_data(page_or_revision, streamfield_name)
    updated = []
    changed = False
    for block in original:
        if block['type'] in conversions:
            changed = True
            updated.append({
                'type': new_type,
                'value': image_text_group_to_info_unit_group(
                    block['value'], conversions[block['type']])
            })
        else:
            # Leave unrelated block types untouched.
            updated.append(block)
    if changed:
        set_stream_data(page_or_revision, streamfield_name, updated)
def migrate_streamfield_forward(page_or_revision, streamfield_name):
    """Replace link-blob group blocks with info_unit_group blocks.

    Reads the named StreamField from the page or revision, converts each
    half/third-width link blob group via link_blob_group_to_info_unit_group,
    and writes the field back only if something was converted.
    """
    # Maps each link-blob block type to the layout of its replacement group.
    layouts = {
        'half_width_link_blob_group': '50-50',
        'third_width_link_blob_group': '33-33-33',
    }
    stream_data = get_stream_data(page_or_revision, streamfield_name)
    result = []
    touched = False
    for block in stream_data:
        layout = layouts.get(block['type'])
        if layout is None:
            result.append(block)
            continue
        touched = True
        result.append({
            'type': 'info_unit_group',
            'value': link_blob_group_to_info_unit_group(
                block['value'], layout
            )
        })
    if touched:
        set_stream_data(
            page_or_revision,
            streamfield_name,
            result
        )
def migrate_streamfield_forward(page_or_revision, streamfield_name):
    """Convert link-blob group blocks on the StreamField to info_unit_group.

    Only persists the field when at least one block was rewritten.
    """
    blob_layouts = {
        'half_width_link_blob_group': '50-50',
        'third_width_link_blob_group': '33-33-33',
    }
    existing = get_stream_data(page_or_revision, streamfield_name)
    rewritten = []
    did_migrate = False
    for block in existing:
        kind = block['type']
        if kind not in blob_layouts:
            rewritten.append(block)
            continue
        did_migrate = True
        rewritten.append({
            'type': 'info_unit_group',
            'value': link_blob_group_to_info_unit_group(
                block['value'], blob_layouts[kind])
        })
    if did_migrate:
        set_stream_data(page_or_revision, streamfield_name, rewritten)
def get_registration_form_from_page(page_id):
    """Return the conference_registration_form block from a page's content.

    Raises RuntimeError if the page has no such block.
    """
    page = Page.objects.get(pk=page_id).specific
    for block in get_stream_data(page, 'content'):
        if block['type'] == 'conference_registration_form':
            return block
    raise RuntimeError('no registration form found on {}'.format(page))
def get_registration_form_from_page(page_id):
    """Find and return the first conference_registration_form content block.

    Raises RuntimeError if the page has no such block.
    """
    page = Page.objects.get(pk=page_id).specific
    content = get_stream_data(page, 'content')
    form_block = next(
        (block for block in content
         if block['type'] == 'conference_registration_form'),
        None,
    )
    if form_block is None:
        raise RuntimeError('no registration form found on {}'.format(page))
    return form_block
def assemble_output():
    """Build a list of export-ready row dicts for live enforcement pages.

    Each row carries the page title, URL, categories, preview text, and —
    when present in the page's streams — date filed, status, file number,
    and body content.
    """
    tag_re = re.compile(r'<[^<]+?>')
    rows = []
    for page in EnforcementActionPage.objects.all():
        if not page.live:
            continue
        url = 'https://consumerfinance.gov' + page.get_url()
        if 'policy-compliance/enforcement/actions' not in url:
            continue
        categories = ','.join(
            category.get_name_display() for category in page.categories.all()
        )
        row = {
            'Matter name': page.title,
            'URL': url,
            'Category': categories,
            'Preview text': clean_and_strip(page.preview_description)
        }
        # Pull date/status/file-number metadata out of the sidefoot stream.
        for field in get_stream_data(page, 'sidefoot'):
            if field['type'] != 'related_metadata':
                continue
            for block in field['value']['content']:
                heading = block['value'].get('heading', '')
                if heading == 'Date filed':
                    row['Date filed'] = str(block['value'].get('date'))
                elif heading == 'Status':
                    row['Status'] = tag_re.sub(
                        '', block['value'].get('blob', ''))
                elif heading == 'File number':
                    row['File number'] = tag_re.sub(
                        '', block['value'].get('blob', ''))
        # Pull the body text out of the content stream.
        for field in get_stream_data(page, 'content'):
            if field['type'] != 'full_width_text':
                continue
            for block in field['value']:
                if block['type'] == 'content':
                    row['Content'] = clean_and_strip(block['value'])
        rows.append(row)
    return rows
def migrate_page(page, field_name, mapper):
    """Apply *mapper* to the value of every 'hero' block on the named field.

    The field is written back (and the migration logged) only when at least
    one hero block was found.
    """
    stream_data = get_stream_data(page, field_name)
    updated = []
    changed = False
    for block in stream_data:
        if block['type'] == 'hero':
            block['value'] = mapper(block['value'])
            changed = True
        updated.append(block)
    if changed:
        print('migrated page {}'.format(page.slug))
        set_stream_data(page, field_name, updated)
def update_categories():
    """Strip inline Category metadata from live enforcement-action pages.

    Replaces the inline Category heading with a categories block that shows
    the page-level categories, then syncs those via update_page_category.
    Pages with unpublished drafts are skipped and reported.
    """
    draft_pages = []
    for page in DocumentDetailPage.objects.all():
        url = page.get_url()
        if not page.live:
            continue
        if 'policy-compliance/enforcement/actions' not in url:
            continue
        if page.has_unpublished_changes:
            # Leave drafts alone; collect the URL so it can be reported.
            draft_pages.append(url)
            continue
        stream_data = get_stream_data(page, 'sidefoot')
        # Remove inline Category, use the page category instead
        for field in stream_data:
            if field['type'] == 'related_metadata':
                replacement = [{
                    'type': 'categories',
                    'value': {
                        'heading': 'Category',
                        'show_categories': True
                    }
                }]
                replacement.extend(
                    block for block in field['value']['content']
                    if block['value'].get('heading', '') != 'Category'
                )
                field['value']['content'] = replacement
                break
        set_stream_data(page.specific, 'sidefoot', stream_data)
        # Update page categories according to defined map
        update_page_category(page)
    if draft_pages:
        print('Skipped the following draft pages:', ' '.join(draft_pages))
    else:
        print('No draft pages found')
    print('Inline categories removed and page categories updated.')
def test_get_stream_data_revision_no_field(self):
    """Fields absent from a revision should come back as an empty list."""
    self.assertEqual(get_stream_data(self.revision, 'notbody'), [])
def test_get_stream_data_revision_no_field(self):
    """Requesting a nonexistent field on a revision yields an empty list."""
    result = get_stream_data(self.revision, 'notbody')
    self.assertEqual(result, [])