def test_Service_works_if_serviceBenefits_is_not_set(self):
    """A Service built from data lacking 'serviceBenefits' gets no 'benefits' attribute."""
    del self.fixture['serviceBenefits']
    manifest = content_loader.get_manifest('g-cloud-6', 'display_service')
    self.service = Service(self.fixture, manifest, self._lots_by_slug)
    assert not hasattr(self.service, 'benefits')
def setup_module(module):
    """Load the search-filter manifests and expose per-framework filter groups on the module."""
    # TODO we should have example subset search_filter manifests as fixtures
    for framework_slug in ('g-cloud-6', 'g-cloud-9'):
        content_loader.load_manifest(framework_slug, 'services', 'services_search_filters')

    module.filter_groups = filters_for_lot(
        "saas",
        content_loader.get_manifest('g-cloud-6', 'services_search_filters'),
    ).values()
    module.g9_filter_groups = filters_for_lot(
        'cloud-software',
        content_loader.get_manifest('g-cloud-9', 'services_search_filters'),
    ).values()
def test_service_properties_available_via_summary_manifest(self):
    """The summary manifest of an 'iaas' service exposes the expected sections and questions."""
    iaas_manifest = content_loader.get_manifest(
        'g-cloud-6', 'display_service').filter({'lot': 'iaas'})
    service = Service(self.fixture, iaas_manifest, self._lots_by_slug)

    sections = service.summary_manifest.sections
    assert sections[0].name == 'Support'
    assert len(sections) == 30
    assert len(sections[0].questions) == 5
def test_declaration_attribute_is_correctly_set_on_meta(self, declaration, expected):
    """Service.meta.declaration reflects whatever declaration was passed at construction time."""
    service = Service(
        self.fixture,
        content_loader.get_manifest('g-cloud-6', 'display_service'),
        self._lots_by_slug,
        declaration=declaration,
    )

    assert hasattr(service.meta, 'declaration')
    assert service.meta.declaration == expected
def setup_method(self, method):
    """Build the shared service fixture, lot lookup and Service instance for each test."""
    super().setup_method(method)

    self.fixture = _get_fixture_data()['services']
    framework_fixture = self._get_framework_fixture_data('g-cloud-6')
    self._lots_by_slug = framework_helpers.get_lots_by_slug(framework_fixture['frameworks'])
    self.service = Service(
        self.fixture,
        content_loader.get_manifest('g-cloud-6', 'display_service'),
        self._lots_by_slug,
    )
def test_identical_data(self):
    """Services differing only in fields the manifest ignores produce no diff tables."""
    def make_service(**irrelevancies):
        # Fresh dict (and fresh inner list) per call so the two services
        # share no mutable state.
        return dict(
            {
                "lot": "cloud-support",
                "serviceName": "On the range",
                "serviceFeaturesSupport": [
                    "A blue enamelled saucepan",
                    "A black iron kettle",
                ],
            },
            **irrelevancies,
        )

    # these two should be identical in as far as the data we're concerned about
    service_data_a = make_service(irrelevantThing="Five coiled spring housebells")
    service_data_b = make_service(
        irrelevantThing="Six coiled spring housebells",
        anotherIrrelevancy="A curvilinear rope",
    )

    content_sections = content_loader.get_manifest(
        "g-cloud-9",
        'edit_service_as_admin',
    ).filter(service_data_b).sections

    assert not tuple(
        html_diff_tables_from_sections_iter(
            content_sections, service_data_a, service_data_b))
def __init__(self, search_api_client, search_api_url, frameworks_by_slug, include_markup=False):
    """Rebuild buyer-frontend state (url + search summary) from a saved Search API URL."""
    # Core data derived from the saved Search API URL.
    self.framework_slug = search_api_client.get_index_from_search_api_url(search_api_url)
    framework = frameworks_by_slug[self.framework_slug]
    content_manifest = content_loader.get_manifest(self.framework_slug, 'services_search_filters')
    lots_by_slug = get_lots_by_slug(framework)

    # Recover the buyer-frontend query params encoded in the saved Search API URL.
    search_query_params = search_api_client.get_frontend_params_from_search_api_url(search_api_url)
    search_query_params = ungroup_request_filters(search_query_params, content_manifest)
    search_query_params_multidict = MultiDict(search_query_params)

    current_lot_slug = search_query_params_multidict.get('lot', None)
    filters = filters_for_lot(current_lot_slug, content_manifest, all_lots=framework['lots'])
    clean_request_query_params = clean_request_args(
        search_query_params_multidict, filters.values(), lots_by_slug)

    # The buyer-frontend URL equivalent of the saved Search API URL.
    self.url = construct_url_from_base_and_params(
        url_for('main.search_services'), search_query_params)

    # Fetch the saved search's result set and build the search summary from it.
    search_api_response = search_api_client._get(search_api_url)
    self.search_summary = SearchSummary(
        search_api_response['meta']['total'],
        clean_request_query_params.copy(),
        filters.values(),
        lots_by_slug)
def list_opportunities(framework_family):
    """Render the list of opportunities (briefs) for the latest live framework of
    ``framework_family``, honouring the lot/category/status filters, keyword search
    and pagination supplied in the request args.

    Returns the full search page, or — when the 'live-results' arg is present — a
    JSON payload of rendered page fragments for in-place replacement.

    Aborts 404 for an unknown/ineligible framework or an invalid page number.
    """
    frameworks = data_api_client.find_frameworks()['frameworks']
    frameworks = [v for v in frameworks if v['framework'] == framework_family]
    framework = get_latest_live_framework_or_404(frameworks, framework_family)
    abort_if_not_further_competition_framework(framework)

    lots_by_slug = get_lots_by_slug(framework)
    current_lot_slug = get_valid_lot_from_args_or_none(request.args, lots_by_slug)
    content_manifest = content_loader.get_manifest(framework['slug'], 'briefs_search_filters')
    filters = filters_for_lot(current_lot_slug, content_manifest, all_lots=framework['lots'])
    clean_request_query_params = clean_request_args(request.args, filters.values(), lots_by_slug)

    # Reject non-numeric or non-positive page numbers outright.
    try:
        if int(request.args.get('page', 1)) <= 0:
            abort(404)
    except ValueError:
        abort(404)

    index = 'briefs-digital-outcomes-and-specialists'
    doc_type = 'briefs'

    updated_request_args = None
    # Default to open+closed briefs when no explicit status filter was given.
    # This will exclude anything with a 'withdrawn' status.
    if 'statusOpenClosed' not in clean_request_query_params:
        updated_request_args = MultiDict([('statusOpenClosed', 'open'),
                                          ('statusOpenClosed', 'closed')])
        updated_request_args.update(clean_request_query_params)

    # Bind the effective params once instead of repeating the
    # "updated if present, else clean" conditional at every use site.
    effective_query_params = updated_request_args if updated_request_args else clean_request_query_params

    search_api_response = search_api_client.search(
        index=index,
        doc_type=doc_type,
        **build_search_query(effective_query_params, filters.values(),
                             content_manifest, lots_by_slug))

    # Convert the values of certain attributes to their label counterparts.
    # Reuse content_manifest: the original re-fetched the identical manifest here.
    for brief in search_api_response['documents']:
        if brief.get('specialistRole'):
            brief['specialistRole'] = content_manifest.summary(brief).get_question(
                'specialistRole').value
        brief['location'] = content_manifest.summary(brief).get_question(
            'location').value

    search_results_obj = SearchResults(
        search_api_response,
        lots_by_slug,
        highlight_fields=frozenset(('summary',)),
    )

    # Results per page come from the Search API meta data, falling back to the
    # Buyer FE config setting.
    results_per_page = search_api_response['meta'].get(
        'results_per_page', current_app.config["DM_SEARCH_PAGE_SIZE"])

    # Get prev/next link info and number of pages.
    pagination_config = pagination(search_results_obj.total, results_per_page,
                                   get_page_from_request(request))

    search_summary = SearchSummary(search_api_response['meta']['total'],
                                   clean_request_query_params.copy(),
                                   filters.values(), lots_by_slug)

    # dict.pop with a default replaces the original "check then pop" conditional.
    category_filter_group = filters.pop('categories', None)

    lots = [lot for lot in framework['lots'] if lot['allowsBrief']]
    view_name = 'list_opportunities'
    selected_category_tree_filters = build_lots_and_categories_link_tree(
        framework,
        lots,
        category_filter_group,
        request,
        effective_query_params,
        content_manifest,
        doc_type,
        index,
        Href(url_for('.{}'.format(view_name), framework_family=framework['framework'])),
        search_api_client)
    filter_form_hidden_fields_by_name = {
        f["name"]: f
        for f in selected_category_tree_filters[1:]
    }

    current_lot = lots_by_slug.get(current_lot_slug)

    set_filter_states(filters.values(), request)
    for filter_groups in filters.values():
        for filter_instance in filter_groups['filters']:
            if 'label' in filter_instance:
                filter_instance['label'] = capitalize_first(filter_instance['label'])
                filter_instance['text'] = capitalize_first(filter_instance['label'])
                filter_instance['attributes'] = {
                    'aria-controls': 'search-summary-accessible-hint-wrapper'
                }

    clear_filters_url = get_request_url_without_any_filters(
        request, filters, view_name, framework_family=framework_family)

    search_query = query_args_for_pagination(clean_request_query_params)

    template_args = dict(
        briefs=search_results_obj.search_results,
        category_tree_root=selected_category_tree_filters[0],
        clear_filters_url=clear_filters_url,
        current_lot=current_lot,
        doc_type=doc_type,
        filters=filters.values(),
        filter_form_hidden_fields=filter_form_hidden_fields_by_name.values(),
        form_action=url_for('.list_opportunities', framework_family=framework_family),
        framework=framework,
        framework_family=framework['framework'],
        framework_family_name='Digital Outcomes and Specialists',
        lot_names=tuple(lot['name'] for lot in lots_by_slug.values() if lot['allowsBrief']),
        outcomes={
            'awarded': 'awarded',
            'cancelled': 'cancelled',
            'closed': 'awaiting outcome',
            'unsuccessful': 'no suitable suppliers'
        },
        pagination=pagination_config,
        search_keywords=get_keywords_from_request(request),
        search_query=search_query,
        summary=search_summary.markup(),
        total=search_results_obj.total,
        view_name=view_name,
    )

    if request.args.get('live-results'):
        # Local import kept deliberately: jsonify is only needed on this path.
        from flask import jsonify
        live_results_dict = {
            "results": {
                "selector": "#js-dm-live-search-results",
                "html": render_template("search/_results_wrapper.html", **template_args)
            },
            "categories": {
                "selector": "#js-dm-live-search-categories",
                "html": render_template("search/_categories_wrapper.html", **template_args)
            },
            "summary": {
                "selector": "#js-dm-live-search-summary",
                "html": render_template("search/_summary.html", **template_args)
            },
            "summary-accessible-hint": {
                "selector": "#js-dm-live-search-summary-accessible-hint",
                "html": render_template("search/_summary_accessible_hint.html", **template_args)
            },
            "filter-title": {
                "selector": "#js-dm-live-filter-title",
                "html": render_template("search/_filter_title.html", **template_args)
            },
        }
        return jsonify(live_results_dict)

    return render_template('search/briefs.html', **template_args)
def get_brief_by_id(framework_family, brief_id):
    """Render the detail page for a published brief (opportunity).

    Aborts 404 when the brief is not in a published status or does not belong to
    ``framework_family``. Collects response stats, the winning response (for
    awarded briefs), whether the current supplier responded, and the brief's
    Q&A/attributes formatted for govukSummaryList.
    """
    frameworks = data_api_client.find_frameworks()['frameworks']
    frameworks = [
        framework for framework in frameworks
        if framework['framework'] == framework_family
    ]
    framework = get_latest_live_framework_or_404(frameworks, framework_family)
    abort_if_not_further_competition_framework(framework)

    briefs = data_api_client.get_brief(brief_id)
    brief = briefs.get('briefs')
    # Single publication/family guard: the original repeated this identical check
    # after computing the response stats, where it was unreachable dead code.
    if brief['status'] not in PUBLISHED_BRIEF_STATUSES or brief['framework'][
            'family'] != framework_family:
        abort(404, "Opportunity '{}' can not be found".format(brief_id))

    brief_responses = data_api_client.find_brief_responses(
        brief_id=brief_id,
        status=",".join(ALL_BRIEF_RESPONSE_STATUSES),
        with_data=False,
    ).get('briefResponses')

    winning_response, winning_supplier_size = None, None
    if brief['status'] == 'awarded':
        winning_response = next(
            response for response in brief_responses
            if response["id"] == brief['awardedBriefResponseId'])
        winning_supplier_size = format_winning_supplier_size(
            winning_response["supplierOrganisationSize"])

    brief_responses_stats = count_brief_responses_by_size_and_status(brief_responses)

    # Anonymous users have no supplier_id attribute, hence the AttributeError fallback.
    try:
        has_supplier_responded_to_brief = (current_user.supplier_id in [
            res['supplierId'] for res in brief_responses
            if res["status"] in COMPLETED_BRIEF_RESPONSE_STATUSES
        ])
    except AttributeError:
        has_supplier_responded_to_brief = False

    # Get Q&A in format suitable for govukSummaryList
    for index, question in enumerate(brief['clarificationQuestions']):
        question["key"] = {
            "html": f"{index + 1}. "
                    f"{text_to_html(question['question'], format_links=True, preserve_line_breaks=True)}"
        }
        question["value"] = {
            "html": text_to_html(question["answer"], format_links=True, preserve_line_breaks=True)
        }

    brief_content = content_loader.get_manifest(brief['frameworkSlug'],
                                                'display_brief').filter(brief)

    # Get attributes in format suitable for govukSummaryList
    brief_summary = brief_content.summary(brief)
    for section in brief_summary:
        section.summary_list = to_summary_list_rows(section.questions,
                                                    format_links=True,
                                                    filter_empty=False)

    return render_template(
        'brief.html',
        brief=brief,
        brief_responses_stats=brief_responses_stats,
        content=brief_content,
        brief_content_summary=brief_summary,
        has_supplier_responded_to_brief=has_supplier_responded_to_brief,
        winning_response=winning_response,
        winning_supplier_size=winning_supplier_size,
    )
def test_common_properties(
        self,
        framework_slug,
        lot_slug,
        service_data_a,
        service_data_b,
        expected_rem_qs,
        expected_add_qs,
        table_preamble_template,
):
    """Property-based checks on the diff tables produced for two service documents.

    Rather than asserting exact HTML output, asserts structural invariants that any
    valid diff table must satisfy (column positions, del/ins placement, line-number
    presence, content round-tripping, nbsp purging).
    """
    # because there is no single canonical "correct" representation of a diff between two documents, we can't just
    # test the output verbatim as it would be a fragile test. instead we can test for a bunch of properties that
    # must always be true of an output we would consider valid
    service_data_a, service_data_b = (dict(s, lot=lot_slug) for s in (
        service_data_a,
        service_data_b,
    ))

    content_sections = content_loader.get_manifest(
        framework_slug,
        'edit_service_as_admin',
    ).filter(service_data_b).sections

    with self.app.app_context():
        # Collect one diff table per question id that actually changed.
        diffs = OrderedDict(
            (q_id, html_diff)
            for sec_slug, q_id, html_diff in html_diff_tables_from_sections_iter(
                content_sections,
                service_data_a,
                service_data_b,
                table_preamble_template=table_preamble_template,
            ))

    for question_id, html_diff in diffs.items():
        table_element = html.fragment_fromstring(html_diff)

        # these should all have been removed
        assert not table_element.xpath(".//a")
        assert not table_element.xpath(".//colgroup")
        assert not table_element.xpath(".//*[@id]")

        # there should be a non-empty caption tag if and only if table_preamble_template is supplied
        if table_preamble_template is None:
            assert not table_element.xpath(".//caption")
        else:
            assert table_element.xpath("./caption[normalize-space(string())]")

        # all td.line-content.removal elements should appear in the same (expected) column
        for tr in table_element.xpath(
                "./tbody/tr[./td[contains(@class, 'line-content')][contains(@class, 'removal')]]"):
            assert len(tr.xpath(
                "./td[contains(@class, 'line-content')][contains(@class, 'removal')]")) == 1
            assert len(tr.xpath(
                "./td[contains(@class, 'line-content')][contains(@class, 'removal')]/preceding-sibling::*"
            )) == self._expected_removal_content_column

        # all td.line-content.addition elements should appear in the same (expected) column
        for tr in table_element.xpath(
                "./tbody/tr[./td[contains(@class, 'line-content')][contains(@class, 'addition')]]"):
            assert len(tr.xpath(
                "./td[contains(@class, 'line-content')][contains(@class, 'addition')]")) == 1
            assert len(tr.xpath(
                "./td[contains(@class, 'line-content')][contains(@class, 'addition')]/preceding-sibling::*"
            )) == self._expected_addition_content_column

        # the only del elements should appear in td.line-content.removal elements
        assert len(table_element.xpath(".//del")) == len(table_element.xpath(
            "./tbody/tr/td[contains(@class, 'line-content')][contains(@class, 'removal')]/del"))
        # and there shouldn't be any td.line-content.removal elements that don't have at least one del element
        assert not table_element.xpath(
            ".//td[contains(@class, 'line-content')][contains(@class, 'removal')][not(.//del)]")

        # the only ins elements should appear in td.line-content.addition elements
        assert len(table_element.xpath(".//ins")) == len(table_element.xpath(
            "./tbody/tr/td[contains(@class, 'line-content')][contains(@class, 'addition')]/ins"))
        # and there shouldn't be any td.line-content.addition elements that don't have at least one ins element
        assert not table_element.xpath(
            ".//td[contains(@class, 'line-content')][contains(@class, 'addition')][not(.//ins)]")

        # content should have been purged of all nbsps
        assert not table_element.xpath(
            ".//td[contains(@class, 'line-content')][contains(string(), $nbsp)]",
            nbsp=u"\u00a0",
        )

        # yes, this is awfully familiar code from the innards of html_diff_tables_from_sections_iter so there's a
        # degree to which we're marking our own homework with this, but it's a little difficult to see an
        # alternative
        expected_content_a, expected_content_b = (
            [
                (line or " ")  # diff outputs an extraneous space in some blank line cases, which is ok by us
                for line in (q.splitlines() if isinstance(q, str) else q)
            ]
            for q in (r.get(question_id, []) for r in (
                service_data_a,
                service_data_b,
            )))

        # assert some things about the content in each line-content column
        for expected_content, expected_content_column in (
            (
                expected_content_a,
                self._expected_removal_content_column,
            ),
            (
                expected_content_b,
                self._expected_addition_content_column,
            ),
        ):
            # the collapsed string content of the collection of tds from the expected column which have a non-empty
            # line-number td directly preceding them should equal the expected content. note here we're not giving
            # any leeway for extra whitespace because the intention is to be able to display this with whitespace-
            # preserving css. but that could always be relaxed if totally necessary. also note if there were nbsps
            # in our data this would not work because they are purged unconditionally.
            assert [
                (elem.xpath("string()") or " ")  # normalizing blank lines to single spaces, reason mentioned above
                for elem in table_element.xpath(
                    "./tbody/tr/td[$i][contains(@class, 'line-content')]"
                    "[normalize-space(string(./preceding-sibling::td[1][contains(@class, 'line-number')]))]",
                    # xpath's element indexing is 1-based
                    i=expected_content_column + 1,
                )
            ] == expected_content

        # assert some things about each row
        for tr in table_element.xpath("./tbody/tr"):
            # note here how xpath's element indexing is 1-based
            content_remside = tr.xpath(
                "string(./td[$i])", i=self._expected_removal_content_column + 1)
            content_addside = tr.xpath(
                "string(./td[$i])", i=self._expected_addition_content_column + 1)

            # in lines where we have additions/removals,,,
            if tr.xpath(
                    "./td[contains(@class, 'line-content')]" +
                    "[contains(@class, 'addition') or contains(@class, 'removal')]"):
                # row should have content on at least one side
                assert content_addside or content_remside

                # if no content on one side, all content on other side should be in a del/ins
                if not content_remside:
                    assert content_addside == tr.xpath(
                        "string(./td[contains(@class, 'line-content')][contains(@class, 'addition')]/ins)")
                if not content_addside:
                    assert content_remside == tr.xpath(
                        "string(./td[contains(@class, 'line-content')][contains(@class, 'removal')]/del)")

                # line number should be on a side if and only if there is content on that side
                assert bool(
                    tr.xpath(
                        "string(./td[contains(@class, 'line-content')][contains(@class, 'removal')])"
                    )
                ) == bool(
                    tr.xpath(
                        "normalize-space(string(./td[contains(@class, 'line-number')]" +
                        "[contains(@class, 'line-number-removal')]))"))
                assert bool(
                    tr.xpath(
                        "string(./td[contains(@class, 'line-content')][contains(@class, 'addition')])"
                    )
                ) == bool(
                    tr.xpath(
                        "normalize-space(string(./td[contains(@class, 'line-number')]" +
                        "[contains(@class, 'line-number-add')]))"))

                # line-content tds which are empty should have line-non-existent class
                assert all(
                    bool("line-non-existent" in td.attrib.get("class", "")) ==
                    (not td.xpath("string()"))
                    for td in tr.xpath("./td[contains(@class, 'line-content')]"))
            else:
                # but if there aren't any additions/removals...
                # the content should be equal on both sides
                assert content_remside == content_addside
                # there shouldn't be any line-non-existent tds
                assert not tr.xpath("./td[contains(@class, 'line-non-existent')]")

    # NOTE(review): this loop binds `question` but the body only tests `question_id`,
    # which still holds the LAST key from the diffs loop above — so the same id is
    # re-checked once per question instead of each question being checked. Presumably
    # this was meant to iterate question ids (e.g. `for question_id in ...`) — confirm
    # against the upstream test suite before relying on these assertions.
    for question in chain.from_iterable(section.questions
                                        for section in content_sections):
        # check a question we expect to have removals does and ones we expect not to ...doesn't.
        assert bool((question_id in diffs) and html.fragment_fromstring(
            diffs[question_id]
        ).xpath(
            "./tbody/tr/td[contains(@class, 'line-content')][contains(@class, 'removal')]"
        )) == (question_id in expected_rem_qs)

        # check a question we expect to have additions does and ones we expect not to ...doesn't.
        assert bool((question_id in diffs) and html.fragment_fromstring(
            diffs[question_id]
        ).xpath(
            "./tbody/tr/td[contains(@class, 'line-content')][contains(@class, 'addition')]"
        )) == (question_id in expected_add_qs)

        # check a question we expect to have neither additions or removals to not be present in diffs at all
        assert (question_id in diffs) == (question_id in expected_rem_qs
                                          or question_id in expected_add_qs)