Example 1
    def test_export(
        self,
        setup_es,
        request_sortby,
        orm_ordering,
    ):
        """Test export of company search results."""
        CompanyFactory.create_batch(3)
        CompanyFactory.create_batch(2, hq=True)

        setup_es.indices.refresh()

        data = {}
        if request_sortby:
            data['sortby'] = request_sortby

        url = reverse('api-v3:search:company-export')

        with freeze_time('2018-01-01 11:12:13'):
            response = self.api_client.post(url, data=data)

        assert response.status_code == status.HTTP_200_OK
        assert parse_header(response.get('Content-Type')) == ('text/csv', {'charset': 'utf-8'})
        assert parse_header(response.get('Content-Disposition')) == (
            'attachment', {'filename': 'Data Hub - Companies - 2018-01-01-11-12-13.csv'},
        )

        sorted_company = Company.objects.order_by(orm_ordering, 'pk')
        reader = DictReader(StringIO(response.getvalue().decode('utf-8-sig')))

        assert reader.fieldnames == list(SearchCompanyExportAPIView.field_titles.values())

        expected_row_data = [
            {
                'Name': company.name,
                'Link': f'{settings.DATAHUB_FRONTEND_URL_PREFIXES["company"]}/{company.pk}',
                'Sector': get_attr_or_none(company, 'sector.name'),
                'Country': get_attr_or_none(company, 'registered_address_country.name'),
                'UK region': get_attr_or_none(company, 'uk_region.name'),
                'Archived': company.archived,
                'Date created': company.created_on,
                'Number of employees': get_attr_or_none(company, 'employee_range.name'),
                'Annual turnover': get_attr_or_none(company, 'turnover_range.name'),
                'Headquarter type':
                    (get_attr_or_none(company, 'headquarter_type.name') or '').upper(),
            }
            for company in sorted_company
        ]

        assert list(dict(row) for row in reader) == format_csv_data(expected_row_data)
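
Example 1 and all the examples that follow build their expected rows with a shared
get_attr_or_none helper whose definition is not shown here. A minimal sketch, assuming it
follows a dotted attribute path and returns None when any attribute in the chain is missing
or None (consistent with calls such as get_attr_or_none(company, 'sector.name') above):

from operator import attrgetter


def get_attr_or_none(obj, attr_path):
    """Resolve a dotted attribute path, returning None instead of raising.

    For example, get_attr_or_none(company, 'sector.name') returns None when
    company.sector is None, rather than raising AttributeError.
    """
    try:
        return attrgetter(attr_path)(obj)
    except AttributeError:
        return None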
Example 2
    def test_export(self, opensearch_with_collector, request_sortby, orm_ordering):
        """Test export large capital opportunity search results."""
        url = reverse('api-v4:search:large-capital-opportunity-export')

        CompleteLargeCapitalOpportunityFactory()
        with freeze_time('2018-01-01 11:12:13'):
            LargeCapitalOpportunityFactory()

        opensearch_with_collector.flush_and_refresh()

        data = {}
        if request_sortby:
            data['sortby'] = request_sortby

        with freeze_time('2018-01-01 11:12:13'):
            response = self.api_client.post(url, data=data)

        assert response.status_code == status.HTTP_200_OK
        assert parse_header(response.get('Content-Disposition')) == (
            'attachment', {
                'filename': 'Data Hub - Large capital opportunities - 2018-01-01-11-12-13.csv',
            },
        )

        sorted_opportunities = LargeCapitalOpportunity.objects.order_by(orm_ordering, 'pk')
        response_text = response.getvalue().decode('utf-8-sig')
        reader = DictReader(StringIO(response_text))

        assert reader.fieldnames == list(
            SearchLargeCapitalOpportunityExportAPIView.field_titles.values(),
        )

        expected_row_data = [
            _build_expected_export_response(opportunity) for opportunity in sorted_opportunities
        ]

        expected_rows = format_csv_data(expected_row_data)

        # item is an ordered dict so is cast to a dict to make the comparison easier to
        # interpret in the event of the assert actual_rows == expected_rows failing.
        actual_rows = [dict(item) for item in reader]

        assert actual_rows == expected_rows
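
Each example compares the parsed CSV rows against format_csv_data(expected_row_data). That
helper is also defined elsewhere; judging by how it is used, it coerces each expected value
into the string form a DictReader would yield. A sketch under that assumption (the real
helper may apply extra formatting, e.g. for dates):

def format_csv_data(rows):
    """Convert expected values to the strings a CSV round-trip produces:
    None becomes an empty cell, everything else is stringified.
    (Assumed behaviour; the real helper may differ in detail.)
    """
    return [
        {key: '' if value is None else str(value) for key, value in row.items()}
        for row in rows
    ]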
Example 3
    def test_export(self, es_with_collector, request_sortby, orm_ordering):
        """Test export large capital investor profile search results."""
        url = reverse('api-v4:search:large-investor-profile-export')

        CompleteLargeCapitalInvestorProfileFactory(
            investable_capital=10000,
            global_assets_under_management=20000,
        )
        with freeze_time('2018-01-01 11:12:13'):
            LargeCapitalInvestorProfileFactory(
                investable_capital=300,
                global_assets_under_management=200,
            )

        es_with_collector.flush_and_refresh()

        data = {}
        if request_sortby:
            data['sortby'] = request_sortby

        with freeze_time('2018-01-01 11:12:13'):
            response = self.api_client.post(url, data=data)

        assert response.status_code == status.HTTP_200_OK
        assert parse_header(response.get('Content-Disposition')) == (
            'attachment',
            {'filename': 'Data Hub - Large capital profiles - 2018-01-01-11-12-13.csv'},
        )

        sorted_profiles = LargeCapitalInvestorProfile.objects.order_by(orm_ordering, 'pk')
        response_text = response.getvalue().decode('utf-8-sig')
        reader = DictReader(StringIO(response_text))

        assert reader.fieldnames == list(
            SearchLargeInvestorProfileExportAPIView.field_titles.values(),
        )

        expected_row_data = [
            {
                'Date created': profile.created_on,
                'Global assets under management': profile.global_assets_under_management,
                'Investable capital': profile.investable_capital,
                'Investor company': get_attr_or_none(profile, 'investor_company.name'),
                'Investor description': profile.investor_description,
                'Notes on locations': profile.notes_on_locations,
                'Investor type': get_attr_or_none(profile, 'investor_type.name'),
                'Required checks conducted':
                    get_attr_or_none(profile, 'required_checks_conducted.name'),
                'Minimum return rate': get_attr_or_none(profile, 'minimum_return_rate.name'),
                'Minimum equity percentage':
                    get_attr_or_none(profile, 'minimum_equity_percentage.name'),
                'Date last modified': profile.modified_on,
                'UK regions of interest':
                    join_attr_values(profile.uk_region_locations.order_by('name')),
                'Restrictions': join_attr_values(profile.restrictions.order_by('name')),
                'Time horizons': join_attr_values(profile.time_horizons.order_by('name')),
                'Investment types': join_attr_values(profile.investment_types.order_by('name')),
                'Deal ticket sizes':
                    join_attr_values(profile.deal_ticket_sizes.order_by('name')),
                'Desired deal roles':
                    join_attr_values(profile.desired_deal_roles.order_by('name')),
                'Required checks conducted by':
                    get_attr_or_none(profile, 'required_checks_conducted_by.name'),
                'Required checks conducted on': profile.required_checks_conducted_on,
                'Other countries being considered':
                    join_attr_values(profile.other_countries_being_considered.order_by('name')),
                'Construction risks':
                    join_attr_values(profile.construction_risks.order_by('name')),
                'Data Hub profile reference': str(profile.pk),
                'Asset classes of interest':
                    join_attr_values(profile.asset_classes_of_interest.order_by('name')),
                'Data Hub link': (
                    f'{settings.DATAHUB_FRONTEND_URL_PREFIXES["company"]}'
                    f'/{profile.investor_company.pk}/investments/large-capital-profile'
                ),
            }
            for profile in sorted_profiles
        ]

        expected_rows = format_csv_data(expected_row_data)

        # item is an ordered dict so is cast to a dict to make the comparison easier to
        # interpret in the event of the assert actual_rows == expected_rows failing.
        actual_rows = [dict(item) for item in reader]

        assert actual_rows == expected_rows
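
Example 3 and several later examples flatten to-many relations into a single cell with
join_attr_values. A minimal sketch consistent with the call sites above, which pass an
optional attribute path (defaulting to 'name') and an optional separator:

from operator import attrgetter


def join_attr_values(iterable, attr='name', separator=', '):
    """Join one attribute of each item into a single string, e.g. to collapse
    a queryset of related objects into one CSV cell.
    """
    return separator.join(attrgetter(attr)(item) for item in iterable)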
Example 4
    def test_export(
        self,
        opensearch_with_collector,
        request_sortby,
        orm_ordering,
    ):
        """Test export of company search results."""
        companies_1 = CompanyFactory.create_batch(
            3,
            turnover=None,
            is_turnover_estimated=None,
            number_of_employees=None,
            is_number_of_employees_estimated=None,
        )
        companies_2 = CompanyFactory.create_batch(
            2,
            hq=True,
            turnover=100,
            is_turnover_estimated=True,
            number_of_employees=95,
            is_number_of_employees_estimated=True,
        )

        for company in (*companies_1, *companies_2):
            CompanyExportCountryFactory.create_batch(
                3,
                company=company,
                country=factory.Iterator(Country.objects.order_by('?')),
                status=factory.Iterator([
                    CompanyExportCountry.Status.CURRENTLY_EXPORTING,
                    CompanyExportCountry.Status.FUTURE_INTEREST,
                    CompanyExportCountry.Status.CURRENTLY_EXPORTING,
                ]),
            )

        opensearch_with_collector.flush_and_refresh()

        data = {}
        if request_sortby:
            data['sortby'] = request_sortby

        url = reverse('api-v4:search:company-export')

        with freeze_time('2018-01-01 11:12:13'):
            response = self.api_client.post(url, data=data)

        assert response.status_code == status.HTTP_200_OK
        assert parse_header(response.get('Content-Type')) == ('text/csv', {'charset': 'utf-8'})
        assert parse_header(response.get('Content-Disposition')) == (
            'attachment', {'filename': 'Data Hub - Companies - 2018-01-01-11-12-13.csv'},
        )

        sorted_company = Company.objects.order_by(orm_ordering, 'pk')
        reader = DictReader(StringIO(response.getvalue().decode('utf-8-sig')))

        assert reader.fieldnames == list(SearchCompanyExportAPIView.field_titles.values())

        expected_row_data = [
            {
                'Name': company.name,
                'Link': f'{settings.DATAHUB_FRONTEND_URL_PREFIXES["company"]}/{company.pk}',
                'Sector': get_attr_or_none(company, 'sector.name'),
                'Area': get_attr_or_none(company, 'address_area.name'),
                'Country': get_attr_or_none(company, 'address_country.name'),
                'UK region': get_attr_or_none(company, 'uk_region.name'),
                'Countries exported to': ', '.join([
                    e.country.name
                    for e in company.export_countries.filter(
                        status=CompanyExportCountry.Status.CURRENTLY_EXPORTING,
                    ).order_by('country__name')
                ]),
                'Countries of interest': ', '.join([
                    e.country.name
                    for e in company.export_countries.filter(
                        status=CompanyExportCountry.Status.FUTURE_INTEREST,
                    ).order_by('country__name')
                ]),
                'Archived': company.archived,
                'Date created': company.created_on,
                'Number of employees':
                    company.number_of_employees
                    if company.number_of_employees is not None
                    else get_attr_or_none(company, 'employee_range.name'),
                'Annual turnover':
                    f'${company.turnover}'
                    if company.turnover is not None
                    else get_attr_or_none(company, 'turnover_range.name'),
                'Headquarter type':
                    (get_attr_or_none(company, 'headquarter_type.name') or '').upper(),
            }
            for company in sorted_company
        ]

        assert list(dict(row) for row in reader) == format_csv_data(expected_row_data)
Example 5
    def test_export(
        self,
        opensearch_with_collector,
        request_sortby,
        orm_ordering,
        requests_mock,
        accepts_dit_email_marketing,
    ):
        """Test export of contact search results."""
        ArchivedContactFactory()
        ContactWithOwnAddressFactory()
        ContactFactory()
        ContactWithOwnAreaFactory()

        # These are to test the date and team of the latest interaction a bit more thoroughly
        CompanyInteractionFactory.create_batch(2)
        CompanyInteractionFactory(contacts=ContactFactory.create_batch(2))
        interaction_with_multiple_teams = CompanyInteractionFactory()
        InteractionDITParticipantFactory.create_batch(
            2,
            interaction=interaction_with_multiple_teams,
        )

        opensearch_with_collector.flush_and_refresh()

        data = {}
        if request_sortby:
            data['sortby'] = request_sortby

        url = reverse('api-v3:search:contact-export')

        with freeze_time('2018-01-01 11:12:13'):
            response = self.api_client.post(url, data=data)

        assert response.status_code == status.HTTP_200_OK
        assert parse_header(response.get('Content-Type')) == ('text/csv', {'charset': 'utf-8'})
        assert parse_header(response.get('Content-Disposition')) == (
            'attachment', {'filename': 'Data Hub - Contacts - 2018-01-01-11-12-13.csv'},
        )

        sorted_contacts = Contact.objects.annotate(
            computed_address_country_name=Coalesce(
                'address_country__name',
                'company__address_country__name',
            ),
        ).order_by(orm_ordering, 'pk')

        matcher = requests_mock.get(
            f'{settings.CONSENT_SERVICE_BASE_URL}'
            f'{CONSENT_SERVICE_PERSON_PATH_LOOKUP}',
            text=generate_hawk_response({
                'results': [
                    {
                        'email': contact.email,
                        'consents':
                            [CONSENT_SERVICE_EMAIL_CONSENT_TYPE]
                            if accepts_dit_email_marketing
                            else [],
                    }
                    for contact in sorted_contacts
                ],
            }),
            status_code=status.HTTP_200_OK,
        )

        reader = DictReader(StringIO(response.getvalue().decode('utf-8-sig')))
        assert reader.fieldnames == list(SearchContactExportAPIView.field_titles.values())

        expected_row_data = format_csv_data([
            {
                'Name': contact.name,
                'Job title': contact.job_title,
                'Date created': contact.created_on,
                'Archived': contact.archived,
                'Link': f'{settings.DATAHUB_FRONTEND_URL_PREFIXES["contact"]}/{contact.pk}',
                'Company': get_attr_or_none(contact, 'company.name'),
                'Company sector': get_attr_or_none(contact, 'company.sector.name'),
                'Company link':
                    f'{settings.DATAHUB_FRONTEND_URL_PREFIXES["company"]}/{contact.company.pk}',
                'Company UK region': get_attr_or_none(contact, 'company.uk_region.name'),
                'Area':
                    (contact.company.address_area and contact.company.address_area.name)
                    if contact.address_same_as_company
                    else (contact.address_area and contact.address_area.name),
                'Country':
                    contact.company.address_country.name
                    if contact.address_same_as_company
                    else contact.address_country.name,
                'Postcode':
                    contact.company.address_postcode
                    if contact.address_same_as_company
                    else contact.address_postcode,
                'Phone number': contact.full_telephone_number,
                'Email address': contact.email,
                'Accepts DIT email marketing': accepts_dit_email_marketing,
                'Date of latest interaction':
                    max(contact.interactions.all(), key=attrgetter('date')).date
                    if contact.interactions.all() else None,
                'Teams of latest interaction':
                    _format_interaction_team_names(
                        max(contact.interactions.all(), key=attrgetter('date')),
                    )
                    if contact.interactions.exists() else None,
                'Created by team': get_attr_or_none(contact, 'created_by.dit_team.name'),
            }
            for contact in sorted_contacts
        ])

        actual_row_data = [dict(row) for row in reader]
        assert len(actual_row_data) == len(expected_row_data)
        for index, row in enumerate(actual_row_data):
            assert row == expected_row_data[index]
        assert matcher.call_count == 1
        assert matcher.last_request.query == urllib.parse.urlencode(
            {'email': [c.email for c in sorted_contacts]},
            doseq=True,
        )
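
Examples 5 and 8 also rely on a private _format_interaction_team_names helper defined
alongside the tests. A hypothetical sketch, assuming an interaction exposes its DIT
participants via a dit_participants related manager and that team names are joined in a
stable order:

def _format_interaction_team_names(interaction):
    """Join the (sorted, de-duplicated) team names of an interaction's DIT
    participants into one string. Hypothetical sketch; the real helper may
    differ in ordering, de-duplication, or separator.
    """
    team_names = {
        participant.team.name
        for participant in interaction.dit_participants.all()
        if participant.team
    }
    return ', '.join(sorted(team_names))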
Example 6
    def test_export(self, setup_es, request_sortby, orm_ordering):
        """Test export of investment project search results."""
        url = reverse('api-v3:search:investment_project-export')

        InvestmentProjectFactory()
        InvestmentProjectFactory(cdms_project_code='cdms-code')
        VerifyWinInvestmentProjectFactory()
        won_project = WonInvestmentProjectFactory()
        InvestmentProjectTeamMemberFactory.create_batch(3, investment_project=won_project)

        InvestmentProjectFactory(
            name='project for subsidiary',
            investor_company=CompanyFactory(
                global_headquarters=CompanyFactory(
                    one_list_tier_id=OneListTier.objects.first().id,
                    one_list_account_owner=AdviserFactory(),
                ),
            ),
        )

        setup_es.indices.refresh()

        data = {}
        if request_sortby:
            data['sortby'] = request_sortby

        with freeze_time('2018-01-01 11:12:13'):
            response = self.api_client.post(url, data=data)

        assert response.status_code == status.HTTP_200_OK
        assert parse_header(response.get('Content-Disposition')) == (
            'attachment', {'filename': 'Data Hub - Investment projects - 2018-01-01-11-12-13.csv'},
        )

        sorted_projects = InvestmentProject.objects.order_by(orm_ordering, 'pk')
        response_text = response.getvalue().decode('utf-8-sig')
        reader = DictReader(StringIO(response_text))

        assert reader.fieldnames == list(SearchInvestmentExportAPIView.field_titles.values())

        expected_row_data = [
            {
                'Date created': project.created_on,
                'Project reference': project.project_code,
                'Project name': project.name,
                'Investor company': project.investor_company.name,
                'Investor company town or city': project.investor_company.address_town,
                'Country of origin':
                    get_attr_or_none(project, 'investor_company.address_country.name'),
                'Investment type': get_attr_or_none(project, 'investment_type.name'),
                'Status': project.get_status_display(),
                'Stage': get_attr_or_none(project, 'stage.name'),
                'Link':
                    f'{settings.DATAHUB_FRONTEND_URL_PREFIXES["investmentproject"]}'
                    f'/{project.pk}',
                'Actual land date': project.actual_land_date,
                'Estimated land date': project.estimated_land_date,
                'FDI value': get_attr_or_none(project, 'fdi_value.name'),
                'Sector': get_attr_or_none(project, 'sector.name'),
                'Date of latest interaction': None,
                'Project manager': get_attr_or_none(project, 'project_manager.name'),
                'Client relationship manager':
                    get_attr_or_none(project, 'client_relationship_manager.name'),
                'Global account manager': self._get_global_account_manager_name(project),
                'Project assurance adviser':
                    get_attr_or_none(project, 'project_assurance_adviser.name'),
                'Other team members': join_attr_values(project.team_members.all(), 'adviser.name'),
                'Delivery partners': join_attr_values(project.delivery_partners.all()),
                'Possible UK regions': join_attr_values(project.uk_region_locations.all()),
                'Actual UK regions': join_attr_values(project.actual_uk_regions.all()),
                'Specific investment programme':
                    get_attr_or_none(project, 'specific_programme.name'),
                'Referral source activity':
                    get_attr_or_none(project, 'referral_source_activity.name'),
                'Referral source activity website':
                    get_attr_or_none(project, 'referral_source_activity_website.name'),
                'Total investment': project.total_investment,
                'New jobs': project.number_new_jobs,
                'Average salary of new jobs': get_attr_or_none(project, 'average_salary.name'),
                'Safeguarded jobs': project.number_safeguarded_jobs,
                'Level of involvement': get_attr_or_none(project, 'level_of_involvement.name'),
                'Likelihood to land': get_attr_or_none(project, 'likelihood_to_land.name'),
                'R&D budget': project.r_and_d_budget,
                'Associated non-FDI R&D project': project.non_fdi_r_and_d_budget,
                'New to world tech': project.new_tech_to_uk,
                'FDI type': project.fdi_type,
                'Foreign equity investment': project.foreign_equity_investment,
                'GVA multiplier': get_attr_or_none(project, 'gva_multiplier.multiplier'),
                'GVA': project.gross_value_added,
            }
            for project in sorted_projects
        ]

        expected_rows = format_csv_data(expected_row_data)

        # item is an ordered dict so is cast to a dict to make the comparison easier to
        # interpret in the event of the assert actual_rows == expected_rows failing.
        actual_rows = [dict(item) for item in reader]

        # Support for ordering was added to StringAgg in Django 2.2. However, it is not
        # currently used due to https://code.djangoproject.com/ticket/30315. While that
        # remains the case, our StringAgg fields are unordered and we use this workaround to
        # compare them.
        unordered_fields = (
            'Other team members',
            'Delivery partners',
            'Possible UK regions',
            'Actual UK regions',
        )

        for row in chain(actual_rows, expected_rows):
            for field in unordered_fields:
                row[field] = frozenset(row[field].split(', '))

        assert actual_rows == expected_rows
Example 7
    def test_export(
        self,
        es_with_collector,
        request_sortby,
        orm_ordering,
    ):
        """Test export of interaction search results."""
        factories = (
            OrderCancelledFactory,
            OrderCompleteFactory,
            OrderFactory,
            OrderPaidFactory,
            OrderSubscriberFactory,
            OrderWithAcceptedQuoteFactory,
            OrderWithCancelledQuoteFactory,
            OrderWithOpenQuoteFactory,
            OrderWithoutAssigneesFactory,
            OrderWithoutLeadAssigneeFactory,
            ApprovedRefundFactory,
            RequestedRefundFactory,
        )

        order_with_multiple_refunds = OrderPaidFactory()
        ApprovedRefundFactory(
            order=order_with_multiple_refunds,
            requested_amount=order_with_multiple_refunds.total_cost / 5,
        )
        ApprovedRefundFactory(
            order=order_with_multiple_refunds,
            requested_amount=order_with_multiple_refunds.total_cost / 4,
        )
        ApprovedRefundFactory(
            order=order_with_multiple_refunds,
            requested_amount=order_with_multiple_refunds.total_cost / 3,
        )

        for factory_ in factories:
            factory_()

        es_with_collector.flush_and_refresh()

        data = {}
        if request_sortby:
            data['sortby'] = request_sortby

        url = reverse('api-v3:search:order-export')

        with freeze_time('2018-01-01 11:12:13'):
            response = self.api_client.post(url, data=data)

        assert response.status_code == status.HTTP_200_OK
        assert parse_header(response.get('Content-Type')) == ('text/csv', {'charset': 'utf-8'})
        assert parse_header(response.get('Content-Disposition')) == (
            'attachment', {'filename': 'Data Hub - Orders - 2018-01-01-11-12-13.csv'},
        )

        sorted_orders = Order.objects.order_by(orm_ordering, 'pk')
        reader = DictReader(StringIO(response.getvalue().decode('utf-8-sig')))

        assert reader.fieldnames == list(SearchOrderExportAPIView.field_titles.values())

        sorted_orders_and_refunds = (
            (order, order.refunds.filter(status=RefundStatus.approved))
            for order in sorted_orders
        )

        expected_row_data = [
            {
                'Order reference': order.reference,
                'Net price': Decimal(order.subtotal_cost) / 100,
                'Net refund':
                    Decimal(sum(refund.net_amount for refund in refunds)) / 100
                    if refunds else None,
                'Status': order.get_status_display(),
                'Link': order.get_datahub_frontend_url(),
                'Sector': order.sector.name,
                'Market': order.primary_market.name,
                'UK region': order.uk_region.name,
                'Company': order.company.name,
                'Company country': order.company.address_country.name,
                'Company UK region': get_attr_or_none(order, 'company.uk_region.name'),
                'Company link':
                    f'{settings.DATAHUB_FRONTEND_URL_PREFIXES["company"]}/{order.company.pk}',
                'Contact': order.contact.name,
                'Contact job title': order.contact.job_title,
                'Contact link':
                    f'{settings.DATAHUB_FRONTEND_URL_PREFIXES["contact"]}/{order.contact.pk}',
                'Lead adviser': get_attr_or_none(order.get_lead_assignee(), 'adviser.name'),
                'Created by team': get_attr_or_none(order, 'created_by.dit_team.name'),
                'Date created': order.created_on,
                'Delivery date': order.delivery_date,
                'Date quote sent': get_attr_or_none(order, 'quote.created_on'),
                'Date quote accepted': get_attr_or_none(order, 'quote.accepted_on'),
                'Date payment received': order.paid_on,
                'Date completed': order.completed_on,
            }
            for order, refunds in sorted_orders_and_refunds
        ]

        assert list(dict(row) for row in reader) == format_csv_data(expected_row_data)
Example 8
    def test_export(
        self,
        es_with_collector,
        request_sortby,
        orm_ordering,
    ):
        """Test export of contact search results."""
        ArchivedContactFactory()
        ContactWithOwnAddressFactory()
        ContactFactory()

        # These are to test the date and team of the latest interaction a bit more thoroughly
        CompanyInteractionFactory.create_batch(2)
        CompanyInteractionFactory(contacts=ContactFactory.create_batch(2))
        interaction_with_multiple_teams = CompanyInteractionFactory()
        InteractionDITParticipantFactory.create_batch(
            2,
            interaction=interaction_with_multiple_teams,
        )

        es_with_collector.flush_and_refresh()

        data = {}
        if request_sortby:
            data['sortby'] = request_sortby

        url = reverse('api-v3:search:contact-export')

        with freeze_time('2018-01-01 11:12:13'):
            response = self.api_client.post(url, data=data)

        assert response.status_code == status.HTTP_200_OK
        assert parse_header(response.get('Content-Type')) == ('text/csv', {'charset': 'utf-8'})
        assert parse_header(response.get('Content-Disposition')) == (
            'attachment', {'filename': 'Data Hub - Contacts - 2018-01-01-11-12-13.csv'},
        )

        sorted_contacts = Contact.objects.annotate(
            computed_address_country_name=Coalesce(
                'address_country__name',
                'company__address_country__name',
            ),
        ).order_by(orm_ordering, 'pk')
        reader = DictReader(StringIO(response.getvalue().decode('utf-8-sig')))

        assert reader.fieldnames == list(SearchContactExportAPIView.field_titles.values())

        expected_row_data = [
            {
                'Name': contact.name,
                'Job title': contact.job_title,
                'Date created': contact.created_on,
                'Archived': contact.archived,
                'Link': f'{settings.DATAHUB_FRONTEND_URL_PREFIXES["contact"]}/{contact.pk}',
                'Company': get_attr_or_none(contact, 'company.name'),
                'Company sector': get_attr_or_none(contact, 'company.sector.name'),
                'Company link':
                    f'{settings.DATAHUB_FRONTEND_URL_PREFIXES["company"]}/{contact.company.pk}',
                'Company UK region': get_attr_or_none(contact, 'company.uk_region.name'),
                'Country':
                    contact.company.address_country.name
                    if contact.address_same_as_company
                    else contact.address_country.name,
                'Postcode':
                    contact.company.address_postcode
                    if contact.address_same_as_company
                    else contact.address_postcode,
                'Phone number':
                    ' '.join((contact.telephone_countrycode, contact.telephone_number)),
                'Email address': contact.email,
                'Accepts DIT email marketing': contact.accepts_dit_email_marketing,
                'Date of latest interaction':
                    max(contact.interactions.all(), key=attrgetter('date')).date
                    if contact.interactions.all() else None,
                'Teams of latest interaction':
                    _format_interaction_team_names(
                        max(contact.interactions.all(), key=attrgetter('date')),
                    )
                    if contact.interactions.exists() else None,
                'Created by team': get_attr_or_none(contact, 'created_by.dit_team.name'),
            }
            for contact in sorted_contacts
        ]

        actual_row_data = [dict(row) for row in reader]
        assert actual_row_data == format_csv_data(expected_row_data)
Example 9
    def test_interaction_export(
        self,
        setup_es,
        request_sortby,
        orm_ordering,
    ):
        """
        Test export of interaction search results with a policy feedback user.

        Checks that all interaction kinds except for policy feedback are included in the export.
        """
        # Faker generates job titles containing commas, which complicates comparisons,
        # so all contact job titles are explicitly set
        company = CompanyFactory()
        CompanyInteractionFactory(
            company=company,
            contacts=[
                ContactFactory(company=company, job_title='Engineer'),
                ContactFactory(company=company, job_title=None),
                ContactFactory(company=company, job_title=''),
            ],
        )
        EventServiceDeliveryFactory(
            company=company,
            contacts=[
                ContactFactory(company=company, job_title='Managing director'),
            ],
        )
        InvestmentProjectInteractionFactory(
            company=company,
            contacts=[
                ContactFactory(company=company, job_title='Exports manager'),
            ],
        )
        ServiceDeliveryFactory(
            company=company,
            contacts=[
                ContactFactory(company=company, job_title='Sales director'),
            ],
        )
        CompanyInteractionFactoryWithPolicyFeedback(
            company=company,
            contacts=[
                ContactFactory(
                    company=company,
                    job_title='Business development manager',
                ),
            ],
            policy_areas=PolicyArea.objects.order_by('?')[:2],
            policy_issue_types=PolicyIssueType.objects.order_by('?')[:2],
        )

        setup_es.indices.refresh()

        data = {}
        if request_sortby:
            data['sortby'] = request_sortby

        url = reverse('api-v3:search:interaction-export')

        with freeze_time('2018-01-01 11:12:13'):
            response = self.api_client.post(url, data=data)

        assert response.status_code == status.HTTP_200_OK
        assert parse_header(response.get('Content-Type')) == ('text/csv', {'charset': 'utf-8'})
        assert parse_header(response.get('Content-Disposition')) == (
            'attachment', {'filename': 'Data Hub - Interactions - 2018-01-01-11-12-13.csv'},
        )

        sorted_interactions = Interaction.objects.all().order_by(orm_ordering, 'pk')
        reader = DictReader(StringIO(response.getvalue().decode('utf-8-sig')))

        assert reader.fieldnames == list(SearchInteractionExportAPIView.field_titles.values())

        expected_row_data = [
            {
                'Date': interaction.date,
                'Type': interaction.get_kind_display(),
                'Service': get_attr_or_none(interaction, 'service.name'),
                'Subject': interaction.subject,
                'Link':
                    f'{settings.DATAHUB_FRONTEND_URL_PREFIXES["interaction"]}'
                    f'/{interaction.pk}',
                'Company': get_attr_or_none(interaction, 'company.name'),
                'Company link':
                    f'{settings.DATAHUB_FRONTEND_URL_PREFIXES["company"]}'
                    f'/{interaction.company.pk}',
                'Company country':
                    get_attr_or_none(interaction, 'company.address_country.name'),
                'Company UK region': get_attr_or_none(interaction, 'company.uk_region.name'),
                'Company sector': get_attr_or_none(interaction, 'company.sector.name'),
                'Contacts': _format_expected_contacts(interaction),
                'Adviser': get_attr_or_none(interaction, 'dit_adviser.name'),
                'Service provider': get_attr_or_none(interaction, 'dit_team.name'),
                'Event': get_attr_or_none(interaction, 'event.name'),
                'Communication channel':
                    get_attr_or_none(interaction, 'communication_channel.name'),
                'Service delivery status':
                    get_attr_or_none(interaction, 'service_delivery_status.name'),
                'Net company receipt': interaction.net_company_receipt,
                'Policy issue types': join_attr_values(interaction.policy_issue_types.all()),
                'Policy areas':
                    join_attr_values(interaction.policy_areas.all(), separator='; '),
                'Policy feedback notes': interaction.policy_feedback_notes,
            }
            for interaction in sorted_interactions
        ]

        actual_row_data = [_format_actual_csv_row(row) for row in reader]
        assert actual_row_data == format_csv_data(expected_row_data)