Example #1
def bulk_non_compliance_final_warning_letter_file(context):
    context.non_compliance_final_warning_letter_bulk_file = RESOURCE_FILE_PATH.joinpath(
        'bulk_processing_files', 'non_compliance_final_warning_letter_bulk_test.csv')

    context.non_compliance_final_warning_letter_case_ids = [
        case['payload']['collectionCase']['id']
        for case in context.case_created_events
    ]

    with open(context.non_compliance_final_warning_letter_bulk_file,
              'w') as non_compliance_final_warning_letter_bulk_write:
        writer = csv.DictWriter(non_compliance_final_warning_letter_bulk_write,
                                fieldnames=[
                                    'CASE_ID', 'NC_STATUS',
                                    'FIELDCOORDINATOR_ID', 'FIELDOFFICER_ID'
                                ])
        writer.writeheader()

        for case_id in context.non_compliance_final_warning_letter_case_ids:
            writer.writerow({
                'CASE_ID': case_id,
                'NC_STATUS': 'NCFW',
                'FIELDCOORDINATOR_ID': '10000',
                'FIELDOFFICER_ID': '100010'
            })

    # Upload the file to a real bucket if one is configured
    if Config.BULK_NON_COMPLIANCE_BUCKET_NAME:
        clear_bucket(Config.BULK_NON_COMPLIANCE_BUCKET_NAME)
        upload_file_to_bucket(
            context.non_compliance_final_warning_letter_bulk_file,
            f'non_compliance_final_warning_letter_acceptance_tests_'
            f'{datetime.utcnow().strftime("%Y%m%d-%H%M%S")}.csv',
            Config.BULK_NON_COMPLIANCE_BUCKET_NAME)
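The snippets in this collection use csv, random, uuid and datetime, plus a RESOURCE_FILE_PATH constant and a Config settings class, without showing their module preamble. A minimal sketch of what they assume follows; the resource-directory layout and the Config shape are assumptions, not the suite's real definitions.

import csv
import random
import uuid
from datetime import datetime
from pathlib import Path

# Assumption: resource files live in a 'resources' directory beside the tests
RESOURCE_FILE_PATH = Path(__file__).parent.joinpath('resources')


class Config:
    # Assumption: bucket names default to empty strings, so each
    # `if Config.<NAME>_BUCKET_NAME:` guard skips the upload unless a real
    # bucket is configured in the environment
    BULK_NON_COMPLIANCE_BUCKET_NAME = ''
    BULK_QID_LINK_BUCKET_NAME = ''
    BULK_REFUSAL_BUCKET_NAME = ''
    # ...and likewise for the other *_BUCKET_NAME and RABBITMQ_* settings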
Example #2
def bulk_questionnaire_link_file(context):
    context.qid_link_bulk_file = RESOURCE_FILE_PATH.joinpath(
        'bulk_processing_files', 'questionnaire_link_bulk_test.csv')

    context.qid_link_case_ids = [
        case['payload']['collectionCase']['id']
        for case in context.case_created_events
    ]

    with open(context.qid_link_bulk_file, 'w') as qid_link_bulk_write:
        writer = csv.DictWriter(qid_link_bulk_write,
                                fieldnames=['case_id', 'qid'])
        writer.writeheader()

        # Pairs each case with an unlinked UAC by position; this assumes
        # context.unlinked_uacs holds at least as many events as there are cases
        for i, case_id in enumerate(context.qid_link_case_ids):
            writer.writerow({
                'case_id': case_id,
                'qid': context.unlinked_uacs[i]['payload']['uac']['questionnaireId']
            })

    # Upload the file to a real bucket if one is configured
    if Config.BULK_QID_LINK_BUCKET_NAME:
        clear_bucket(Config.BULK_QID_LINK_BUCKET_NAME)
        upload_file_to_bucket(
            context.qid_link_bulk_file, f'qid_link_acceptance_tests_'
            f'{datetime.utcnow().strftime("%Y%m%d-%H%M%S")}.csv',
            Config.BULK_QID_LINK_BUCKET_NAME)
Example #3
def _build_bulk_refusal_file_with_gubbins(context, dodgy_ids=False):
    # Build a bulk refusal file with a row for each stored case created event
    context.bulk_refusals_file = RESOURCE_FILE_PATH.joinpath(
        'bulk_processing_files', 'refusal_bulk_test.csv')
    context.bulk_refusals = {}

    for case_created in context.case_created_events:
        case_id = case_created['payload']['collectionCase']['id']

        if dodgy_ids:
            # Deliberately swap in a random UUID that matches no stored case,
            # so the file exercises unknown-case-ID handling
            case_id = str(uuid.uuid4())

        context.bulk_refusals[case_id] = random.choice(
            ('HARD_REFUSAL', 'EXTRAORDINARY_REFUSAL'))
    test_helper.assertGreater(
        len(context.bulk_refusals), 0,
        'Must have at least one refusal for this test to be valid')

    with open(context.bulk_refusals_file, 'w') as bulk_refusal_file_write:
        writer = csv.DictWriter(bulk_refusal_file_write,
                                fieldnames=['case_id', 'refusal_type'])
        writer.writeheader()
        for case_id, refusal_type in context.bulk_refusals.items():
            writer.writerow({'case_id': case_id, 'refusal_type': refusal_type})

    # Upload the file to a real bucket if one is configured
    if Config.BULK_REFUSAL_BUCKET_NAME:
        clear_bucket(Config.BULK_REFUSAL_BUCKET_NAME)
        upload_file_to_bucket(
            context.bulk_refusals_file,
            f'refusals_acceptance_tests_{datetime.utcnow().strftime("%Y%m%d-%H%M%S")}.csv',
            Config.BULK_REFUSAL_BUCKET_NAME)
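The leading underscore suggests this builder is shared by several behave steps. A hypothetical pair of step definitions wiring it up might look like the following; the step texts are illustrative, not the suite's real ones.

from behave import step


@step('a bulk refusal file is supplied')
def supply_bulk_refusal_file(context):
    _build_bulk_refusal_file_with_gubbins(context)


@step('a bulk refusal file with unknown case IDs is supplied')
def supply_dodgy_bulk_refusal_file(context):
    _build_bulk_refusal_file_with_gubbins(context, dodgy_ids=True)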
Example #4
def build_uninvalidated_address_bulk_file(context):
    # Build a bulk un-invalidated addresses file with a row for each stored case created event
    context.bulk_uninvalidated_addresses_file = RESOURCE_FILE_PATH.joinpath(
        'bulk_processing_files', 'uninvalidated_addresses_bulk_test.csv')
    context.bulk_uninvalidated_addresses = []
    for case_created in context.case_created_events:
        context.bulk_uninvalidated_addresses.append({
            'CASE_ID': case_created['payload']['collectionCase']['id'],
        })
    test_helper.assertGreater(
        len(context.bulk_uninvalidated_addresses), 0,
        'Must have at least one update for this test to be valid')
    with open(context.bulk_uninvalidated_addresses_file,
              'w') as bulk_uninvalidated_addresses_file_write:
        writer = csv.DictWriter(
            bulk_uninvalidated_addresses_file_write,
            fieldnames=list(context.bulk_uninvalidated_addresses[0].keys()))
            fieldnames=list(context.bulk_uninvalidated_addresses[0].keys()))
        writer.writeheader()
        for row in context.bulk_uninvalidated_addresses:
            writer.writerow(row)

    # Upload the file to a real bucket if one is configured
    if Config.BULK_UNINVALIDATED_ADDRESS_BUCKET_NAME:
        clear_bucket(Config.BULK_UNINVALIDATED_ADDRESS_BUCKET_NAME)
        upload_file_to_bucket(
            context.bulk_uninvalidated_addresses_file,
            f'uninvalidated_addresses_acceptance_tests_'
            f'{datetime.utcnow().strftime("%Y%m%d-%H%M%S")}.csv',
            Config.BULK_UNINVALIDATED_ADDRESS_BUCKET_NAME)
Example #5
def build_invalid_address_file(context):
    # Build a bulk invalid address file with a row for each stored case created event
    context.bulk_invalid_address_file = RESOURCE_FILE_PATH.joinpath(
        'bulk_processing_files', 'invalid_addresses_bulk_test.csv')
    context.bulk_invalid_addresses = {}
    for case_created in context.case_created_events:
        case_id = case_created['payload']['collectionCase']['id']
        context.bulk_invalid_addresses[case_id] = 'TEST_INVALID_REASON'
    test_helper.assertGreater(
        len(context.bulk_invalid_addresses), 0,
        'Must have at least one invalid address for this test to be valid')
    with open(context.bulk_invalid_address_file,
              'w') as bulk_invalid_file_write:
        writer = csv.DictWriter(bulk_invalid_file_write,
                                fieldnames=['case_id', 'reason'])
        writer.writeheader()
        for case_id, reason in context.bulk_invalid_addresses.items():
            writer.writerow({'case_id': case_id, 'reason': reason})

    # Upload the file to a real bucket if one is configured
    if Config.BULK_INVALID_ADDRESS_BUCKET_NAME:
        clear_bucket(Config.BULK_INVALID_ADDRESS_BUCKET_NAME)
        upload_file_to_bucket(
            context.bulk_invalid_address_file,
            f'invalid_addresses_acceptance_tests_{datetime.utcnow().strftime("%Y%m%d-%H%M%S")}.csv',
            Config.BULK_INVALID_ADDRESS_BUCKET_NAME)
Example #6
def _load_sample(context, sample_file_name):
    sample_file_path = RESOURCE_FILE_PATH.joinpath('sample_files',
                                                   sample_file_name)
    return load_sample_file(sample_file_path,
                            context.collection_exercise_id,
                            context.action_plan_id,
                            store_loaded_sample_units=True,
                            host=Config.RABBITMQ_HOST,
                            port=Config.RABBITMQ_PORT,
                            vhost=Config.RABBITMQ_VHOST,
                            exchange=Config.RABBITMQ_EXCHANGE,
                            user=Config.RABBITMQ_USER,
                            password=Config.RABBITMQ_PASSWORD,
                            queue_name=Config.RABBITMQ_SAMPLE_INBOUND_QUEUE)
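A caller passes only the sample file name; the collection exercise ID, action plan ID and RabbitMQ settings all come from context and Config. A hypothetical behave step wiring it up, assuming load_sample_file returns the loaded sample units when store_loaded_sample_units=True:

from behave import step


@step('sample file "{sample_file_name}" is loaded')
def load_sample_step(context, sample_file_name):
    context.sample_units = _load_sample(context, sample_file_name)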
Example #7
def supply_bulk_new_address_file(context, bulk_new_address_file):
    context.bulk_new_address_file = RESOURCE_FILE_PATH.joinpath(
        'bulk_processing_files', bulk_new_address_file)
    context.bulk_new_addresses = []
    with open(context.bulk_new_address_file, 'r') as bulk_new_address_read:
        reader = csv.DictReader(bulk_new_address_read)
        for row in reader:
            context.bulk_new_addresses.append(row)

    # Upload the file to a real bucket if one is configured
    if Config.BULK_NEW_ADDRESS_BUCKET_NAME:
        clear_bucket(Config.BULK_NEW_ADDRESS_BUCKET_NAME)
        upload_file_to_bucket(
            context.bulk_new_address_file,
            f'new_addresses_acceptance_tests_{datetime.utcnow().strftime("%Y%m%d-%H%M%S")}.csv',
            Config.BULK_NEW_ADDRESS_BUCKET_NAME)
Example #8
def bulk_deactivate_uac_file(context):
    # Build a bulk deactivate UAC file with a QID row for each stored UAC created event
    context.bulk_deactivate_uac_file = RESOURCE_FILE_PATH.joinpath(
        'bulk_processing_files', 'deactivate_uac_bulk_test.csv')
    context.bulk_deactivate_uac = []

    for uac_created in context.uac_created_events:
        context.bulk_deactivate_uac.append(
            uac_created['payload']['uac']['questionnaireId'])

    with open(context.bulk_deactivate_uac_file,
              'w') as bulk_deactivate_uac_write:
        writer = csv.DictWriter(bulk_deactivate_uac_write, fieldnames=['qid'])
        writer.writeheader()
        for qid in context.bulk_deactivate_uac:
            writer.writerow({'qid': qid})

    # Upload the file to a real bucket if one is configured
    if Config.BULK_DEACTIVATE_UAC_BUCKET_NAME:
        clear_bucket(Config.BULK_DEACTIVATE_UAC_BUCKET_NAME)
        upload_file_to_bucket(
            context.bulk_deactivate_uac_file,
            f'deactivate_uac_acceptance_tests_{datetime.utcnow().strftime("%Y%m%d-%H%M%S")}.csv',
            Config.BULK_DEACTIVATE_UAC_BUCKET_NAME)
Example #9
def build_address_updates_file(context):
    # Build a bulk address updates file with a row for each stored case created event
    context.bulk_address_updates_file = RESOURCE_FILE_PATH.joinpath(
        'bulk_processing_files', 'address_updates_bulk_test.csv')
    context.bulk_address_updates = []
    for case_created in context.case_created_events:
        context.bulk_address_updates.append({
            'CASE_ID': case_created['payload']['collectionCase']['id'],
            'UPRN': '123456789',
            'ESTAB_UPRN': '987654321',
            'ESTAB_TYPE': 'ROYAL HOUSEHOLD',
            'ABP_CODE': '4321',
            'ORGANISATION_NAME': 'foo_incorporated',
            'ADDRESS_LINE1': 'foo flat1',
            'ADDRESS_LINE2': 'foo some road',
            'ADDRESS_LINE3': 'foo somewhere',
            'TOWN_NAME': 'foo some town',
            'POSTCODE': 'F00 BAR',
            'LATITUDE': '0.0',
            'LONGITUDE': '127.0',
            'OA': 'foo_1',
            'LSOA': 'foo_2',
            'MSOA': 'foo_3',
            'LAD': 'foo_4',
            'HTC_WILLINGNESS': '5',
            'HTC_DIGITAL': '3',
            'TREATMENT_CODE': 'HH_LP1E',
            'FIELDCOORDINATOR_ID': 'ABC123',
            'FIELDOFFICER_ID': 'XYZ999',
            'CE_EXPECTED_CAPACITY': '10',
            'CE_SECURE': '1',
            'PRINT_BATCH': '99',
        })
    test_helper.assertGreater(
        len(context.bulk_address_updates), 0,
        'Must have at least one update for this test to be valid')
    with open(context.bulk_address_updates_file,
              'w') as bulk_updates_file_write:
        writer = csv.DictWriter(bulk_updates_file_write,
                                fieldnames=list(
                                    context.bulk_address_updates[0].keys()))
        writer.writeheader()
        for row in context.bulk_address_updates:
            writer.writerow(row)

    # Upload the file to a real bucket if one is configured
    if Config.BULK_ADDRESS_UPDATE_BUCKET_NAME:
        clear_bucket(Config.BULK_ADDRESS_UPDATE_BUCKET_NAME)
        upload_file_to_bucket(
            context.bulk_address_updates_file,
            f'address_updates_acceptance_tests_{datetime.utcnow().strftime("%Y%m%d-%H%M%S")}.csv',
            Config.BULK_ADDRESS_UPDATE_BUCKET_NAME)
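Every uploading example above leans on the same two helpers, clear_bucket and upload_file_to_bucket, which are defined elsewhere in the suite. A minimal sketch of compatible implementations, assuming Google Cloud Storage (the bucket-based config suggests it, but the real helpers may differ):

from google.cloud import storage


def clear_bucket(bucket_name):
    # Delete every object currently in the bucket so stale test files
    # can't be picked up by the processor under test
    client = storage.Client()
    for blob in client.list_blobs(bucket_name):
        blob.delete()


def upload_file_to_bucket(file_path, destination_name, bucket_name):
    # Upload the local CSV under the given (timestamped) object name
    client = storage.Client()
    bucket = client.bucket(bucket_name)
    bucket.blob(destination_name).upload_from_filename(str(file_path))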