def generate_input_file(
    file_number,
    input_base_content,
    input_folder,
    input_file,
    record_count,
    file_count,
    encrypted_key,
    plaintext_key,
    master_key,
    initialisation_vector,
):
    """Creates a local input file for historic data and returns the location and file name.

    Keyword arguments:
    file_number -- the number of the current file per topic
    input_base_content -- the content of the input template file
    input_folder -- the location to create input files in
    input_file -- the filename to create
    record_count -- the number of records per file
    file_count -- the number of desired files
    encrypted_key -- the encrypted version of the plaintext key
    plaintext_key -- the plaintext data key for encrypting the data file
    master_key -- the master key used to encrypt the data key
    initialisation_vector -- the initialisation vector to use for the encryption
    """
    global keys
    global key_method

    console_printer.print_info(
        f"Generating input file number {str(file_number)}")

    local_keys = keys
    file_contents = ""
    file_full_path = os.path.join(input_folder, input_file)

    for record_number in range(1, int(record_count) + 1):
        current_key_index = get_current_key_index(key_method, file_number,
                                                  record_number, record_count)
        key_for_record = local_keys[current_key_index][0]

        (timestamp,
         timestamp_string) = date_helper.add_milliseconds_to_timestamp(
             _base_datetime_timestamp, file_number + record_number, True)

        db_object = generate_uncrypted_record(timestamp_string,
                                              input_base_content,
                                              key_for_record, plaintext_key)

        file_contents += json.dumps(json.loads(db_object)) + "\n"

    encrypted_contents = generate_encrypted_record(initialisation_vector,
                                                   file_contents,
                                                   plaintext_key, True)

    with open(file_full_path, "wb") as data:
        data.write(encrypted_contents)
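
# A minimal usage sketch for generate_input_file, assuming the module-level
# "keys" and "key_method" globals have already been populated; every value
# below (paths, counts, key material) is an illustrative placeholder.
with open("historic_data_template.json") as template:
    generate_input_file(
        file_number=1,
        input_base_content=template.read(),
        input_folder="/tmp/input",
        input_file="historic-data-000001.txt.enc",
        record_count=100,
        file_count=1,
        encrypted_key="<encrypted data key>",
        plaintext_key="<plaintext data key>",
        master_key="<master key id>",
        initialisation_vector="<base64 iv>",
    )
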
def generate_updated_claimant_file_for_existing_claimant(
    citizen_id,
    person_id,
    fixture_files_root,
    fixture_data_folder,
    input_template_name,
    s3_input_bucket,
    local_files_temp_folder,
    s3_output_prefix,
    seconds_timeout,
    increment,
):
    """Returns an updated claimant file according to incoming ids for existing claimant.

    Keyword arguments:
    citizen_id -- the id of the claimant
    person_id -- the id of the person
    fixture_files_root -- the local path to the feature file to send
    fixture_data_folder -- the folder from the root of the fixture data
    input_template_name -- the input template name for the files
    s3_input_bucket -- the bucket for the remote fixture files
    local_files_temp_folder -- the root folder for the temporary files to sit in
    s3_output_prefix -- the output path for the edited file in s3
    seconds_timeout -- the timeout in seconds for the test
    increment -- a unique number for this claimant unique within this specific test context
    """
    global _base_datetime_timestamp

    console_printer.print_info(
        f"Generating claimant data for citizen id of '{citizen_id}'")

    (timestamp, timestamp_string) = date_helper.add_milliseconds_to_timestamp(
        _base_datetime_timestamp, increment, True)

    nino = generate_national_insurance_number(citizen_id)
    claimant_db_object = _generate_claimant_db_object(citizen_id, person_id,
                                                      nino, increment)
    claimant_file_data = [(citizen_id, timestamp_string, claimant_db_object)]

    kafka_input_file_data = [("citizenId", claimant_file_data)]

    return_data = generate_return_data(
        kafka_input_file_data,
        input_template_name,
        s3_input_bucket,
        fixture_data_folder,
        local_files_temp_folder,
        fixture_files_root,
        s3_output_prefix,
        seconds_timeout,
    )

    return return_data
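
# A sketch of calling generate_updated_claimant_file_for_existing_claimant from
# a test step; the ids are fresh uuids and the bucket, folders and template name
# are placeholders (uuid is assumed to be imported at module level, as it is
# used by the other functions here).
updated_claimant_data = generate_updated_claimant_file_for_existing_claimant(
    citizen_id=uuid.uuid4(),
    person_id=uuid.uuid4(),
    fixture_files_root="features/fixture-data",
    fixture_data_folder="kafka_input_data",
    input_template_name="claimant_input_template.json",
    s3_input_bucket="example-fixture-bucket",
    local_files_temp_folder="tmp-files",
    s3_output_prefix="e2e/kafka-input",
    seconds_timeout=300,
    increment=1,
)
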
def generate_kafka_files(
    test_run_name,
    s3_input_bucket,
    input_template_name,
    output_template_name,
    new_uuid,
    local_files_temp_folder,
    fixture_files_root,
    s3_output_prefix,
    record_count,
    topic_name,
    snapshots_output_folder,
    seconds_timeout,
    fixture_data_folder,
    dlq_template_name=None,
    snapshot_record_template_name=None,
    with_timestamp=True,
    custom_base_timestamp=None,
):
    """Returns array of generated kafka data as tuples of (input s3 location, output local file).

    Keyword arguments:
    test_run_name -- unique name for this test run
    s3_input_bucket -- the bucket for the remote fixture files
    input_template_name -- the input template file
    output_template_name -- the output template file (None if no output needed)
    dlq_template_name -- the dlq template file (None if no dlq file needed)
    snapshot_record_template_name -- the snapshot record template file (None if no snapshot record file needed)
    new_uuid -- the uuid to use for the id (None to generate one for each file)
    local_files_temp_folder -- the root folder for the temporary files to sit in
    fixture_files_root -- the local path to the feature file to send
    s3_output_prefix -- the output path for the edited file in s3
    with_timestamp -- True for adding a timestamp to the files
    custom_base_timestamp -- if adding a timestamp, override here or generic base will be used
    record_count -- the number of records to send
    topic_name -- the topic name which records are generated for
    snapshots_output_folder -- the snapshots output folder (None if no snapshot file needed)
    seconds_timeout -- the timeout in seconds for the test
    fixture_data_folder -- the folder from the root of the fixture data
    """
    base_timestamp = (_base_datetime_timestamp if custom_base_timestamp is None
                      else custom_base_timestamp)
    file_string = "files" if int(record_count) > 1 else "file"

    console_printer.print_info(
        f"Generating '{record_count}' Kafka '{file_string}' for topic of '{topic_name}' "
        +
        f"using input template of '{input_template_name}', output template of '{output_template_name}', "
        + f"id of '{new_uuid}' and base timestamp of '{base_timestamp}'")

    generated_files = []

    for record_number in range(1, int(record_count) + 1):
        key = new_uuid if new_uuid is not None else uuid.uuid4()
        (timestamp,
         timestamp_string) = ((None, None) if not with_timestamp else
                              date_helper.add_milliseconds_to_timestamp(
                                  base_timestamp, record_number, True))

        generated_files.append(
            _generate_kafka_file(
                test_run_name,
                s3_input_bucket,
                input_template_name,
                output_template_name,
                dlq_template_name,
                snapshot_record_template_name,
                key,
                local_files_temp_folder,
                fixture_files_root,
                s3_output_prefix,
                topic_name,
                snapshots_output_folder,
                seconds_timeout,
                fixture_data_folder,
                timestamp_string,
            ))

    return generated_files
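
# A sketch of generating a small batch of Kafka files with generate_kafka_files;
# the bucket, template names, topic and folders are placeholders, and
# new_uuid=None lets the function mint a fresh uuid per file. Per the docstring,
# each returned entry is an (input s3 location, output local file) tuple.
kafka_files = generate_kafka_files(
    test_run_name="test-run-001",
    s3_input_bucket="example-fixture-bucket",
    input_template_name="kafka_input_template.json",
    output_template_name="kafka_output_template.json",
    new_uuid=None,  # None -> a fresh uuid per generated file
    local_files_temp_folder="tmp-files",
    fixture_files_root="features/fixture-data",
    s3_output_prefix="e2e/kafka-input",
    record_count=3,
    topic_name="db.example.topic",
    snapshots_output_folder=None,  # no snapshot files needed
    seconds_timeout=300,
    fixture_data_folder="kafka_input_data",
)
for generated_file in kafka_files:
    console_printer.print_info(f"Generated: {generated_file}")
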
def generate_updated_contract_and_statement_files_for_existing_claimant(
    citizen_id,
    contract_id,
    fixture_files_root,
    fixture_data_folder,
    input_data_file_name,
    input_template_name,
    s3_input_bucket,
    local_files_temp_folder,
    s3_output_prefix,
    seconds_timeout,
):
    """Returns an updated contract and statement files according to incoming data for existing claimant.

    Keyword arguments:
    citizen_id -- the id of the claimant
    contract_id -- the id of the contract
    fixture_files_root -- the local path to the feature file to send
    fixture_data_folder -- the folder from the root of the fixture data
    input_data_file_name -- the input file name containing the data
    input_template_name -- the input template name for the files
    s3_input_bucket -- the bucket for the remote fixture files
    local_files_temp_folder -- the root folder for the temporary files to sit in
    s3_output_prefix -- the output path for the edited file in s3
    seconds_timeout -- the timeout in seconds for the test
    """
    global _base_datetime_timestamp

    data_file_name = os.path.join(fixture_files_root, fixture_data_folder,
                                  input_data_file_name)
    with open(data_file_name) as data_file:
        input_data = yaml.safe_load(data_file)

    claimant_file_data = []
    contract_file_data = []
    statement_file_data = []
    increment = 0

    for item in input_data:
        (timestamp,
         timestamp_string) = date_helper.add_milliseconds_to_timestamp(
             _base_datetime_timestamp, increment + 1, True)
        if "count" in item:
            count = 0
            while count < item["count"]:
                unique_suffix = f"{increment}{count}"
                (
                    contract_db_object,
                    statement_db_objects_array,
                ) = _generate_contract_and_statement_db_objects(
                    contract_id, item, [citizen_id], unique_suffix,
                    timestamp_string)
                count += 1

            contract_file_data.append(
                (contract_id, timestamp_string, contract_db_object))
            statement_file_data.extend([(statement_id, timestamp_string,
                                         statement_db_object) for (
                                             statement_id,
                                             statement_db_object,
                                         ) in statement_db_objects_array])
        else:
            (
                contract_db_object,
                statement_db_objects_array,
            ) = _generate_contract_and_statement_db_objects(
                contract_id, item, [citizen_id], increment, timestamp_string)

            contract_file_data.append(
                (contract_id, timestamp_string, contract_db_object))
            statement_file_data.extend([(statement_id, timestamp_string,
                                         statement_db_object) for (
                                             statement_id,
                                             statement_db_object,
                                         ) in statement_db_objects_array])
        increment += 1

    kafka_input_file_data = [
        ("contractId", contract_file_data),
        ("statementId", statement_file_data),
    ]

    return_data = generate_return_data(
        kafka_input_file_data,
        input_template_name,
        s3_input_bucket,
        fixture_data_folder,
        local_files_temp_folder,
        fixture_files_root,
        s3_output_prefix,
        seconds_timeout,
    )

    return return_data
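
# A sketch of driving generate_updated_contract_and_statement_files_for_existing_claimant
# from a yaml fixture; the data file name, paths and ids are placeholders. The yaml
# file is expected to contain a list of items, optionally with a "count" key per
# item (see the loop above).
contract_and_statement_data = (
    generate_updated_contract_and_statement_files_for_existing_claimant(
        citizen_id=uuid.uuid4(),
        contract_id=uuid.uuid4(),
        fixture_files_root="features/fixture-data",
        fixture_data_folder="kafka_input_data",
        input_data_file_name="contract_and_statement_data.yml",
        input_template_name="kafka_input_template.json",
        s3_input_bucket="example-fixture-bucket",
        local_files_temp_folder="tmp-files",
        s3_output_prefix="e2e/kafka-input",
        seconds_timeout=300,
    )
)
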
def generate_claimant_api_kafka_files(
    s3_input_bucket,
    input_data_file_name,
    input_template_name,
    new_uuid,
    local_files_temp_folder,
    fixture_files_root,
    s3_output_prefix,
    seconds_timeout,
    fixture_data_folder,
):
    """Returns array of generated kafka data as tuples of (input s3 location, output local file).

    Keyword arguments:
    s3_input_bucket -- the bucket for the remote fixture files
    input_data_file_name -- the input file name containing the data
    input_template_name -- the name of the input template file
    new_uuid -- the uuid to use for the id (None to generate one for each file)
    local_files_temp_folder -- the root folder for the temporary files to sit in
    fixture_files_root -- the local path to the feature file to send
    s3_output_prefix -- the output path for the edited file in s3
    seconds_timeout -- the timeout in seconds for the test
    fixture_data_folder -- the folder from the root of the fixture data
    """
    global _base_datetime_timestamp

    console_printer.print_info(
        f"Generating UCFS claimant API Kafka files " +
        f"using input data file of '{input_data_file_name}', input template of '{input_template_name}', "
        +
        f"id of '{new_uuid}' and base timestamp of '{_base_datetime_timestamp}'"
    )

    data_file_name = os.path.join(fixture_files_root, fixture_data_folder,
                                  input_data_file_name)
    with open(data_file_name) as data_file:
        input_data = yaml.safe_load(data_file)

    claimant_file_data = []
    contract_file_data = []
    statement_file_data = []
    all_ninos = []
    all_ids = []

    increment = 0
    for item in input_data:
        console_printer.print_info(
            f"Generating claimant data for item of '{item}'")
        contract_id = new_uuid if new_uuid is not None else uuid.uuid4()
        citizen_id = new_uuid if new_uuid is not None else uuid.uuid4()
        person_id = new_uuid if new_uuid is not None else uuid.uuid4()

        (timestamp,
         timestamp_string) = date_helper.add_milliseconds_to_timestamp(
             _base_datetime_timestamp, increment, True)

        if "count" in item:
            count = 0
            while count < item["count"]:
                nino = generate_national_insurance_number(citizen_id)
                unique_suffix = f"{increment}{count}"
                claimant_db_object = _generate_claimant_db_object(
                    citizen_id, person_id, nino, unique_suffix)
                all_ninos.append(nino)
                (
                    contract_db_object,
                    statement_db_objects_array,
                ) = _generate_contract_and_statement_db_objects(
                    contract_id, item, [citizen_id], unique_suffix,
                    timestamp_string)

                claimant_file_data.append(
                    (citizen_id, timestamp_string, claimant_db_object))
                all_ids.append(citizen_id)
                contract_file_data.append(
                    (contract_id, timestamp_string, contract_db_object))
                all_ids.append(contract_id)
                statement_file_data.extend([(statement_id, timestamp_string,
                                             statement_db_object) for (
                                                 statement_id,
                                                 statement_db_object,
                                             ) in statement_db_objects_array])
                all_ids.extend([
                    statement_id for (
                        statement_id,
                        statement_db_object,
                    ) in statement_db_objects_array
                ])

                count += 1
        else:
            nino = generate_national_insurance_number(citizen_id)
            claimant_db_objects_array = [(
                citizen_id,
                _generate_claimant_db_object(citizen_id, person_id, nino,
                                             increment),
            )]
            all_ninos.append(nino)
            if "partner_nino" in item:
                citizen_id = new_uuid if new_uuid is not None else uuid.uuid4()
                person_id = new_uuid if new_uuid is not None else uuid.uuid4()
                partner_nino = generate_national_insurance_number(citizen_id)
                increment += 1
                claimant_db_objects_array.append((
                    citizen_id,
                    _generate_claimant_db_object(citizen_id, person_id,
                                                 partner_nino, increment),
                ))
                all_ninos.append(partner_nino)

            citizen_ids_array = [
                citizen_id
                for (citizen_id,
                     claimant_db_object) in claimant_db_objects_array
            ]
            (
                contract_db_object,
                statement_db_objects_array,
            ) = _generate_contract_and_statement_db_objects(
                contract_id, item, citizen_ids_array, increment,
                timestamp_string)

            claimant_file_data.extend([
                (citizen_id, timestamp_string, claimant_db_object)
                for (citizen_id,
                     claimant_db_object) in claimant_db_objects_array
            ])
            all_ids.extend([
                citizen_id
                for (citizen_id,
                     claimant_db_object) in claimant_db_objects_array
            ])
            contract_file_data.append(
                (contract_id, timestamp_string, contract_db_object))
            all_ids.append(contract_id)
            statement_file_data.extend([(statement_id, timestamp_string,
                                         statement_db_object) for (
                                             statement_id,
                                             statement_db_object,
                                         ) in statement_db_objects_array])
            all_ids.extend([
                statement_id for (
                    statement_id,
                    statement_db_object,
                ) in statement_db_objects_array
            ])

        increment += 1

    kafka_input_file_data = [
        ("citizenId", claimant_file_data),
        ("contractId", contract_file_data),
        ("statementId", statement_file_data),
    ]

    return_data = generate_return_data(
        kafka_input_file_data,
        input_template_name,
        s3_input_bucket,
        fixture_data_folder,
        local_files_temp_folder,
        fixture_files_root,
        s3_output_prefix,
        seconds_timeout,
    )

    return (return_data, all_ninos, all_ids)
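
# A sketch of calling generate_claimant_api_kafka_files; besides the generated
# file data, the returned ninos and ids can be used for later assertions. All
# names below are placeholders, and the yaml data file drives how many claimants,
# contracts and statements are generated.
claimant_api_data, generated_ninos, generated_ids = generate_claimant_api_kafka_files(
    s3_input_bucket="example-fixture-bucket",
    input_data_file_name="claimant_api_data.yml",
    input_template_name="kafka_input_template.json",
    new_uuid=None,  # None -> fresh uuids for each claimant, contract and statement
    local_files_temp_folder="tmp-files",
    fixture_files_root="features/fixture-data",
    s3_output_prefix="e2e/kafka-input",
    seconds_timeout=300,
    fixture_data_folder="claimant_api_data",
)
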
def generate_corporate_data_files(
    test_run_name,
    s3_input_bucket,
    input_template_name,
    output_template_name,
    new_uuid,
    local_files_temp_folder,
    fixture_files_root,
    s3_output_prefix,
    record_count,
    topic_name,
    seconds_timeout,
    timestamp_override,
):
    """Returns array of generated corporate data as tuples of (input s3 location, input local file, output local file).

    Keyword arguments:
    test_run_name -- unique name for this test run
    s3_input_bucket -- the bucket for the remote fixture files
    input_template_name -- the input template file
    output_template_name -- the output template file
    new_uuid -- the uuid to use for the id (None to generate one for each file)
    local_files_temp_folder -- the root folder for the temporary files to sit in
    fixture_files_root -- the local path to the feature file to send
    s3_output_prefix -- the output path for the edited file in s3
    record_count -- the number of records to send
    topic_name -- the topic name which records are generated for
    seconds_timeout -- the timeout in seconds for the test
    timestamp_override -- the base timestamp or None to use default of "2018-11-01T03:02:01.001"
    """
    global _base_datetime_timestamp
    timestamp_to_use = (timestamp_override if timestamp_override is not None
                        else _base_datetime_timestamp)

    file_string = "files" if int(record_count) > 1 else "file"

    console_printer.print_info(
        f"Generating '{record_count}' corporate data '{file_string}' for topic of '{topic_name}' "
        +
        f"using input template of '{input_template_name}', output template of '{output_template_name}', "
        + f"id of '{new_uuid}' and base timestamp of '{timestamp_to_use}'")

    generated_files = []

    for record_number in range(1, int(record_count) + 1):
        key = new_uuid if new_uuid is not None else uuid.uuid4()
        (timestamp,
         timestamp_string) = date_helper.add_milliseconds_to_timestamp(
             timestamp_to_use, record_number, True)

        generated_files.append(
            _generate_corporate_data_file(
                test_run_name,
                s3_input_bucket,
                input_template_name,
                output_template_name,
                key,
                local_files_temp_folder,
                fixture_files_root,
                s3_output_prefix,
                topic_name,
                seconds_timeout,
                timestamp,
                timestamp_string,
            ))

    return generated_files
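
# A sketch of generating corporate data files with generate_corporate_data_files;
# the bucket, template names, topic and folders are placeholders, and passing
# timestamp_override=None falls back to the module default base timestamp.
corporate_files = generate_corporate_data_files(
    test_run_name="test-run-001",
    s3_input_bucket="example-fixture-bucket",
    input_template_name="corporate_input_template.json",
    output_template_name="corporate_output_template.json",
    new_uuid=None,
    local_files_temp_folder="tmp-files",
    fixture_files_root="features/fixture-data",
    s3_output_prefix="e2e/corporate-data-input",
    record_count=5,
    topic_name="db.example.topic",
    seconds_timeout=300,
    timestamp_override=None,  # use the module default base timestamp
)
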