Example #1
def test_filewriter_clears_stop_time_between_jobs(docker_compose_stop_command):
    producer = create_producer()
    start_time = unix_time_milliseconds(datetime.utcnow()) - 1000
    stop_time = start_time + 1000
    # Ensure TEST_sampleEnv topic exists
    publish_f142_message(producer, "TEST_sampleEnv",
                         int(unix_time_milliseconds(datetime.utcnow())))
    check(producer.flush(5) == 0, "Unable to flush kafka messages.")

    topic = "TEST_writerCommand"
    publish_run_start_message(
        producer,
        "commands/nexus_structure.json",
        "output_file_with_stop_time.nxs",
        topic=topic,
        job_id="should_start_then_stop",
        start_time=int(start_time),
        stop_time=int(stop_time),
    )
    check(producer.flush(5) == 0, "Unable to flush kafka messages.")
    sleep(30)
    job_id = publish_run_start_message(
        producer,
        "commands/nexus_structure.json",
        "output_file_no_stop_time.nxs",
        topic=topic,
        job_id="should_start_but_not_stop",
    )
    check(producer.flush(5) == 0, "Unable to flush kafka messages.")
    sleep(30)
    msgs = consume_everything("TEST_writerStatus")

    stopped = False
    started = False
    # Check the most recent message on the status topic
    message = msgs[-1]
    status_info = deserialise_x5f2(message.value())
    message = json.loads(status_info.status_json)
    if message["start_time"] > 0 and message["job_id"] == job_id:
        started = True
    if message["stop_time"] == 0 and message["job_id"] == "":
        stopped = True

    assert started
    assert not stopped

    # Clean up by stopping writing
    publish_run_stop_message(producer, job_id=job_id)
    check(producer.flush(5) == 0, "Unable to flush kafka messages.")
    sleep(3)
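
The helper consume_everything is not shown in these excerpts. A minimal sketch of what it might look like, assuming confluent-kafka and a broker on localhost:9092 (both are assumptions, not confirmed by the source):

from confluent_kafka import Consumer


def consume_everything(topic):
    # Drain every message currently on the topic so the caller can
    # inspect the most recent status report
    consumer = Consumer({
        "bootstrap.servers": "localhost:9092",  # assumed broker address
        "group.id": "test_consumer",
        "auto.offset.reset": "earliest",
    })
    consumer.subscribe([topic])
    messages = []
    while True:
        msg = consumer.poll(timeout=2.0)
        if msg is None:
            break  # nothing more arrived within the timeout
        if msg.error() is None:
            messages.append(msg)
    consumer.close()
    return messages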
Example #2
def build_and_run(options, request):
    project = project_from_options(os.path.dirname(__file__), options)
    cmd = TopLevelCommand(project)
    start_time = str(int(unix_time_milliseconds(datetime.utcnow())))
    run_containers(cmd, options)

    def fin():
        # Stop the containers then remove them and their volumes (--volumes option)
        print("containers stopping", flush=True)
        try:
            # Used when there are multiple filewriter instances,
            # as the service is not called "filewriter"
            multiple_log_options = dict(options)
            multiple_log_options["SERVICE"] = ["filewriter1", "filewriter2"]
            cmd.logs(multiple_log_options)
        except Exception:
            log_options = dict(options)
            log_options["SERVICE"] = ["filewriter"]
            cmd.logs(log_options)
        options["--timeout"] = 30
        cmd.down(options)
        print("containers stopped", flush=True)

    # Using a finalizer rather than yield in the fixture means
    # that the containers will be brought down even if tests fail
    request.addfinalizer(fin)
    # Return the start time so the filewriter knows from when to start
    # consuming, so that it picks up all the data which was published
    return start_time
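
build_and_run is intended to be called from a pytest fixture, so that request.addfinalizer tears the containers down even when a test fails. A sketch of how such a fixture might be wired up; the option keys are illustrative, as docker-compose's TopLevelCommand expects the full set of parsed CLI flags:

import pytest


@pytest.fixture(scope="module")
def docker_compose_stop_command_does_not_persist(request):
    # Hypothetical docopt-style options; a real fixture would include
    # every flag docker-compose's command parser expects
    options = {
        "--file": ["docker-compose-stop-command.yml"],
        "--project-name": None,
        "SERVICE": "",
    }
    return build_and_run(options, request)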
def test_filewriter_clears_stop_time(
        docker_compose_stop_command_does_not_persist):
    producer = create_producer()
    sleep(10)
    topic = "TEST_writerCommand"
    send_writer_command("commands/commandwithstoptime.json",
                        producer,
                        topic=topic,
                        stop_time=str(
                            int(unix_time_milliseconds(datetime.utcnow()))))

    sleep(10)
    send_writer_command("commands/commandwithnostoptime.json",
                        producer,
                        topic=topic)

    sleep(10)
    msgs = poll_everything("TEST_writerStatus")

    stopped = False
    started = False
    for message in msgs:
        message = str(message.value(), encoding="utf-8")
        if ('"code":"START"' in message and
                '"job_id":"a8e31c99-8df9-4123-8060-2e009d84a0df"' in message):
            started = True
        if ('"code":"CLOSE"' in message and
                '"job_id":"a8e31c99-8df9-4123-8060-2e009d84a0df"' in message):
            stopped = True

    assert started
    assert not stopped
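
send_writer_command is also not shown; it appears to publish a JSON command file directly to the command topic. A plausible sketch, assuming start_time and stop_time are top-level keys in the command document:

import json


def send_writer_command(filepath, producer, topic="TEST_writerCommand",
                        start_time=None, stop_time=None):
    # Load the JSON command, patch in the optional timestamps, and
    # publish it on the command topic
    with open(filepath) as f:
        command = json.load(f)
    if start_time is not None:
        command["start_time"] = int(start_time)
    if stop_time is not None:
        command["stop_time"] = int(stop_time)
    producer.produce(topic, json.dumps(command).encode("utf-8"))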
Example #4
def test_two_different_writer_modules_with_same_flatbuffer_id(docker_compose):
    producer = create_producer()
    start_time = unix_time_milliseconds(datetime.utcnow()) - 10000
    for i in range(10):
        publish_f142_message(
            producer,
            "TEST_sampleEnv",
            int(start_time + i * 1000),
            source_name="test_source_1",
        )
        publish_f142_message(
            producer,
            "TEST_sampleEnv",
            int(start_time + i * 1000),
            source_name="test_source_2",
        )
    check(producer.flush(5) == 0, "Unable to flush kafka messages.")
    # Start file writing
    publish_run_start_message(
        producer,
        "commands/nexus_structure_multiple_modules.json",
        "output_file_multiple_modules.nxs",
        start_time=int(start_time),
        stop_time=int(start_time + 5 * 1000),
    )
    # Give it some time to accumulate data
    sleep(10)

    filepath = "output-files/output_file_multiple_modules.nxs"
    with OpenNexusFileWhenAvailable(filepath) as file:
        assert (
            len(file["entry/sample/dataset1/time"][:]) > 0
            and len(file["entry/sample/dataset1/value"][:]) > 0
        ), "f142 module should have written this dataset with both value and time"

        assert (
            "cue_timestamp_zero" not in file["entry/sample/dataset2"]
        ), "f142_test module should have written this dataset; it writes cue_index but no cue_timestamp_zero"
        assert (len(file["entry/sample/dataset2/cue_index"][:]) >
                0), "Expected index values, found none."
        for i in range(len(file["entry/sample/dataset2/cue_index"][:])):
            assert (file["entry/sample/dataset2/cue_index"][i] == i
                    ), "Expected consecutive integers to be written by f142_test"
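
OpenNexusFileWhenAvailable has to cope with the file writer creating the NeXus file asynchronously. A minimal sketch using h5py, retrying until the file can be opened (the timeout values are assumptions):

import time

import h5py


class OpenNexusFileWhenAvailable:
    # Retry opening the file until the writer has created it, or give up
    def __init__(self, filepath, timeout=30, interval=1.0):
        start = time.monotonic()
        while True:
            try:
                self.file = h5py.File(filepath, "r")
                return
            except OSError:
                if time.monotonic() - start > timeout:
                    raise
                time.sleep(interval)

    def __enter__(self):
        return self.file

    def __exit__(self, exc_type, exc_value, traceback):
        self.file.close()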
Example #5
def build_and_run(options, request):
    project = project_from_options(os.path.dirname(__file__), options)
    cmd = TopLevelCommand(project)
    run_containers(cmd, options)

    start_time = str(int(unix_time_milliseconds(datetime.utcnow())))

    def fin():
        # Stop the containers then remove them and their volumes (--volumes option)
        print("containers stopping", flush=True)
        log_options = dict(options)
        cmd.logs(log_options)
        options["--timeout"] = 30
        cmd.down(options)
        print("containers stopped", flush=True)

    # Using a finalizer rather than yield in the fixture means
    # that the containers will be brought down even if tests fail
    request.addfinalizer(fin)

    return start_time
def test_ignores_commands_with_incorrect_job_id(docker_compose):
    producer = create_producer()
    sleep(10)

    # Ensure TEST_sampleEnv topic exists
    publish_f142_message(
        producer, "TEST_sampleEnv", int(unix_time_milliseconds(datetime.utcnow()))
    )

    sleep(10)

    # Start file writing
    job_id = publish_run_start_message(
        producer,
        "commands/nexus_structure.json",
        "output_file_jobid.nxs",
        start_time=int(docker_compose),
    )

    sleep(10)

    # Request stop but with slightly wrong job_id
    publish_run_stop_message(producer, job_id[:-1])

    msgs = consume_everything("TEST_writerStatus")

    # Check that the final message read from the status topic indicates
    # that the writer is still running
    message = msgs[-1]
    status_info = deserialise_x5f2(message.value())
    message = json.loads(status_info.status_json)
    running = message["file_being_written"] != ""

    assert running
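
publish_run_start_message and publish_run_stop_message presumably wrap the pl72 and 6s4t flatbuffer schemas from the ess streaming-data-types package, which also supplies the deserialise_x5f2 used above. A sketch under that assumption; the exact keyword arguments of the serialise functions may differ between versions:

import uuid

from streaming_data_types.run_start_pl72 import serialise_pl72
from streaming_data_types.run_stop_6s4t import serialise_6s4t


def publish_run_start_message(producer, nexus_structure_filepath, filename,
                              topic="TEST_writerCommand", job_id=None,
                              start_time=None, stop_time=None):
    # Wrap the NeXus structure in a pl72 run-start message; return the
    # job id so a test can later stop (or fail to stop) the run
    with open(nexus_structure_filepath) as f:
        nexus_structure = f.read()
    if job_id is None:
        job_id = str(uuid.uuid4())
    producer.produce(topic, serialise_pl72(
        job_id=job_id, filename=filename, start_time=start_time,
        stop_time=stop_time, nexus_structure=nexus_structure))
    return job_id


def publish_run_stop_message(producer, job_id, topic="TEST_writerCommand"):
    # The 6s4t run-stop message is matched to a run by its job id
    producer.produce(topic, serialise_6s4t(job_id=job_id))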
def test_filewriter_clears_stop_time_between_jobs(docker_compose_stop_command):
    producer = create_producer()
    sleep(10)
    topic = "TEST_writerCommand"
    send_writer_command(
        os.path.join("filewriter_tests", "commands",
                     "commandwithstoptime.json"),
        producer,
        topic=topic,
        stop_time=str(int(unix_time_milliseconds(datetime.utcnow()))),
    )

    sleep(10)
    send_writer_command(
        os.path.join("filewriter_tests", "commands",
                     "commandwithnostoptime.json"),
        producer,
        topic=topic,
    )

    sleep(10)
    msgs = consume_everything("TEST_writerStatus")

    stopped = False
    started = False
    for message in msgs:
        message = str(message.value(), encoding="utf-8")
        if ('"code":"START"' in message and
                '"job_id":"a8e31c99-8df9-4123-8060-2e009d84a0df"' in message):
            started = True
        if ('"code":"CLOSE"' in message and
                '"job_id":"a8e31c99-8df9-4123-8060-2e009d84a0df"' in message):
            stopped = True

    assert started
    assert not stopped
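
The small utilities used throughout, unix_time_milliseconds and check, are straightforward; a sketch of plausible implementations:

from datetime import datetime


def unix_time_milliseconds(dt):
    # Milliseconds since the Unix epoch for a naive UTC datetime,
    # matching the datetime.utcnow() calls in the tests
    return (dt - datetime(1970, 1, 1)).total_seconds() * 1000


def check(condition, error_message):
    # Fail fast with a readable message, e.g. when Kafka messages
    # could not be flushed within the timeout
    if not condition:
        raise RuntimeError(error_message)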
Example #8
def build_and_run(options, request, local_path=None, wait_for_debugger=False):
    if wait_for_debugger and local_path is None:
        warnings.warn(
            "Option specified to wait for debugger to attach, but this "
            "can only be used if a local build path is provided")

    project = project_from_options(os.path.dirname(__file__), options)
    cmd = TopLevelCommand(project)
    start_time = str(int(unix_time_milliseconds(datetime.utcnow())))
    run_containers(cmd, options)

    if local_path is not None:
        # Launch local build of file writer
        full_path_of_file_writer_exe = os.path.join(local_path, "bin",
                                                    "kafka-to-nexus")
        # Handle is kept open for the lifetime of the file writer process
        log_file = open("logs/file-writer-logs.txt", "w")
        proc = Popen(
            [
                full_path_of_file_writer_exe,
                "-c",
                "./config-files/local_file_writer_config.ini",
            ],
            stdout=log_file,
        )
        if wait_for_debugger:
            proc.send_signal(
                signal.SIGSTOP
            )  # Pause the file writer until we've had a chance to attach a debugger
            input(
                f"\n"
                f"Attach a debugger to process id {proc.pid} now if you wish, then press enter to continue: "
            )
            print(
                "You'll need to tell the debugger to continue after it has attached, "
                'for example type "continue" if using gdb.')
            proc.send_signal(signal.SIGCONT)

    def fin():
        # Stop the containers then remove them and their volumes (--volumes option)
        print("containers stopping", flush=True)
        if local_path is None:
            try:
                # Used when there are multiple filewriter instances,
                # as the service is not called "filewriter"
                multiple_log_options = dict(options)
                multiple_log_options["SERVICE"] = [
                    "filewriter1", "filewriter2"
                ]
                cmd.logs(multiple_log_options)
            except Exception:
                log_options = dict(options)
                log_options["SERVICE"] = ["filewriter"]
                cmd.logs(log_options)
        else:
            proc.kill()
        options["--timeout"] = 30
        cmd.down(options)
        print("containers stopped", flush=True)

    # Using a finalizer rather than yield in the fixture means
    # that the containers will be brought down even if tests fail
    request.addfinalizer(fin)
    # Return the start time so the filewriter knows from when to start
    # consuming, so that it picks up all the data which was published
    return start_time
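
Hypothetical call sites for the debugging options (the build path is purely illustrative):

# Containers only, as used by the ordinary fixtures
start_time = build_and_run(options, request)

# Run a locally built file writer, paused until a debugger is attached;
# the path is a made-up example, not a real location
start_time = build_and_run(options, request,
                           local_path="/path/to/kafka-to-nexus-build",
                           wait_for_debugger=True)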