Code Example #1
    def test_multi_task_multi_model_with_control_with_no_setup(self):
        TEST_INPUT = "./tests/sample_files/test_input/multi_task_multi_model_with_control_no_setup.json"
        read_input_task = ReadInputTask()

        output = read_input_task.run(TEST_INPUT)

        # Even without an explicit probing setup in the input, the parsed output
        # should contain a train split of 0.60.
        self.assertEqual(output["probing_setup"]["train_size"], 0.60)
Code Example #2
    def test_multi_task_multi_model_with_control_with_setup(self):
        TEST_INPUT = (
            "./tests/sample_files/test_input/multi_task_multi_model_with_control.json"
        )
        read_input_task = ReadInputTask()

        output = read_input_task.run(TEST_INPUT)

        # The first model of the first task should expose 10 model labels.
        self.assertEqual(len(output["tasks"][0]["models"][0]["model_labels"]), 10)
Code Example #3
    def test_multi_task_wrong_intra_metric(self):
        TEST_INPUT = "./tests/sample_files/test_input/wrong_intra.json"
        read_input_task = ReadInputTask()

        # An invalid intra metric in the configuration should abort with SystemExit.
        self.assertRaises(SystemExit, read_input_task.run, TEST_INPUT)
Code Example #4
    def test_train_probing(self):
        TEST_INPUT = (
            "./tests/sample_files/test_input/multi_task_multi_model_with_control.json"
        )
        read_input_task = ReadInputTask()

        output = read_input_task.run(TEST_INPUT)

        # Prepare the probing datasets from the parsed tasks and probing setup.
        prepare_data_probing_task = PrepareDataForProbingTask()

        dataset = prepare_data_probing_task.run(
            output["tasks"], output["probing_setup"]
        )

        # Train the probes on the prepared dataset.
        train_probing_task = TrainProbingTask()

        probing_output = train_probing_task.run(dataset, output["probing_setup"])
Code Example #5
    def test_wrong_json_format(self):
        TEST_INPUT = "./tests/sample_files/test_input/problematic_json_file.json"
        read_input_task = ReadInputTask()
        # Malformed JSON in the input file should abort with SystemExit.
        self.assertRaises(SystemExit, read_input_task.run, TEST_INPUT)
Code Example #6
    def test_file_does_not_exist(self):
        TEST_INPUT = "./this/does/not/exist"
        read_input_task = ReadInputTask()
        # A missing input file should abort with SystemExit.
        self.assertRaises(SystemExit, read_input_task.run, TEST_INPUT)
Code Example #7
    def test_wrong_key_name(self):
        TEST_INPUT_2 = "./tests/sample_files/test_input/wrong_template_format.json"
        read_input_task = ReadInputTask()

        # A key name that does not match the expected input template should abort with SystemExit.
        self.assertRaises(SystemExit, read_input_task.run, TEST_INPUT_2)
Code Example #8
    def test_missing_key(self):
        TEST_INPUT_1 = "./tests/sample_files/test_input/missing_tasks_key.json"
        read_input_task = ReadInputTask()

        # A configuration missing the required 'tasks' key should abort with SystemExit.
        self.assertRaises(SystemExit, read_input_task.run, TEST_INPUT_1)
Code Example #9
    def test_wrong_split_size(self):
        TEST_INPUT = "./tests/sample_files/test_input/wrong_split_size.json"
        read_input_task = ReadInputTask()

        # An invalid split size in the probing setup should abort with SystemExit.
        self.assertRaises(SystemExit, read_input_task.run, TEST_INPUT)
Code Example #10
import click
from prefect import Flow
from prefect.engine.flow_runner import FlowRunner
from probe_ably.core.tasks.metric_task import ProcessMetricTask
from probe_ably.core.tasks.probing import PrepareDataForProbingTask, TrainProbingTask
from probe_ably.core.tasks.utils import ReadInputTask, VisualiaztionTask

INPUT_FILE = "./tests/sample_files/test_input/multi_task_multi_model_with_control.json"
read_input_task = ReadInputTask()
prepare_data_probing = PrepareDataForProbingTask()
train_probing_task = TrainProbingTask()
process_metric_task = ProcessMetricTask()
visualization_task = VisualiaztionTask()


@click.command()
@click.option("--config_file", help="Probing Configuration File")
def run_probing(config_file):
    with Flow("Running Probe") as flow1:
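        # Read and validate the probing configuration file.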
        parsed_input = read_input_task(config_file)
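        # Prepare the probing datasets from the parsed tasks and probing setup.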
        prepared_data = prepare_data_probing(parsed_input["tasks"],
                                             parsed_input["probing_setup"])
        train_results = train_probing_task(prepared_data,
                                           parsed_input["probing_setup"])
        processed_results = process_metric_task(train_results,
                                                parsed_input["probing_setup"])
        visualization_task(processed_results)
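    # Building the flow above only wires the tasks together; FlowRunner executes it.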
    FlowRunner(flow=flow1).run()
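
As a usage note, the run_probing command above can be exercised from Python with click's CliRunner test helper. The sketch below is only a minimal illustration: it assumes the script in Code Example #10 is importable as probe_ably.main (a hypothetical module path) and reuses the INPUT_FILE constant that script defines.

from click.testing import CliRunner

# Hypothetical import path for the script shown in Code Example #10.
from probe_ably.main import INPUT_FILE, run_probing

runner = CliRunner()
# Invoke the command the same way the CLI would: --config_file <path>.
result = runner.invoke(run_probing, ["--config_file", INPUT_FILE])
assert result.exit_code == 0

Because the command builds and runs the full Prefect flow, this only succeeds in an environment where probe_ably and the referenced sample input file are available.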