Example #1
    def run_model(
        self,
        dataset: Dataset,
        write_out: bool = False,
        log_progress: int = 0
    ) -> Tuple[List[ExecutionResult], Dict[str, List], Dict[str, List]]:
        """Run the model on a given dataset.

        Args:
            dataset: The dataset on which the model will be executed.
            write_out: Flag indicating whether the outputs should be written
                to a file defined in the dataset object.
            log_progress: Log the progress every `log_progress` seconds.

        Returns:
            A list of `ExecutionResult`s and two dictionaries of the output
            series.
        """
        if not self._model_built:
            self.build_model()
        if not self._vars_loaded:
            self.load_variables()

        with self.graph.as_default():
            return run_on_dataset(self.model.tf_manager,
                                  self.model.runners,
                                  self.model.dataset_runner,
                                  dataset,
                                  self.model.postprocess,
                                  write_out=write_out,
                                  log_progress=log_progress)
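A minimal usage sketch for the method above, assuming an already-built experiment object that exposes `run_model` as defined here; the names `exp` and `test_data` are hypothetical.

# Hypothetical usage: `exp` owns the built model, `test_data` is a Dataset.
# The method returns a 3-tuple, matching the signature above.
results, output_series, input_series = exp.run_model(
    test_data, write_out=False, log_progress=20)
for name, series in output_series.items():
    print(name, len(series))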
Example #2
def main():
    # pylint: disable=no-member,broad-except
    if len(sys.argv) != 3:
        print("Usage: run.py <run_ini_file> <test_datasets>")
        exit(1)

    test_datasets = Configuration()
    test_datasets.add_argument('test_datasets')

    args, sess = initialize_for_running(sys.argv[1])

    datasets_args = test_datasets.load_file(sys.argv[2])
    print("")

    try:
        for dataset in datasets_args.test_datasets:
            check_dataset_and_coders(dataset, args.encoders)
    except Exception as exc:
        log(str(exc), color='red')  # Python 3 exceptions have no .message
        exit(1)

    for dataset in datasets_args.test_datasets:
        _, _, evaluation = run_on_dataset(
            sess, args.runner, args.encoders + [args.decoder], args.decoder,
            dataset, args.evaluation, args.postprocess, write_out=True)
        if evaluation:
            print_dataset_evaluation(dataset.name, evaluation)
Example #3
def post_request():
    start_time = datetime.datetime.now()
    request_data = request.get_json()

    if request_data is None:
        response_data = {"error": "No data were provided."}
        code = 400
    else:
        args = APP.config['args']

        try:
            dataset = Dataset("request", request_data, {})
            # TODO check the dataset
            # check_dataset_and_coders(dataset, args.encoders)

            _, response_data = run_on_dataset(
                args.tf_manager, args.runners,
                dataset, args.postprocess, write_out=False)
            code = 200
        # pylint: disable=broad-except
        except Exception as exc:
            response_data = {'error': str(exc)}
            code = 400

    response_data['duration'] = (
        datetime.datetime.now() - start_time).total_seconds()
    json_response = json.dumps(response_data)
    response = flask.Response(json_response,
                              content_type='application/json; charset=utf-8')
    response.headers.add('content-length', len(json_response.encode('utf-8')))
    response.status_code = code
    return response
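A client-side sketch for exercising the handler above. It assumes the Flask app is served locally; the endpoint path and the input series name ("source") are assumptions about the deployment, and the third-party `requests` library stands in for any HTTP client.

import requests

# Hypothetical endpoint and payload; the series name "source" depends on
# how the model's dataset is configured.
payload = {"source": [["hello", "world"]]}
resp = requests.post("http://localhost:5000/run", json=payload)
print(resp.status_code, resp.json())  # response includes a 'duration' field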
Example #4
def main() -> None:
    # pylint: disable=no-member,broad-except
    if len(sys.argv) != 3:
        print("Usage: run.py <run_ini_file> <test_datasets>")
        exit(1)

    test_datasets = Configuration()
    test_datasets.add_argument('test_datasets')
    test_datasets.add_argument('variables')

    CONFIG.load_file(sys.argv[1])
    CONFIG.build_model()
    test_datasets.load_file(sys.argv[2])
    test_datasets.build_model()
    datasets_model = test_datasets.model
    initialize_for_running(CONFIG.model.output, CONFIG.model.tf_manager,
                           datasets_model.variables)

    print("")

    evaluators = [(e[0], e[0], e[1]) if len(e) == 2 else e
                  for e in CONFIG.model.evaluation]

    for dataset in datasets_model.test_datasets:
        execution_results, output_data = run_on_dataset(
            CONFIG.model.tf_manager,
            CONFIG.model.runners,
            dataset,
            CONFIG.model.postprocess,
            write_out=True)
        # TODO what if there is no ground truth
        eval_result = evaluation(evaluators, dataset, CONFIG.model.runners,
                                 execution_results, output_data)
        if eval_result:
            print_final_evaluation(dataset.name, eval_result)
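The list comprehension above normalizes 2-tuple evaluation entries (series, evaluator) into 3-tuples by reusing the series name as both the output and the reference series. A standalone sketch of the same transformation, with placeholder strings standing in for real evaluator objects:

# Standalone illustration of the evaluator normalization above;
# "bleu" and "ter" are placeholders for evaluator objects.
evaluation = [("target", "bleu"), ("target", "target_ref", "ter")]
evaluators = [(e[0], e[0], e[1]) if len(e) == 2 else e for e in evaluation]
print(evaluators)
# [('target', 'target', 'bleu'), ('target', 'target_ref', 'ter')]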
Example #5
    def run_model(self,
                  dataset: Dataset,
                  write_out: bool = False,
                  log_progress: int = 0) -> Tuple[
                      List[ExecutionResult], Dict[str, List], Dict[str, List]]:
        """Run the model on a given dataset.

        Args:
            dataset: The dataset on which the model will be executed.
            write_out: Flag indicating whether the outputs should be written
                to a file defined in the dataset object.
            log_progress: Log the progress every `log_progress` seconds.

        Returns:
            A list of `ExecutionResult`s and two dictionaries of the output
            series.
        """
        if not self._model_built:
            self.build_model()
        if not self._vars_loaded:
            self.load_variables()

        with self.graph.as_default():
            return run_on_dataset(
                self.model.tf_manager,
                self.model.runners,
                self.model.dataset_runner,
                dataset,
                self.model.postprocess,
                write_out=write_out,
                log_progress=log_progress)
Example #6
def main():
    # pylint: disable=no-member,broad-except
    if len(sys.argv) != 3:
        print("Usage: run.py <run_ini_file> <test_datasets>")
        exit(1)

    test_datasets = Configuration()
    test_datasets.add_argument('test_datasets')

    args, sess = initialize_for_running(sys.argv[1])

    datasets_args = test_datasets.load_file(sys.argv[2])
    print("")

    try:
        for dataset in datasets_args.test_datasets:
            check_dataset_and_coders(dataset, args.encoders)
    except Exception as exc:
        log(str(exc), color='red')
        exit(1)

    for dataset in datasets_args.test_datasets:
        _, _, evaluation = run_on_dataset(sess,
                                          args.runner,
                                          args.encoders + [args.decoder],
                                          args.decoder,
                                          dataset,
                                          args.evaluation,
                                          args.postprocess,
                                          write_out=True)
        if evaluation:
            print_dataset_evaluation(dataset.name, evaluation)
Example #7
def post_request():
    start_time = datetime.datetime.now()
    request_data = request.get_json()

    if request_data is None:
        response_data = {"error": "No data were provided."}
        code = 400
    else:
        args = APP.config['args']
        sess = APP.config['sess']

        try:
            dataset = Dataset("request", request_data, {})
            check_dataset_and_coders(dataset, args.encoders)

            result, _, _ = run_on_dataset(
                sess, args.runner, args.encoders + [args.decoder], args.decoder,
                dataset, args.evaluation, args.postprocess, write_out=True)
            response_data = {args.decoder.data_id: result}
            code = 200
        # pylint: disable=broad-except
        except Exception as exc:
            response_data = {'error': str(exc)}
            code = 400

    response_data['duration'] = (
        datetime.datetime.now() - start_time).total_seconds()
    json_response = json.dumps(response_data)
    response = flask.Response(json_response,
                              content_type='application/json; charset=utf-8')
    response.headers.add('content-length', len(json_response.encode('utf-8')))
    response.status_code = code
    return response
Example #8
def run(data):  # pragma: no cover
    args = APP.config["args"]
    dataset = Dataset("request", data, {})
    # TODO check the dataset
    # check_dataset_and_coders(dataset, args.encoders)

    _, response_data = run_on_dataset(args.tf_manager,
                                      args.runners,
                                      dataset,
                                      args.postprocess,
                                      write_out=False)

    return response_data
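A hedged call sketch for the helper above; it assumes `APP.config["args"]` was populated at startup with `tf_manager`, `runners` and `postprocess` as the helper expects, and the payload key "source" is an assumption about the dataset's series names.

# Hypothetical invocation; the series name "source" is an assumption.
data = {"source": [["a", "test", "sentence"]]}
outputs = run(data)
for series, values in outputs.items():
    print(series, values)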
Example #9
    def run_model(
        self,
        dataset: Dataset,
        write_out: bool = False,
        batch_size: Optional[int] = None,
        log_progress: int = 0
    ) -> Tuple[List[ExecutionResult], Dict[str, List[Any]]]:
        """Run the model on a given dataset.

        Args:
            dataset: The dataset on which the model will be executed.
            write_out: Flag indicating whether the outputs should be written
                to a file defined in the dataset object.
            batch_size: Size of the minibatch.
            log_progress: Log the progress every `log_progress` seconds.

        Returns:
            A list of `ExecutionResult`s and a dictionary of the output series.
        """
        if not self._model_built:
            self.build_model()
        if not self._vars_loaded:
            self.load_variables()

        toklevel = self.model.runners_batching_scheme.token_level_batching
        assert self.model.runners_batching_scheme.batch_bucket_span is None

        batching_scheme = BatchingScheme(
            batch_size=batch_size or self.model.runners_batch_size,
            batch_bucket_span=None,
            token_level_batching=toklevel,
            bucketing_ignore_series=[])

        with self.graph.as_default():
            # TODO: check_dataset_and_coders(dataset, self.model.runners)
            return run_on_dataset(self.model.tf_manager,
                                  self.model.runners,
                                  dataset,
                                  self.model.postprocess,
                                  write_out=write_out,
                                  log_progress=log_progress,
                                  batching_scheme=batching_scheme)
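Usage mirrors Example #1, except the caller may override the runners' batch size; `exp` and `val_data` are hypothetical names for an experiment object and a Dataset.

# Hypothetical usage with an explicit batch size; falls back to
# self.model.runners_batch_size when batch_size is None.
results, output_series = exp.run_model(
    val_data, write_out=False, batch_size=32)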
Example #10
def main() -> None:
    # pylint: disable=no-member,broad-except
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument("config",
                        metavar="INI-FILE",
                        help="the configuration file of the experiment")
    parser.add_argument('datasets',
                        metavar='INI-TEST-DATASETS',
                        help="the configuration of the test datasets")
    parser.add_argument("-g",
                        "--grid",
                        dest="grid",
                        action="store_true",
                        help="look at the SGE variables for slicing the data")
    args = parser.parse_args()

    test_datasets = Configuration()
    test_datasets.add_argument('test_datasets')
    test_datasets.add_argument('variables')

    CONFIG.load_file(args.config)
    CONFIG.build_model()
    test_datasets.load_file(args.datasets)
    test_datasets.build_model()
    datasets_model = test_datasets.model
    initialize_for_running(CONFIG.model.output, CONFIG.model.tf_manager,
                           datasets_model.variables)

    print("")

    evaluators = [(e[0], e[0], e[1]) if len(e) == 2 else e
                  for e in CONFIG.model.evaluation]

    if args.grid and len(datasets_model.test_datasets) > 1:
        raise ValueError("Only one test dataset supported when using --grid")

    for dataset in datasets_model.test_datasets:
        if args.grid:
            if ("SGE_TASK_FIRST" not in os.environ
                    or "SGE_TASK_LAST" not in os.environ
                    or "SGE_TASK_STEPSIZE" not in os.environ
                    or "SGE_TASK_ID" not in os.environ):
                raise EnvironmentError(
                    "Some SGE environment variables are missing")

            length = int(os.environ["SGE_TASK_STEPSIZE"])
            start = int(os.environ["SGE_TASK_ID"]) - 1
            end = int(os.environ["SGE_TASK_LAST"]) - 1

            if start + length > end:
                length = end - start + 1

            log("Running grid task {} starting at {} with step {}".format(
                start // length, start, length))

            dataset = dataset.subset(start, length)

        if CONFIG.model.runners_batch_size is None:
            runners_batch_size = CONFIG.model.batch_size
        else:
            runners_batch_size = CONFIG.model.runners_batch_size

        execution_results, output_data = run_on_dataset(
            CONFIG.model.tf_manager,
            CONFIG.model.runners,
            dataset,
            CONFIG.model.postprocess,
            write_out=True,
            batch_size=runners_batch_size,
            log_progress=60)
        # TODO what if there is no ground truth
        eval_result = evaluation(evaluators, dataset, CONFIG.model.runners,
                                 execution_results, output_data)
        if eval_result:
            print_final_evaluation(dataset.name, eval_result)
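A worked sketch of the SGE slicing arithmetic in the `--grid` branch above, with made-up environment values; it shows how the last task's slice is clipped to the end of the dataset.

# Made-up SGE values: step size 100, task id 201, last task id 250.
length = 100       # int(os.environ["SGE_TASK_STEPSIZE"])
start = 201 - 1    # int(os.environ["SGE_TASK_ID"]) - 1
end = 250 - 1      # int(os.environ["SGE_TASK_LAST"]) - 1
if start + length > end:
    length = end - start + 1
print(start, length)  # 200 50  -> dataset.subset(200, 50)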