Example #1
def main(debug=False, args=None):
    """Start the app. We will see if we need this anyway."""
    logger.info(
        '>>>>> Starting development server at http://{}/api/ <<<<<'.format(
            flask_app.config['SERVER_NAME']))
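    # Commented-out alternatives for sourcing the debug flag: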
    # flask_app.run(debug=settings.FLASK_DEBUG)
    # flask_app.run(debug=config_json["FLASK_DEBUG"])
    flask_app.run(debug=debug)
    return 0
Example #2
def delete(model: type, id: str) -> bool:
    """ delete the db model instance identified by ID if it exists,
    return ``False`` otherwise.
    """
    matches = model.query.filter_by(id=id)
    if matches.count() > 0:
        matches.delete()
        db.session.commit()
        return True
    else:
        logger.info(f"Can't delete {model.__name__} `{id}`: not found!")
        return False
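
A minimal usage sketch (not from the original source), assuming Flask-SQLAlchemy and that the delete function above is defined in the same module against this db instance; the Task model and the in-memory database are hypothetical stand-ins:

from flask import Flask
from flask_sqlalchemy import SQLAlchemy

app = Flask(__name__)
app.config["SQLALCHEMY_DATABASE_URI"] = "sqlite:///:memory:"
db = SQLAlchemy(app)

class Task(db.Model):  # hypothetical model, for illustration only
    id = db.Column(db.String, primary_key=True)

with app.app_context():
    db.create_all()
    db.session.add(Task(id="42"))
    db.session.commit()
    assert delete(Task, "42") is True    # row exists, so it is removed
    assert delete(Task, "42") is False   # already gone: logged, returns False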
Example #3
def _run_processor(
    executable: str,
    mets_url: str,
    resolver: Resolver,
    workspace: Workspace,
    log_level: str,
    input_file_grp: str,
    output_file_grp: str,
    parameter: str,  # JSON-encoded parameter dict
) -> subprocess.CompletedProcess:
    """ run an OCRD processor executable with the specified configuration, wait for the
    execution to complete, and return a :class:`subprocess.CompletedProcess` object.

    Basically the only difference from ``ocrd.processor.helper.run_cli`` is that
    ``/dev/stderr`` is being redirected to ``/dev/stdout``, and ``/dev/stdout`` output
    of the spawned subprocess is being captured and copied to the current logger.

    """
    args = [
        executable,
        '--working-dir',
        workspace.directory,
        '--mets',
        mets_url,
        '--log-level',
        log_level,
        '--input-file-grp',
        input_file_grp,
        '--output-file-grp',
        output_file_grp,
    ]
    if parameter:
        args += ['--parameter', parameter]
    logger.info(f'Start processor subprocess: `{" ".join(args)}`')
    result = subprocess.run(args,
                            check=False,
                            stdout=subprocess.PIPE,
                            stderr=subprocess.STDOUT)
    logger.info(f'Processor subprocess completed: `{" ".join(result.args)}`')
    for processor_stdout_line in result.stdout.decode().splitlines():
        logger.info(processor_stdout_line)
    logger.info(
        f'Processor subprocess returned with exit code {result.returncode}')
    return result
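
The capture pattern itself can be demonstrated in isolation; a minimal standalone sketch (not part of the original code) using only the standard library:

import logging
import subprocess
import sys

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

# stderr is merged into stdout via stderr=subprocess.STDOUT, and every
# captured line is re-emitted through the logger, as _run_processor does.
result = subprocess.run(
    [sys.executable, "-c", "import sys; print('out'); print('err', file=sys.stderr)"],
    check=False,
    stdout=subprocess.PIPE,
    stderr=subprocess.STDOUT,
)
for line in result.stdout.decode().splitlines():
    logger.info(line)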
Example #4
def main():
    """What should I do, when I'm called directly?"""
    logger.info("> Starting development server at http://%s/api/ <<<<<" %
             flask_app.config['SERVER_NAME'])
    flask_app.run(host="0.0.0.0", debug=True)
Example #5
def task_success_handler(result, *args, **kwargs):
    logger.debug(f"task_success_handler -> result: {result}, "
                 f"args: {args}, kwargs: {kwargs}")
    uid = result.get('uid')
    update_task(uid, 'SUCCESS', result)
    logger.info(f"Success on task {uid} with a result of {result}.")
Example #6
def task_postrun_handler(task_id, task, retval, state, *args, **kwargs):
    logger.debug(f"task_postrun_handler -> task: {task_id}, task: {task}, "
                 f"args: {args}, kwargs: {kwargs}")
    uid = kwargs.get('args')[-1].get('uid')
    update_task(uid, state)
    logger.info(f"Finished processing task {uid}.")
Example #7
def task_prerun_handler(task_id, task, *args, **kwargs):
    logger.debug(f"task_prerun_handler -> task: {task_id}, task: {task}, "
                 f"args: {args}, kwargs: {kwargs}")
    uid = kwargs.get('args')[0].get('uid')
    update_task(uid, 'STARTED')
    logger.info(f"Start processing task {uid}.")
Example #8
def run_task(self, task: dict) -> dict:
    """ Create a task an run the given workflow. """
    logger_path = current_app.config["LOGGER_PATH"]
    log_file = f"{logger_path}/task-{task['uid']}.log"
    task_log_handler = logger.add(log_file, format='{message}')

    logger.info(f'Start processing task {task["uid"]}.')

    # Create workspace
    from ocrd_butler.app import flask_app
    with flask_app.app_context():
        dst_dir = "{}/{}".format(current_app.config["OCRD_BUTLER_RESULTS"],
                                 task["uid"])
        resolver = Resolver()
        workspace = prepare_workspace(task, resolver, dst_dir)
        logger.info(f"Prepare workspace for task '{task['uid']}'.")

    task_processors = task["workflow"]["processors"]
    mets_url = "{}/mets.xml".format(dst_dir)

    # TODO: Steps could be saved along with the other task information to
    # make the task record more informative.
    previous_processor = None

    for processor in task_processors:
        input_file_grp = determine_input_file_grp(task, processor,
                                                  previous_processor)
        previous_processor = processor

        # It's possible to override the processor's parameters in the task.
        kwargs = {"parameter": {}}
        if "parameters" in processor:
            kwargs["parameter"].update(processor["parameters"])
        if processor["name"] in task["parameters"]:
            kwargs["parameter"].update(task["parameters"][processor["name"]])
        parameter = json.dumps(kwargs["parameter"])

        logger.info(
            f'Start processor {processor["name"]}. {json.dumps(processor)}.')

        logger.info(
            f'Run processor {processor["name"]} on METS file {mets_url}.')
        result = _run_processor(
            processor["executable"],
            mets_url=mets_url,
            resolver=resolver,
            workspace=workspace,
            log_level="DEBUG",
            input_file_grp=input_file_grp,
            output_file_grp=processor["output_file_grp"],
            parameter=parameter,
        )

        if result.returncode != 0:
            # Note: the log handler added at the top of run_task is not
            # removed here, so it stays registered when a processor fails.
            logger.info(f'Processing of task {task["uid"]} failed.')
            raise Exception(
                f"Processor {processor['name']} failed with exit code {result.returncode}."
            )

        workspace.reload_mets()
        logger.info(
            f'Finished processor {processor["name"]} for task {task["uid"]}.')

    logger.info(f'Finished processing task {task["uid"]}.')
    logger.remove(task_log_handler)

    return {"id": task["id"], "uid": task["uid"], "result_dir": dst_dir}