Example #1
def call(jobs):
    for job in jobs:
        with job.JobContext():
            exit_code, std_out, std_error = executeOrRun("command", ["tree"] +
                                                         job.args[1:],
                                                         capture_output=True)

            job.write_error(std_error)
            job.write_output(std_out)
            job.set_status(exit_code)
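
Every example on this page calls the same helper. Its real definition is not shown here; the sketch below is only a reconstruction of the observed contract, pieced together from the call sites that follow (the "command"/"bashScript" type strings, the optional third stdin argument passed positionally in Example #5, and the printing/arguments/capture_output keywords). Every name and default in it is an inference, not the actual implementation:

import subprocess

def execute_or_run_sketch(cmd_type, command, stdin="", printing=True,
                          arguments=None, capture_output=True):
    """Hypothetical stand-in for executeOrRun, reconstructed from the call
    sites on this page: run a command list/string ("command") or a shell
    script body ("bashScript"), return (exit_code, stdout, stderr)."""
    arguments = arguments or []
    if cmd_type == "command":
        # The examples pass either an argv list or a full command string.
        argv = command if isinstance(command, list) else command.split()
        proc = subprocess.run(argv + arguments, input=stdin, text=True,
                              capture_output=True)
    else:  # "bashScript": hand the script body to a shell
        proc = subprocess.run(["bash", "-c", command, "--"] + arguments,
                              input=stdin, text=True, capture_output=True)
    if printing and not capture_output:
        print(proc.stdout, end="")
    return proc.returncode, proc.stdout, proc.stderr
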
Example #2
    def execute(self, skip_on_success=False):
        """Execute the the command, and associated verification and event detail commands.

        Returns 0 if all commands succeeded, non-0 if any failed."""
        # For "command" and "bashScript" type delegate tools, e.g.
        # individual commandline statements or bash scripts, we interpolate
        # the necessary values into the script's source
        args = []
        if self.type in ["command", "bashScript"]:
            self.command = self.replacement_dict.replace(self.command)[0]
        # For other command types, we translate the entries from
        # replacement_dict into GNU-style long options, e.g.
        # [%fileName%, foo] => --file-name=foo
        else:
            args = self.replacement_dict.to_gnu_options()
        self.job.print_output("Command to execute:", self.command)
        self.job.print_output("-----")
        self.job.print_output("Command stdout:")
        self.exit_code, self.std_out, std_err = executeOrRun(
            self.type, self.command, arguments=args, capture_output=True)
        self.job.write_output(self.std_out)
        self.job.write_error(std_err)
        self.job.print_output("-----")
        self.job.print_output("Command exit code:", self.exit_code)
        if self.exit_code == 0 and self.verification_command:
            self.job.print_output("Running verification command",
                                  self.verification_command)
            self.job.print_output("-----")
            self.job.print_output("Command stdout:")
            self.exit_code = self.verification_command.execute(
                skip_on_success=True)
            self.job.print_output("-----")
            self.job.print_output("Verification Command exit code:",
                                  self.exit_code)

        if self.exit_code == 0 and self.event_detail_command:
            self.job.print_output("Running event detail command",
                                  self.event_detail_command)
            self.event_detail_command.execute(skip_on_success=True)

        # If unsuccessful
        if self.exit_code != 0:
            self.job.print_error("Failed:", self.fpcommand)
            self.job.print_error("Standard out:", self.std_out)
            self.job.print_error("Standard error:", std_err)
        else:
            if (not skip_on_success) and self.on_success:
                self.on_success(self, self.opts, self.replacement_dict)
        return self.exit_code
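
The [%fileName%, foo] => --file-name=foo translation mentioned in the comments of Example #2 is done by ReplacementDict.to_gnu_options(). The helper below is a hypothetical sketch of what that conversion amounts to; only the input/output shape is taken from the comment:

import re

def to_gnu_options_sketch(replacements):
    """Turn {"%fileName%": "foo"} into ["--file-name=foo"]."""
    options = []
    for key, value in replacements.items():
        name = key.strip("%")
        # camelCase -> kebab-case: fileName -> file-name
        name = re.sub(r"(?<=[a-z0-9])([A-Z])", r"-\1", name).lower()
        options.append(f"--{name}={value}")
    return options

assert to_gnu_options_sketch({"%fileName%": "foo"}) == ["--file-name=foo"]
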
Example #3
def extract_aip(job, aip_path, extract_path):
    os.makedirs(extract_path)
    command = f"atool --extract-to={extract_path} -V0 {aip_path}"
    job.pyprint("Running extraction command:", command)
    exit_code, stdout, stderr = executeOrRun("command",
                                             command,
                                             capture_output=True)
    job.write_output(stdout)
    job.write_error(stderr)
    if exit_code != 0:
        raise Exception("Error extracting AIP")

    aip_identifier, ext = os.path.splitext(os.path.basename(aip_path))
    if ext in (".bz2", ".gz"):
        aip_identifier, _ = os.path.splitext(aip_identifier)
    return os.path.join(extract_path, aip_identifier)
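
The second splitext call above exists because os.path.splitext only strips the last suffix, so compound extensions such as .tar.bz2 need two passes:

import os.path

name, ext = os.path.splitext("aip.tar.bz2")  # -> ("aip.tar", ".bz2")
if ext in (".bz2", ".gz"):
    name, _ = os.path.splitext(name)         # -> ("aip", ".tar")
assert name == "aip"
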
Example #4
def main(job, target, output):
    args = ["bulk_extractor", target, "-o", output, "-M", "250", "-q", "-1"]
    try:
        os.makedirs(output)

        _, stdout, stderr = executeOrRun("command", args, capture_output=True)

        job.write_output(stdout)
        job.write_error(stderr)

        # remove empty BulkExtractor logs
        for filename in os.listdir(output):
            filepath = os.path.join(output, filename)
            if os.path.getsize(filepath) == 0:
                os.remove(filepath)
        return 0
    except Exception as e:
        # Return a non-zero status instead of the exception object so the
        # caller gets a usable exit code.
        job.print_error(repr(e))
        return 1
Example #5
def rename(source, destination, printfn=print, should_exit=False):
    """Used to move/rename directories. This function was before used to wrap the operation with sudo."""
    if source == destination:
        # Handle this case so that we don't try to move a directory into itself
        printfn("Source and destination are the same, nothing to do.")
        return 0

    command = ["mv", source, destination]
    exitCode, stdOut, stdError = executeOrRun("command",
                                              command,
                                              "",
                                              printing=False)
    if exitCode:
        printfn("exitCode:", exitCode, file=sys.stderr)
        printfn(stdOut, file=sys.stderr)
        printfn(stdError, file=sys.stderr)
        if should_exit:
            sys.exit(exitCode)

    return exitCode
Example #6
def call(jobs):
    for job in jobs:
        with job.JobContext():
            sip_dir = job.args[1]
            sip_name = job.args[2]

            source_dir = os.path.join(sip_dir, sip_name, "data", "objects",
                                      "submissionDocumentation")
            submission_docs_dir = os.path.join(sip_dir,
                                               "submissionDocumentation")

            os.makedirs(submission_docs_dir, mode=0o770, exist_ok=True)

            exit_code, std_out, std_error = executeOrRun(
                "command",
                ["cp", "-R", source_dir, submission_docs_dir],
                capture_output=True,
            )

            job.write_error(std_error)
            job.write_output(std_out)
            job.set_status(exit_code)
Example #7
def _extract(job, path, destination):
    command = [
        "unar",
        "-force-directory",
        "-output-directory",
        str(destination),
        str(path),
    ]
    exit_code, stdout, stderr = executeOrRun("command",
                                             command,
                                             capture_output=True)
    if exit_code > 0:
        raise RetrievalError(
            f"Extraction failed, unar quit with exit code {exit_code}")

    output_dir = next(destination.iterdir())

    # Strip leading container.
    if len(list(output_dir.glob("*"))) == 1:
        candidate = next(output_dir.iterdir())
        if candidate.is_dir():
            return candidate

    return output_dir
Example #8
def main(job, file_path, file_uuid, sip_uuid):
    setup_dicts()

    failed = False

    # Check to see whether the file has already been characterized; don't try
    # to characterize it a second time if so.
    if FPCommandOutput.objects.filter(file_id=file_uuid).count() > 0:
        return 0

    try:
        format = FormatVersion.active.get(
            fileformatversion__file_uuid=file_uuid)
    except FormatVersion.DoesNotExist:
        rules = format = None

    if format:
        rules = FPRule.active.filter(format=format.uuid,
                                     purpose="characterization")

    # Characterization always occurs - if nothing is specified, get one or more
    # defaults specified in the FPR.
    if not rules:
        # A3M-TODO DEFAULT CHARACTERIZATION DISABLED
        # rules = FPRule.active.filter(purpose="default_characterization")
        return 0

    for rule in rules:
        if (rule.command.script_type == "bashScript"
                or rule.command.script_type == "command"):
            args = []
            command_to_execute = replace_string_values(rule.command.command,
                                                       file_=file_uuid,
                                                       sip=sip_uuid,
                                                       type_="file")
        else:
            rd = ReplacementDict.frommodel(file_=file_uuid,
                                           sip=sip_uuid,
                                           type_="file")
            args = rd.to_gnu_options()
            command_to_execute = rule.command.command

        exitstatus, stdout, stderr = executeOrRun(
            rule.command.script_type,
            command_to_execute,
            arguments=args,
            capture_output=True,
        )

        job.write_output(stdout)
        job.write_error(stderr)

        if exitstatus != 0:
            job.write_error(
                "Command {} failed with exit status {}; stderr:".format(
                    rule.command.description, exitstatus))
            failed = True
            continue
        # fmt/101 is XML - we want to collect and package any XML output, while
        # allowing other commands to execute without actually collecting their
        # output in the event that they are writing their output to disk.
        # FPCommandOutput can have multiple rows for a given file,
        # distinguished by the rule that produced it.
        if (rule.command.output_format
                and rule.command.output_format.pronom_id == "fmt/101"):
            try:
                etree.fromstring(  # nosec B320
                    stdout.encode("utf8"),
                    etree.XMLParser(resolve_entities=False, no_network=True),
                )
                _insert_command_output(file_uuid, rule.uuid, stdout)
                job.write_output(
                    'Saved XML output for command "{}" ({})'.format(
                        rule.command.description, rule.command.uuid))
            except etree.XMLSyntaxError:
                failed = True
                job.write_error(
                    'XML output for command "{}" ({}) was not valid XML; not saving to database'
                    .format(rule.command.description, rule.command.uuid))
        else:
            job.write_error(
                'Tool output for command "{}" ({}) is not XML; not saving to database'
                .format(rule.command.description, rule.command.uuid))

    if failed:
        return 255
    else:
        return 0
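
The parser options in Example #8 matter: resolve_entities=False and no_network=True keep lxml from expanding external entities or fetching remote DTDs while validating untrusted tool output. The check reduces to a small predicate, sketched here with the same flags:

from lxml import etree

def is_wellformed_xml(text):
    """Return True if `text` parses as XML, with entity resolution and
    network access disabled (mirrors the flags used above)."""
    parser = etree.XMLParser(resolve_entities=False, no_network=True)
    try:
        etree.fromstring(text.encode("utf8"), parser)  # nosec B320
        return True
    except etree.XMLSyntaxError:
        return False
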
Example #9
def compress_aip(job, compression, compression_level, sip_directory, sip_name,
                 sip_uuid):
    """Compresses AIP according to compression algorithm and level.
    compression = AIP compression algorithm, format: <program>-<algorithm>, e.g. 7z-lzma, pbzip2-
    compression_level = AIP compression level, integer between 1 and 9 inclusive
    sip_directory = Absolute path to the directory where the SIP is
    sip_name = User-provided name of the SIP
    sip_uuid = SIP UUID

    Example inputs:
    compressAIP.py
        7z-lzma
        5
        %sharedDirectory%/watchedDirectories/workFlowDecisions/compressionAIPDecisions/ep-d87d5845-bd07-4200-b1a4-928e0cb6e1e4/
        ep
        d87d5845-bd07-4200-b1a4-928e0cb6e1e4
    """
    # Normalize level 0 to 1, the lowest level used below.
    if compression_level == "0":
        compression_level = "1"

    compression = int(compression)
    # Raises ValueError if the value is not a known algorithm.
    ProcessingConfig.AIPCompressionAlgorithm.Name(compression)
    # Default is uncompressed.
    if compression == ProcessingConfig.AIP_COMPRESSION_ALGORITHM_UNSPECIFIED:
        compression = ProcessingConfig.AIP_COMPRESSION_ALGORITHM_UNCOMPRESSED

    # Translation to make compress_aip happy.
    mapping = {
        ProcessingConfig.AIP_COMPRESSION_ALGORITHM_UNCOMPRESSED: ("None", ""),
        ProcessingConfig.AIP_COMPRESSION_ALGORITHM_TAR: (
            "gzip",
            "tar.gzip",
        ),  # A3M-TODO: support
        ProcessingConfig.AIP_COMPRESSION_ALGORITHM_TAR_BZIP2:
        ("pbzip2", "pbzip2"),
        ProcessingConfig.AIP_COMPRESSION_ALGORITHM_TAR_GZIP:
        ("gzip", "tar.gzip"),
        ProcessingConfig.AIP_COMPRESSION_ALGORITHM_S7_COPY: ("7z", "copy"),
        ProcessingConfig.AIP_COMPRESSION_ALGORITHM_S7_BZIP2: ("7z", "bzip2"),
        ProcessingConfig.AIP_COMPRESSION_ALGORITHM_S7_LZMA: ("7z", "lzma"),
    }

    try:
        program, compression_algorithm = mapping[compression]
    except KeyError:
        msg = f"Invalid program-compression algorithm: {compression}"
        job.pyprint(msg, file=sys.stderr)
        return 255

    archive_path = f"{sip_name}-{sip_uuid}"
    uncompressed_location = sip_directory + archive_path

    # Even though no actual compression is taking place,
    # the location still needs to be set in the unit to ensure that the
    # %AIPFilename% variable is set appropriately.
    # Setting it to an empty string ensures the common
    # "%SIPDirectory%%AIPFilename%" pattern still points at the right thing.
    if program == "None":
        update_unit(sip_uuid, uncompressed_location)
        return 0

    job.pyprint("Compressing {} with {}, algorithm {}, level {}".format(
        uncompressed_location, program, compression_algorithm,
        compression_level))

    if program == "7z":
        compressed_location = uncompressed_location + ".7z"
        command = '/usr/bin/7z a -bd -t7z -y -m0={algorithm} -mx={level} -mta=on -mtc=on -mtm=on -mmt=on "{compressed_location}" "{uncompressed_location}"'.format(
            algorithm=compression_algorithm,
            level=compression_level,
            uncompressed_location=uncompressed_location,
            compressed_location=compressed_location,
        )
        tool_info_command = (
            r'echo program="7z"\; '
            r'algorithm="{}"\; '
            'version="`7z | grep Version`"'.format(compression_algorithm))
    elif program == "pbzip2":
        compressed_location = uncompressed_location + ".tar.bz2"
        command = '/bin/tar -c --directory "{sip_directory}" "{archive_path}" | /usr/bin/pbzip2 --compress -{level} > "{compressed_location}"'.format(
            sip_directory=sip_directory,
            archive_path=archive_path,
            level=compression_level,
            compressed_location=compressed_location,
        )
        tool_info_command = (
            r'echo program="pbzip2"\; '
            r'algorithm="{}"\; '
            'version="$((pbzip2 -V) 2>&1)"'.format(compression_algorithm))
    elif program == "gzip":
        compressed_location = uncompressed_location + ".tar.gz"
        command = '/bin/tar -c --directory "{sip_directory}" "{archive_path}" | /bin/gzip -{level} > "{compressed_location}"'.format(
            sip_directory=sip_directory,
            archive_path=archive_path,
            level=compression_level,
            compressed_location=compressed_location,
        )
        tool_info_command = (
            r'echo program="gzip"\; '
            r'algorithm="{}"\; '
            'version="$((gzip -V) 2>&1)"'.format(compression_algorithm))
    else:
        msg = f"Program {program} not recognized, exiting script prematurely."
        job.pyprint(msg, file=sys.stderr)
        return 255

    job.pyprint("Executing command:", command)
    exit_code, std_out, std_err = executeOrRun("bashScript",
                                               command,
                                               capture_output=True)
    job.write_output(std_out)
    job.write_error(std_err)

    # Add new AIP File
    file_uuid = sip_uuid
    databaseFunctions.insertIntoFiles(
        fileUUID=file_uuid,
        filePath=compressed_location.replace(sip_directory, "%SIPDirectory%",
                                             1),
        sipUUID=sip_uuid,
        use="aip",
    )

    # Add compression event
    job.pyprint("Tool info command:", tool_info_command)
    _, tool_info, tool_info_err = executeOrRun("bashScript",
                                               tool_info_command,
                                               capture_output=True)
    job.write_output(tool_info)
    job.write_error(tool_info_err)
    tool_output = f'Standard Output="{std_out}"; Standard Error="{std_err}"'
    databaseFunctions.insertIntoEvents(
        eventType="compression",
        eventDetail=tool_info,
        eventOutcomeDetailNote=tool_output,
        fileUUID=file_uuid,
    )

    update_unit(sip_uuid, compressed_location)

    return exit_code
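
As a worked example, the docstring's sample inputs resolve through the mapping as AIP_COMPRESSION_ALGORITHM_S7_LZMA -> ("7z", "lzma"), so with level 5 the 7z branch above builds a command of this shape (the /tmp prefix below is a placeholder standing in for the real sip_directory):

archive = "ep-d87d5845-bd07-4200-b1a4-928e0cb6e1e4"
command = (
    "/usr/bin/7z a -bd -t7z -y -m0=lzma -mx=5 -mta=on -mtc=on -mtm=on "
    f'-mmt=on "/tmp/{archive}.7z" "/tmp/{archive}"'
)
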
Example #10
    def _execute_rule_command(self, rule):
        """Execute the FPR command of FPR rule ``rule`` against the file passed
        in to this client script. The output of that command determines what we
        print to stdout and stderr, and the nature of the validation event that
        we save to the db. We also copy the MediaConch policy file to the logs/
        directory of the AIP if it has not already been copied there.
        """
        result = "passed"
        command_to_execute, args = self._get_command_to_execute(rule)
        self.job.pyprint("Running", rule.command.description)
        exitstatus, stdout, stderr = executeOrRun(
            rule.command.script_type,
            command_to_execute,
            arguments=args,
            printing=False,
            capture_output=True,
        )
        try:
            output = json.loads(stdout)
        except ValueError:
            logger.exception(
                "Unable to load an object from the malformed JSON: \n%s", stderr
            )
            raise
        if self.file_type in ("preservation", "original"):
            self._save_to_logs_dir(output)
        if exitstatus == 0:
            self.job.pyprint(
                "Command {} completed with output {}".format(
                    rule.command.description, stdout
                )
            )
        else:
            self.job.print_error(
                "Command {} failed with exit status {}; stderr:".format(
                    rule.command.description, exitstatus
                ),
                stderr,
            )
            return "failed"
        event_detail = (
            'program="{tool.description}";'
            ' version="{tool.version}"'.format(tool=rule.command.tool)
        )
        if output.get("eventOutcomeInformation") != "pass":
            self.job.print_error(
                "Command {descr} returned a non-pass outcome "
                "for the policy check;\n\noutcome: "
                "{outcome}\n\ndetails: {details}.".format(
                    descr=rule.command.description,
                    outcome=output.get("eventOutcomeInformation"),
                    details=output.get("eventOutcomeDetailNote"),
                )
            )
            result = "failed"
        self.job.pyprint(
            "Creating policy checking event for {} ({})".format(
                self.file_path, self.file_uuid
            )
        )
        # Manually-normalized access derivatives have no file UUID so we can't
        # create a validation event for them. TODO/QUESTION: should we use the
        # UUID that was assigned to the manually normalized derivative during
        # transfer, i.e., the one that we retrieve in
        # ``_get_manually_normalized_access_derivative_file_uuid`` above?
        if not self.is_manually_normalized_access_derivative:
            databaseFunctions.insertIntoEvents(
                fileUUID=self.file_uuid,
                eventType="validation",  # From PREMIS controlled vocab.
                eventDetail=event_detail,
                eventOutcome=output.get("eventOutcomeInformation"),
                eventOutcomeDetailNote=output.get("eventOutcomeDetailNote"),
            )
        return result
Example #11
def main(job, transfer_uuid, sip_directory, date, task_uuid, delete=False):
    file_path_cache = {}
    files = File.objects.filter(transfer=transfer_uuid,
                                removedtime__isnull=True)
    if not files:
        job.pyprint("No files found for transfer: ", transfer_uuid)

    transfer_mdl = Transfer.objects.get(uuid=transfer_uuid)

    # We track whether or not anything was extracted because that controls what
    # the next microservice chain link will be.
    # If something was extracted, then a new identification step has to be
    # kicked off on those files; otherwise, we can go ahead with the transfer.
    extracted = False

    for file_ in files:
        try:
            format_id = FileFormatVersion.objects.get(file_uuid=file_.uuid)
        # Can't do anything if the file wasn't identified in the previous step
        except FileFormatVersion.DoesNotExist:
            job.pyprint(
                "Not extracting contents from",
                os.path.basename(file_.currentlocation),
                " - file format not identified",
                file=sys.stderr,
            )
            continue
        if format_id.format_version is None:
            job.pyprint(
                "Not extracting contents from",
                os.path.basename(file_.currentlocation),
                " - file format not identified",
                file=sys.stderr,
            )
            continue
        # Extraction commands are defined in the FPR just like normalization
        # commands
        try:
            command = FPCommand.active.get(
                fprule__format=format_id.format_version,
                fprule__purpose="extract",
                fprule__enabled=True,
            )
        except FPCommand.DoesNotExist:
            job.pyprint(
                "Not extracting contents from",
                os.path.basename(file_.currentlocation),
                " - No rule found to extract",
                file=sys.stderr,
            )
            continue

        # Check if file has already been extracted
        if already_extracted(file_):
            job.pyprint(
                "Not extracting contents from",
                os.path.basename(file_.currentlocation),
                " - extraction already happened.",
                file=sys.stderr,
            )
            continue

        file_to_be_extracted_path = file_.currentlocation.replace(
            TRANSFER_DIRECTORY, sip_directory)
        extraction_target, file_path_cache = temporary_directory(
            file_to_be_extracted_path, date, file_path_cache)

        # Create the extract packages command.
        if command.script_type == "command" or command.script_type == "bashScript":
            args = []
            command_to_execute = command.command.replace(
                "%inputFile%", file_to_be_extracted_path)
            command_to_execute = command_to_execute.replace(
                "%outputDirectory%", extraction_target)
        else:
            command_to_execute = command.command
            args = [file_to_be_extracted_path, extraction_target]

        # Make the command clear to users when inspecting stdin/stdout.
        logger.debug("Command to execute is: %s", command_to_execute)
        exitstatus, stdout, stderr = executeOrRun(command.script_type,
                                                  command_to_execute,
                                                  arguments=args,
                                                  capture_output=True)
        job.write_output(stdout)
        job.write_error(stderr)

        if exitstatus != 0:
            # Dang, looks like the extraction failed
            job.pyprint("Command",
                        command.description,
                        "failed!",
                        file=sys.stderr)
        else:
            extracted = True
            job.pyprint("Extracted contents from",
                        os.path.basename(file_to_be_extracted_path))

            # Assign UUIDs and insert them into the database, so the newly
            # extracted files are properly tracked by Archivematica
            for extracted_file in tree(extraction_target):
                extracted_file_original_location = extracted_file.replace(
                    extraction_target, file_.originallocation, 1)
                assign_uuid(
                    job,
                    extracted_file,
                    extracted_file_original_location,
                    file_.uuid,
                    transfer_uuid,
                    date,
                    task_uuid,
                    sip_directory,
                    file_to_be_extracted_path,
                )

            if transfer_mdl.diruuids:
                create_extracted_dir_uuids(job, transfer_mdl,
                                           extraction_target, sip_directory,
                                           file_)

            # We may want to remove the original package file after extracting
            # its contents
            if delete:
                delete_and_record_package_file(job, file_to_be_extracted_path,
                                               file_.uuid,
                                               file_.currentlocation)

    if extracted:
        return 0
    else:
        return 255
Example #12
    def _execute_rule_command(self, rule):
        """Run the command against the file and return either 'passed' or
        'failed'. If the command errors or determines that the file is invalid,
        return 'failed'. Non-errors will result in the creation of an Event
        model in the db. Preservation derivative validation will result in the
        stdout from the command being saved to disk within the unit (i.e., SIP).
        """
        result = "passed"
        if rule.command.script_type in ("bashScript", "command"):
            command_to_execute = replace_string_values(
                rule.command.command,
                file_=self.file_uuid,
                sip=self.sip_uuid,
                type_="file",
            )
            args = []
        else:
            command_to_execute = rule.command.command
            args = [self.file_path]
        self.job.print_output("Running", rule.command.description)
        exitstatus, stdout, stderr = executeOrRun(
            type=rule.command.script_type,
            text=command_to_execute,
            printing=False,
            arguments=args,
        )
        if exitstatus != 0:
            self.job.print_error(
                "Command {description} failed with exit status {status};"
                " stderr:".format(description=rule.command.description,
                                  status=exitstatus))
            return "failed"
        # Parse output and generate an Event
        # TODO: Evaluating a python string from a user-definable script seems
        # insecure practice; should be JSON.
        output = ast.literal_eval(stdout)
        event_detail = ('program="{tool.description}";'
                        ' version="{tool.version}"'.format(
                            tool=rule.command.tool))
        # If the FPR command has not errored but the actual validation
        # determined that the file is not valid, then we want to both create a
        # validation event in the db and set ``failed`` to ``True`` because we
        # want the micro-service in the dashboard GUI to indicate "Failed".
        # NOTE: this requires that the stdout of all validation FPR commands be
        # a dict (preferably a JSON object) with an ``eventOutcomeInformation``
        # boolean attribute.
        if output.get("eventOutcomeInformation") == "pass":
            self.job.print_output(
                f'Command "{rule.command.description}" was successful')
        elif output.get("eventOutcomeInformation") == "partial pass":
            self.job.print_output(
                f'Command "{rule.command.description}" was partially successful'
            )
        else:
            self.job.pyprint(
                "Command {cmd_description} indicated failure with this"
                " output:\n\n{output}".format(
                    cmd_description=rule.command.description,
                    output=pformat(stdout)),
                file=sys.stderr,
            )
            result = "failed"
        if self.file_type == "preservation":
            self._save_stdout_to_logs_dir(output)
        self.job.print_output(
            "Creating {purpose} event for {file_path} ({file_uuid})".format(
                purpose=self.purpose,
                file_path=self.file_path,
                file_uuid=self.file_uuid))
        databaseFunctions.insertIntoEvents(
            fileUUID=self.file_uuid,
            eventType="validation",  # From PREMIS controlled vocab.
            eventDetail=event_detail,
            eventOutcome=output.get("eventOutcomeInformation"),
            eventOutcomeDetailNote=output.get("eventOutcomeDetailNote"),
        )
        return result
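
The TODO in Example #12 points at the difference from Example #10, which parses the same kind of output with json.loads. A tolerant parser covering both conventions might look like this (a sketch, not code from the project):

import ast
import json

def parse_tool_output(stdout):
    """Prefer JSON; fall back to ast.literal_eval for commands that still
    print a Python dict repr (the insecure legacy the TODO refers to)."""
    try:
        return json.loads(stdout)
    except ValueError:
        return ast.literal_eval(stdout)

parse_tool_output('{"eventOutcomeInformation": "pass"}')   # JSON
parse_tool_output("{'eventOutcomeInformation': 'pass'}")   # dict repr
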
Example #13
def main(job, task_uuid, file_uuid):
    setup_dicts()

    succeeded = True

    file_ = File.objects.get(uuid=file_uuid)

    # Normally we don't transcribe derivatives (access copies, preservation copies);
    # however, some useful transcription tools can't handle some formats that
    # are common as the primary copies. For example, tesseract can't handle JPEG2000.
    # If there are no rules for the primary format passed in, try to look at each
    # derivative until a transcribable derivative is found.
    #
    # Skip derivatives to avoid double-scanning them; only look at them as a fallback.
    if file_.filegrpuse != "original":
        job.print_error(f"{file_uuid} is not an original; not transcribing")
        return 0

    rules = fetch_rules_for(file_)
    if not rules:
        file_, rules = fetch_rules_for_derivatives(file_)

    if not rules:
        job.print_error(
            "No rules found for file {} and its derivatives; not transcribing".format(
                file_uuid
            )
        )
        return 0
    else:
        if file_.filegrpuse == "original":
            noun = "original"
        else:
            noun = file_.filegrpuse + " derivative"
        job.print_error(f"Transcribing {noun} {file_.uuid}")

    rd = ReplacementDict.frommodel(file_=file_, type_="file")

    for rule in rules:
        script = rule.command.command
        if rule.command.script_type in ("bashScript", "command"):
            (script,) = rd.replace(script)
            args = []
        else:
            args = rd.to_gnu_options()

        exitstatus, stdout, stderr = executeOrRun(
            rule.command.script_type, script, arguments=args, capture_output=True
        )
        job.write_output(stdout)
        job.write_error(stderr)
        if exitstatus != 0:
            succeeded = False

        output_path = rd.replace(rule.command.output_location)[0]
        relative_path = output_path.replace(rd["%SIPDirectory%"], "%SIPDirectory%")
        event = insert_transcription_event(exitstatus, file_uuid, rule, relative_path)

        if os.path.isfile(output_path):
            insert_file_into_database(
                task_uuid,
                file_uuid,
                rd["%SIPUUID%"],
                event,
                rule,
                output_path,
                relative_path,
            )

    return 0 if succeeded else 1