Example #1
0
def process_file(job_identifier, file_id):
    """Process one uploaded file (or archive) belonging to a job.

    Claims the file match for the current thread, marks every matched
    file as processed, and then dispatches to archive handling or to a
    direct conversion.  Returns ``None`` in every case.
    """
    job_handler = JobHandler(job_identifier)

    # Guard clause: bail out unless the file is ready and this thread
    # successfully claims the file match.
    if not (job_handler.file_ready_for_process(file_id)
            and job_handler.filematch_try_process(file_id)):
        return

    # Give a competing thread a moment to register its claim, then make
    # sure we still own this file match; if not, another thread is
    # already processing it (race condition) and we back off.
    time.sleep(0.001)
    if job_handler.filematch_processing_id(file_id) != file_id:
        return

    matched_files = job_handler.get_matched_files(file_id)
    for entry in matched_files:
        job_handler.set_file_processed(entry.file_id)

    upload_dir = job_handler.get_upload_folder_path()
    output_dir = job_handler.get_output_folder_path()
    fmt_name = job_handler.get_export_format_name()
    srs_in = job_handler.get_source_srs()
    srs_out = job_handler.get_target_srs()
    simplify = job_handler.get_simplify_parameter()
    shell_args = job_handler.get_shell_parameters()
    extract_root = job_handler.get_extract_folder_path()

    if matched_files[0].file_match.is_archive:
        archive_name = matched_files[0].matched_file_name
        # With more than one uploaded file, unpack into a per-archive
        # sub folder so contents of different archives cannot collide.
        if job_handler.get_file_count() > 1:
            extract_dir = os.path.join(extract_root,
                                       os.path.splitext(archive_name)[0])
        else:
            extract_dir = extract_root
        process_archive(job_identifier,
                        os.path.join(upload_dir, archive_name),
                        extract_root, extract_dir, output_dir,
                        fmt_name, srs_in, srs_out, simplify,
                        shell_args, 1)
    else:
        conversion.convert_files(job_identifier, upload_dir,
                                 matched_files, output_dir, fmt_name,
                                 srs_in, srs_out, simplify, shell_args)
Example #2
0
def process_folder(job_identifier,
                   source_path,
                   file_dict,
                   destination_path,
                   export_format_name,
                   source_srs,
                   target_srs,
                   simplify_parameter,
                   additional_arguments,
                   archive_depth=1):
    """Match, rename, register and process the files of one folder.

    Renames files to their canonical matched names, registers every file
    match with the job, then converts each not-yet-processed match
    (recursing into nested archives via ``process_archive``).

    :param job_identifier: identifier used to construct the JobHandler.
    :param source_path: folder containing the input files.
    :param file_dict: mapping fed to FileMatcher (file ids to names).
    :param destination_path: folder that receives converted output.
    :param export_format_name: target format passed to the converter.
    :param source_srs: source spatial reference system.
    :param target_srs: target spatial reference system.
    :param simplify_parameter: geometry simplification parameter.
    :param additional_arguments: extra shell parameters for conversion.
    :param archive_depth: nesting level, incremented per nested archive.
    """
    job_handler = JobHandler(job_identifier)
    file_matcher = FileMatcher(file_dict)

    # Rename files to the canonical names chosen by the matcher.
    for file_match in file_matcher.get_matches():
        for file_id, new_file_name in file_match.get_file_dict().items():
            original_file_name = file_matcher.get_original_file_name(file_id)
            if original_file_name != new_file_name:
                filemanager.rename_file(source_path, original_file_name,
                                        new_file_name)

    # Register every match with the job before processing starts.
    for file_match in file_matcher.get_matches():
        job_handler.add_file_match(source_path, file_match.get_file_dict(),
                                   file_match.get_ogr_format_name(),
                                   file_match.is_archive(),
                                   file_match.is_valid())

    for file_match in file_matcher.get_matches():
        file_id = list(file_match.get_file_dict().keys())[0]
        # BUG FIX: previously `matched_files` was only assigned inside
        # this guard but used after it, which raised a NameError when the
        # first match was already processed and otherwise re-processed a
        # stale `matched_files` from an earlier iteration.  Already
        # processed matches are now skipped entirely.
        if job_handler.get_file_processed(file_id):
            continue
        matched_files = job_handler.get_matched_files(file_id)
        for matched_file in matched_files:
            job_handler.set_file_processed(matched_file.file_id)

        if matched_files[0].file_match.is_archive:
            # Process nested archive: unpack next to the source, write
            # output into a same-named sub folder of the destination.
            archive_file_name_without_extension = os.path.splitext(
                matched_files[0].file_name)[0]
            archive_path = os.path.join(source_path,
                                        matched_files[0].file_name)
            unpack_path = os.path.join(source_path,
                                       archive_file_name_without_extension)
            output_path = os.path.join(destination_path,
                                       archive_file_name_without_extension)
            process_archive(job_identifier, archive_path, unpack_path,
                            unpack_path, output_path, export_format_name,
                            source_srs, target_srs, simplify_parameter,
                            additional_arguments, archive_depth + 1)
        else:
            conversion.convert_files(job_identifier, source_path,
                                     matched_files, destination_path,
                                     export_format_name, source_srs,
                                     target_srs, simplify_parameter,
                                     additional_arguments)
Example #3
0
def process_file(job_identifier, file_id):
    """Convert a single uploaded file or archive for the given job.

    The calling thread first tries to claim the file match; if a
    competing thread wins the claim the function returns without doing
    any work.  Otherwise all matched files are marked processed and the
    data is either extracted (archives) or converted directly.
    """
    handler = JobHandler(job_identifier)
    if not handler.file_ready_for_process(file_id):
        return
    if not handler.filematch_try_process(file_id):
        return

    # Short pause so a racing thread can record its claim, then confirm
    # that this thread still owns the match.  If two threads processed
    # the same file match we would do duplicate work (race condition).
    time.sleep(0.001)
    if handler.filematch_processing_id(file_id) != file_id:
        return

    files = handler.get_matched_files(file_id)
    for item in files:
        handler.set_file_processed(item.file_id)

    src = handler.get_upload_folder_path()
    dst = handler.get_output_folder_path()
    export_name = handler.get_export_format_name()
    in_srs = handler.get_source_srs()
    out_srs = handler.get_target_srs()
    simplify_arg = handler.get_simplify_parameter()
    extra_args = handler.get_shell_parameters()
    base_extract = handler.get_extract_folder_path()

    if not files[0].file_match.is_archive:
        conversion.convert_files(job_identifier, src, files, dst,
                                 export_name, in_srs, out_srs,
                                 simplify_arg, extra_args)
        return

    name = files[0].matched_file_name
    # Several uploads: unpack each archive into its own sub folder of
    # the extract folder; a single upload unpacks into the root.
    target = base_extract
    if handler.get_file_count() > 1:
        target = os.path.join(base_extract, os.path.splitext(name)[0])
    process_archive(job_identifier, os.path.join(src, name),
                    base_extract, target, dst, export_name,
                    in_srs, out_srs, simplify_arg, extra_args, 1)
Example #4
0
def process_folder(
        job_identifier,
        source_path,
        file_dict,
        destination_path,
        export_format_name,
        source_srs,
        target_srs,
        simplify_parameter,
        additional_arguments,
        archive_depth=1):
    """Match, rename, register and convert the files of one folder.

    Renames files to their canonical matched names, registers each file
    match with the job, then processes every match that has not been
    handled yet, recursing into nested archives with an incremented
    ``archive_depth``.

    :param job_identifier: identifier used to construct the JobHandler.
    :param source_path: folder containing the input files.
    :param file_dict: mapping fed to FileMatcher (file ids to names).
    :param destination_path: folder that receives converted output.
    :param export_format_name: target format passed to the converter.
    :param source_srs: source spatial reference system.
    :param target_srs: target spatial reference system.
    :param simplify_parameter: geometry simplification parameter.
    :param additional_arguments: extra shell parameters for conversion.
    :param archive_depth: nesting level for nested archives.
    """
    job_handler = JobHandler(job_identifier)
    file_matcher = FileMatcher(file_dict)

    # Rename files to the canonical names chosen by the matcher.
    for file_match in file_matcher.get_matches():
        for file_id, new_file_name in file_match.get_file_dict().items():
            original_file_name = file_matcher.get_original_file_name(file_id)
            if original_file_name != new_file_name:
                filemanager.rename_file(
                    source_path,
                    original_file_name,
                    new_file_name)

    # Register every match with the job before processing starts.
    for file_match in file_matcher.get_matches():
        job_handler.add_file_match(
            source_path,
            file_match.get_file_dict(),
            file_match.get_ogr_format_name(),
            file_match.is_archive(),
            file_match.is_valid())

    for file_match in file_matcher.get_matches():
        file_id = list(file_match.get_file_dict().keys())[0]
        # BUG FIX: `matched_files` used to be assigned only inside this
        # guard yet dereferenced after it, raising a NameError when the
        # first match was already processed and otherwise reusing a stale
        # `matched_files` from a previous iteration.  Skip processed
        # matches entirely instead.
        if job_handler.get_file_processed(file_id):
            continue
        matched_files = job_handler.get_matched_files(file_id)
        for matched_file in matched_files:
            job_handler.set_file_processed(matched_file.file_id)

        if matched_files[0].file_match.is_archive:
            # Process nested archive: unpack next to the source, write
            # output into a same-named sub folder of the destination.
            archive_file_name_without_extension = os.path.splitext(
                matched_files[0].file_name)[0]
            archive_path = os.path.join(
                source_path,
                matched_files[0].file_name)
            unpack_path = os.path.join(
                source_path,
                archive_file_name_without_extension)
            output_path = os.path.join(
                destination_path,
                archive_file_name_without_extension)
            process_archive(
                job_identifier,
                archive_path,
                unpack_path,
                unpack_path,
                output_path,
                export_format_name,
                source_srs,
                target_srs,
                simplify_parameter,
                additional_arguments,
                archive_depth + 1)
        else:
            conversion.convert_files(
                job_identifier,
                source_path,
                matched_files,
                destination_path,
                export_format_name,
                source_srs,
                target_srs,
                simplify_parameter,
                additional_arguments)