def process_file(job_identifier, file_id):
    """Process the uploaded file *file_id* belonging to job *job_identifier*.

    Claims the file match for processing, marks every file in the matched
    group as processed, and then either extracts and processes an archive or
    converts the matched files directly.
    """
    handler = JobHandler(job_identifier)
    if not (handler.file_ready_for_process(file_id)
            and handler.filematch_try_process(file_id)):
        return

    time.sleep(0.001)
    if handler.filematch_processing_id(file_id) != file_id:
        # Another thread claimed the same file match after our try_process =>
        # race condition; this thread backs off and lets the winner proceed.
        return

    matched_files = handler.get_matched_files(file_id)
    for entry in matched_files:
        handler.set_file_processed(entry.file_id)

    source_path = handler.get_upload_folder_path()
    destination_path = handler.get_output_folder_path()
    export_format_name = handler.get_export_format_name()
    source_srs = handler.get_source_srs()
    target_srs = handler.get_target_srs()
    simplify_parameter = handler.get_simplify_parameter()
    additional_arguments = handler.get_shell_parameters()
    extract_base_path = handler.get_extract_folder_path()

    first = matched_files[0]
    if first.file_match.is_archive:
        file_name = first.matched_file_name
        if handler.get_file_count() > 1:
            # Several files in the job: extract into a dedicated sub folder
            # named after the archive so the outputs do not collide.
            extract_path = os.path.join(extract_base_path,
                                        os.path.splitext(file_name)[0])
        else:
            extract_path = extract_base_path
        process_archive(job_identifier,
                        os.path.join(source_path, file_name),
                        extract_base_path, extract_path, destination_path,
                        export_format_name, source_srs, target_srs,
                        simplify_parameter, additional_arguments, 1)
    else:
        conversion.convert_files(job_identifier, source_path, matched_files,
                                 destination_path, export_format_name,
                                 source_srs, target_srs, simplify_parameter,
                                 additional_arguments)
def process_webservice_urls(job_identifier):
    """Convert every webservice URL registered for job *job_identifier*.

    Each URL is converted into the job's output folder; the first output is
    named ``webservice`` and subsequent ones ``webservice2``, ``webservice3``,
    and so on.
    """
    handler = JobHandler(job_identifier)
    webservice_urls = handler.get_webservice_urls()
    destination_path = handler.get_output_folder_path()
    export_format_name = handler.get_export_format_name()
    source_srs = handler.get_source_srs()
    target_srs = handler.get_target_srs()
    simplify_parameter = handler.get_simplify_parameter()
    additional_arguments = handler.get_shell_parameters()

    for index, url in enumerate(webservice_urls.values(), start=1):
        # Only outputs after the first carry a numeric suffix.
        output_name = 'webservice' if index == 1 else 'webservice' + str(index)
        conversion.convert_webservice(job_identifier, url, destination_path,
                                      output_name, export_format_name,
                                      source_srs, target_srs,
                                      simplify_parameter,
                                      additional_arguments)
# NOTE(review): this is a byte-for-byte duplicate of an earlier
# ``process_file`` definition in this file; at import time this later
# definition shadows the earlier one. Consider removing one of the two.
def process_file(job_identifier, file_id):
    """Process the uploaded file *file_id* for job *job_identifier*.

    After winning the claim on the file match, marks the matched group as
    processed and dispatches to archive extraction or direct conversion.
    """
    job_handler = JobHandler(job_identifier)
    ready = job_handler.file_ready_for_process(file_id)
    if ready and job_handler.filematch_try_process(file_id):
        time.sleep(0.001)
        if job_handler.filematch_processing_id(file_id) != file_id:
            # In this case two threads are processing the same file match =>
            # Race Condition! The loser returns without doing any work.
            return

        matched_files = job_handler.get_matched_files(file_id)
        for matched_file in matched_files:
            job_handler.set_file_processed(matched_file.file_id)

        # Gather every conversion parameter from the job configuration.
        source_path = job_handler.get_upload_folder_path()
        destination_path = job_handler.get_output_folder_path()
        export_format_name = job_handler.get_export_format_name()
        source_srs = job_handler.get_source_srs()
        target_srs = job_handler.get_target_srs()
        simplify_parameter = job_handler.get_simplify_parameter()
        additional_arguments = job_handler.get_shell_parameters()
        extract_base_path = job_handler.get_extract_folder_path()

        if not matched_files[0].file_match.is_archive:
            conversion.convert_files(
                job_identifier, source_path, matched_files,
                destination_path, export_format_name, source_srs,
                target_srs, simplify_parameter, additional_arguments)
            return

        file_name = matched_files[0].matched_file_name
        # With more than one file in the job, each archive extracts into a
        # sub folder of the extract folder named after the archive itself.
        sub_folder = os.path.splitext(file_name)[0]
        extract_path = (os.path.join(extract_base_path, sub_folder)
                        if job_handler.get_file_count() > 1
                        else extract_base_path)
        process_archive(
            job_identifier, os.path.join(source_path, file_name),
            extract_base_path, extract_path, destination_path,
            export_format_name, source_srs, target_srs,
            simplify_parameter, additional_arguments, 1)