def store_file(job_identifier, file_id, file_data):
    """Persist uploaded *file_data* for the job unless the file was removed.

    Looks up the stored file name for *file_id*, saves the payload into
    the job's upload area, then flags the file as uploaded. Files the
    user already removed are silently skipped.
    """
    handler = JobHandler(job_identifier)
    if handler.file_removed(file_id):
        return
    stored_name = handler.get_file_name(file_id)
    filemanager.store_uploaded_file(
        job_identifier.job_id,
        file_data,
        stored_name)
    handler.set_file_uploaded(file_id)
# Beispiel #2 (scraped example-site marker; score: 0 — not part of the code)
def remove_file(request, client_job_token, file_id):
    """Django view: mark *file_id* as removed on the client's job.

    Only POST requests are honored; anything else is redirected to the
    main page. Unknown job tokens are ignored but still answered with
    'success'.
    """
    if request.method != 'POST':
        return redirect_to_main_page(request)
    job_identifier = jobidentification.get_job_identifier_by_client_job_token(
        request.session.session_key, client_job_token)
    if job_identifier is not None:
        JobHandler(job_identifier).remove_file(file_id)
    return HttpResponse('success')
def _add_ogr_log_entry(job_identifier, input_string, full_path, ogr_command,
                       ogr_error_message, is_successful):
    """Record one ogr2ogr invocation in the job's log.

    The logged path is *full_path* rewritten relative to the job's
    output folder; the output folder itself is logged as '-'.
    """
    log_handler = LogHandler(job_identifier)
    output_root = JobHandler(job_identifier).get_output_folder_path()
    relative_path = full_path.replace(output_root, '').lstrip('/\\')
    if relative_path == '':
        relative_path = '-'
    else:
        relative_path = os.path.join('.', relative_path)
    log_handler.add_ogr_log_entry(relative_path, input_string, ogr_command,
                                  ogr_error_message, is_successful)
def process_archive(
        job_identifier,
        archive_path,
        unpack_base_path,
        unpack_path,
        output_path,
        export_format_name,
        source_srs,
        target_srs,
        simplify_parameter,
        additional_arguments,
        archive_depth=1):
    """Unpack *archive_path* and convert every folder found inside it.

    Walks the unpacked tree, assigns fresh file ids to each file,
    mirrors the directory layout below *output_path* and hands each
    folder to process_folder. Recursion into nested archives is capped
    at 6 levels via *archive_depth*.
    """
    job_handler = JobHandler(job_identifier)

    # Refuse overly deep nesting (protects against archive bombs).
    if archive_depth > 6:
        return

    archives.unpack_archive_file(archive_path, unpack_path)

    # Materialize the walk up front so folders created while converting
    # are not walked as well.
    walked_tree = [(root, files)
                   for root, dirs, files in os.walk(unpack_path)]

    for source_path, file_names in walked_tree:
        file_dict = {}
        for file_name in file_names:
            file_dict[job_handler.get_free_file_id()] = file_name

        # Mirror the archive's folder layout below output_path.
        sub_path = source_path.replace(unpack_base_path, '').lstrip('/\\')
        destination_path = os.path.join(output_path, sub_path)
        if not os.path.exists(destination_path):
            os.mkdir(destination_path)

        process_folder(
            job_identifier,
            source_path,
            file_dict,
            destination_path,
            export_format_name,
            source_srs,
            target_srs,
            simplify_parameter,
            additional_arguments,
            archive_depth)
def initialize_conversion_job(job_identifier):
    """Create the job's on-disk folder structure and record the paths.

    The upload/extract/output/download folder paths are resolved via
    filemanager and stored on the job for later lookup.
    """
    job_id = job_identifier.job_id
    filemanager.create_job_folders(job_id)

    job_handler = JobHandler(job_identifier)
    job_handler.set_upload_folder_path(
        filemanager.get_upload_folder_path(job_id))
    job_handler.set_extract_folder_path(
        filemanager.get_extract_folder_path(job_id))
    job_handler.set_output_folder_path(
        filemanager.get_output_folder_path(job_id))
    job_handler.set_download_folder_path(
        filemanager.get_download_folder_path(job_id))
def process_folder(job_identifier,
                   source_path,
                   file_dict,
                   destination_path,
                   export_format_name,
                   source_srs,
                   target_srs,
                   simplify_parameter,
                   additional_arguments,
                   archive_depth=1):
    """Match the files in *file_dict* and convert each unprocessed match.

    Files in *source_path* are first renamed to their canonical matched
    names, every match is registered with the job, and each
    not-yet-processed match is either recursed into (archives, via
    process_archive) or converted (conversion.convert_files).

    Args:
        job_identifier: identifies the conversion job.
        source_path: folder containing the files listed in *file_dict*.
        file_dict: mapping of file id -> file name for this folder.
        destination_path: folder that receives conversion output.
        export_format_name, source_srs, target_srs, simplify_parameter,
            additional_arguments: conversion settings, forwarded unchanged.
        archive_depth: current archive nesting level; incremented when
            recursing (process_archive stops at depth > 6).
    """
    job_handler = JobHandler(job_identifier)
    file_matcher = FileMatcher(file_dict)

    # Rename files so each one carries its canonical matched name.
    for file_match in file_matcher.get_matches():
        for file_id, new_file_name in file_match.get_file_dict().items():
            original_file_name = file_matcher.get_original_file_name(file_id)
            if original_file_name != new_file_name:
                filemanager.rename_file(source_path, original_file_name,
                                        new_file_name)

    # Register every match with the job before processing any of them.
    for file_match in file_matcher.get_matches():
        job_handler.add_file_match(source_path, file_match.get_file_dict(),
                                   file_match.get_ogr_format_name(),
                                   file_match.is_archive(),
                                   file_match.is_valid())

    for file_match in file_matcher.get_matches():
        file_id = list(file_match.get_file_dict().keys())[0]
        # BUG FIX: the archive/convert step below was previously outside
        # this guard, so it ran even for already-processed matches and
        # could raise NameError (or reuse a stale `matched_files`) when
        # the first match was already marked processed.
        if job_handler.get_file_processed(file_id):
            continue
        matched_files = job_handler.get_matched_files(file_id)
        for matched_file in matched_files:
            job_handler.set_file_processed(matched_file.file_id)

        if matched_files[0].file_match.is_archive:
            # Nested archive: unpack next to the source and recurse.
            base_name = os.path.splitext(matched_files[0].file_name)[0]
            archive_path = os.path.join(source_path,
                                        matched_files[0].file_name)
            unpack_path = os.path.join(source_path, base_name)
            output_path = os.path.join(destination_path, base_name)
            process_archive(job_identifier, archive_path, unpack_path,
                            unpack_path, output_path, export_format_name,
                            source_srs, target_srs, simplify_parameter,
                            additional_arguments, archive_depth + 1)
        else:
            conversion.convert_files(job_identifier, source_path,
                                     matched_files, destination_path,
                                     export_format_name, source_srs,
                                     target_srs, simplify_parameter,
                                     additional_arguments)
def process_archive(job_identifier,
                    archive_path,
                    unpack_base_path,
                    unpack_path,
                    output_path,
                    export_format_name,
                    source_srs,
                    target_srs,
                    simplify_parameter,
                    additional_arguments,
                    archive_depth=1):
    """Unpack an archive and convert its contents folder by folder.

    Mirrors the archive's directory layout below *output_path* and
    delegates each folder to process_folder. Nested archives are only
    followed up to 6 levels deep (*archive_depth*).
    """
    job_handler = JobHandler(job_identifier)

    # Stop at the allowed nesting depth for archives-in-archives.
    if archive_depth > 6:
        return

    archives.unpack_archive_file(archive_path, unpack_path)

    # Snapshot the directory tree before any processing mutates it.
    tree = [(root, files)
            for root, dirs, files in os.walk(unpack_path)]

    for source_path, names in tree:
        file_dict = {}
        for name in names:
            file_dict[job_handler.get_free_file_id()] = name

        relative = source_path.replace(unpack_base_path, '').lstrip('/\\')
        destination_path = os.path.join(output_path, relative)

        if not os.path.exists(destination_path):
            os.mkdir(destination_path)

        process_folder(job_identifier, source_path, file_dict,
                       destination_path, export_format_name, source_srs,
                       target_srs, simplify_parameter, additional_arguments,
                       archive_depth)
def initialize_conversion_job(job_identifier):
    """Create all per-job folders on disk and store their paths on the job."""
    job_id = job_identifier.job_id
    filemanager.create_job_folders(job_id)

    # Resolve every folder path once, then record them on the job.
    upload_path = filemanager.get_upload_folder_path(job_id)
    extract_path = filemanager.get_extract_folder_path(job_id)
    output_path = filemanager.get_output_folder_path(job_id)
    download_path = filemanager.get_download_folder_path(job_id)

    job_handler = JobHandler(job_identifier)
    job_handler.set_upload_folder_path(upload_path)
    job_handler.set_extract_folder_path(extract_path)
    job_handler.set_output_folder_path(output_path)
    job_handler.set_download_folder_path(download_path)
def store_file(job_identifier, file_id, file_data):
    """Save uploaded *file_data* for the job, skipping removed files."""
    job_handler = JobHandler(job_identifier)
    if job_handler.file_removed(file_id):
        return
    filemanager.store_uploaded_file(
        job_identifier.job_id,
        file_data,
        job_handler.get_file_name(file_id))
    job_handler.set_file_uploaded(file_id)
# Beispiel #10 (scraped example-site marker; score: 0 — not part of the code)
def _add_ogr_log_entry(
        job_identifier,
        input_string,
        full_path,
        ogr_command,
        ogr_error_message,
        is_successful):
    """Add one ogr2ogr log entry with the path relativized to the job's
    output folder ('-' when the path is the output folder itself)."""
    log_handler = LogHandler(job_identifier)
    job_handler = JobHandler(job_identifier)
    trimmed = full_path.replace(
        job_handler.get_output_folder_path(), '').lstrip('/\\')
    sub_path = '-' if trimmed == '' else os.path.join('.', trimmed)
    log_handler.add_ogr_log_entry(
        sub_path,
        input_string,
        ogr_command,
        ogr_error_message,
        is_successful)
def process_webservice_urls(job_identifier):
    """Convert every webservice URL registered on the job.

    Output base names are 'webservice', 'webservice2', 'webservice3', ...
    """
    job_handler = JobHandler(job_identifier)

    webservice_urls = job_handler.get_webservice_urls()
    destination_path = job_handler.get_output_folder_path()
    export_format_name = job_handler.get_export_format_name()
    source_srs = job_handler.get_source_srs()
    target_srs = job_handler.get_target_srs()
    simplify_parameter = job_handler.get_simplify_parameter()
    additional_arguments = job_handler.get_shell_parameters()

    for index, webservice_url in enumerate(webservice_urls.values(), 1):
        # The first output keeps the bare base name; later ones get a suffix.
        base_name = 'webservice' + (str(index) if index > 1 else '')
        conversion.convert_webservice(
            job_identifier, webservice_url, destination_path, base_name,
            export_format_name, source_srs, target_srs, simplify_parameter,
            additional_arguments)
def process_webservice_urls(job_identifier):
    """Run the conversion for each webservice URL stored on the job."""
    job_handler = JobHandler(job_identifier)

    urls = job_handler.get_webservice_urls()
    destination_path = job_handler.get_output_folder_path()
    export_format_name = job_handler.get_export_format_name()
    source_srs = job_handler.get_source_srs()
    target_srs = job_handler.get_target_srs()
    simplify_parameter = job_handler.get_simplify_parameter()
    additional_arguments = job_handler.get_shell_parameters()

    counter = 0
    for url in urls.values():
        counter += 1
        # First result is named 'webservice'; later ones get a numeric suffix.
        base_name = 'webservice' + (str(counter) if counter > 1 else '')
        conversion.convert_webservice(
            job_identifier, url, destination_path, base_name,
            export_format_name, source_srs, target_srs, simplify_parameter,
            additional_arguments)
# Beispiel #13 (scraped example-site marker; score: 0 — not part of the code)
def convert_webservice(request, client_job_token):
    """Django view: convert a single webservice URL synchronously.

    Reads the conversion settings from the POST body, sets up a new job
    for *client_job_token*, runs the whole pipeline (initialization,
    conversion, download packaging, logging, cleanup) inside the request
    and returns the job id plus a success flag as JSON. Non-POST or
    invalid-token requests get an HTTP 500.
    """
    if request.method == 'POST':
        POST_dict = request.POST.dict()
        webservice_url = POST_dict['webservice_url'].strip()
        export_format_name = POST_dict['export_format'].strip()
        source_srs = POST_dict['source_srs'].strip()
        target_srs = POST_dict['target_srs'].strip()
        simplify_parameter = POST_dict['simplify_parameter'].strip()
        download_name = POST_dict['download_name'].strip()
        # Truncate long captions: keep 7 chars plus an ellipsis (max 10).
        if len(download_name) > 10:
            download_name = download_name[0:7] + '...'
        client_ip = get_client_ip(request)
        client_language = get_client_language(request)
        client_user_agent = get_client_user_agent(request)

        session_key = request.session.session_key
        job_identifier = jobidentification.get_new_job_identifier_by_client_job_token(
            session_key, client_job_token)
        job_id = job_identifier.job_id
        # An empty job id signals an invalid/unknown job token.
        if job_id == '':
            return HttpResponseServerError('Error: Job Token is not valid.')

        job_handler = JobHandler(job_identifier)
        log_handler = LogHandler(job_identifier)
        download_handler = DownloadHandler(job_identifier)

        # Apply format-specific writer parameters when the format is known.
        format_information = OgrFormat.get_format_information_by_name(
            export_format_name)
        if format_information is not None:
            job_handler.set_export_format_name(format_information.ogr_name)
            for shell_parameter in format_information.additional_parameters:
                if shell_parameter.use_for_writing:
                    job_handler.add_shell_parameter(
                        shell_parameter.prefix,
                        shell_parameter.parameter_name,
                        shell_parameter.parameter_value,
                        shell_parameter.value_quotation_marks)

        for global_shell_parameter in GlobalOgrShellParameter.get_active_parameters():
            job_handler.add_shell_parameter_object(global_shell_parameter)

        job_handler.add_webservice_url('0', webservice_url)
        # NOTE(review): this overwrites the ogr_name set above with the raw
        # POST value — confirm whether that is intentional.
        job_handler.set_export_format_name(export_format_name)
        job_handler.set_source_srs(source_srs)
        job_handler.set_target_srs(target_srs)
        job_handler.set_simplify_parameter(simplify_parameter)

        # Log start
        log_handler.set_start_time()
        log_handler.set_client_ip(client_ip)
        log_handler.set_client_language(client_language)
        log_handler.set_client_user_agent(client_user_agent)
        log_handler.set_input_type('webservice')
        log_handler.set_export_format_name(
            job_handler.get_export_format_name())
        log_handler.set_source_srs(job_handler.get_source_srs())
        log_handler.set_target_srs(job_handler.get_target_srs())
        log_handler.set_simplify_parameter(
            job_handler.get_simplify_parameter())

        # Conversion start
        jobprocessing.initialize_conversion_job(job_identifier)
        jobprocessing.process_webservice_urls(job_identifier)

        jobprocessing.create_download_file(job_identifier)

        download_handler.add_download_item()
        download_handler.set_download_caption(download_name)

        # Conversion end
        log_handler.set_end_time()
        # Download size is logged in KiB (raw size / 1024).
        log_handler.set_download_file_size(
            download_handler.get_download_file_size() /
            1024)
        log_handler.set_has_download_file(
            download_handler.download_file_exists())
        log_handler.set_all_files_converted()
        log_handler.set_has_no_error()
        # Log end

        jobprocessing.cleanup()

        response_data = {}
        response_data['job_id'] = job_id
        response_data['successful'] = download_handler.download_file_exists()
        # NOTE(review): 'mimetype' is the pre-Django-1.7 keyword; modern
        # Django spells this 'content_type' — confirm the Django version.
        return HttpResponse(json.dumps(response_data), mimetype="text/plain")

    return HttpResponseServerError('Error: Request not valid.')
def process_file(job_identifier, file_id):
    """Process the file match that *file_id* belongs to, exactly once.

    Runs only when the whole match is ready and this caller wins the
    claim (filematch_try_process). Archives are unpacked and recursed
    into via process_archive; everything else goes straight to
    conversion.convert_files.
    """
    job_handler = JobHandler(job_identifier)
    if job_handler.file_ready_for_process(
            file_id) and job_handler.filematch_try_process(file_id):
        # Brief pause so a competing worker's claim becomes visible
        # before the ownership re-check below.
        time.sleep(0.001)
        if job_handler.filematch_processing_id(file_id) != file_id:
            # In this case two threads are processing the same file match =>
            # Race Condition!
            return
        matched_files = job_handler.get_matched_files(file_id)
        for matched_file in matched_files:
            job_handler.set_file_processed(matched_file.file_id)

        # Conversion settings previously stored on the job.
        source_path = job_handler.get_upload_folder_path()
        destination_path = job_handler.get_output_folder_path()
        export_format_name = job_handler.get_export_format_name()
        source_srs = job_handler.get_source_srs()
        target_srs = job_handler.get_target_srs()
        simplify_parameter = job_handler.get_simplify_parameter()
        additional_arguments = job_handler.get_shell_parameters()
        extract_base_path = job_handler.get_extract_folder_path()
        if matched_files[0].file_match.is_archive:
            file_name = matched_files[0].matched_file_name
            if job_handler.get_file_count() > 1:
                # Creates a sub folder in the extract folder
                extract_path = os.path.join(extract_base_path,
                                            os.path.splitext(file_name)[0])
            else:
                extract_path = extract_base_path
            process_archive(job_identifier,
                            os.path.join(source_path, file_name),
                            extract_base_path, extract_path, destination_path,
                            export_format_name, source_srs, target_srs,
                            simplify_parameter, additional_arguments, 1)
        else:
            conversion.convert_files(job_identifier, source_path,
                                     matched_files, destination_path,
                                     export_format_name, source_srs,
                                     target_srs, simplify_parameter,
                                     additional_arguments)
def remove_file(job_identifier, job_id, file_id):
    """Mark *file_id* as removed on the job (*job_id* is unused here)."""
    JobHandler(job_identifier).remove_file(file_id)
# Beispiel #16 (scraped example-site marker; score: 0 — not part of the code)
def start_conversion_job(request, client_job_token):
    """Django view: configure a new file-conversion job from POST data.

    Consumes the well-known settings keys from the POST body; the keys
    that remain in POST_dict afterwards are treated as the uploaded
    file-id -> file-name mapping and fed to FileMatcher. The job is
    initialized but files are converted later (see process_file).
    Non-POST requests are redirected to the main page.
    """
    if request.method == 'POST':
        # Read POST values
        POST_dict = request.POST.dict()
        # Each settings key is deleted so that only file entries remain
        # in POST_dict for the FileMatcher below.
        export_format_name = POST_dict['export_format'].strip()
        del POST_dict['export_format']
        source_srs = POST_dict['source_srs'].strip()
        del POST_dict['source_srs']
        target_srs = POST_dict['target_srs'].strip()
        del POST_dict['target_srs']
        simplify_parameter = POST_dict['simplify_parameter'].strip()
        del POST_dict['simplify_parameter']
        download_name = POST_dict['download_name'].strip()
        del POST_dict['download_name']
        # Truncate long captions: keep 7 chars plus an ellipsis (max 10).
        if len(download_name) > 10:
            download_name = download_name[0:7] + '...'
        client_ip = get_client_ip(request)
        client_language = get_client_language(request)
        client_user_agent = get_client_user_agent(request)

        session_key = request.session.session_key
        job_identifier = jobidentification.get_new_job_identifier_by_client_job_token(
            session_key, client_job_token)
        job_id = job_identifier.job_id
        # An empty job id signals an invalid/unknown job token.
        if job_id == '':
            return HttpResponseServerError('Error: Job Token is not valid.')

        job_handler = JobHandler(job_identifier)
        log_handler = LogHandler(job_identifier)
        download_handler = DownloadHandler(job_identifier)

        # Register the matched upload files with the job.
        file_matcher = FileMatcher(POST_dict)
        for file_match in file_matcher.get_matches():
            job_handler.add_file_match(
                job_handler.get_upload_folder_path(),
                file_match.get_file_dict(),
                file_match.get_ogr_format_name(),
                file_match.is_archive(),
                file_match.is_valid())

        # Apply format-specific writer parameters when the format is known.
        format_information = OgrFormat.get_format_information_by_name(
            export_format_name)
        if format_information is not None:
            job_handler.set_export_format_name(format_information.ogr_name)
            for shell_parameter in format_information.additional_parameters:
                if shell_parameter.use_for_writing:
                    job_handler.add_shell_parameter(
                        shell_parameter.prefix,
                        shell_parameter.parameter_name,
                        shell_parameter.parameter_value,
                        shell_parameter.value_quotation_marks)

        for global_shell_parameter in GlobalOgrShellParameter.get_active_parameters():
            job_handler.add_shell_parameter_object(global_shell_parameter)

        # NOTE(review): this overwrites the ogr_name set above with the raw
        # POST value — confirm whether that is intentional.
        job_handler.set_export_format_name(export_format_name)
        job_handler.set_source_srs(source_srs)
        job_handler.set_target_srs(target_srs)
        job_handler.set_simplify_parameter(simplify_parameter)

        download_handler.set_download_caption(download_name)

        log_handler.set_start_time()
        log_handler.set_client_ip(client_ip)
        log_handler.set_client_language(client_language)
        log_handler.set_client_user_agent(client_user_agent)
        log_handler.set_input_type('files')
        log_handler.set_export_format_name(
            job_handler.get_export_format_name())
        log_handler.set_source_srs(job_handler.get_source_srs())
        log_handler.set_target_srs(job_handler.get_target_srs())
        log_handler.set_simplify_parameter(
            job_handler.get_simplify_parameter())

        jobprocessing.initialize_conversion_job(job_identifier)

        return HttpResponse('success')
    else:
        return redirect_to_main_page(request)
def process_folder(
        job_identifier,
        source_path,
        file_dict,
        destination_path,
        export_format_name,
        source_srs,
        target_srs,
        simplify_parameter,
        additional_arguments,
        archive_depth=1):
    """Match the files in *file_dict* and convert each unprocessed match.

    Files in *source_path* are renamed to their canonical matched names,
    every match is registered with the job, and each not-yet-processed
    match is either recursed into (archives, via process_archive) or
    converted (conversion.convert_files).

    Args:
        job_identifier: identifies the conversion job.
        source_path: folder containing the files listed in *file_dict*.
        file_dict: mapping of file id -> file name for this folder.
        destination_path: folder that receives conversion output.
        export_format_name, source_srs, target_srs, simplify_parameter,
            additional_arguments: conversion settings, forwarded unchanged.
        archive_depth: current archive nesting level; incremented when
            recursing (process_archive stops at depth > 6).
    """
    job_handler = JobHandler(job_identifier)
    file_matcher = FileMatcher(file_dict)

    # Rename files so each one carries its canonical matched name.
    for file_match in file_matcher.get_matches():
        for file_id, new_file_name in file_match.get_file_dict().items():
            original_file_name = file_matcher.get_original_file_name(file_id)
            if original_file_name != new_file_name:
                filemanager.rename_file(
                    source_path,
                    original_file_name,
                    new_file_name)

    # Register every match with the job before processing any of them.
    for file_match in file_matcher.get_matches():
        job_handler.add_file_match(
            source_path,
            file_match.get_file_dict(),
            file_match.get_ogr_format_name(),
            file_match.is_archive(),
            file_match.is_valid())

    for file_match in file_matcher.get_matches():
        file_id = list(file_match.get_file_dict().keys())[0]
        # BUG FIX: the archive/convert step below was previously outside
        # this guard, so it ran even for already-processed matches and
        # could raise NameError (or reuse a stale `matched_files`) when
        # the first match was already marked processed.
        if job_handler.get_file_processed(file_id):
            continue
        matched_files = job_handler.get_matched_files(file_id)
        for matched_file in matched_files:
            job_handler.set_file_processed(matched_file.file_id)

        if matched_files[0].file_match.is_archive:
            # Nested archive: unpack next to the source and recurse.
            base_name = os.path.splitext(matched_files[0].file_name)[0]
            archive_path = os.path.join(
                source_path,
                matched_files[0].file_name)
            unpack_path = os.path.join(source_path, base_name)
            output_path = os.path.join(destination_path, base_name)
            process_archive(
                job_identifier,
                archive_path,
                unpack_path,
                unpack_path,
                output_path,
                export_format_name,
                source_srs,
                target_srs,
                simplify_parameter,
                additional_arguments,
                archive_depth + 1)
        else:
            conversion.convert_files(
                job_identifier,
                source_path,
                matched_files,
                destination_path,
                export_format_name,
                source_srs,
                target_srs,
                simplify_parameter,
                additional_arguments)
def process_file(job_identifier, file_id):
    """Process the file match that *file_id* belongs to, exactly once.

    Runs only when the whole match is ready and this caller wins the
    claim (filematch_try_process). Archives are unpacked and recursed
    into via process_archive; everything else goes straight to
    conversion.convert_files.
    """
    job_handler = JobHandler(job_identifier)
    if job_handler.file_ready_for_process(
            file_id) and job_handler.filematch_try_process(file_id):
        # Brief pause so a competing worker's claim becomes visible
        # before the ownership re-check below.
        time.sleep(0.001)
        if job_handler.filematch_processing_id(file_id) != file_id:
            # In this case two threads are processing the same file match =>
            # Race Condition!
            return
        matched_files = job_handler.get_matched_files(file_id)
        for matched_file in matched_files:
            job_handler.set_file_processed(matched_file.file_id)

        # Conversion settings previously stored on the job.
        source_path = job_handler.get_upload_folder_path()
        destination_path = job_handler.get_output_folder_path()
        export_format_name = job_handler.get_export_format_name()
        source_srs = job_handler.get_source_srs()
        target_srs = job_handler.get_target_srs()
        simplify_parameter = job_handler.get_simplify_parameter()
        additional_arguments = job_handler.get_shell_parameters()
        extract_base_path = job_handler.get_extract_folder_path()
        if matched_files[0].file_match.is_archive:
            file_name = matched_files[0].matched_file_name
            if job_handler.get_file_count() > 1:
                # Creates a sub folder in the extract folder
                extract_path = os.path.join(
                    extract_base_path,
                    os.path.splitext(file_name)[0])
            else:
                extract_path = extract_base_path
            process_archive(
                job_identifier,
                os.path.join(
                    source_path,
                    file_name),
                extract_base_path,
                extract_path,
                destination_path,
                export_format_name,
                source_srs,
                target_srs,
                simplify_parameter,
                additional_arguments,
                1)
        else:
            conversion.convert_files(
                job_identifier,
                source_path,
                matched_files,
                destination_path,
                export_format_name,
                source_srs,
                target_srs,
                simplify_parameter,
                additional_arguments)
def remove_file(job_identifier, job_id, file_id):
    """Mark *file_id* as removed on the job (*job_id* is unused here)."""
    JobHandler(job_identifier).remove_file(file_id)