def get_sent_submissions(survey_dict):
    """Reads the database of processed submissions, if any."""
    file_path = os.path.join(survey_dict['survey_path'], 'submissions.csv')
    if os.path.exists(file_path):
        # Exhaust the generator in case we need to re-iterate it:
        submission_list = list(database.read_csv_file(file_path))
        logger.info(
            'Found submission file with: `%s` rows', len(submission_list))
        submissions = get_submissions_dict(submission_list)
    else:
        submissions = {}
    return submissions
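

# `get_submissions_dict` is defined elsewhere in this module. A minimal sketch
# of the behaviour assumed above (key each non-empty row by the submission id
# in its first column), shown under a hypothetical name so it cannot shadow
# the real helper:
#
#     def _submissions_by_id(submission_list):
#         """Illustrative only: map submission id (first column) to its row."""
#         return {row[0]: row for row in submission_list if row}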
def process(url, name):
    """Main process to translate the given URL."""
    survey_dict = query.get_survey_dict(url)
    map_path = os.path.join(survey_dict['survey_path'], 'limesurvey_map.csv')
    survey_map = database.read_csv_file(map_path)
    untranslated_path = os.path.join(survey_dict['survey_path'], name)
    untranslated_rows = database.read_csv_file(untranslated_path)
    untranslated_list = list(untranslated_rows)
    untranslated_header = untranslated_list[0]
    # Determine the index where the submission data starts:
    data_index = get_data_index(untranslated_header)
    translated_map = get_translated_map(survey_map, untranslated_header)
    # Prepare the translated output:
    translated_header = get_translated_header(
        untranslated_header, translated_map, data_index)
    translation_list = [translated_header]
    for row in filter(None, untranslated_list[1:]):
        translated_row = translate_row(row, translated_map, data_index)
        assert len(translated_header) == len(translated_row), (
            'Mismatched row length')
        translation_list.append(translated_row)
    logger.info('Translated: %s', len(translation_list) - 1)
    translated_path = os.path.join(survey_dict['survey_path'], 'translated.csv')
    database.save_csv_file(translated_path, translation_list)
    return True
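

# Example usage (illustrative; the URL and CSV name below are hypothetical):
#
#     process('https://surveys.example.org/index.php/123456', 'untranslated.csv')
#
# The call reads `limesurvey_map.csv` and the named CSV from the survey path
# resolved by `query.get_survey_dict`, writes the result to `translated.csv`
# in the same directory, and returns True.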
def process(url, name, dry=False):
    """Main process to submit the exported survey answers for the given URL."""
    survey_dict = query.get_survey_dict(url, with_translations=False)
    available_languages = query.get_language_list(
        survey_dict['form_tree'].tree)
    export_path = get_export_csv_path(survey_dict['survey_path'], name)
    if (not export_path) or (not os.path.exists(export_path)):
        answers_name = export_path if export_path else survey_dict['survey_path']
        logger.error('Missing exported survey answers: `%s`', answers_name)
        raise ValueError('Cannot submit survey.')
    survey_dict['filename_prefix'] = (
        export_path.rsplit('/', 1)[-1].replace('.csv', ''))
    submission_list = list(database.read_csv_file(export_path))
    row_map = translator.update_key_map(submission_list[0])
    sent_submissions = get_sent_submissions(survey_dict)
    submission_queue = get_submission_queue(
        submission_list, sent_submissions.keys(),
        survey_dict['filename_prefix'])
    for i, row in enumerate(submission_queue):
        submission_id = row[0]
        language = get_row_language(row, available_languages)
        locale_url = query.clean_url(url, {'surveylanguage': language})
        logger.debug('Processing submission: `%s`', submission_id)
        partial_payload = translator.prepare_payload(row, row_map)
        if validator.is_valid_payload(partial_payload, survey_dict):
            payload, pre_submission = complete_payload(
                locale_url, partial_payload)
            if dry:
                logger.info('Dry run. Valid row: `%s`.', submission_id)
                continue
            else:
                # Send the submission:
                success_response = send_submission(
                    locale_url, payload, pre_submission, submission_id,
                    survey_dict)
                if not success_response:
                    # Failed submission:
                    logger.error('Failed to send: `%s`', submission_id)
                    continue
                submission_row = get_submission_row(
                    submission_id, survey_dict['filename_prefix'],
                    success_response)
                sent_submissions[submission_id] = submission_row
                logger.info('Submission sent: `%s`', success_response)
    # Update the database with processed submissions:
    return save_sent_submissions(sent_submissions, survey_dict)
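

# Example usage (illustrative; the URL and export name below are hypothetical):
#
#     process('https://surveys.example.org/index.php/123456',
#             'answers_export.csv', dry=True)
#
# With `dry=True` every valid row is logged but nothing is sent; with the
# default `dry=False` valid rows are submitted and the processed-submission
# database is updated via `save_sent_submissions`.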