def main(dry_run):
    """Inject previous-round Delphi scores and Highcharts graphs into the survey.

    When ``dry_run`` is truthy, questions are inspected and the summary is
    printed, but no update calls are sent to the survey backend.
    """
    dry_run_postfix = ""
    if dry_run:
        dry_run_postfix = (
            "(this was a dry run, so no changes were actually made to the survey)"
        )
    logger = get_logger()  # NOTE(review): currently unused in this function
    survey_client = SurveyDefinitionsClient(survey_id=conf.CURRENT_SURVEY_ID)
    survey = survey_client.get_survey()["result"]
    questions = survey["Questions"]
    updated_questions = []
    for question_id, question in questions.items():
        touched = False
        export_tag = question["DataExportTag"]
        question_text = question["QuestionText"]
        # Only questions configured for the current Delphi round get
        # previous-round scores and graphs injected.
        if export_tag in conf.current_delphi_round_question_config:
            survey_question = SurveyQuestion(
                question, conf.current_delphi_round_question_config[export_tag]
            )
            survey_question.inject_previous_round_delphi_score()
            # --- graphs ---
            # Splice the graph JS into the question's addOnReady handler.
            existing_question_js = question.get("QuestionJS") or q_js.EMPTY_JS
            split_js = q_js.js_add_on_ready_re.split(existing_question_js, maxsplit=1)
            assert (
                n := len(split_js)
            ) == 2, f"Unexpected split of existing_question_js ({n}): {split_js}"
            t1_tag = conf.this_task_to_previous_task_mapping[export_tag]
            question["QuestionJS"] = (
                split_js[0]
                + "Qualtrics.SurveyEngine.addOnReady(function()\n{"
                + get_graph_js_for_qualtrics(t1_tag)
                + split_js[1]
            )
            # Replace the single chart placeholder with a Highcharts container div.
            container_id = f"highcharts-{t1_tag}"
            replacements_1 = conf.chart_placeholder_re.subn(
                HIGHCHARTS_CONTAINER.format(container_id), question_text
            )
            total_replacements = replacements_1[1]
            assert (
                total_replacements == 1
            ), f"Unexpected number of replacements ({total_replacements}) in question_text: {question_text}"
            question["QuestionText"] = replacements_1[0]
            touched = True
        if touched:
            updated_questions.append(export_tag)
            if not dry_run:
                survey_client.update_question(question_id=question_id, data=question)
    print(
        f"Updated {len(updated_questions)} questions: {updated_questions} {dry_run_postfix}"
    )
def main(dry_run=False):
    """Normalise the "Read more about this statement" modal links in a survey.

    Fuzzy-matches the existing link markup in every question (tolerating up to
    three character errors) and replaces it with the canonical ``target_link``
    markup, preserving the modal number. With ``dry_run=True`` the rewritten
    text is computed and reported but not pushed to the survey.
    """
    dry_run_postfix = ""
    if dry_run:
        dry_run_postfix = (
            "(this was a dry run, so no changes were actually made to the survey)"
        )
    survey_client = SurveyDefinitionsClient(survey_id=SURVEY_ID)
    survey = survey_client.get_survey()["result"]
    questions = survey["Questions"]
    # FIX: use raw strings for the pattern -- "\d" in a plain string literal is
    # an invalid escape sequence (SyntaxWarning on Python 3.12+, error later).
    # Group 1 is the whole link paragraph, group 2 the modal number.
    modal_link_re = regex.compile(
        r'(<p><span class="info-icon">ⓘ</span> <a href="#modal-(\d+)" rel="modal:open">'
        r"Read more about this statement</a>.</p>){e<=3}",  # allow up to 3 errors
        regex.ENHANCEMATCH,
    )
    target_link = '<p><span class="info-icon">ⓘ</span> <a href="#modal-{}" rel="modal:open">Read more about this statement</a></p>'
    updated_questions_counter = 0
    updated_questions_tags = list()
    for k, v in questions.items():
        question_text = v["QuestionText"]
        tag = v["DataExportTag"]
        if (m := modal_link_re.search(question_text)):
            v["QuestionText"] = question_text.replace(
                m.group(1), target_link.format(m.group(2))
            )
            # NOTE(review): `logger` is not defined in this function -- assumed
            # to be a module-level logger; confirm it exists at import time.
            logger.debug(
                "Question text after replacement",
                extra={"question text": v["QuestionText"]},
            )
            updated_questions_counter += 1
            updated_questions_tags.append(tag)
            if not dry_run:
                survey_client.update_question(question_id=k, data=v)
    # FIX: dry_run_postfix was computed but never used -- report a summary,
    # consistent with the sibling scripts in this project.
    print(
        f"Updated {updated_questions_counter} questions: {updated_questions_tags} {dry_run_postfix}"
    )
def main():
    """Rewrite "important" and "easy" questions in place, skipping the rest."""
    survey_client = SurveyDefinitionsClient(survey_id=SURVEY_ID)
    survey = survey_client.get_survey()["result"]
    for question_id, question in survey["Questions"].items():
        question_text = question["QuestionText"]
        # NOTE: despite the *_RE names, these are used as plain substrings here.
        if IMPORTANT_QUESTION_RE in question_text:
            updated_question = parse_important_question(question)
        elif EASY_QUESTION_RE_1 in question_text or EASY_QUESTION_RE_2 in question_text:
            updated_question = parse_easy_question(question)
        else:
            updated_question = None
            print(f"Skipped question: {question_text}")
        if updated_question:
            survey_client.update_question(question_id=question_id, data=updated_question)
class SurveyEditor:
    """
    Use this class for minor updates to manually-created surveys
    """

    def __init__(self, survey_id, account_name="cambridge"):
        self.logger = get_logger()
        self.survey_client = SurveyDefinitionsClient(
            qualtrics_account_name=account_name, survey_id=survey_id
        )
        # Fetch the survey once; callers edit `questions` / `updated_questions`
        # locally and then push with update_questions().
        self.survey_definition = self.survey_client.get_survey()["result"]
        self.questions = self.survey_definition["Questions"]
        # Locally-edited questions, keyed by question id.
        self.updated_questions = dict()

    def update_questions(self):
        """Push every locally-edited question back to the survey backend."""
        for question_id, data in self.updated_questions.items():
            self.survey_client.update_question(question_id=question_id, data=data)
        print(
            f"Updated {len(self.updated_questions)} questions: {list(self.updated_questions.keys())}"
        )
def main():
    """Replace t2 question prompts with their extended t1 counterparts."""
    logger = get_logger()
    survey_client = SurveyDefinitionsClient(survey_id=SURVEY_ID)
    questions = survey_client.get_survey()["result"]["Questions"]
    t2_to_t1_extended_mapping = get_extended_mapping()
    for question_id, question in questions.items():
        export_tag = question["DataExportTag"]
        if export_tag not in t2_to_t1_extended_mapping:
            continue
        question_text = question["QuestionText"]
        # Prompt questions take priority; fall back to slider questions.
        m = PROMPT_QUESTION_RE.search(question_text) or SLIDER_QUESTION_RE.search(
            question_text
        )
        if m is None:
            continue
        logger.debug(
            "Expected substitution",
            extra={
                "before": m.group("question"),
                "after": t2_to_t1_extended_mapping[export_tag],
            },
        )
        question["QuestionText"] = question_text.replace(
            m.group("question"), t2_to_t1_extended_mapping[export_tag]
        )
        survey_client.update_question(question_id=question_id, data=question)
class SurveyManager(CsvImporter):
    """
    Manages the process of creating/updating surveys from scratch

    If ``survey_id`` is None a new survey is created (a random suffix is
    appended when the name is exactly "Test survey" so repeated test runs
    don't collide); otherwise the existing survey is loaded. Changes are
    staged in the ``*_to_add`` / ``*_to_update`` / ``*_to_delete`` dicts and
    pushed via the corresponding methods.
    """

    def __init__(self, survey_id=None, survey_name=None, input_dataset=None):
        super().__init__(csvfile_path=input_dataset)
        self.survey_client = SurveyDefinitionsClient(survey_id=survey_id)
        if survey_id is None:
            if survey_name == "Test survey":
                # Randomise the test-survey name so repeated runs don't collide.
                from random import randrange

                survey_name = f"Test survey {randrange(99999)}"
            self.survey_client.create_survey(survey_name)
            print(f"Created new survey: {survey_name}")
        self.survey = self.survey_client.get_survey()["result"]
        # pprint(self.survey)
        self.groups = dict()
        self.questions = dict()  # rendered questions indexed by export tag
        self.thrash_block_id = None
        # Staging areas for block/question changes, filled by callers.
        self.blocks_to_update = dict()
        self.blocks_to_add = dict()
        self.blocks_to_delete = dict()
        self.blocks_not_to_touch = dict()
        self.questions_to_update = dict()
        self.questions_to_add = dict()
        self.questions_to_delete = dict()
        self.questions_not_to_touch = dict()

    # region question methods
    def add_questions(self):
        """Create every staged new question; return API responses keyed like the staging dict."""
        responses = dict()
        for k, q in self.questions_to_add.items():
            responses[k] = self.survey_client.create_question(data=q)
        return responses

    def update_questions(self):
        """Update every staged changed question; return API responses keyed like the staging dict."""
        responses = dict()
        for k, q in self.questions_to_update.items():
            responses[k] = self.survey_client.update_question(q["QuestionID"], data=q)
        return responses

    def delete_questions(self):
        """Delete every staged removed question; return API responses keyed like the staging dict."""
        responses = dict()
        for k, q in self.questions_to_delete.items():
            responses[k] = self.survey_client.delete_question(q["QuestionID"])
        return responses

    # endregion

    @staticmethod
    def convert_key_from_id_to_tag(questions):
        """Re-key a questions mapping from question id to its DataExportTag."""
        return {v["DataExportTag"]: v for v in questions.values()}

    # FIX: the original name is a typo ("covert"); kept as a delegating alias
    # for backward compatibility with existing callers.
    @staticmethod
    def covert_key_from_id_to_tag(questions):
        """Deprecated misspelling of :meth:`convert_key_from_id_to_tag`."""
        return SurveyManager.convert_key_from_id_to_tag(questions)

    @staticmethod
    def is_question_text_identical(q1, q2):
        """Return True when both questions have byte-identical QuestionText."""
        return q1["QuestionText"] == q2["QuestionText"]
def main(dry_run):
    """Replace consensus modals with tooltips and inject t1 scores/graphs.

    For every question: (1) swap "no consensus" / "consensus reached" modal
    divs for their tooltip targets; (2) for questions mapped in
    ``t2_to_t1_mapping``, substitute the previous-round score text and splice
    the t1 graph JS and a Highcharts container into the question. With
    ``dry_run`` truthy, changes are computed and reported but not pushed.
    """
    dry_run_postfix = ""
    if dry_run:
        dry_run_postfix = (
            "(this was a dry run, so no changes were actually made to the survey)"
        )
    logger = get_logger()
    survey_client = SurveyDefinitionsClient(survey_id=SURVEY_ID)
    survey = survey_client.get_survey()["result"]
    questions = survey["Questions"]
    updated_questions = list()
    for k, v in questions.items():
        updated_question = False
        export_tag = v["DataExportTag"]
        question_text = v["QuestionText"]
        # replace consensus modals by tooltip
        no_consensus_replacements = no_consensus_re.subn(
            no_consensus_target, question_text
        )
        consensus_reached_replacements = consensus_reached_re.subn(
            consensus_reached_target, question_text
        )
        total_replacements = (
            no_consensus_replacements[1] + consensus_reached_replacements[1]
        )
        if total_replacements > 0:
            # NOTE(review): if both patterns somehow matched, only the
            # "no consensus" substitution survives -- assumed mutually
            # exclusive; confirm.
            if no_consensus_replacements[1] > 0:
                div_name = "Consensus failed"
                question_text = no_consensus_replacements[0]
            elif consensus_reached_replacements[1] > 0:
                div_name = "Consensus reached"
                question_text = consensus_reached_replacements[0]
            logger.info(f"{div_name} div replaced in question {export_tag}", extra={})
            # FIX: write the substituted text back immediately; previously it
            # was only stored for questions that also entered the graph branch
            # below, so the tooltip replacement was silently lost for
            # questions not present in t2_to_t1_mapping.
            v["QuestionText"] = question_text
            updated_question = True
        # add t1 question scores and graphs
        if export_tag in t2_to_t1_mapping:
            # question scores
            if (m := previous_score_re.search(question_text)):
                question_text = question_text.replace(
                    m.group("question"), t2_to_t1_mapping[export_tag]
                )
            else:
                logger.error(
                    f"No match for previous score regular expression found in question {export_tag}",
                    extra={"question_text": question_text},
                )
            # graphs
            # add question JS: splice the graph code into the addOnReady handler
            existing_question_js = v.get("QuestionJS")
            if not existing_question_js:
                existing_question_js = QUALTRICS_EMPTY_JS
            split_js = js_add_on_ready_re.split(existing_question_js, maxsplit=1)
            assert (
                n := len(split_js)
            ) == 2, f"Unexpected split of existing_question_js ({n}): {split_js}"
            t1_tag = t2_to_t1_mapping[export_tag]
            v["QuestionJS"] = (
                split_js[0]
                + "Qualtrics.SurveyEngine.addOnReady(function()\n{"
                + get_graph_js_for_qualtrics(t1_tag)
                + split_js[1]
            )
            # add graph container: exactly one of the two placeholder styles
            # must be present in the question text
            container_id = f"highcharts-{t1_tag}"
            replacements_1 = highcharts_placeholder_re_1.subn(
                HIGHCHARTS_CONTAINER.format(container_id), question_text
            )
            replacements_2 = highcharts_placeholder_re_2.subn(
                HIGHCHARTS_CONTAINER.format(container_id), question_text
            )
            total_replacements = replacements_1[1] + replacements_2[1]
            assert (
                total_replacements == 1
            ), f"Unexpected number of replacements ({total_replacements}) in question_text: {question_text}"
            replacements = replacements_1
            if replacements_2[1] > 0:
                replacements = replacements_2
            v["QuestionText"] = replacements[0]
            updated_question = True
        if updated_question:
            updated_questions.append(export_tag)
            if not dry_run:
                survey_client.update_question(question_id=k, data=v)
    # FIX: dry_run_postfix was computed but never used -- print the summary,
    # consistent with the sibling scripts in this project.
    print(
        f"Updated {len(updated_questions)} questions: {updated_questions} {dry_run_postfix}"
    )