def ask_user_task_amount(df, num_sur_q):
    """Ask the user to confirm how many tasks (surveys) were in the study.

    Candidate task counts are the integers in [2, 30) that evenly divide
    the total number of survey questions; the user confirms the right one.

    Args:
        df: A cleaned DataFrame of the Qualtrics export (currently unused;
            kept for interface compatibility with callers).
        num_sur_q (int): Total number of questions asked in surveys.

    Returns:
        task_amount (int): The confirmed number of tasks in the study.
        Exits via clean_getaway() if no candidate is confirmed.
    """
    poten_task_amts = [x for x in range(2, 30) if num_sur_q % x == 0]
    if len(poten_task_amts) > 1:
        print("Judging by the numbers, it looks like there are multiple ways"
              " in which these questions could be broken up. Please answer the"
              " question(s) below to continue to attempt to parse the data.\n")
        for poten_task_amt in poten_task_amts:
            # NOTE: prompt typo "Where there" kept as-is elsewhere fixed; see below.
            ans = InputManager.get_yes_or_no(f"Were there {poten_task_amt} "
                                             "total tasks in your study?")
            if ans:
                # BUG FIX: removed the unreachable `break` that followed
                # this `return`.
                return poten_task_amt
        # BUG FIX: previously fell off the loop and implicitly returned
        # None when the user rejected every candidate; bail out explicitly.
        clean_getaway()
    elif len(poten_task_amts) == 1:
        # BUG FIX: interpolate the single candidate value, not the whole
        # list (the old message read e.g. "There were [4] in your study?").
        ans = InputManager.get_yes_or_no(
            f"There were {poten_task_amts[0]} in your"
            " study? (Y/n): ")
        if not ans:
            clean_getaway()
        return poten_task_amts[0]
    else:
        clean_getaway()
def __init__(self, fnirs_file="./sampledata/samplefnirs/test_1.csv"):
    """Load an fNIRS CSV export and run basic validity checks on its marks.

    Args:
        fnirs_file (str): Path to the fNIRS CSV export. The first 34 lines
            are treated as a metadata preamble (header=34).
    """
    self.fnirs_file = fnirs_file
    # Skip the device metadata preamble at the top of the export.
    self.df = pd.read_csv(fnirs_file, header=34)
    self.mark_col = self.df["Mark"].tolist()
    # Positive values in the "Mark" column flag recorded events.
    self.marks = [mark for mark in self.mark_col if mark > 0]
    self.mark_indexes = [indx for indx, mark in enumerate(self.mark_col) if mark > 0]
    self.valid_marks = self.validate_marks(self.marks)
    self.valid_spaceing = self.validate_spacing(self.mark_indexes)
    if self.valid_marks:
        print("Marking appears to be valid. Even number of marks found. \n")
        if self.valid_spaceing:
            print("Spacing between marks appears to be valid.\n")
        # NOTE(review): self.mark_counts and self.space_warnings are never
        # assigned in this method — presumably set as side effects of
        # validate_marks / validate_spacing; confirm, otherwise these two
        # prints raise AttributeError.
        print(f"Mark values:\n{self.mark_counts}")
        print(f"Number of space warnings: {len(self.space_warnings)}. \n")
        self.onsets = self.get_onsets(self.mark_indexes)
        self.task_number = len(self.onsets)
        self.durations = self.get_durations(self.onsets)
    else:
        print("\033[1mWARNING:\033[0m\n"
              "Something appears to have gone wrong in the marking "
              "of this file. fNIRS marks need to be opened and closed. "
              "You can continue to attempt to parse the file, but the "
              "outcomes may not be correct. Make sure to check outputs "
              "for validity if you decide to continue. ")
        ans = InputManager.get_yes_or_no("Try anyway? (Y/n): ")
        if not ans:
            # NOTE(review): `pass` makes declining a no-op, so parsing
            # continues even when the user answers "no" — likely intended
            # to abort; confirm.
            pass
        # NOTE(review): called without the mark_indexes argument used in
        # the valid branch above — verify get_onsets has a default
        # parameter, otherwise this raises TypeError.
        self.get_onsets()
def find_first_q(df):
    """Ask the user whether the first remaining question is the survey's
    first question (which should be the participant-ID prompt).

    Args:
        df: A (cleaned) DataFrame of the Qualtrics data export.

    Returns:
        The first question string if the user confirms it; None when the
        DataFrame has no rows. Exits via clean_getaway() on rejection.
    """
    try:
        first_q = df.iloc[0][0]
    except IndexError:
        # BUG FIX: narrowed from a bare `except:` which silently swallowed
        # every error (including KeyboardInterrupt); an empty DataFrame
        # raises IndexError here, which is the case this guard is for.
        return None
    ans = InputManager.get_yes_or_no(
        "\033[1m CHECKING FIRST QUESTION:"
        f"\n\n'{first_q}'\033[0m\n\n"
        "is the above the first survey question "
        "in your survey?: (Y/n) \n\n"
        "\033[1mNOTE:\033[0m This question should "
        "be asking for the experimenter to enter "
        "the participant's ID number.")
    if ans:
        return first_q
    else:
        clean_getaway()
def find_slice_prompt(df):
    """Identify the string used to slice the survey data into tasks.

    The slice prompt is the first survey question repeated at the start of
    every task; the user is asked to confirm it before it is used.

    Args:
        df: A cleaned DataFrame from the Qualtrics export.

    Returns:
        slice_prompt (str): The first survey question of each task.
    """
    headings = df.iloc[0].tolist()
    per_task, total_tasks, prelim_count = get_slice_prompt_index(df)

    print(f"There were {int(per_task)} questions per task.")
    print(f"There were {total_tasks} tasks total")
    print(f"The first {prelim_count} were preliminary questions.")

    # The first question after the preliminary block starts task one.
    slice_prompt = headings[prelim_count]
    confirmed = InputManager.get_yes_or_no(
        "\033[1m CHECKING FIRST SURVEY QUESTION:"
        f"\n\n'{slice_prompt}'\033[0m\n\n"
        "is the above the first survey question "
        "in your survey?: (Y/n) \n\n"
        "\033[1mNOTE:\033[0m This question should "
        "be asking for the experimenter to enter "
        "the task the participant just completed.")
    if not confirmed:
        clean_getaway()
    return slice_prompt
def get_survey_information(apiToken, dataCenter="ca1"):
    """List the account's surveys and ask the user which one to export.

    Prints each survey's name and ID, prompts for a selection, and
    optionally saves the chosen ID as the default in './.ids/surveyid.txt'.

    Args:
        apiToken (str): Token used to authenticate against the Qualtrics API.
        dataCenter (str): Data center argument for the Qualtrics API.

    Returns:
        surveyId (str): The unique ID of the selected survey, when the user
            chooses to save it as the default.
        bool: False when no default is saved.
    """
    baseUrl = f"https://{dataCenter}.qualtrics.com/API/v3/surveys"
    headers = {
        "x-api-token": apiToken,
    }
    response = requests.get(baseUrl, headers=headers)
    try:
        survey_info = json.loads(response.text)['result']['elements']
    except KeyError:
        # An auth failure returns a payload without 'result'/'elements'.
        print(json.loads(response.text))
        # BUG FIX: added the missing space between "matches" and "the"
        # (the message previously printed "...exactly matchesthe token...").
        print("You are presenting an invalid API token to qualtrics. "
              "Please make sure that your API token exactly matches "
              "the token listed on the Qualtrics account and try again.")
        sys.exit()
    print("\nBelow are the surveys listed under your qualtrics account:\n")
    print("-" * 80)
    if len(survey_info) == 0:
        print("There are no surveys associated with your Qualtrics account.")
        print("Quitting this program.. .. ..")
        sys.exit()
    for i, survey in enumerate(survey_info):
        print(f"{i + 1}. - Survey Name: {survey['name']}")
        print(f"Survey ID: {survey['id']}")
        print("-" * 80)
    prompt = ("Which survey would you like to export? \n(indicate the survey "
              "by typing the number listed next to the survey): ")
    survey_response = InputManager.get_numerical_input(prompt, len(survey_info))
    surveyId = survey_info[survey_response - 1]['id']
    if surveyId:
        # BUG FIX: this prompt was an unterminated string literal broken
        # across a physical line break (a syntax error); reassembled into
        # one valid literal with an explicit '\n' escape.
        prompt = ("Would you like to save this as your default survey? "
                  "\n(Answering yes will save the id to '/.ids/surveyid.txt')")
        ans = InputManager.get_yes_or_no(prompt)
        if ans:
            ids_dir = f"{os.getcwd()}/.ids/"
            if not os.path.exists(ids_dir):
                os.mkdir(ids_dir)
            with open(f"{os.getcwd()}/.ids/surveyid.txt", "w") as out_file:
                out_file.write(surveyId)
            return surveyId
    return False
def get_api_fpath():
    """Offer the user a chance to manually supply their API-token file path.

    Args:
        None

    Returns:
        api_fpath (str): Location of the file containing the user's API key.
        bool: False when the user declines to enter the path manually.
    """
    wants_manual = InputManager.get_yes_or_no(
        "Would you like to manually enter the filepath where your API token is located?: (Y/n): "
    )
    if not wants_manual:
        return False
    # Only prompt for the path once the user has opted in.
    return InputManager.get_valid_fpath("Please enter a valid filepath: ")
def leave_any_out(df):
    """Optionally drop a contiguous run of survey columns before parsing.

    Asks the user whether any section should be excluded from the
    conditions files; if so, removes the column for the named first prompt
    plus the requested number of following columns.

    Args:
        df: A cleaned Qualtrics survey export.

    Returns:
        df: The DataFrame with the selected columns removed (unchanged when
            the user opts out). Note the drop is also applied in place.
    """
    ans = InputManager.get_yes_or_no(
        # BUG FIX: prompt grammar — was "columns you not like to include".
        "Are there any columns you would not like to"
        " include in the conditions files? (Y/n):")
    if ans:
        first_prompt = input("Please enter the first prompt for the section "
                             "you wish to remove: ")
        # BUG FIX: removed the doubled space the old concatenation produced
        # ("...to remove  after the first prompt?").
        how_many_after = int(
            input("How many columns do you wish to remove "
                  "after the first prompt?: "))
        headings_list = df.iloc[0].tolist()
        first_indx = headings_list.index(first_prompt)
        cols_to_remove = headings_list[first_indx:first_indx + how_many_after]
        # NOTE(review): drops by the values found in row 0 — this assumes
        # the cleaned export's column labels equal its first-row question
        # text; confirm against the cleaning step.
        df.drop(cols_to_remove, axis=1, inplace=True)
    return df