def score_answers(self, question: Question, answers: List[Answer]) -> float:
    """Return a score between 0.0 and 1.0 indicating how similar the
    answers in <answers> are.

    This score is calculated by finding the similarity of every combination
    of two answers in <answers> and taking the average of all of these
    similarity scores.

    If there is only one answer in <answers> and it is valid return 1.0
    since a single answer is always identical to itself.

    Raise InvalidAnswerError if any answer in <answers> is not a valid
    answer to <question>.

    === Precondition ===
    len(answers) > 0
    """
    # Validate every answer up front (was `... is False`, an anti-idiom).
    for ans in answers:
        if not question.validate_answer(ans):
            raise InvalidAnswerError
    if len(answers) == 1:
        return 1.0
    # Average the similarity over every unordered pair of answers.
    total = 0.0
    num_pairs = 0
    for i, first in enumerate(answers):
        for second in answers[i + 1:]:
            total += question.get_similarity(first, second)
            num_pairs += 1
    return total / num_pairs
def score_answers(self, question: Question, answers: List[Answer]) -> float:
    """Return a score between 0.0 and 1.0 indicating how similar the
    answers in <answers> are.

    This score is calculated by finding the similarity of every combination
    of two answers in <answers> and taking the average of all of these
    similarity scores.

    If there is only one answer in <answers> and it is valid return 1.0
    since a single answer is always identical to itself.

    Raise InvalidAnswerError if any answer in <answers> is not a valid
    answer to <question>.

    === Precondition ===
    len(answers) > 0
    """
    for answer in answers:
        if not question.validate_answer(answer):
            raise InvalidAnswerError
    if len(answers) == 1:
        return 1.0
    # Pair answers by position. The original looked positions up with
    # answers.index(...), which returns the FIRST equal element, so a list
    # of equal answers produced zero pairs and a ZeroDivisionError.
    score = 0.0
    permute = 0
    for i, first in enumerate(answers):
        for second in answers[i + 1:]:
            score += question.get_similarity(first, second)
            permute += 1
    return score / permute
def test_student_get_answer() -> None:
    """Test Student.get_answer before and after answers are recorded."""
    jane = Student(1, "Jane")
    first_q = Question(1, "How are you")
    second_q = Question(2, "hello")
    first_a = Answer("good")
    second_a = Answer("hi")
    # Nothing recorded yet for first_q.
    assert jane.get_answer(first_q) is None
    jane.set_answer(first_q, first_a)
    jane.set_answer(second_q, second_a)
    assert jane.get_answer(first_q) == first_a
    assert jane.get_answer(second_q) == second_a
def test_student_set_answer() -> None:
    """A test for set_answer() in class Student: setting a second answer to
    the same question replaces the first one."""
    student = Student(1, "Jane")
    q1 = Question(1, "How are you")
    a1 = Answer("good")
    q2 = Question(2, "hello")
    a2 = Answer("hi")
    a3 = Answer("test")
    student.set_answer(q1, a1)
    student.set_answer(q2, a2)
    student.set_answer(q2, a3)
    assert student.get_answer(q1) == a1
    assert student.get_answer(q2) == a3
def has_answer(self, question: Question) -> bool:
    """Return True iff this student has an answer for a question with the
    same id as <question> and that answer is a valid answer for <question>.
    """
    if question.id not in self._answers:
        return False
    return question.validate_answer(self._answers[question.id])
def score_answers(self, question: Question, answers: List[Answer]) -> float:
    """Return a score between 0.0 and 1.0 indicating how similar the
    answers in <answers> are.

    This score is calculated by finding the similarity of every combination
    of two answers in <answers>, finding the average of all of these
    similarity scores, and then subtracting this average from 1.0

    If there is only one answer in <answers> and it is valid, return 0.0
    since a single answer is never identical to itself.

    Raise InvalidAnswerError if any answer in <answers> is not a valid
    answer to <question>.

    === Precondition ===
    len(answers) > 0
    """
    # Validate FIRST: an invalid answer must raise even when it is the only
    # answer (the original returned 0.0 before checking validity).
    if not all(answer.is_valid(question) for answer in answers):
        raise InvalidAnswerError
    if len(answers) == 1:
        return 0.0
    # Each unordered pair is scored exactly once; the original compared
    # every ordered pair, calling get_similarity twice per combination.
    total = 0.0
    num_pairs = 0
    for i, first in enumerate(answers):
        for second in answers[i + 1:]:
            total += question.get_similarity(first, second)
            num_pairs += 1
    return 1.0 - total / num_pairs
def score_answers(self, question: Question, answers: List[Answer]) -> float:
    """Return a score between 0.0 and 1.0 indicating how similar the
    answers in <answers> are.

    This score is calculated by finding the similarity of every combination
    of two answers in <answers> and taking the average of all of these
    similarity scores.

    If there is only one answer in <answers> and it is valid return 1.0
    since a single answer is always identical to itself.

    Raise InvalidAnswerError if any answer in <answers> is not a valid
    answer to <question>.

    === Precondition ===
    len(answers) > 0
    """
    super().check_answers(question, answers)
    if len(answers) == 1:
        # A lone valid answer is always identical to itself (the original
        # returned 0.0, contradicting this method's docstring).
        return 1.0
    count = 0.0
    score = 0.0
    # combinations(answers, 2) yields every unordered pair exactly once;
    # the original call was missing the pair-size argument.
    for a, b in combinations(answers, 2):
        count += 1.0
        score += question.get_similarity(a, b)
    return score / count
def sentiment(q):
    """Build a normalized stacked-bar chart for sentiment question <q>."""
    question = Question(survey, q)
    melted = question.summary.reset_index().melt(['index'])
    x_enc = {
        "field": "value",
        "stack": "normalize",
        "type": "quantitative",
        "axis": {"title": None, "format": "%"},
        "sort": "descending",
    }
    y_enc = {
        "field": "index",
        "type": "nominal",
        "axis": {"title": None, "labelLimit": 350},
    }
    color_enc = {
        "field": 'variable',
        "type": "nominal",
        "legend": {"title": None},
        "scale": {"scheme": "pinkyellowgreen"},
        "sort": "descending",
    }
    return alt.Chart(melted).mark_bar().encode(x=x_enc, y=y_enc, color=color_enc)
def score_answers(self, question: Question, answers: List[Answer]) -> float:
    """Return a score between 0.0 and 1.0 indicating how similar the
    answers in <answers> are.

    This score is calculated by finding the similarity of every combination
    of two answers in <answers> and taking the average of all of these
    similarity scores.

    If there is only one answer in <answers> and it is valid return 1.0
    since a single answer is always identical to itself.

    Raise InvalidAnswerError if any answer in <answers> is not a valid
    answer to <question>.

    === Precondition ===
    len(answers) > 0
    """
    # Validate each answer exactly once, up front. The original re-checked
    # validity inside the pair loop (O(n^2) validations) and carried an
    # unreachable `combos == 0.0` fallback.
    for answer in answers:
        if not answer.is_valid(question):
            raise InvalidAnswerError
    if len(answers) == 1:
        return 1.0
    sigma = 0.0
    combos = 0.0
    for i in range(len(answers) - 1):
        for j in range(i + 1, len(answers)):
            sigma += question.get_similarity(answers[i], answers[j])
            combos += 1.0
    return sigma / combos
def score_answers(self, question: Question, answers: List[Answer]) -> float:
    """Return score between 0.0 and 1.0 indicating the quality of the group
    of <answers> to the question <question>.

    The score returned will be zero iff there are any unique answers in
    <answers> and will be 1.0 otherwise. An answer is not unique if there is
    at least one other answer in <answers> with identical content.

    Raise InvalidAnswerError if any answer in <answers> is not a valid
    answer to <question>.

    === Precondition ===
    len(answers) > 0
    """
    # Validate up front (was `... is False`, an anti-idiom).
    for ans in answers:
        if not question.validate_answer(ans):
            raise InvalidAnswerError
    if len(answers) == 1:
        # A single answer has no duplicate, so it is unique.
        return 0.0
    # 1.0 iff every answer has identical content. Contents may be
    # unhashable (e.g. lists), so compare directly instead of using a set.
    first = answers[0].content
    if all(ans.content == first for ans in answers):
        return 1.0
    return 0.0
def score_answers(self, question: Question, answers: List[Answer]) -> float:
    """Return a score between 0.0 and 1.0 indicating how similar the
    answers in <answers> are.

    This score is calculated by finding the similarity of every combination
    of two answers in <answers> and taking the average of all of these
    similarity scores.

    If there is only one answer in <answers> and it is valid return 1.0
    since a single answer is always identical to itself.

    Raise InvalidAnswerError if any answer in <answers> is not a valid
    answer to <question>.

    === Precondition ===
    len(answers) > 0
    """
    # A lone valid answer is trivially identical to itself.
    if len(answers) == 1 and answers[0].is_valid(question):
        return 1.0
    for answer in answers:
        if not answer.is_valid(question):
            raise InvalidAnswerError
    # _combinations yields every unordered pair of answers.
    similarities = [question.get_similarity(pair[0], pair[1])
                    for pair in self._combinations(answers)]
    return sum(similarities) / len(similarities)
def score_answers(self, question: Question, answers: List[Answer]) -> float:
    """Return a score between 0.0 and 1.0 indicating how similar the
    answers in <answers> are.

    This score is calculated by finding the similarity of every combination
    of two answers in <answers> and taking the average of all of these
    similarity scores.

    If there is only one answer in <answers> and it is valid return 1.0
    since a single answer is always identical to itself.

    Raise InvalidAnswerError if any answer in <answers> is not a valid
    answer to <question>.

    === Precondition ===
    len(answers) > 0
    """
    # Module-level helper raises InvalidAnswerError on any bad answer.
    invalid_answer(question, answers)
    if only_one_valid(question, answers):
        return 1.0
    # Average the similarity of every unordered pair of answers.
    pair_scores = []
    for i, first in enumerate(answers):
        for second in answers[i + 1:]:
            pair_scores.append(question.get_similarity(first, second))
    return sum(pair_scores) / len(pair_scores)
def score_answers(self, question: Question, answers: List[Answer]) -> float:
    """Return a score between 0.0 and 1.0 indicating how similar the
    answers in <answers> are.

    This score is calculated by finding the similarity of every combination
    of two answers in <answers> and taking the average of all of these
    similarity scores.

    If there is only one answer in <answers> and it is valid return 1.0
    since a single answer is always identical to itself.

    Raise InvalidAnswerError if any answer in <answers> is not a valid
    answer to <question>.

    === Precondition ===
    len(answers) > 0
    """
    if len(answers) == 1:
        only = answers[0]
        # A missing or invalid lone answer is rejected outright.
        if only is None or not only.is_valid(question):
            raise InvalidAnswerError
        return 1.0
    total = 0.0
    pairs = 0
    for i, current in enumerate(answers):
        # Each answer is validated once, as the outer loop reaches it.
        if not current.is_valid(question):
            raise InvalidAnswerError
        for partner in answers[i + 1:]:
            pairs += 1
            total += question.get_similarity(current, partner)
    return total / pairs
def cost():
    """Build a normalized stacked-bar chart for the affordability question."""
    question = Question(survey, 56)
    summary = question.summary.reset_index()
    # Single constant category so the whole question renders as one bar.
    summary["test"] = ["Affordability"] * len(summary)
    x_enc = {
        "field": "Valid Percent",
        "type": "quantitative",
        "stack": "normalize",
        "axis": {"title": None},
    }
    y_enc = {
        "field": "test",
        "type": "nominal",
        "axis": {"title": None},
    }
    color_enc = {
        "field": 'index',
        "type": "nominal",
        "legend": {"title": None},
        "scale": {"scheme": "pinkyellowgreen"},
        "sort": "descending",
    }
    return alt.Chart(summary).mark_bar().encode(x=x_enc, y=y_enc, color=color_enc)
def score_answers(self, question: Question, answers: List[Answer]) -> float:
    """Return a score between 0.0 and 1.0 indicating how similar the
    answers in <answers> are.

    This score is calculated by finding the similarity of every combination
    of two answers in <answers>, finding the average of all of these
    similarity scores, and then subtracting this average from 1.0

    If there is only one answer in <answers> and it is valid, return 0.0
    since a single answer is never identical to itself.

    Raise InvalidAnswerError if any answer in <answers> is not a valid
    answer to <question>.

    === Precondition ===
    len(answers) > 0
    """
    for answer in answers:
        if not answer.is_valid(question):
            raise InvalidAnswerError
    if len(answers) == 1:
        return 0.0
    similarities = []
    # enumerate gives each answer's true position; the original used
    # answers.index(opt), which returns the FIRST equal element and so
    # paired duplicate answers with themselves, skewing the average.
    for i, first in enumerate(answers):
        for second in answers[i + 1:]:
            similarities.append(question.get_similarity(first, second))
    average = sum(similarities) / len(similarities)
    return 1.0 - average
def set_answer(self, question: Question, answer: Answer) -> None:
    """Record this student's answer <answer> to the question <question>.

    Do nothing if answer is not valid for this question.
    """
    if not question.validate_answer(answer):
        # Invalid answers are silently ignored.
        return
    self._answers[question.id] = answer
def test_student_get_answer() -> None:
    """A test for get_answer() in class Student."""
    mike = Student(123, 'Mike')
    answer = Answer(False)
    question = Question(2, 'F')
    mike.set_answer(question, answer)
    # The recorded answer comes back unchanged.
    assert answer == mike.get_answer(question)
def invalid_answer(question: Question, answers: List[Answer]) -> None:
    """Raise InvalidAnswerError if any answer in <answers> is not a valid
    answer to <question>.
    """
    for candidate in answers:
        if question.validate_answer(candidate):
            continue
        raise InvalidAnswerError
def has_answer(self, question: Question) -> bool:
    """Return True iff this student has an answer for a question with the
    same id as <question> and that answer is a valid answer for <question>.
    """
    if question.id not in self._responses:
        return False
    # Each stored response is a pair; index 1 holds the Answer.
    stored_answer = self._responses[question.id][1]
    return question.validate_answer(stored_answer)
def has_answer(self, question: Question) -> bool:
    """Return True iff this student has an answer for a question with the
    same id as <question> and that answer is a valid answer for <question>.
    """
    # self._answers holds (question, answer) pairs.
    return any(pair[0].id == question.id and question.validate_answer(pair[1])
               for pair in self._answers)
def check_answers(self, question: Question, answers: List[Answer]) -> None:
    """Takes a question and a list of answers, and check if they're all
    valid.

    Raise InvalidAnswerError if any answer in <answers> is not a valid
    answer to <question>.
    """
    # all() short-circuits at the first invalid answer, matching a loop
    # that raises immediately.
    if not all(question.validate_answer(a) for a in answers):
        raise InvalidAnswerError
def has_answer(self, question: Question) -> bool:
    """Return True iff this student has an answer for a question with the
    same id as <question> and that answer is a valid answer for <question>.
    """
    # Single dictionary lookup. The original called .get twice and used a
    # truthiness test, which would treat a stored falsy answer as missing.
    answer = self._ans.get(question)
    if answer is None:
        return False
    return question.validate_answer(answer)
def score_answers(self, question: Question, answers: List[Answer]) -> float:
    """Return a score between 0.0 and 1.0 indicating how similar the
    answers in <answers> are.

    This score is calculated by finding the similarity of every combination
    of two answers in <answers> and taking the average of all of these
    similarity scores. Only distinct pairs are scored; an answer is never
    compared with itself.

    If there is only one answer in <answers> and it is valid return 1.0
    since a single answer is always identical to itself.

    Raise InvalidAnswerError if any answer in <answers> is not a valid
    answer to <question>.

    === Precondition ===
    len(answers) > 0
    """
    # Single answer case: identical to itself by definition.
    if len(answers) == 1:
        if answers[0].is_valid(question):
            return 1.0
        raise InvalidAnswerError
    # Reject any missing or invalid answer before scoring.
    for answer in answers:
        if answer is None or not answer.is_valid(question):
            raise InvalidAnswerError
    # Average similarity over every unordered pair of answers.
    comparison_count = 0
    total_similarity = 0.0
    for i in range(len(answers)):
        for j in range(i + 1, len(answers)):
            comparison_count += 1
            total_similarity += question.get_similarity(
                answers[i], answers[j])
    return total_similarity / comparison_count
def score_answers(self, question: Question, answers: List[Answer]) -> float:
    """Return a score between 0.0 and 1.0 indicating how similar the
    answers in <answers> are.

    This score is calculated by finding the similarity of every combination
    of two answers in <answers> and taking the average of all of these
    similarity scores.

    If there is only one answer in <answers> and it is valid return 1.0
    since a single answer is always identical to itself.

    Raise InvalidAnswerError if any answer in <answers> is not a valid
    answer to <question>.

    === Precondition ===
    len(answers) > 0
    """
    # Every answer must be valid for <question>.
    for ans in answers:
        if not ans.is_valid(question):
            raise InvalidAnswerError
    if len(answers) == 1:
        # Return a float to honour the declared return type (was `return 1`).
        return 1.0
    scores = []
    for i in range(len(answers) - 1):
        for j in range(i + 1, len(answers)):
            scores.append(question.get_similarity(answers[i], answers[j]))
    return sum(scores) / len(scores)
def score_answers(self, question: Question, answers: List[Answer]) -> float:
    """Return a score between 0.0 and 1.0 indicating how similar the
    answers in <answers> are.

    This score is calculated by finding the similarity of every combination
    of two answers in <answers> and taking the average of all of these
    similarity scores.

    If there is only one answer in <answers> and it is valid return 1.0
    since a single answer is always identical to itself.

    Raise InvalidAnswerError if any answer in <answers> is not a valid
    answer to <question>.

    === Precondition ===
    len(answers) > 0
    """
    # Checking if any answer is not valid.
    for ans in answers:
        if not ans.is_valid(question):
            raise InvalidAnswerError
    if len(answers) == 1:
        return 1.0
    lst_score = []
    # enumerate yields each answer's true position; the original used
    # answers.index(opt), which finds the FIRST equal element and so paired
    # duplicate answers with themselves, skewing the average.
    for ind, opt in enumerate(answers):
        for other_opt in answers[ind + 1:]:
            lst_score.append(question.get_similarity(opt, other_opt))
    # Sum of all the scores divided by the number of scores.
    return sum(lst_score) / len(lst_score)
import altair as alt import pandas as pd from survey import df, attended, Question, survey memb_recv = Question(survey, 47) memb_recv_ans = [l['answer'] for l in memb_recv.metadata['answers']['0']] out = pd.DataFrame() for ans in memb_recv_ans: summary = Question(survey, 121, df[memb_recv.metadata['question']] == ans).summary summary[memb_recv.metadata['question']] = [ans] * len(summary) out = out.append(summary) # tidy data out = out.reset_index().melt( ["When did you receive your membership?", "index"]) def transfers_value(): return alt.Chart(out).mark_bar().encode( x={ "field": "index", "type": "nominal", "axis": { "title": None } }, y={ "field": "value",
def only_one_valid(question: Question, answers: List[Answer]) -> bool:
    """Evaluate if there is only one answer in <answers> and it is valid.
    """
    if len(answers) != 1:
        return False
    return question.validate_answer(answers[0])
def test_student_has_answer() -> None:
    """A test for has_answer() in class Student."""
    mike = Student(123, 'Mike')
    question = Question(2, 'F')
    # No answer has been set, so has_answer must report False.
    assert mike.has_answer(question) is False