def bar_graph(connection: Connection, question_id: str,
              auth_user_id: str=None, email: str=None,
              limit: int=None, count_order: bool=False) -> dict:
    """
    Get a list of the number of times each submission value appears.

    You must provide either an auth_user_id or e-mail address.

    :param connection: a SQLAlchemy Connection
    :param question_id: the UUID of the question
    :param auth_user_id: the UUID of the user
    :param email: the e-mail address of the user.
    :param limit: a limit on the number of results
    :param count_order: whether to order from largest count to smallest
    :return: a JSON dict containing the result [[values], [counts]]
    """
    user_id = _get_user_id(connection, auth_user_id, email)
    allowable_types = {'text', 'integer', 'decimal', 'multiple_choice',
                       'date', 'time', 'location', 'facility'}
    question = question_select(connection, question_id)
    tcn = _get_type_constraint_name(allowable_types, question)
    # Assume that you only want to consider the non-other answers
    original_table, column_name = _table_and_column(tcn)
    table = original_table.join(
        question_table,
        original_table.c.question_id == question_table.c.question_id
    ).join(survey_table)
    conds = [question_table.c.question_id == question_id,
             survey_table.c.auth_user_id == user_id]
    column = get_column(original_table, column_name)
    # Group identical answer values together and count each group.
    column_query = select(
        [column, sqlcount(column)]
    ).select_from(table).group_by(column)
    ordering = desc(sqlcount(column)) if count_order else column
    ordered_query = column_query.order_by(ordering)
    result = connection.execute(
        ordered_query.where(and_(*conds)).limit(limit)
    )
    # Each row is (value, count); JSON-ify the value for the response.
    bar_graph_result = [[_jsonify(connection, row[0], question_id), row[1]]
                        for row in result]
    # _return_sql validates the (possibly empty) result exactly once here;
    # the original redundantly validated both the raw proxy and this list.
    response = json_response(
        _return_sql(connection, bar_graph_result, question.survey_id,
                    user_id, question_id))
    response['query'] = 'bar_graph'
    return response
def time_series(connection: Connection, question_id: str,
                auth_user_id: str=None, email: str=None) -> dict:
    """
    Get a list of submissions to the specified question over time.

    You must provide either an auth_user_id or e-mail address.

    :param connection: a SQLAlchemy Connection
    :param question_id: the UUID of the question
    :param auth_user_id: the UUID of the user
    :param email: the e-mail address of the user.
    :return: a JSON dict containing the result [[times], [values]]
    """
    user_id = _get_user_id(connection, auth_user_id, email)
    allowable_types = {'text', 'integer', 'decimal', 'multiple_choice',
                       'date', 'time', 'location'}
    question = question_select(connection, question_id)
    tcn = _get_type_constraint_name(allowable_types, question)
    # Assume that you only want to consider the non-other answers
    original_table, column_name = _table_and_column(tcn)
    table = original_table.join(
        survey_table,
        original_table.c.survey_id == survey_table.c.survey_id
    ).join(
        submission_table,
        original_table.c.submission_id == submission_table.c.submission_id
    )
    column = get_column(original_table, column_name)
    where_stmt = select(
        [column, submission_table.c.submission_time]
    ).select_from(table).where(
        original_table.c.question_id == question_id
    ).where(
        survey_table.c.auth_user_id == user_id
    )
    # BUG FIX: the original passed auth_user_id here, which is None when the
    # caller authenticated by e-mail; use the resolved user_id like every
    # other _return_sql call in this module.
    result = _return_sql(
        connection,
        connection.execute(where_stmt.order_by('submission_time asc')),
        question.survey_id, user_id, question_id)
    tsr = [[r.submission_time.isoformat(),
            _jsonify(connection, r[column_name], question_id)]
           for r in result]
    time_series_result = tsr
    response = json_response(
        _return_sql(connection, time_series_result, question.survey_id,
                    user_id, question_id))
    response['query'] = 'time_series'
    return response
def _scalar(connection: Connection, question_id: str,
            sql_function: GenericFunction, *,
            auth_user_id: str=None, email: str=None,
            is_type_exception: bool=False,
            allowable_types: set=frozenset({'integer', 'decimal'})) -> Real:
    """
    Get a scalar SQL-y value (max, mean, etc) across all submissions to a
    question.

    You must provide either an auth_user_id or e-mail address.

    :param connection: a SQLAlchemy Connection
    :param question_id: the UUID of the question
    :param sql_function: the SQL function to execute
    :param auth_user_id: the UUID of the user
    :param email: the e-mail address of the user
    :param is_type_exception: whether to look at the "other"/"don't know"/etc
                              responses
    :param allowable_types: the type constraint names valid for this function;
                            a frozenset default avoids the mutable-default
                            pitfall (the set is only membership-tested)
    :return: the result of the SQL function
    :raise InvalidTypeForAggregationError: if the type constraint name is bad
    """
    user_id = _get_user_id(connection, auth_user_id, email)
    question = question_select(connection, question_id)
    conds = [question_table.c.question_id == question_id,
             survey_table.c.auth_user_id == user_id]
    if is_type_exception:
        # "Other"-style responses live in the generic answer_text column.
        original_table = answer_table
        column_name = 'answer_text'
    else:
        tcn = _get_type_constraint_name(allowable_types, question)
        original_table, column_name = _table_and_column(tcn)
    table = original_table.join(
        question_table,
        original_table.c.question_id == question_table.c.question_id
    ).join(survey_table)
    if is_type_exception:
        conds.append(original_table.c.is_type_exception)
    column = get_column(original_table, column_name)
    result = connection.execute(select([sql_function(column)]).select_from(
        table).where(and_(*conds))).scalar()
    return _return_sql(connection, result, question.survey_id, user_id,
                       question_id)
def _get_filtered_ids(connection: Connection, filters: list) -> Iterator:
    """
    Given a list of filters like
    { 'question_id': <question_id>, '<type_constraint_name>': <value> },
    yield the submission_id values that pass the filters.

    The caller's filter dicts are left unmodified (the original implementation
    popped 'question_id' out of each one as a side effect).

    :param connection: a SQLAlchemy Connection
    :param filters: a list of filters consisting of answers to questions
    """
    for filter_pair in filters:
        question_id = filter_pair['question_id']
        # The remaining key is the type constraint name; select it explicitly
        # instead of popping and grabbing whatever key happens to be first.
        type_constraint = next(
            key for key in filter_pair if key != 'question_id'
        )
        value = filter_pair[type_constraint]
        answers = connection.execute(
            select([answer_table]).where(
                answer_table.c.question_id == question_id
            ).where(get_column(answer_table, type_constraint) == value))
        for answer in answers:
            yield answer.submission_id
def testGetColumn(self):
    """get_column returns the named column; unknown names raise."""
    column = db.get_column(answer_table, 'answer_integer')
    self.assertIs(column, answer_table.c.answer_integer)
    with self.assertRaises(db.NoSuchColumnError):
        db.get_column(answer_table, 'garbage')