def group_random_group_size(responses, grpsize):
    """Group responses into chunks of ``grpsize`` using the randomization approach.

    Responses are chunked in their given order; if the final chunk is
    partial, it is dissolved and its members are distributed round-robin
    across the full groups.

    :param responses: iterable of responses (one entry per student)
    :param grpsize: target number of members per group
    :return: list of groups (each a list of responses)
    """
    # use itertools to chunk the students into groups of grpsize;
    # iter(callable, sentinel) stops when islice yields an empty list
    iterable = iter(responses)
    groups = list(iter(lambda: list(itertools.islice(iterable, grpsize)), []))
    # deal with the last, potentially partial group
    if groups and len(groups[-1]) < grpsize:
        logging.info(
            "Partial group identified; distributing across other groups.")
        # pop() removes exactly the last group; the original used
        # groups.remove(...), which removes the first *equal* group
        outliers = groups.pop()
        if not groups:
            # fewer responses than grpsize: keep the single partial group
            # (the original code looped forever in this case)
            groups.append(outliers)
        else:
            # round-robin the leftover students across the full groups
            for index, outlier in enumerate(outliers):
                groups[index % len(groups)].append(outlier)
    # scoring and return
    scores, ave = score_groups(groups)
    logging.info("scores: " + str(scores))
    logging.info("average: " + str(ave))
    return groups
def group_random_num_group(responses, numgrp):
    """Group responses into ``numgrp`` groups using the randomization approach.

    Each group receives ``len(responses) // numgrp`` members; the first
    ``len(responses) % numgrp`` groups each absorb one extra (overflow)
    member.

    :param responses: sequence of responses (one entry per student)
    :param numgrp: number of groups to create
    :return: list of ``numgrp`` groups (each a list of responses)
    """
    # number of students placed into a group so far
    placed = 0
    iterable = iter(responses)
    # number of students in each group (without overflow)
    grpsize = len(responses) // numgrp
    groups = []
    for _ in range(numgrp):
        group = []
        # NOTE: the original compared ints with "is not", which relies on
        # CPython's small-int cache and is wrong for values > 256; use !=
        while len(group) != grpsize and placed < len(responses):
            group.append(next(iterable))
            placed += 1
        groups.append(group)
    # deal with the last remaining (overflow) students; modulo by numgrp,
    # not by the placed count -- the original `% stunum` raised
    # ZeroDivisionError when numgrp > len(responses) (grpsize == 0)
    overflow = len(responses) % numgrp
    if overflow != 0:
        logging.info(
            "Overflow students identified; distributing into groups.")
        for x in range(overflow):
            groups[x].append(next(iterable))
            placed += 1
    # scoring and return
    scores, ave = score_groups(groups)
    logging.info("scores: " + str(scores))
    logging.info("average: " + str(ave))
    return groups
def rebalance(student_groups, scores, ave): """rebalances the groups of students""" # average score of the groups ave = int(ave // len(scores)) # print("average score: "+str(ave)) # print("Threshold: "+str(int(ave*(2/3)))) if int(ave * (2 / 3)) - 1 in scores: # print("Students not well distributed") # stores the student with the highest value in the highest valued group temp = max(student_groups[scores.index(max(scores))]) # stores the student with the lowest value in the lowest valued grouop lowest = min(student_groups[scores.index(min(scores))]) # switches the highest and lowest valued students student_groups[scores.index(max(scores))].insert( scores.index(max(scores)), lowest) student_groups[scores.index(max(scores))].remove(temp) student_groups[scores.index(min(scores))].insert( scores.index(min(scores)), temp) student_groups[scores.index(min(scores))].remove(lowest) # reevaluates scores score_groups(student_groups)
def group_rrobin_group_size(responses, grpsize):
    """Group responses using a round-robin approach.

    Creates ``len(responses) // grpsize`` groups, drains response columns
    in a random order, and deals matching responses to the groups in
    round-robin fashion. ``responses`` is consumed (mutated) in the process.

    :param responses: list of responses; each response is indexable and
        columns 1.. hold boolean flags (column 0 presumably an identifier
        -- TODO confirm)
    :param grpsize: target number of members per group
    :return: list of groups (each a list of responses)
    """
    # setup target groups; // is integer division
    numgrps = len(responses) // grpsize
    logging.info("target groups: %d", numgrps)
    groups = [[] for _ in range(numgrps)]
    # cyclical group target: yields 0..numgrps-1 forever
    target_group = itertools.cycle(range(numgrps))
    # randomize the order in which the columns will be drained
    columns = list(range(1, len(responses[0])))
    random.shuffle(columns)
    # BUG FIX: %d cannot format a list; %s logs it correctly
    logging.info("column priority: %s", columns)
    # iterate through the response columns
    for col in columns:
        # BUG FIX: iterate over a snapshot -- the original removed items
        # from `responses` while iterating it, silently skipping elements
        for response in list(responses):
            # identity check: only an actual bool True matches, not truthy
            if response[col] is True:
                groups[next(target_group)].append(response)
                responses.remove(response)
    # disperse anyone not already grouped
    while responses:
        groups[next(target_group)].append(responses.pop(0))
    # scoring and return
    scores, ave = group_scoring.score_groups(groups)
    # BUG FIX: scores is a list and ave may be non-integral; use %s
    logging.info("scores: %s", scores)
    logging.info("average: %s", ave)
    return groups