Example 1
    def main(self) -> None:
        """
        High-level process method for the class. Pulls Canvas data, sends scores to M-Pathways, and logs activity in the database.

        :return: None
        :rtype: None
        """
        # Fetch data from Canvas API and store as submission records in the database
        sub_dicts: list[dict[str, Any]] = self.get_sub_dicts_for_exam()
        if len(sub_dicts) > 0:
            self.create_sub_records(sub_dicts)

        # Find old and new submissions for exam to send to M-Pathways
        sub_to_transmit_qs: QuerySet = self.exam.submissions.filter(
            transmitted=False)
        subs_to_transmit: list[Submission] = list(sub_to_transmit_qs.all())

        # Identify old submissions for debugging purposes
        redo_subs: list[Submission] = list(
            sub_to_transmit_qs.filter(
                graded_timestamp__lt=self.sub_time_filter))
        if len(redo_subs) > 0:
            LOGGER.info(
                f'Will try to re-send {len(redo_subs)} previously un-transmitted submissions'
            )
            LOGGER.debug(f'Previously un-transmitted submissions: {redo_subs}')

        # Identify and separate submissions to send with duplicate uniqnames
        freq_qs: QuerySet = sub_to_transmit_qs.values(
            'student_uniqname').annotate(frequency=Count('student_uniqname'))
        dup_uniqnames: list[str] = [
            uniqname_dict['student_uniqname']
            for uniqname_dict in list(freq_qs)
            if uniqname_dict['frequency'] > 1
        ]
        dup_uniqname_subs: list[Submission] = []
        regular_subs: list[Submission] = []
        for sub_to_transmit in subs_to_transmit:
            if sub_to_transmit.student_uniqname in dup_uniqnames:
                dup_uniqname_subs.append(sub_to_transmit)
            else:
                regular_subs.append(sub_to_transmit)

        # Send scores and update the database
        if len(regular_subs) > 0:
            # Send regular submissions in chunks of 100
            regular_sub_lists: list[list[Submission]] = chunk_list(
                regular_subs)
            for regular_sub_list in regular_sub_lists:
                self.send_scores(regular_sub_list)
        if len(dup_uniqname_subs) > 0:
            LOGGER.info(
                'Found submissions to send with duplicate uniqnames; they will be sent individually'
            )
            # Send each submission with a duplicate uniqname individually
            for dup_uniqname_sub in dup_uniqname_subs:
                self.send_scores([dup_uniqname_sub])

        return None
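The values()/annotate(Count(...)) query above is what flags repeated uniqnames; in plain Python the same grouping looks like this (an illustrative sketch with made-up data, not the app's code):

from collections import Counter

# Count occurrences of each uniqname and keep those appearing more than
# once, mirroring the frequency > 1 filter in main() above.
uniqnames = ['abc', 'def', 'abc', 'ghi']
dup_uniqnames = [name for name, count in Counter(uniqnames).items() if count > 1]
print(dup_uniqnames)  # ['abc']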
Example 2
    def __format_description(self, desc):
        """
        Wrap a description at eight words per line, prefixing each line with '+ '.
        """
        desc = desc.split()
        desc = util.chunk_list(desc, 8)
        serial = []

        for line in desc:
            serial.append('+ ' + ' '.join(line))

        return '\n'.join(serial)
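A standalone sketch of the same transformation, with the chunking inlined since util.chunk_list isn't shown in this listing (illustrative only):

def format_description(desc, words_per_line=8):
    # Split into words, group into runs of eight, and prefix each line with '+ '.
    words = desc.split()
    lines = [words[i:i + words_per_line] for i in range(0, len(words), words_per_line)]
    return '\n'.join('+ ' + ' '.join(line) for line in lines)

print(format_description('one two three four five six seven eight nine ten'))
# + one two three four five six seven eight
# + nine ten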
Example 3
    def test_chunk_list_of_random_nums_with_default_size_and_less_than_one_chunk(
            self):
        """
        chunk_list returns a list containing a single inner list when the input length is less than chunk_size.
        """
        random_nums: list[int] = random.sample(range(1000000), 70)
        result: list[list[int]] = chunk_list(random_nums)

        self.assertEqual(len(result), 1)
        self.assertEqual(len(result[0]), 70)
        self.assertEqual(result[0], random_nums)
Example 4
    def test_chunk_list_of_subs_with_custom_size_and_more_than_one_chunk(self):
        """
        chunk_list chunks a list of three submissions into a list of two lists with lengths 2 and 1 when chunk_size is 2.
        """
        submissions: list[Submission] = list(
            Exam.objects.get(id=1).submissions.all())
        result: list[list[Submission]] = chunk_list(submissions, chunk_size=2)

        self.assertEqual(len(result), 2)
        self.assertEqual(len(result[0]), 2)
        self.assertEqual(len(result[1]), 1)

        all_subs: list[Submission] = [
            submission for sublist in result for submission in sublist
        ]
        self.assertEqual(submissions, all_subs)
Example 5
    def test_chunk_list_of_random_nums_with_default_size_and_multiple_chunks(
            self):
        """
        chunk_list creates a list of three integer lists of lengths 100, 100, and 50 with default chunk_size.
        """
        random_nums: list[int] = random.sample(range(1000000), 250)
        result: list[list[int]] = chunk_list(random_nums)

        self.assertEqual(len(result), 3)
        self.assertEqual(len(result[0]), 100)
        self.assertEqual(len(result[1]), 100)
        self.assertEqual(len(result[2]), 50)

        all_nums: list[int] = [
            random_num for sublist in result for random_num in sublist
        ]
        self.assertEqual(random_nums, all_nums)
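Taken together, the tests above pin down chunk_list's contract: a default chunk_size of 100, element order preserved, and a final chunk holding the remainder. A minimal sketch consistent with that contract (an assumption for illustration; the real implementation isn't shown in these examples):

from typing import TypeVar

T = TypeVar('T')

def chunk_list(input_list: list[T], chunk_size: int = 100) -> list[list[T]]:
    # Slice the input into consecutive runs of chunk_size elements; the
    # final slice holds any remainder, and element order is preserved.
    return [input_list[i:i + chunk_size]
            for i in range(0, len(input_list), chunk_size)]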
Example 6
    def __init__(self, input_basename_list):

        self.FLOAT_MAX = 1e30
        self.FLOAT_MIN = 1e-30
        self.n_cpu = mp.cpu_count()

        self.basename_list = input_basename_list
        # Split the work across CPUs; floor division keeps chunk_size an int.
        self.split_base_list = chunk_list(
            self.basename_list, (len(self.basename_list) // self.n_cpu) + 1)
        self.n_feature = np.load(FEATURE_MAP_FILE).shape[0]
        self.n_data = len(input_basename_list)

        self.w = np.ones(self.n_feature, dtype=np.float32) * 0.5
        self.w_best = []

        # empirical, expected, and gradient feature counts
        self.f_empirical = np.zeros(self.n_feature, dtype=np.float32)
        self.f_expected = np.zeros(self.n_feature, dtype=np.float32)
        self.f_gradient = np.zeros(self.n_feature, dtype=np.float32)
        self.f_gradient_best = []

        self.loglikelihood = 0.0
        self.min_loglikelihood = -self.FLOAT_MAX
        self.lam = 0.01
        self.DELTA = 0.01
        self.converged = False
        self.pid = os.getpid()

        # compute empirical feature count
        for bname in self.basename_list:
            tmp_model = MaxEntIRL()
            tmp_model.load_trajectory(
                os.path.join(TRAJECTORY_PATH, bname + ".npy"))
            tmp_model.update_weight(self.w)
            tmp_model.load_features(FEATURE_MAP_FILE)
            tmp_model.load_image(IMAGE_FILE)
            self.f_empirical += tmp_model.compute_empirical_feature_count()
        self.f_empirical /= self.n_feature
        print "empirical feature count:", self.f_empirical

        # make cache directory
        if not os.path.exists(CACHE_DIR):
            os.mkdir(CACHE_DIR)
Example 7
    # policy
    model.compute_policy(os.path.join(RESULT_DIR, "policy_%s.npy" % input_base))

    # forecast distribution
    model.compute_forecast_distribution(os.path.join(RESULT_DIR, "prob_%s.npy" % input_base))

    # probability map
    model.map_probability(os.path.join(RESULT_DIR, "result_%s.png" % input_base))



if __name__ == '__main__':

    trained_weight_file = "./RESULT/weight-015.txt"
    n_cpu = mp.cpu_count()

    if not os.path.exists(RESULT_DIR):
        os.mkdir(RESULT_DIR)

    base_list = read_text(BASE_FILE)
    split_base_list = chunk_list(base_list, (len(base_list) // n_cpu) + 1)

    # Fan out one worker process per chunk, then wait for all of them.
    processes = []
    for b_list in split_base_list:
        processes.append(mp.Process(target=optimal_control_single_thread,
                                    args=(b_list, trained_weight_file)))
    for p in processes:
        p.start()
    for p in processes:
        p.join()
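The (len(base_list) // n_cpu) + 1 chunk size caps the number of chunks at n_cpu, so each worker gets at most one batch. A quick standalone check of that arithmetic (made-up sizes, illustrative only):

import math

# chunk_size = (length // n_cpu) + 1 always exceeds length / n_cpu,
# so ceil(length / chunk_size) can never exceed n_cpu.
for length, n_cpu in [(10, 4), (8, 4), (4, 4)]:
    chunk_size = (length // n_cpu) + 1
    n_chunks = math.ceil(length / chunk_size)
    print(length, n_cpu, '->', n_chunks, 'chunks of up to', chunk_size)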