Example #1
def main(image_path):
    # get the final worksheet from the image
    ext = Extractor(image_path, False)
    final = ext.final

    # get the form code by decoding the worksheet's QR code
    decoded_qr_code = reader(final)

    # extract the cells and student's responses
    cells = Cells(final)

    # grade the worksheet by using a CNN to OCR the student's responses
    grader = Grader(decoded_qr_code)
    grader.grade(cells.student_responses)
    worksheet = grader.display(final, cells.sorted_contours)
    Helpers.save_image(f'{Helpers.IMAGE_DIRECTORY}/graded.png', worksheet)
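A minimal sketch of how this entry point might be invoked from the command line; the argparse wrapper below is an assumption, since the snippet only defines main(image_path):

import argparse

if __name__ == '__main__':
    # Hypothetical CLI wrapper -- not part of the original snippet.
    parser = argparse.ArgumentParser(description='Grade a scanned worksheet image')
    parser.add_argument('image_path', help='path to the worksheet photo or scan')
    args = parser.parse_args()
    main(args.image_path)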
Example #2
def callback(ch, method, properties, body):
    start_time = time.time()
    payload = json.loads(body.decode())
    print(payload)
    data = payload["data"]

    sourceCodeBasePath = "tmp/src"
    extractTarGz(tarGzBase64=data["sourceCodeBase64"],
                 basePath=sourceCodeBasePath)

    # grade the submission inside a Docker container
    grader = Grader(tmpPath="tmp",
                    entryPoint=data["entry"],
                    testcases=data["testcases"])
    result = grader.grade()
    result['user'] = {
        'projectId': data["projectId"],
        'userId': data["userId"],
        'courseId': data["courseId"],
        'activityId': data["activityId"]
    }
    headers = {'Content-Type': "application/json"}

    requests.post(f'{os.getenv("BRIDGE_SERVICE_URL")}/callback/',
                  data=json.dumps(result),
                  headers=headers)
    elapsed = time.time() - start_time
    with open("execution_time.log", "a+") as f:
        f.write(f"{elapsed}\n")
    print("finish process message")
    ch.basic_ack(delivery_tag=method.delivery_tag)
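The (ch, method, properties, body) signature matches pika's on_message_callback, so the consumer is presumably wired up along these lines; the queue name and connection parameters below are assumptions, not taken from the snippet:

import pika

# Hypothetical consumer setup -- queue name and host are placeholders.
connection = pika.BlockingConnection(pika.ConnectionParameters(host='localhost'))
channel = connection.channel()
channel.queue_declare(queue='grading_jobs', durable=True)
channel.basic_qos(prefetch_count=1)  # hand each worker one grading job at a time
channel.basic_consume(queue='grading_jobs', on_message_callback=callback)
channel.start_consuming()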
Example #3
    def submit_answer(self, request, pk=None):
        """Submit an answer to a question"""
        question = get_object_or_404(Question_Master, pk=pk)
        self.check_object_permissions(request, question)

        submission = User_Submissions.objects.get(
            user=request.user,
            question=question
        )

        if submission.status in (submission.CORRECT, submission.WRONG):
            error = {
                'status': False,
                'detail': 'Question already answered'
            }
            return Response(error, status.HTTP_400_BAD_REQUEST)

        if submission.is_answer_shown():
            error = {
                'status': False,
                'detail': 'Question already attempted',
            }
            return Response(error, status.HTTP_400_BAD_REQUEST)

        if submission.attempts >= question.attempts:
            error = {
                'status': False,
                'detail': 'Exceeded the maximum number of attempts'
            }
            return Response(error, status.HTTP_400_BAD_REQUEST)

        submission.attempts += 1
        attempts_remaining = question.attempts - submission.attempts

        # request.DATA is the pre-DRF-3 name for request.data
        serializer = serializers.AnswerSubmitSerializer(data=request.DATA)
        print(serializer)
        if serializer.is_valid():
            submission.status = User_Submissions.ATTEMPTED
            submission.answer = serializer.data['answer']

            data = {
                'status': submission.status,
                'marks': submission.marks,
                'attempts_remaining': attempts_remaining,
                'explaination': submission.explaination
            }

            grader = Grader(submission=submission, question=question)
            if grader.grade():
                submission = grader.submission
                data['status'] = submission.status
                data['marks'] = submission.marks
                data['explaination'] = submission.explaination
                if attempts_remaining == 0 or submission.status == User_Submissions.CORRECT:
                    if grader.the_question is None:
                        the_question = Question.objects.get_subclass(
                            pk=submission.question.pk)
                        data['answer'] = the_question.get_answer()
                    else:
                        data['answer'] = grader.the_question.get_answer()
            serializer = serializers.FrontEndSubmissionSerializer(data)

            # return the result of grading
            return Response(serializer.data)
        else:
            submission.save()
            content = serializer.errors
            return Response(content, status.HTTP_400_BAD_REQUEST)
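For context, a client call against this action might look like the following; the route, host, and auth header are assumptions, and only the 'answer' field is implied by the serializer usage above:

import requests

# Hypothetical client call -- URL and token are placeholders.
resp = requests.post(
    'http://localhost:8000/api/questions/42/submit_answer/',
    data={'answer': 'B'},
    headers={'Authorization': 'Token <token>'},
)
print(resp.status_code, resp.json())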
Example #4
    def _grade_project(self, project_name=None, emails=None):
        """Grades the given project for each email in the specified email list.
        Projects are expected to have been cloned to a local directory previously.
        Results in the gradebook file saved to the project's working directory.
        :param project_name: Name of the project to grade
        :param emails: List of emails for which to clone the project"""

        if project_name is None:
            project_name = self._argsdict['project']
        project_dir = os.sep.join([self._working_dir_name, project_name])
        project_due_dt = ProctorConfig.get_config_value(project_name, 'due_dt')

        gradebook = GradeBook(self._working_dir_name, project_name,
                              project_due_dt)
        builder = Builder()
        testrunner = UnitTestRunner()
        grader = Grader(builder, testrunner, gradebook)

        owner_emails = emails if emails is not None else \
            self._get_emails_from_file(self._argsdict['emails'])
        users_missing_project = []

        self._logger.info(f'Grading {project_name}')

        num_to_grade = len(owner_emails)
        current = 0

        # Grade project for each student listed in owner_emails
        for email in owner_emails:

            email = email.strip()  # also drops tabs/newlines so the blank-line check below works
            current += 1

            self._logger.info('---')
            self._logger.info(f'Owner {email} ({current} of {num_to_grade})')
            if len(email) == 0:
                self._logger.debug(
                    f"Invalid owner email '{email}'. Check email file for blank lines."
                )
                continue

            dir_to_grade = Path(project_dir) / email  # pathlib's / operator joins path components
            if not dir_to_grade.exists():
                users_missing_project.append(email)
                self._logger.warning(
                    f'Local project not found: {dir_to_grade}. Try clone.')
                gradebook.local_project_not_found(email)
                continue

            project = self._server.get_user_project(email, project_name)
            if project:
                commits = project.commits.list()
                if commits:
                    # GitLab returns the most recent commit first (index 0)
                    latest_commit_date = commits[0].created_at
                    grader.grade(email, project_name, dir_to_grade,
                                 project_due_dt, latest_commit_date)
                else:
                    gradebook.commit_not_found(email)
                    self._logger.warning(
                        'Server project found, but it has no commits.')
            else:
                gradebook.server_project_not_found(email)
                self._logger.warning(
                    'Project not found on server. Check the email address.')

        self._logger.info('---')
        self._logger.info(f'Saving grades to: {gradebook.get_file_name()}')
        gradebook.save()

        if users_missing_project:
            self._logger.info(
                f'Local project missing for: {users_missing_project}')
            if 'chide' in self._argsdict and self._argsdict['chide']:
                self._logger.info('Chiding people with missing projects...')
                Postman.send_missing_project_email(users_missing_project,
                                                   project_name, self._logger)
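_get_emails_from_file is not shown, but the loop above implies a plain-text file with one address per line; a hypothetical fixture illustrating the blank-line case the length check guards against:

# Hypothetical emails file -- the real path comes from self._argsdict['emails'].
with open('emails.txt', 'w') as f:
    f.write('alice@example.edu\n')
    f.write('\n')                  # blank line: logged at debug level and skipped
    f.write('bob@example.edu\n')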
Example #5
def main(args):
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed(args.seed)
    torch.backends.cudnn.deterministic = True
    # Load dataset
    if args.dataset == "mnist":
        dataset = MNIST(
            root="../data",
            transform=transforms.ToTensor(),  # TODO: you may want to tweak this
            train=not args.eval)
        dataloader = DataLoader(dataset,
                                args.batch_size,
                                shuffle=True,
                                drop_last=True)
    else:
        raise NotImplementedError

    # Configure
    if not args.eval:
        time_str = time.strftime('%Y-%m-%d-%H-%M-%S')
        logdir = args.logdir if args.logdir is not None else "logs/cvae_" + time_str
        os.makedirs(logdir, exist_ok=True)
        writer = SummaryWriter(logdir)

        imgdir = args.imgdir if args.imgdir is not None else "imgs/cvae_" + time_str
        os.makedirs(imgdir, exist_ok=True)

        checkpointdir = args.checkpointdir if args.checkpointdir is not None else "checkpoints/cvae_" + time_str
        os.makedirs(checkpointdir, exist_ok=True)

    label_dim = 10
    img_dim = (1, 28, 28)
    latent_dim = 100

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    cvae = CVAE(img_dim, label_dim, latent_dim)
    cvae.to(device)
    # optimizer = optim.Adam(cvae.parameters(), lr=args.lr)
    optimizer = optim.SGD(cvae.parameters(), lr=args.lr)
    scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, [30, 50, 80],
                                                     gamma=0.1)

    # plt.figure()
    # toPIL = transforms.ToPILImage()
    # plt.imshow(toPIL(dataset[0][0]), cmap='gray')
    # plt.savefig("output.png")

    # for images, labels in dataloader:
    #     plt.figure()
    #     toPIL = transforms.ToPILImage()
    #     plt.imshow(toPIL(images[0]), cmap='gray')
    #     plt.savefig("output.png")
    #     print('lab =', labels[0])
    #     break

    def KLLoss(z_mean, z_std):
        # KL(N(mu, sigma^2) || N(0, 1)) averaged over batch and latent dims;
        # note log(sigma^2) = 2 * log(sigma)
        return 0.5 * torch.mean(z_mean**2 + z_std**2 - 1 - 2 * torch.log(z_std))

    if args.grade:
        from grader import Grader
        grader = Grader()

    if not args.eval:
        for name, param in cvae.named_parameters():
            print(name, param.shape)
        prior = torch.distributions.Normal(0, 1)

        # criterion = nn.MSELoss()
        criterion = nn.BCELoss()

        best_acc = 0.
        best_epoch = -1

        for epoch in range(args.num_epochs):
            # TODO: Training, logging, saving, visualization, etc.
            loss_sum, mse_sum, kl_sum = 0, 0, 0
            for it, (images, labels) in tqdm(enumerate(dataloader)):
                images = images.to(device)
                labels = labels.to(device)

                labels_onehot = torch.zeros(
                    (labels.size(0), label_dim)).to(device)
                labels_onehot.scatter_(dim=1,
                                       index=labels.view(-1, 1),
                                       value=1)
                # print(labels_onehot.size(), labels_onehot)

                optimizer.zero_grad()
                latent = cvae.encode(images, labels_onehot)
                recon = cvae.decode(latent, labels_onehot)
                kl_loss = KLLoss(cvae.z_mean_val, cvae.z_std_val)
                # TODO: finish loss = MSE + KL
                mse_loss = criterion(recon, images)
                loss = mse_loss + kl_loss  # changed
                loss.backward()
                optimizer.step()

                loss_sum += loss.item()
                mse_sum += mse_loss.item()
                kl_sum += kl_loss.item()

            loss_sum /= len(dataloader)
            mse_sum /= len(dataloader)
            kl_sum /= len(dataloader)
            writer.add_scalar('loss/TotalLoss', loss_sum, epoch)
            writer.add_scalar('loss/MSE', mse_sum, epoch)
            writer.add_scalar('loss/KL', kl_sum, epoch)
            print("Epoch %d, iteration %d: loss=%.6f, mse=%.6f, kl=%.6f" %
                  (epoch, it, loss_sum, mse_sum, kl_sum))

            samples = generate_samples(cvae, 10, device)
            save_samples_image(samples,
                               os.path.join(imgdir, f"epoch{epoch}.png"))

            checkpoint = {
                'model': cvae.state_dict(),
                'optimizer': optimizer.state_dict()
            }
            torch.save(checkpoint,
                       os.path.join(checkpointdir, f"epoch{epoch}.pt"))

            if args.grade:
                cvae.eval()
                samples = generate_samples(cvae, 1000, device)
                acc = grader.grade(samples)
                writer.add_scalar('grade/accuracy', acc, epoch)
                print("Epoch %d: accuracy=%.6f" % (epoch, acc))

                if acc > best_acc:
                    torch.save(checkpoint,
                               os.path.join(checkpointdir, "best.pt"))
                    best_acc = acc
                    best_epoch = epoch
        print(f"Best accuracy: {best_acc}, at epoch {best_epoch}")

    else:
        assert args.load_path is not None
        checkpoint = torch.load(args.load_path, map_location=device)
        cvae.load_state_dict(checkpoint['model'])
        optimizer.load_state_dict(checkpoint['optimizer'])
        cvae.eval()
        samples = generate_samples(cvae, 1000, device)
        torch.save(samples, "vae_generated_samples.pt")
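generate_samples is not shown in the snippet; a plausible implementation, inferred from the cvae.decode(latent, labels_onehot) call in the training loop and the label_dim/latent_dim values above, might sample from the standard-normal prior and cycle the one-hot labels:

def generate_samples(cvae, n, device):
    # Hypothetical helper -- samples z ~ N(0, I) and decodes with one-hot
    # labels cycling over the 10 digit classes.
    with torch.no_grad():
        z = torch.randn(n, 100, device=device)        # latent_dim = 100
        labels = torch.arange(n, device=device) % 10  # label_dim = 10
        onehot = torch.zeros(n, 10, device=device)
        onehot.scatter_(dim=1, index=labels.view(-1, 1), value=1)
        return cvae.decode(z, onehot)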
Example #6

def test_a():
    n_queens_solver = submission.BacktrackingSearch()
    n_queens_solver.solve(submission.create_n_queens_csp(8))
    grader.require_is_equal(92, n_queens_solver.num_assignments)
    grader.require_is_equal(2057, n_queens_solver.num_operations)


def test_b():
    mcv_solver = submission.BacktrackingSearch()
    mcv_solver.solve(submission.create_n_queens_csp(8), mcv=True)
    grader.require_is_equal(92, mcv_solver.num_assignments)
    grader.require_is_equal(1361, mcv_solver.num_operations)


def test_c():
    ac_solver = submission.BacktrackingSearch()
    ac_solver.solve(submission.create_n_queens_csp(8), ac3=True)
    grader.require_is_equal(92, ac_solver.num_assignments)
    grader.require_is_equal(21, ac_solver.first_assignment_num_operations)
    grader.require_is_equal(769, ac_solver.num_operations)


grader = Grader()
submission = grader.load('submission')
grader.add_part('a', test_a, 5, description='Test for Create 8-Queens CSP')
grader.add_part('b', test_b, 5, description='Test for MCV with 8-Queens CSP')
grader.add_part('c', test_c, 10, description='Test for AC-3 with n-queens CSP')
grader.grade()
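This follows a course-autograder pattern: grader.load imports the student's module, and each part asserts exact search statistics. A hypothetical skeleton of the interface the tests expect from submission.py:

# Hypothetical skeleton -- real implementations come from the assignment.
class BacktrackingSearch:
    def __init__(self):
        self.num_assignments = 0                   # complete assignments found
        self.num_operations = 0                    # total assignment operations
        self.first_assignment_num_operations = 0   # operations until first solution

    def solve(self, csp, mcv=False, ac3=False):
        ...  # backtracking search, optionally with MCV ordering and AC-3

def create_n_queens_csp(n=8):
    ...  # build and return an n-queens CSP instance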