def get_all_submissions_of_challenge(request, challenge_pk, challenge_phase_pk):
    """
    Returns all the submissions for a particular challenge:
    all phases for a host, the given phase for a participant.
    """
    # Fetch the corresponding challenge from challenge_pk.
    challenge = get_challenge_model(challenge_pk)

    # Fetch the corresponding challenge phase from challenge_phase_pk and challenge.
    try:
        challenge_phase = ChallengePhase.objects.get(
            pk=challenge_phase_pk, challenge=challenge)
    except ChallengePhase.DoesNotExist:
        response_data = {'error': 'Challenge Phase {} does not exist'.format(
            challenge_phase_pk)}
        return Response(response_data, status=status.HTTP_404_NOT_FOUND)

    # Check whether the requesting user is a host of the challenge.
    if is_user_a_host_of_challenge(user=request.user, challenge_pk=challenge_pk):
        # For hosts, filter submissions only on the basis of the challenge for
        # now. Support for query parameters like challenge phase and date is
        # to be added later on.
        submissions = Submission.objects.filter(
            challenge_phase__challenge=challenge).order_by('-submitted_at')
        paginator, result_page = paginated_queryset(submissions, request)
        try:
            serializer = ChallengeSubmissionManagementSerializer(
                result_page, many=True, context={'request': request})
            response_data = serializer.data
            return paginator.get_paginated_response(response_data)
        except Exception:
            return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)

    # Check whether the requesting user is a participant of the challenge.
    elif has_user_participated_in_challenge(user=request.user, challenge_id=challenge_pk):
        # Get the participant team of the user for this particular challenge.
        participant_team_pk = get_participant_team_id_of_user_for_a_challenge(
            request.user, challenge_pk)

        # Filter submissions on the basis of the challenge phase for a participant.
        submissions = Submission.objects.filter(
            participant_team=participant_team_pk,
            challenge_phase=challenge_phase).order_by('-submitted_at')
        paginator, result_page = paginated_queryset(submissions, request)
        try:
            serializer = SubmissionSerializer(
                result_page, many=True, context={'request': request})
            response_data = serializer.data
            return paginator.get_paginated_response(response_data)
        except Exception:
            return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)

    # The user is neither a host nor a participant of the challenge.
    else:
        response_data = {'error': 'You are neither a host nor a participant of the challenge!'}
        return Response(response_data, status=status.HTTP_400_BAD_REQUEST)
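# NOTE: The view above relies on a `paginated_queryset` helper that is defined
# elsewhere in the codebase. A minimal sketch of what such a helper might look
# like, assuming DRF's PageNumberPagination (the page size used here is an
# assumption for illustration, not the project's actual setting):

from rest_framework.pagination import PageNumberPagination


def paginated_queryset(queryset, request):
    """Return a (paginator, page) pair for the given queryset and request."""
    paginator = PageNumberPagination()
    paginator.page_size = 100  # assumed default page size
    result_page = paginator.paginate_queryset(queryset, request)
    return paginator, result_page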
def run_submission(challenge_id, challenge_phase, submission, user_annotation_file_path):
    '''
    * receives a challenge id, challenge phase object, submission object and user annotation file path
    * checks whether the corresponding evaluation script for the challenge exists or not
    * checks the above for the annotation file
    * calls the evaluation script's `evaluate` function, passing the annotation
      file and user_annotation_file_path as arguments
    '''
    # Use the submission serializer to send relevant data to the evaluation
    # script so that challenge hosts can use the data for webhooks or any
    # other service.
    submission_serializer = SubmissionSerializer(submission)

    submission_output = None
    phase_id = challenge_phase.id
    annotation_file_name = PHASE_ANNOTATION_FILE_NAME_MAP.get(
        challenge_id).get(phase_id)
    annotation_file_path = PHASE_ANNOTATION_FILE_PATH.format(
        challenge_id=challenge_id, phase_id=phase_id,
        annotation_file=annotation_file_name)
    submission_data_dir = SUBMISSION_DATA_DIR.format(
        submission_id=submission.id)

    # Create a temporary run directory under the submission directory, so that
    # the main directory does not get polluted.
    temp_run_dir = join(submission_data_dir, 'run')
    create_dir(temp_run_dir)

    stdout_file_name = 'temp_stdout.txt'
    stderr_file_name = 'temp_stderr.txt'
    stdout_file = join(temp_run_dir, stdout_file_name)
    stderr_file = join(temp_run_dir, stderr_file_name)
    stdout = open(stdout_file, 'a+')
    stderr = open(stderr_file, 'a+')

    # Call `evaluate` from the challenge's evaluation script; set `status` to
    # running and hence `started_at`.
    submission.status = Submission.RUNNING
    submission.started_at = timezone.now()
    submission.save()
    try:
        successful_submission_flag = True
        with stdout_redirect(stdout) as new_stdout, stderr_redirect(
                stderr) as new_stderr:  # noqa
            submission_output = EVALUATION_SCRIPTS[challenge_id].evaluate(
                annotation_file_path,
                user_annotation_file_path,
                challenge_phase.codename,
                submission_metadata=submission_serializer.data,
            )
        '''
        A submission will be marked successful only if it is of the format
            {
                "result": [
                    {
                        "split_codename_1": {
                            "key1": 30,
                            "key2": 50,
                        }
                    },
                    {
                        "split_codename_2": {
                            "key1": 90,
                            "key2": 10,
                        }
                    },
                    {
                        "split_codename_3": {
                            "key1": 100,
                            "key2": 45,
                        }
                    }
                ],
                "submission_metadata": {'foo': 'bar'},
                "submission_result": ['foo', 'bar'],
            }
        '''
        if 'result' in submission_output:
            leaderboard_data_list = []
            for split_result in submission_output['result']:
                # Get split_code_name, which is the key of the result.
                split_code_name = list(split_result.keys())[0]

                # Check if a challenge_phase_split exists for the
                # challenge_phase and dataset_split.
                try:
                    challenge_phase_split = ChallengePhaseSplit.objects.get(
                        challenge_phase=challenge_phase,
                        dataset_split__codename=split_code_name)
                except Exception:
                    stderr.write(
                        "ORIGINAL EXCEPTION: No such relation between Challenge Phase and DatasetSplit"
                        " specified by Challenge Host \n")
                    stderr.write(traceback.format_exc())
                    successful_submission_flag = False
                    break

                # Check if the dataset_split exists for the codename in the result.
                try:
                    dataset_split = challenge_phase_split.dataset_split
                except Exception:
                    stderr.write(
                        "ORIGINAL EXCEPTION: The codename specified by your Challenge Host doesn't match"
                        " with that in the evaluation script.\n")
                    stderr.write(traceback.format_exc())
                    successful_submission_flag = False
                    break

                leaderboard_data = LeaderboardData()
                leaderboard_data.challenge_phase_split = challenge_phase_split
                leaderboard_data.submission = submission
                leaderboard_data.leaderboard = challenge_phase_split.leaderboard
                leaderboard_data.result = split_result.get(
                    dataset_split.codename)

                leaderboard_data_list.append(leaderboard_data)

            if successful_submission_flag:
                LeaderboardData.objects.bulk_create(leaderboard_data_list)

        # Once the submission_output is processed, save the submission object
        # with the appropriate status.
        else:
            successful_submission_flag = False
    except Exception:
        stderr.write(traceback.format_exc())
        successful_submission_flag = False

    submission_status = Submission.FINISHED if successful_submission_flag else Submission.FAILED
    submission.status = submission_status
    submission.completed_at = timezone.now()
    submission.save()

    # After the execution is finished, set `status` to finished and hence `completed_at`.
    if submission_output:
        output = {}
        output['result'] = submission_output.get('result', '')
        submission.output = output

        # Save submission_result_file.
        submission_result = submission_output.get('submission_result', '')
        submission.submission_result_file.save(
            'submission_result.json', ContentFile(submission_result))

        # Save submission_metadata_file.
        submission_metadata = submission_output.get('submission_metadata', '')
        submission.submission_metadata_file.save(
            'submission_metadata.json', ContentFile(submission_metadata))

    submission.save()

    stderr.close()
    stdout.close()

    # TODO: see if the two updates below can be combined into a single update.
    with open(stdout_file, 'r') as stdout:
        stdout_content = stdout.read()
        submission.stdout_file.save('stdout.txt', ContentFile(stdout_content))
    if submission_status == Submission.FAILED:
        with open(stderr_file, 'r') as stderr:
            stderr_content = stderr.read()
            submission.stderr_file.save('stderr.txt', ContentFile(stderr_content))

    # Delete the complete temp run directory.
    shutil.rmtree(temp_run_dir)
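# For reference, a minimal evaluation script satisfying the contract described
# in the docstring above might look like the sketch below. The module only
# needs to expose an `evaluate` callable returning the documented dict; the
# split codename, metric name, and line-count "metric" here are made up purely
# for illustration.

def evaluate(annotation_file_path, user_annotation_file_path, phase_codename,
             **kwargs):
    """Toy evaluate(): compares line counts of the two files as a dummy metric."""
    with open(annotation_file_path) as f:
        num_gt = len(f.readlines())
    with open(user_annotation_file_path) as f:
        num_pred = len(f.readlines())
    # Dummy score: fraction of expected lines that were actually submitted.
    score = 100.0 * min(num_pred, num_gt) / float(max(num_gt, 1))
    return {
        "result": [
            {"split_codename_1": {"accuracy": score}},
        ],
        # Strings here, since this version writes them to files via ContentFile.
        "submission_result": "accuracy: {}".format(score),
        "submission_metadata": "evaluated with the toy line-count metric",
    }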
def run_submission(challenge_id, challenge_phase, submission, user_annotation_file_path):
    """
    * receives a challenge id, challenge phase object, submission object and user annotation file path
    * checks whether the corresponding evaluation script for the challenge exists or not
    * checks the above for the annotation file
    * calls the evaluation script's `evaluate` function, passing the annotation
      file and user_annotation_file_path as arguments
    """
    # Use the submission serializer to send relevant data to the evaluation
    # script so that challenge hosts can use the data for webhooks or any
    # other service.
    submission_serializer = SubmissionSerializer(submission)

    submission_output = None
    phase_id = challenge_phase.id
    annotation_file_name = PHASE_ANNOTATION_FILE_NAME_MAP.get(
        challenge_id).get(phase_id)
    annotation_file_path = PHASE_ANNOTATION_FILE_PATH.format(
        challenge_id=challenge_id,
        phase_id=phase_id,
        annotation_file=annotation_file_name,
    )
    submission_data_dir = SUBMISSION_DATA_DIR.format(
        submission_id=submission.id)

    submission.status = Submission.RUNNING
    submission.started_at = timezone.now()
    submission.save()

    # Create a temporary run directory under the submission directory, so that
    # the main directory does not get polluted.
    temp_run_dir = join(submission_data_dir, "run")
    create_dir(temp_run_dir)

    stdout_file = join(temp_run_dir, "temp_stdout.txt")
    stderr_file = join(temp_run_dir, "temp_stderr.txt")

    stdout = open(stdout_file, "a+")
    stderr = open(stderr_file, "a+")

    remote_evaluation = submission.challenge_phase.challenge.remote_evaluation

    if remote_evaluation:
        try:
            logger.info(
                "{} Sending submission {} for remote evaluation".format(
                    SUBMISSION_LOGS_PREFIX, submission.id))
            with stdout_redirect(stdout) as new_stdout, stderr_redirect(
                    stderr) as new_stderr:
                submission_output = EVALUATION_SCRIPTS[challenge_id].evaluate(
                    annotation_file_path,
                    user_annotation_file_path,
                    challenge_phase.codename,
                    submission_metadata=submission_serializer.data,
                )
            return
        except Exception:
            stderr.write(traceback.format_exc())
            stderr.close()
            stdout.close()
            submission.status = Submission.FAILED
            submission.completed_at = timezone.now()
            submission.save()
            with open(stdout_file, "r") as stdout:
                stdout_content = stdout.read()
                submission.stdout_file.save("stdout.txt", ContentFile(stdout_content))
            with open(stderr_file, "r") as stderr:
                stderr_content = stderr.read()
                submission.stderr_file.save("stderr.txt", ContentFile(stderr_content))
            # Delete the complete temp run directory.
            shutil.rmtree(temp_run_dir)
            return

    # Call `evaluate` from the challenge's evaluation script; `status` was set
    # to running above and hence `started_at`.
    try:
        successful_submission_flag = True
        with stdout_redirect(stdout) as new_stdout, stderr_redirect(  # noqa
                stderr) as new_stderr:  # noqa
            submission_output = EVALUATION_SCRIPTS[challenge_id].evaluate(
                annotation_file_path,
                user_annotation_file_path,
                challenge_phase.codename,
                submission_metadata=submission_serializer.data,
            )
        """
        A submission will be marked successful only if it is of the format
            {
                "result": [
                    {
                        "split_codename_1": {
                            "key1": 30,
                            "key2": 50,
                        }
                    },
                    {
                        "split_codename_2": {
                            "key1": 90,
                            "key2": 10,
                        }
                    },
                    {
                        "split_codename_3": {
                            "key1": 100,
                            "key2": 45,
                        }
                    }
                ],
                "submission_metadata": {'foo': 'bar'},
                "submission_result": ['foo', 'bar'],
            }
        """
        error_bars_dict = dict()
        if "error" in submission_output:
            for split_error in submission_output["error"]:
                split_code_name = list(split_error.keys())[0]
                error_bars_dict[split_code_name] = split_error[split_code_name]

        if "result" in submission_output:
            leaderboard_data_list = []
            for split_result in submission_output["result"]:
                # Get split_code_name, which is the key of the result.
                split_code_name = list(split_result.keys())[0]

                # Check if a challenge_phase_split exists for the
                # challenge_phase and dataset_split.
                try:
                    challenge_phase_split = ChallengePhaseSplit.objects.get(
                        challenge_phase=challenge_phase,
                        dataset_split__codename=split_code_name,
                    )
                except Exception:
                    stderr.write(
                        "ORIGINAL EXCEPTION: No such relation between Challenge Phase and DatasetSplit"
                        " specified by Challenge Host \n")
                    stderr.write(traceback.format_exc())
                    successful_submission_flag = False
                    break

                # Check if the dataset_split exists for the codename in the result.
                try:
                    dataset_split = challenge_phase_split.dataset_split
                except Exception:
                    stderr.write(
                        "ORIGINAL EXCEPTION: The codename specified by your Challenge Host doesn't match"
                        " with that in the evaluation script.\n")
                    stderr.write(traceback.format_exc())
                    successful_submission_flag = False
                    break

                leaderboard_data = LeaderboardData()
                leaderboard_data.challenge_phase_split = challenge_phase_split
                leaderboard_data.submission = submission
                leaderboard_data.leaderboard = (
                    challenge_phase_split.leaderboard)
                leaderboard_data.result = split_result.get(
                    dataset_split.codename)

                if "error" in submission_output:
                    leaderboard_data.error = error_bars_dict.get(
                        dataset_split.codename)

                leaderboard_data_list.append(leaderboard_data)

            if successful_submission_flag:
                LeaderboardData.objects.bulk_create(leaderboard_data_list)

        # Once the submission_output is processed, save the submission object
        # with the appropriate status.
        else:
            successful_submission_flag = False
    except Exception:
        stderr.write(traceback.format_exc())
        successful_submission_flag = False
        # Set submission_output to None to handle the case when the evaluation
        # script throws an exception; otherwise submission_output would be
        # left holding the exception object.
        submission_output = None

    submission_status = (Submission.FINISHED
                         if successful_submission_flag else Submission.FAILED)
    submission.status = submission_status
    submission.completed_at = timezone.now()
    submission.save()

    # After the execution is finished, set `status` to finished and hence `completed_at`.
    if submission_output:
        output = {}
        output["result"] = submission_output.get("result", "")
        submission.output = output

        # Save submission_result_file.
        submission_result = submission_output.get("submission_result", "")
        submission_result = json.dumps(submission_result)
        submission.submission_result_file.save(
            "submission_result.json", ContentFile(submission_result))

        # Save submission_metadata_file.
        submission_metadata = submission_output.get("submission_metadata", "")
        submission.submission_metadata_file.save(
            "submission_metadata.json", ContentFile(submission_metadata))

    submission.save()

    stderr.close()
    stdout.close()

    # TODO: see if the two updates below can be combined into a single update.
    with open(stdout_file, "r") as stdout:
        stdout_content = stdout.read()
        submission.stdout_file.save("stdout.txt", ContentFile(stdout_content))
    if submission_status == Submission.FAILED:
        with open(stderr_file, "r") as stderr:
            stderr_content = stderr.read()
            submission.stderr_file.save("stderr.txt", ContentFile(stderr_content))

    # Delete the complete temp run directory.
    shutil.rmtree(temp_run_dir)
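# This second version of run_submission additionally understands an optional
# "error" key in the evaluation output, whose per-split values are stored on
# LeaderboardData.error. A hypothetical evaluate() return value using it; the
# split codenames, metric names, and the reading of the error values as
# per-metric error bars are illustrative assumptions, not a documented schema:

sample_submission_output = {
    "result": [
        {"split_codename_1": {"accuracy": 90.0}},
        {"split_codename_2": {"accuracy": 10.0}},
    ],
    "error": [
        {"split_codename_1": {"accuracy": 1.5}},  # assumed error bar per metric
        {"split_codename_2": {"accuracy": 0.8}},
    ],
    "submission_result": ["foo", "bar"],  # json.dumps()-ed before being saved
    "submission_metadata": "{}",
}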