def _remove_env(self, instance_id):
    if LOCAL_TEST:
        return
    try:
        del self.envs[instance_id]
        del self.env_info[instance_id]
        # Mark the relevant submission as failed
        submission_id = hGet("CROWDAI::INSTANCE_ID_MAP", instance_id)
        submission_id = int(submission_id.decode('utf-8'))
    except KeyError:
        raise InvalidUsage('Instance_id {} unknown or expired.'.format(instance_id))

    # Update crowdAI Submission
    try:
        api_key = hGet("CROWDAI::API_KEY_MAP", instance_id.split("___")[0])
        api_key = api_key.decode('utf-8')
        api = CROWDAI_API(CROWDAI_TOKEN)
        api.authenticate_participant(api_key)
        submission = api.get_submission(CROWDAI_CHALLENGE_CLIENT_NAME, submission_id)
        if submission.grading_status != "graded":
            submission.grading_status = "failed"
            submission.message = "Timelimit Exceeded / Error during evaluation"
            submission.update()
    except Exception as e:
        logger.error("Unable to update submission on crowdAI : {}".format(str(e)))
        logger.error(traceback.format_exc())

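# The hGet/hSet/rPush helpers used above (and throughout this section) are not
# defined here. A minimal sketch of how they might wrap a redis-py connection
# pool follows; the pool settings and helper signatures are assumptions, not
# the actual implementation.
import redis

REDIS_POOL = redis.ConnectionPool(host="localhost", port=6379, db=0)

def hGet(hash_name, key):
    """Return the raw bytes stored at hash_name[key], or None if missing."""
    return redis.Redis(connection_pool=REDIS_POOL).hget(hash_name, key)

def hSet(hash_name, key, value):
    """Set hash_name[key] = value."""
    return redis.Redis(connection_pool=REDIS_POOL).hset(hash_name, key, value)

def rPush(list_name, value):
    """Append value to the Redis list stored at list_name."""
    return redis.Redis(connection_pool=REDIS_POOL).rpush(list_name, value)
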
def test_does_not_work_for_incorrect_submission_id():
    """Incorrect submission id throws an exception"""
    api = CROWDAI_API(AUTH_TOKEN)
    challenge_id = "IEEEInvestmentRankingChallenge"
    submission_id = 50300000000001
    with pytest.raises(CrowdAIRemoteException):
        api.get_submission(challenge_id, submission_id)

def test_throws_error_when_wrong_challenge_id():
    """Tests that an error is thrown when creating a submission with a
    wrong challenge id"""
    api = CROWDAI_API(AUTH_TOKEN)
    api.authenticate_participant(EXAMPLE_API_KEY)
    challenge_id = "wrong_challenge_id"
    with pytest.raises(CrowdAIRemoteException):
        api.create_submission(challenge_id)

def test_successfully_creates_submission_with_correct_api_key():
    """Tests that a submission is successfully created for a correct api_key
    and a correct challenge_id"""
    api = CROWDAI_API(AUTH_TOKEN)
    challenge_id = "test_challenge"
    api.authenticate_participant(EXAMPLE_API_KEY)
    submission = api.create_submission(challenge_id)
    assert type(submission.id) == int
    assert submission.score is False
    assert submission.score_secondary is False
    assert submission.grading_status == "submitted"

def test_works_for_correct_submission_id():
    """Correct submission id returns a valid submission object"""
    api = CROWDAI_API(AUTH_TOKEN)
    challenge_id = "IEEEInvestmentRankingChallenge"
    submission_id = 5030
    submission = api.get_submission(challenge_id, submission_id)
    assert submission.id == 5030
    assert submission.score == 0.001587
    assert submission.score_secondary == 0.00608137471612
    assert submission.grading_status == "graded"
    print(submission)

def test_null_score_secondary_raises_exception():
    """Tests that an exception is raised when score is set
    and score_secondary is not.
    """
    api = CROWDAI_API(AUTH_TOKEN)
    challenge_id = "test_challenge"
    api.authenticate_participant(EXAMPLE_API_KEY)
    submission = api.create_submission(challenge_id)
    assert type(submission.id) == int
    assert submission.score is False
    assert submission.score_secondary is False
    assert submission.grading_status == "submitted"

    submission.score = 0.98
    submission.grading_status = "graded"
    with pytest.raises(CrowdAIAPIException):
        submission.update()

def grade_submission(data, _context):
    file_key = data["file_key"]
    _update_job_event(
        _context,
        job_info_template(_context, "Post Processing MIDI file...."))
    pruned_filekey, converted_filekeys = post_process_midi(
        _context, POOL, file_key)
    processed_filekeys = converted_filekeys

    print("Making submission on crowdAI")
    api = CROWDAI_API(config.CROWDAI_TOKEN)
    api.authenticate_participant(_context["api_key"])

    meta = {}
    meta["file_key"] = file_key
    # meta["processed_filekeys"] = processed_filekeys

    submission = api.create_submission(config.challenge_id)
    submission.score = config.SCORE_DEFAULT
    submission.score_secondary = config.SCORE_SECONDARY_DEFAULT
    submission.grading_status = "graded"
    submission.meta = meta
    submission.update()
    submission_id = submission.id
    print("Submitted : ", submission)

    register_submission_on_redis(
        _context, POOL, submission_id, pruned_filekey, processed_filekeys)

    message_for_participants = """
    Score (mu) : {}
    Secondary_score (sigma) : {}

    Please note that these scores are only the initial scores, and they will
    change over time as your submission is evaluated by the human volunteers.
    """.format(submission.score, submission.score_secondary)
    _update_job_event(
        _context,
        job_info_template(_context, message_for_participants))

    result = {'result': 'submission recorded'}
    result['score_mu'] = submission.score
    result['score_sigma'] = submission.score_secondary
    _update_job_event(
        _context,
        job_complete_template(_context, result))

def test_updates_submission():
    """Tests if it successfully updates a submission"""
    api = CROWDAI_API(AUTH_TOKEN)
    challenge_id = "test_challenge"
    api.authenticate_participant(EXAMPLE_API_KEY)
    submission = api.create_submission(challenge_id)
    assert type(submission.id) == int
    assert submission.score is False
    assert submission.score_secondary is False
    assert submission.grading_status == "submitted"

    submission.score = 0.98
    submission.score_secondary = 0.99
    submission.grading_status = "graded"
    submission.update()

    submission_from_server = api.get_submission(challenge_id, submission.id)
    assert submission_from_server.score == 0.98
    assert submission_from_server.score_secondary == 0.99
    assert submission_from_server.grading_status == "graded"

def test_updates_submission_with_meta():
    """Tests if it successfully updates a submission with the meta param"""
    api = CROWDAI_API(AUTH_TOKEN)
    challenge_id = "test_challenge"
    api.authenticate_participant(EXAMPLE_API_KEY)
    submission = api.create_submission(challenge_id)
    assert type(submission.id) == int
    assert submission.score is False
    assert submission.score_secondary is False
    assert submission.grading_status == "submitted"
    assert submission.meta == {}

    submission.score = 0.98
    submission.score_secondary = 0.99
    submission.grading_status = "graded"
    submission.meta["something"] = "A"
    submission.meta["something_else"] = "B"
    submission.update()

    submission_from_server = api.get_submission(challenge_id, submission.id)
    assert submission_from_server.score == 0.98
    assert submission_from_server.score_secondary == 0.99
    assert submission_from_server.grading_status == "graded"
    assert submission_from_server.meta["something"] == "A"
    assert submission_from_server.meta["something_else"] == "B"

    submission.score = 0.198
    submission.score_secondary = 0.199
    submission.grading_status = "graded"
    submission.meta["something"] = "A_1"
    submission.meta["something_else"] = "B_1"
    submission.update()

    submission_from_server = api.get_submission(challenge_id, submission.id)
    assert submission_from_server.score == 0.198
    assert submission_from_server.score_secondary == 0.199
    assert submission_from_server.grading_status == "graded"
    assert submission_from_server.meta["something"] == "A_1"
    assert submission_from_server.meta["something_else"] == "B_1"

def monitor_close(self, instance_id):
    env = self._lookup_env(instance_id)
    rPush("CROWDAI::SUBMISSION::%s::actions" % (instance_id), "close")
    rPush("CROWDAI::SUBMISSION::%s::observations" % (instance_id), "close")
    rPush("CROWDAI::SUBMISSION::%s::rewards" % (instance_id), "close")
    rPush("CROWDAI::SUBMISSION::%s::actions" % (instance_id),
          "CROWDAI_REPLAY_DATA_VERSION:" + str(CROWDAI_REPLAY_DATA_VERSION))
    rPush("CROWDAI::SUBMISSION::%s::observations" % (instance_id),
          "CROWDAI_REPLAY_DATA_VERSION:" + str(CROWDAI_REPLAY_DATA_VERSION))
    rPush("CROWDAI::SUBMISSION::%s::rewards" % (instance_id),
          "CROWDAI_REPLAY_DATA_VERSION:" + str(CROWDAI_REPLAY_DATA_VERSION))

    SCORE = env.total
    SCORE = SCORE * 1.0 / len(SEED_MAP)
    print("CLOSED %s, %f" % (instance_id, SCORE))
    print("Submitting to crowdAI.org as Stanford...")

    if not DEBUG_MODE and not LOCAL_TEST:
        api_key = hGet("CROWDAI::API_KEY_MAP", instance_id.split("___")[0])
        api_key = api_key.decode('utf-8')
        submission_id = hGet("CROWDAI::INSTANCE_ID_MAP", instance_id)
        submission_id = int(submission_id.decode('utf-8'))
        try:
            api = CROWDAI_API(CROWDAI_TOKEN)
            api.authenticate_participant(api_key)
            submission = api.get_submission(CROWDAI_CHALLENGE_CLIENT_NAME, submission_id)
            submission.grading_status = "graded"
            submission.message = "Graded Successfully !"
            submission.score = SCORE
            submission.meta = {}
            submission.meta["grader"] = "live_grader"
            submission.update()
            rPush("CROWDAI::SUBMITTED_Q", instance_id)
        except Exception as e:
            logger.error(traceback.format_exc())
            print("Unable to update score on crowdAI")
            print(str(e))
    return SCORE

def _authenticate_crowdai():
    CROWDAI_AUTH_TOKEN = os.environ["CROWDAI_AUTH_TOKEN"]
    crowdai_api = CROWDAI_API(CROWDAI_AUTH_TOKEN)
    return crowdai_api

def test_error_on_authentication_with_wrong_username():
    """Tests if it throws error on authentication with wrong username"""
    with pytest.raises(CrowdAIRemoteException):
        api = CROWDAI_API(AUTH_TOKEN)
        api.authenticate_participant_with_username(str(uuid.uuid4()))
        assert api.participant_id is not False

def test_valid_api_authentication_with_username():
    """Tests if we can authenticate with valid username"""
    api = CROWDAI_API(AUTH_TOKEN)
    api.authenticate_participant_with_username("spMohanty")
    assert api.participant_id is not False

def test_invalid_apikey_does_not_authenticate():
    """Tests that we cannot authenticate with an invalid API Key"""
    with pytest.raises(CrowdAIRemoteException):
        api = CROWDAI_API(AUTH_TOKEN)
        api.authenticate_participant(str(uuid.uuid4()))

def test_valid_apikey_authenticates():
    """Tests if we can authenticate with valid API Key"""
    api = CROWDAI_API(AUTH_TOKEN)
    api.authenticate_participant(EXAMPLE_API_KEY)
    assert api.participant_id is not False

def env_create():
    """
    Create an instance of the specified environment

    Parameters:
        - env_id: gym environment ID string, such as 'CartPole-v0'
    Returns:
        - instance_id: a short identifier (such as '3c657dbc') for the
          created environment instance. The instance_id is used in future
          API calls to identify the environment to be manipulated.
    """
    env_id = get_required_param(request.get_json(), 'env_id')
    api_key = get_required_param(request.get_json(), 'token').strip()
    version = get_required_param(request.get_json(), 'version')

    if not DISABLE_VERIFICATION:
        try:
            api = CROWDAI_API(CROWDAI_TOKEN)
            api.authenticate_participant(api_key)

            # Try to see if an env variable specifies the round, else default
            crowdai_env_difficulty = int(os.getenv("CROWDAI_ENV_DIFFICULTY", 0))
            crowdai_round_id = os.getenv('CROWDAI_ROUND_ID', 29)
            if crowdai_round_id:
                crowdai_round_id = int(crowdai_round_id)

            if not LOCAL_TEST:
                submission = api.create_submission(
                    CROWDAI_CHALLENGE_CLIENT_NAME,
                    round_id=crowdai_round_id)
                hSet("CROWDAI_DIFFICULTY_MAP",
                     str(submission.id),
                     str(crowdai_env_difficulty))
            else:
                submission = MockSubmission()
        except Exception as e:
            error_message = str(e)
            response = jsonify(message=error_message)
            response.status_code = 400
            logger.error(traceback.format_exc())
            return response
        participant_id = api.participant_id
    else:
        participant_id = str(0)

    # Validate client version
    if version != pkg_resources.get_distribution("osim-rl").version:
        error_message = ("Wrong client version. Please update to the new version. "
                         "Read more on https://github.com/stanfordnmbl/osim-rl/docs")
        response = jsonify(message=error_message)
        response.status_code = 400
        if not DISABLE_VERIFICATION:
            submission.grading_status = "failed"
            submission.message = error_message
            submission.update()
        return response

    try:
        instance_id = create_env_after_validation(envs, env_id, participant_id)
    except Exception as e:
        error_message = str(e)
        # submission only exists when verification is enabled
        if not DISABLE_VERIFICATION:
            submission.grading_status = "failed"
            submission.message = error_message
            submission.update()
        response = jsonify(message=error_message)
        response.status_code = 404
        logger.error(traceback.format_exc())
        return response

    hSet("CROWDAI::API_KEY_MAP", participant_id, api_key)
    if not DISABLE_VERIFICATION:
        hSet("CROWDAI::INSTANCE_ID_MAP", instance_id, submission.id)

    response = jsonify(instance_id=instance_id)
    response.status_code = 200
    return response

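# Illustrative client-side call to the env_create endpoint above. The route
# ("/v1/envs/"), host, and payload values are assumptions made for this
# sketch; the real URL depends on how the handler is registered in the Flask
# app, and the version string must match the server's osim-rl release.
import requests

payload = {
    "env_id": "CartPole-v0",           # any gym environment id
    "token": "<participant-api-key>",  # hypothetical participant API key
    "version": "1.5.0",                # placeholder; must match the server's osim-rl version
}
resp = requests.post("http://localhost:5000/v1/envs/", json=payload)
if resp.status_code == 200:
    instance_id = resp.json()["instance_id"]
else:
    print("env_create failed:", resp.json().get("message"))
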
def test_throws_error_when_not_authenticated():
    """Tests that an error is thrown when creating a submission without
    authentication"""
    api = CROWDAI_API(AUTH_TOKEN)
    challenge_id = "test_challenge"
    with pytest.raises(CrowdAIAPIException):
        api.create_submission(challenge_id)

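# The tests above rely on AUTH_TOKEN and EXAMPLE_API_KEY, which are not
# defined in this section. A minimal sketch of how they might be provided,
# assuming both values come from environment variables (the variable names
# and placeholder defaults below are hypothetical):
import os

AUTH_TOKEN = os.environ.get("CROWDAI_CHALLENGE_AUTH_TOKEN", "<challenge-auth-token>")
EXAMPLE_API_KEY = os.environ.get("CROWDAI_EXAMPLE_API_KEY", "<participant-api-key>")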