def job_execution_wrapper(data):
    redis_conn = redis.Redis(connection_pool=POOL)
    job = get_current_job()

    _context = {}
    _context['redis_conn'] = redis_conn
    _context['response_channel'] = data['broker_response_channel']
    _context['job_id'] = job.id
    _context['data_sequence_no'] = data['data_sequence_no']
    _context['api_key'] = data['extra_params']['api_key']

    # Register Job Running event
    _update_job_event(
        _context,
        job_running_template(_context['data_sequence_no'], job.id)
        )
    result = {}
    try:
        if data["function_name"] == "grade_submission":
            # Run the job
            answer_file_path = config.answer_file_path

            result = _submit(data["data"], answer_file_path, _context)
            # Register Job Complete event
            if data["data"]["round"] == 2:
                # Hide scores in case of round 2
                del result["score"]
                del result["score_secondary"]

            _update_job_event(
                _context,
                job_info_template(
                    _context,
                    "Scores Submitted Successfully! Please remember to upload "
                    "your code and a description of your approach (as a short "
                    "paper) to https://gitlab.crowdai.org, as a private "
                    "repository, to be eligible for the final round-2 rankings.")
                )
            _update_job_event(
                _context,
                job_complete_template(_context, result)
                )
        else:
            _error_object = job_error_template(
                _context['data_sequence_no'],
                job.id,
                "Function not implemented error"
                )
            _update_job_event(_context, _error_object)
            result = _error_object
    except Exception as e:
        _error_object = job_error_template(
            _context['data_sequence_no'],
            job.id,
            str(e)
            )
        _update_job_event(_context, _error_object)
    return result
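# Note: `_update_job_event` and the `job_*_template` helpers used above are not part
# of this example. A minimal sketch of what they might look like, assuming each
# template is a plain dict that gets published over the Redis response channel
# (field names are assumptions, not the project's actual payload format):
import json

def _update_job_event(_context, message):
    # Publish the event on the broker response channel stored in the context
    _context['redis_conn'].publish(
        _context['response_channel'],
        json.dumps(message))

def job_running_template(data_sequence_no, job_id):
    return {
        "data_sequence_no": data_sequence_no,
        "job_state": "RUNNING",
        "job_id": job_id,
    }

def job_info_template(_context, message):
    return {
        "data_sequence_no": _context['data_sequence_no'],
        "job_state": "INFO",
        "job_id": _context['job_id'],
        "message": message,
    }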
def grade_submission(data, _context):
    file_key = data["file_key"]

    _update_job_event(
        _context, job_info_template(_context,
                                    "Post Processing MIDI file...."))

    pruned_filekey, converted_filekeys = post_process_midi(
        _context, POOL, file_key)
    processed_filekeys = converted_filekeys
    print("Making submission on crowdAI")
    api = CROWDAI_API(config.CROWDAI_TOKEN)
    api.authenticate_participant(_context["api_key"])
    meta = {}
    meta["file_key"] = file_key
    #meta["processed_filekeys"] = processed_filekeys
    submission = api.create_submission(config.challenge_id)
    submission.score = config.SCORE_DEFAULT
    submission.score_secondary = config.SCORE_SECONDARY_DEFAULT
    submission.grading_status = "graded"
    submission.meta = meta
    submission.update()
    submission_id = submission.id
    print("Submitted : ", submission)
    register_submission_on_redis(_context, POOL, submission_id, pruned_filekey,
                                 processed_filekeys)

    message_for_participants = """
    Score (mu) : {}
    Secondary_score (sigma) : {}
    Please note that these scores are only the initial scores,
    and they will change over time as your submission is
    evaluated by the human volunteers.
    """.format(submission.score, submission.score_secondary)

    _update_job_event(_context,
                      job_info_template(_context, message_for_participants))

    result = {'result': 'submission recorded'}
    result['score_mu'] = submission.score
    result['score_sigma'] = submission.score_secondary
    _update_job_event(_context, job_complete_template(_context, result))
    return result
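# Usage sketch: how a grading job like the one above could be enqueued with RQ.
# Queue name, channel name and payload values are illustrative, not taken from the
# original project.
import redis
from rq import Queue

redis_conn = redis.Redis()
q = Queue("grading", connection=redis_conn)
job = q.enqueue(job_execution_wrapper, {
    "function_name": "grade_submission",
    "data_sequence_no": 1,
    "broker_response_channel": "broker_response_channel_1",
    "data": {"file_key": "submissions/example-submission.csv", "round": 1},
    "extra_params": {"api_key": "<participant-api-key>"},
})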
def _submit(client_payload, answer_file_path, context):
    """
        takes a list of predicted heights and actual heights
        and computes the score
        and prepares the plots for submission to the leaderboard
    """
    file_key = client_payload["file_key"]
    _update_job_event(context,
                      job_info_template(context, "Grading Submission...."))

    _payload = {}
    _meta = {}
    _meta['file_key'] = file_key
    _payload["meta"] = _meta
    submission_id = report_to_crowdai(context,
                                      _payload,
                                      submission_id=False,
                                      status='submitted')
    print("Submission id : ", submission_id)
    try:
        localfilepath = download_file(context, file_key)
        _client_payload = {}
        _client_payload["submission_file_path"] = localfilepath

        _result_object = config.evaluator._evaluate(_client_payload, context)
        print(_result_object)
        _payload = _result_object
        report_to_crowdai(context,
                          _payload,
                          submission_id=submission_id,
                          status='graded')
        # Clean up file if possible
        os.remove(localfilepath)
        return _result_object
    except Exception as e:
        # Report to crowdAI
        if "meta" in _payload.keys():
            del _payload["meta"]
        report_to_crowdai(context,
                          _payload,
                          submission_id=submission_id,
                          status='failed',
                          message=str(e))
        # raise the exception again
        # so that it can be handled further down the chain
        raise e
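# `download_file` is assumed to fetch the participant's submission from object
# storage to a local temp path. A minimal sketch using boto3; the bucket setting
# (config.S3_BUCKET) is an assumption and may be named differently in the real project.
import os
import tempfile
import boto3

def download_file(context, file_key):
    s3 = boto3.client("s3")
    local_path = os.path.join(tempfile.mkdtemp(), os.path.basename(file_key))
    s3.download_file(config.S3_BUCKET, file_key, local_path)  # bucket name assumed
    return local_path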
def job_execution_wrapper(data):
    redis_conn = redis.Redis(connection_pool=POOL)
    job = get_current_job()

    _context = {}
    _context['redis_conn'] = redis_conn
    _context['response_channel'] = data['broker_response_channel']
    _context['job_id'] = job.id
    _context['data_sequence_no'] = data['data_sequence_no']
    _context['api_key'] = data['extra_params']['api_key']

    # Register Job Running event
    _update_job_event(
        _context, job_running_template(_context['data_sequence_no'], job.id))
    result = {}
    try:
        if data["function_name"] == "grade_submission":
            # Run the job
            answer_file_path = config.answer_file_path

            result = _submit(data["data"], answer_file_path, _context)
            # Register Job Complete event
            _update_job_event(
                _context,
                job_info_template(
                    _context,
                    "Scores Submitted Successfully! Mean Absolute Percentage "
                    "Error (MAPE): %s" % (str(result['score']))))
            _update_job_event(_context,
                              job_complete_template(_context, result))
        else:
            _error_object = job_error_template(
                _context['data_sequence_no'], job.id,
                "Function not implemented error")
            _update_job_event(_context, _error_object)
            result = _error_object
    except Exception as e:
        _error_object = job_error_template(_context['data_sequence_no'],
                                           job.id, str(e))
        _update_job_event(_context, _error_object)
    return result
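# `job_complete_template` and `job_error_template` are likewise not shown in these
# examples. A minimal sketch, following the same dict-over-Redis payload shape as the
# sketches above (field names are assumptions):
def job_complete_template(_context, result):
    return {
        "data_sequence_no": _context['data_sequence_no'],
        "job_state": "COMPLETE",
        "job_id": _context['job_id'],
        "result": result,
    }

def job_error_template(data_sequence_no, job_id, error_message):
    return {
        "data_sequence_no": data_sequence_no,
        "job_state": "ERROR",
        "job_id": job_id,
        "message": error_message,
    }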
Example #5
def split_midi_into_chunks(_context, midifile, track_length, target_directory):
    cumulative_ticks = 0
    cumulative_time = 0
    relative_time = 0
    track_index = 0
    SPLITS = []
    for _ in range(config.MIDI_NUM_SPLITS):
        SPLITS.append(mido.MidiTrack())

    for _message in midifile.tracks[0]:
        cumulative_ticks += _message.time
        delta_s = mido.tick2second(
                    _message.time,
                    midifile.ticks_per_beat,
                    DEFAULT_MIDI_TEMPO
                    )

        if random.randint(0, 100) < 5:
            # Report progress with a probability of roughly 5%
            progress_step_offset = 0
            progress_step_weight = 0.33
            percent_complete = (cumulative_time * 1.0 / track_length) * 100
            update_progress(
                _context,
                (progress_step_offset + (progress_step_weight * percent_complete))
                )

        cumulative_time += delta_s
        relative_time += delta_s
        if not _message.is_meta:
            SPLITS[track_index].append(_message.copy())
        else:
            # Ignore all meta messages
            pass

        if relative_time >= (track_length * 1.0 / config.MIDI_NUM_SPLITS):
            relative_time = 0
            # Clamp so trailing messages never index past the last split
            track_index = min(track_index + 1, config.MIDI_NUM_SPLITS - 1)

    _update_job_event(
        _context,
        job_info_template(
            _context, "Saving split chunks..."))

    split_file_paths = []
    split_length_sum = 0
    # Write split files into target_directory
    for _idx, _track in enumerate(SPLITS):
        _m = mido.MidiFile()
        _m.ticks_per_beat = midifile.ticks_per_beat
        _m.tracks.append(_track)
        split_length_sum += _m.length
        target_file_path = "{}/{}.midi".format(
            target_directory,
            str(uuid.uuid4())
            )
        _m.save(target_file_path)
        split_file_paths.append(target_file_path)
        progress_step_offset = 33
        progress_step_weight = 0.25
        percent_complete = (_idx * 1.0 / len(SPLITS)) * 100
        update_progress(
            _context,
            (progress_step_offset + (progress_step_weight * percent_complete))
            )


    return split_file_paths
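# Usage sketch for the splitter above, assuming mido is installed and `_context` is
# the worker context dict built in `job_execution_wrapper` (the file path is
# illustrative):
import tempfile
import mido

midifile = mido.MidiFile("submission.midi")
track_length = midifile.length  # total playback time in seconds
chunk_paths = split_midi_into_chunks(
    _context, midifile, track_length, tempfile.mkdtemp())
print("Wrote {} chunk files".format(len(chunk_paths)))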
Example #6
def post_process_midi(_context, redis_pool, filekey):
    """
        Helper file to post process midi file
    """
    local_directory_path, local_filepath = download_midi(_context, filekey)

    """
    # Load and validate midi file
    """
    _update_job_event(
        _context,
        job_info_template(
            _context, "Validating MIDI file..."))

    midi, track_length = load_and_validate_midi(_context, local_filepath)

    _update_job_event(
        _context,
        job_info_template(
            _context, "MIDI file validated..."))

    """
    # Split midi file into NUMBER_OF_PARTS (180)
    """
    _update_job_event(
        _context,
        job_info_template(
            _context, "Splitting file into 120 chunks of ~30 seconds each..."))

    split_file_paths = split_midi_into_chunks(
        _context,
        midi,
        track_length,
        local_directory_path
        )

    """
    # Convert midi files to dataURI
    """
    _update_job_event(
        _context,
        job_info_template(
            _context, "Encoding individual chunks..."))
    pruned_filekey = filekey.split("/")[-1]
    converted_filekeys = convert_midi_files_to_json(
                    _context,
                    [local_filepath]+split_file_paths,
                    pruned_filekey
                    )

    """
    # Upload to target directory on S3
    """
    _update_job_event(
        _context,
        job_info_template(
            _context, "Saving encoded chunks..."))
    upload_processed_files_to_s3(_context, local_directory_path, pruned_filekey)

    """
    # Clean up
    """
    _update_job_event(
        _context,
        job_info_template(
            _context, "Cleaning up..."))

    shutil.rmtree(local_directory_path)
    # Add relevant entries in redis queues

    return pruned_filekey, converted_filekeys
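# `convert_midi_files_to_json` is referenced above but not shown. A minimal sketch
# that matches the "convert midi files to dataURI" comment: each file is
# base64-encoded as a data URI and written to a sidecar JSON file. The exact output
# layout and returned keys are assumptions.
import base64
import json
import os

def convert_midi_files_to_json(_context, file_paths, pruned_filekey):
    converted_filekeys = []
    for path in file_paths:
        with open(path, "rb") as f:
            data_uri = ("data:audio/midi;base64," +
                        base64.b64encode(f.read()).decode("ascii"))
        json_path = path + ".json"
        with open(json_path, "w") as out:
            json.dump({"data_uri": data_uri}, out)
        converted_filekeys.append(
            "{}/{}".format(pruned_filekey, os.path.basename(json_path)))
    return converted_filekeys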
def _submit(client_payload, answer_file_path, context):
    """
        takes a list of predicted heights and actual heights
        and computes the score
        and prepares the plots for submission to the leaderboard
    """
    file_key = client_payload["file_key"]
    _update_job_event(
        context,
        job_info_template(
            context, "Grading Submission....")
    )

    if "round" not in client_payload.keys():
        raise Exception("""
        The round parameter has not been specified. Please upgrade your
        crowdai client to at least version 1.0.21 with:
        pip install -U crowdai

        and then update your submission code by following the latest
        instructions from:
        https://github.com/crowdAI/ieee_investment_ranking_challenge-starter-kit#submission-of-predicted-file-to-crowdai
        """)

    round_id = client_payload["round"]
    assert round_id in config.crowdai_round_id_map.keys(), \
        "Unknown Round ID Passed. Allowed values : {}".format(
            str(config.crowdai_round_id_map.keys())
        )
    crowdai_round_id = config.crowdai_round_id_map[round_id]

    _payload = {}
    _meta = {}
    _meta['file_key'] = file_key
    _payload["meta"] = json.dumps(_meta)
    _payload["challenge_round_id"] = crowdai_round_id
    submission_id = report_to_crowdai(
                    context,
                    _payload,
                    submission_id=False,
                    status='submitted')
    print("Submission id : ", submission_id)
    try:
        localfilepath = download_file(context, file_key)
        _client_payload = {}
        _client_payload["submission_file_path"] = localfilepath

        _result_object = config.evaluator._evaluate(
            client_payload=_client_payload,
            round_indicator=round_id,
            _context=context)
        print(_result_object)
        _payload = _result_object
        report_to_crowdai(
                        context,
                        _payload,
                        submission_id=submission_id,
                        message="graded successfully",
                        status='graded')
        # Clean up file if possible
        os.remove(localfilepath)
        return _result_object
    except Exception as e:
        # Report to crowdAI
        if "meta" in _payload.keys():
            del _payload["meta"]
        report_to_crowdai(
                        context,
                        _payload,
                        submission_id=submission_id,
                        status='failed',
                        message=str(e)
                        )
        # raise the exception again
        # so that it can be handled further down the chain
        raise e
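# `report_to_crowdai` is not shown in these examples. A minimal sketch that mirrors
# the CROWDAI_API usage in `grade_submission` above; it caches submission objects per
# worker process so the "graded"/"failed" update can reuse the object created when
# the submission was first reported. The cache and the `grading_message` attribute
# are assumptions; the real helper may look submissions up through the API instead.
_SUBMISSION_CACHE = {}

def report_to_crowdai(context, payload, submission_id=False,
                      status='submitted', message=''):
    api = CROWDAI_API(config.CROWDAI_TOKEN)
    api.authenticate_participant(context["api_key"])
    if submission_id is False:
        submission = api.create_submission(config.challenge_id)
        _SUBMISSION_CACHE[submission.id] = submission
    else:
        submission = _SUBMISSION_CACHE[submission_id]
    submission.grading_status = status
    submission.grading_message = message  # attribute name assumed
    if "score" in payload:
        submission.score = payload["score"]
    if "score_secondary" in payload:
        submission.score_secondary = payload["score_secondary"]
    if "meta" in payload:
        submission.meta = payload["meta"]
    submission.update()
    return submission.id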