Example #1
# imports added to make the example self-contained; module paths may differ in the original project
from flask import render_template

import utils_jobs
import utils_users


def start(username):

    # register start time for this user
    utils_users.get_experiment_start_time(username, 'pairwise')

    # get the starting points from the acquirer
    job1, job2 = utils_jobs.get_next_start_jobs(username)

    # transform the two jobs into dictionaries
    job1 = utils_jobs.job_array_to_job_dict(job1)
    job2 = utils_jobs.job_array_to_job_dict(job2)

    return render_template("query_pairwise_jobs.html", username=username, job1=job1, job2=job2, side_clicked=-1)
Example #2
# imports added to make the example self-contained; module paths may differ in the original project
import numpy as np
from flask import render_template

import specs_jobs
import utils_jobs
import utils_users
# the GP and acquirer are project classes; their import paths here are assumptions
from gaussian_process import GPPairwise
from acquisition_function import DiscreteAcquirer


def start(username):

    # get the start time for this user
    utils_users.get_experiment_start_time(username, 'ranking')

    # get the dataset for this user
    user_dataset = utils_users.get_gp_dataset(
        username, 'ranking', num_objectives=specs_jobs.NUM_OBJECTIVES)

    # if no data has been collected yet, we only display two starting jobs
    if user_dataset.comparisons.shape[0] == 0:

        # delete any datapoints in the user's dataset (in case a previous experiment was aborted)
        user_dataset.datapoints = np.empty((0, specs_jobs.NUM_OBJECTIVES))

        # get the starting points from the acquirer
        job1, job2 = utils_jobs.get_next_start_jobs(username)

        # add jobs to dataset of user
        job1_idx = user_dataset._add_single_datapoint(job1)
        job2_idx = user_dataset._add_single_datapoint(job2)

        # save dataset
        utils_users.update_gp_dataset(username, user_dataset, 'ranking')

        # convert into displayable format
        job1 = utils_jobs.job_array_to_job_dict(job1)
        job2 = utils_jobs.job_array_to_job_dict(job2)

        # add IDs to the above dictionaries (equal to their indices in the dataset)
        job1['ID'] = job1_idx
        job2['ID'] = job2_idx

        # put jobs we want to display in the respective lists
        jobs_unranked = [job1, job2]
        jobs_ranked = []

    # otherwise, we show the previous ranking and pick a new point according to that
    else:

        # initialise the GP
        gp = GPPairwise(num_objectives=specs_jobs.NUM_OBJECTIVES,
                        seed=specs_jobs.SEED)

        # initialise acquirer
        acquirer = DiscreteAcquirer(input_domain=utils_jobs.get_jobs(),
                                    query_type='clustering',
                                    seed=specs_jobs.SEED)

        # add collected datapoints to acquirer
        acquirer.history = user_dataset.datapoints
        # add collected datapoints to GP
        gp.update(user_dataset)

        # let acquirer pick new point
        job_new = acquirer.get_next_point(gp, user_dataset)

        # add that point to the dataset and save
        job_new_idx = user_dataset._add_single_datapoint(job_new)
        utils_users.update_gp_dataset(username, user_dataset, 'ranking')

        # convert job to dictionary
        job_new = utils_jobs.job_array_to_job_dict(job_new)

        # add the ID
        job_new['ID'] = job_new_idx

        # put into list of jobs that need to be ranked
        jobs_unranked = [job_new]

        # get ranking so far
        ranking = utils_users.get_ranking(username)
        # get the job information from that ranking and convert to dictionaries
        jobs_ranked = user_dataset.datapoints[ranking]
        jobs_ranked = [
            utils_jobs.job_array_to_job_dict(job) for job in jobs_ranked
        ]
        # add the IDs
        for i in range(len(ranking)):
            jobs_ranked[i]['ID'] = ranking[i]

    return render_template("query_ranking_jobs.html",
                           username=username,
                           jobs_unranked=jobs_unranked,
                           jobs_ranked=jobs_ranked)
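
Example #2 interleaves fitting the GPPairwise model and querying the DiscreteAcquirer with the view logic. The sketch below pulls that query step out into a standalone helper for clarity; it uses only calls that appear in the example above, but the helper itself (name and signature) is an assumption and not part of the original code.

def pick_next_ranking_job(username, user_dataset):
    # hypothetical helper, not in the original code: fit the preference GP
    # to the comparisons collected so far and ask the acquirer for a new job
    gp = GPPairwise(num_objectives=specs_jobs.NUM_OBJECTIVES,
                    seed=specs_jobs.SEED)
    gp.update(user_dataset)

    # the acquirer searches the discrete set of jobs for the next query point
    acquirer = DiscreteAcquirer(input_domain=utils_jobs.get_jobs(),
                                query_type='clustering',
                                seed=specs_jobs.SEED)
    acquirer.history = user_dataset.datapoints

    # pick the next job, add it to the user's dataset, and persist the dataset
    job_new = acquirer.get_next_point(gp, user_dataset)
    job_new_idx = user_dataset._add_single_datapoint(job_new)
    utils_users.update_gp_dataset(username, user_dataset, 'ranking')

    return job_new, job_new_idx

Factoring the query step out like this would keep the view function focused on rendering and make the GP/acquirer interaction easier to test in isolation.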