def _test_application_handler(task_name):
    """Run one minimal end-to-end AL experiment for *task_name* and exercise
    the save/plot round trip.

    The annotation budget is shrunk to a single batch beyond the starting
    set so the test finishes quickly.  Results are written to a JSON file
    and re-read for (non-displayed) plotting.
    """
    # NOTE(review): removed unused local `test = True`; the test flag is
    # already passed literally to ParallelRunHandler below.
    (task_param, base_dataset, usual_batch_size,
     al_params, n_jobs, no_repetitions) = get_application_config(task_name)
    # one annotation batch on top of the seed set keeps the run short
    al_params.annotationBudget = al_params.startingSize + usual_batch_size

    # define application handler
    agent_params = ALAgentParameters(agent_name="Random", batch_size_annotation=usual_batch_size, batch_size_agent=-1)
    application_handler = ApplicationHandler(task_param, al_params, agent_params)

    # define lists
    task_param_list = [task_param]
    agent_param_list = [agent_params]

    # define file handler for saving the results
    filename = f"../pytests/tests_application_handlers/applicationHandler_test_{task_name}.json"
    file_handler = ApplicationHandlerFileHandlerJSON(filename)

    # run the experiment (serial, test mode, results kept in memory)
    with ParallelRunHandler(task_param_list[0].get_experiment_filename(), n_jobs=1, test=True, save_results=False,
                            parallelization=False) as parallel_run_handler:
        finished_application_handlers = parallel_run_handler.al_apply_agents_on_task(
            task_param_list, al_params, agent_param_list,
        )

    # save the results
    file_handler.write_application_handlers_to_file(finished_application_handlers)

    # plot the results (plot_really=False: exercise the code path only)
    ApplicationHandlerFileHandlerJSON(filename).plot_all_content_with_confidence_intervals(plot_really=False)
# Example #2
def _get_test_parameters():
    """Build the pytest parameter grid: every agent crossed with every
    annotation/agent batch-size combination, each tagged with a readable id.
    """
    agent_names = ("Random", "Uncertainty", "Diversity",
                   "Uncertainty_Diversity", "Representative", "Ensemble")
    return [
        pytest.param(
            ALAgentParameters(agent_name=name,
                              batch_size_annotation=annotation_size,
                              batch_size_agent=agent_size),
            id=f'{name}_{annotation_size}_{agent_size}')
        for name in agent_names
        for annotation_size in (1, 8)
        for agent_size in (1, 3, -1)
    ]
def _test_ensemble_training(task_name):
    """Smoke-test ensemble training via hyperopt on *task_name*.

    Uses a tiny budget (8 seed samples, 16 total annotations, 2 hyperopt
    evaluations across 2 jobs) so the optimization loop is exercised
    without a long runtime.
    """
    starting_size = 8
    annotation_budget = 16
    batch_size_annotation = -1
    n_jobs = 2
    max_evals = 2

    # atpe chosen from the three hyperopt search strategies
    algo = [hp.atpe.suggest, hp.tpe.suggest, hp.rand.suggest][0]

    task_param_list = [TaskParameters(task_name)]

    al_params = ALParameters(annotation_budget=annotation_budget, starting_size=starting_size)
    agent_param = ALAgentParameters(agent_name="Ensemble", batch_size_annotation=batch_size_annotation)

    train_ensemble_with_hyperopt(algo, task_param_list, n_jobs, al_params,
                                 agent_param, max_evals, verbose=False)
    # NOTE(review): removed leftover debug statement `print('blub')`
    def define_agents_dict(self):
        """Instantiate one agent per key of ``self.beta_dict`` (stored in
        ``self.agents_dict``) plus a separate random baseline agent
        (``self.agent_random``), all sharing this ensemble's batch sizes."""
        # import here to prevent cyclic imports at module load time
        from AL_agents.al_agent_parameters import ALAgentParameters

        annotation_size = self.al_agent_parameters.batch_size_annotation
        agent_size = self.al_agent_parameters.batch_size_agent

        self.agents_dict = {
            name: ALAgentParameters(
                agent_name=name,
                batch_size_annotation=annotation_size,
                batch_size_agent=agent_size).create_agent()
            for name in self.beta_dict.keys()
        }

        random_params = ALAgentParameters("Random", annotation_size, agent_size)
        self.agent_random = random_params.create_agent()
# Example #5
def _apply_single_heuristic(agent_parameters: ALAgentParameters):
    """Roll out a single AL agent on the rotated 2x2 checkerboard task.

    Creates the agent from *agent_parameters*, builds a fixed environment
    (8 seed samples, budget 16, annotation batches of 4) and steps the
    agent's policy until the environment signals completion.
    """
    # NOTE(review): removed the dead `iteration` counter and commented-out
    # prints; the counter was only consumed by the removed debug output.
    agent = agent_parameters.create_agent()

    al_parameters = ALParameters(starting_size=8, annotation_budget=16)
    al_parameters.batch_size_annotation = 4
    task_params = TaskParameters(task_name="model_checkerboard",
                                 dataset="2x2_rotated")
    task = task_params.create_task()

    al_env = ALEnvironment(al_parameters, task)

    expected_no_iterations = al_env.expected_number_iterations()

    observation = al_env.reset()
    for _ in range(expected_no_iterations):
        action = agent.policy(observation)
        observation, reward, done, info = al_env.step(action)
        if done:
            break
# Example #6
# task selection (index 2 -> the vision task) and run flags
task_name = ["model_UCI", "model_checkerboard", "model_Vision",
             "model_bAbI"][2]
test = False
delete_old_ensemble_data = False
only_last_agent = False
only_ensembles_with_batch_sizes = True

# define task
(task_param, base_dataset, usual_batch_size, al_params, n_jobs,
 no_repetitions) = get_application_config(task_name)

# define agents to apply on the task, all using the usual batch size
agent_param_list = []
for _agent_name in ("Random", "Uncertainty", "Diversity",
                    "Uncertainty_Diversity", "Representative"):
    agent_param_list.append(
        ALAgentParameters(agent_name=_agent_name,
                          batch_size_annotation=usual_batch_size))
beta_dict = BetaDictHandler(task_name).beta_dict
agent_param_list.append(
    ALAgentParameters(agent_name="Ensemble",
# Example #7
runs_per_objective_function = 2

max_evals = 100

# tpe chosen from the three hyperopt search strategies
algo = [hp.atpe.suggest, hp.tpe.suggest, hp.rand.suggest][1]

# Parameters for monte carlo simulation
training_task = ['UCI', 'checkerboard', 'MNIST', 'bAbI'][2]

# map each training-task label to its (task_name, dataset) pairs
_task_specs = {
    'UCI': [("model_UCI", uci_dataset) for uci_dataset in
            ['2-breast_cancer', '3-diabetis', '4-flare_solar',
             '5-german', '6-heart', '7-mushrooms', '8-waveform', '9-wdbc']],
    'checkerboard': [("model_checkerboard", "2x2"),
                     ("model_checkerboard", "2x2_rotated")],
    'MNIST': [("model_Vision", 'MNIST')],
    'bAbI': [("model_bAbI", 'single_supporting_fact')],
}
task_param_list = [TaskParameters(task_name=name, dataset=dataset)
                   for name, dataset in _task_specs.get(training_task, [])]

task_names = [task_param.__short_repr__() for task_param in task_param_list]
# replicate the task list so roughly runs_per_objective_function runs happen
task_param_list *= int(max(1, runs_per_objective_function / len(task_param_list)))
# NOTE(review): annotation_budget, starting_size, batch_size_annotation and
# n_jobs are not defined in this chunk — presumably set earlier in the full
# script; confirm before running standalone.
al_params = ALParameters(annotation_budget=annotation_budget, starting_size=starting_size)
agent_param = ALAgentParameters(agent_name="Ensemble", batch_size_annotation=batch_size_annotation)

train_ensemble_with_hyperopt(algo, task_param_list, n_jobs, al_params, agent_param, max_evals, parallelization=False)