Example #1
from typing import Optional

# Assumes project-level imports for ComplexStochasticNetwork, Genome and
# DEFAULT_LOGVAR, which are not shown in this snippet.
def convert_stochastic_network_to_genome(network: ComplexStochasticNetwork,
                                         original_genome: Optional[Genome] = None,
                                         fitness=None, fix_std=True) -> Genome:
    if original_genome is None:
        raise NotImplementedError('conversion without an original genome is not supported')

    genome = original_genome.copy()

    # Walk the network layer by layer; each stochastic layer stores the
    # variational parameters (q*_mean, q*_logvar) of its weights and biases.
    for layer_index in range(network.n_layers):
        stochastic_linear_layer = getattr(network, f'layer_{layer_index}')
        layer = network.layers[layer_index]

        # Copy each learned bias posterior into the corresponding node gene.
        for bias_index, node_key in enumerate(layer.output_keys):
            genome.node_genes[node_key].set_mean(stochastic_linear_layer.qb_mean[bias_index])
            if fix_std:
                bias_logvar = DEFAULT_LOGVAR
            else:
                bias_logvar = stochastic_linear_layer.qb_logvar[bias_index]
            genome.node_genes[node_key].set_log_var(bias_logvar)

        # Copy each learned weight posterior into the matching connection gene,
        # skipping weights that have no corresponding gene in the genome.
        for connection_input_index, input_node_key in enumerate(layer.input_keys):
            for connection_output_index, output_node_key in enumerate(layer.output_keys):
                connection_key = (input_node_key, output_node_key)
                mean = stochastic_linear_layer.qw_mean[connection_output_index, connection_input_index]
                if fix_std:
                    weight_logvar = DEFAULT_LOGVAR
                else:
                    weight_logvar = stochastic_linear_layer.qw_logvar[connection_output_index, connection_input_index]
                if connection_key in genome.connection_genes.keys():
                    genome.connection_genes[connection_key].set_mean(mean)
                    genome.connection_genes[connection_key].set_log_var(weight_logvar)

    genome.fitness = fitness

    return genome
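
For reference, a minimal usage sketch. This is hypothetical: the variable names and the ComplexStochasticNetwork constructor signature below are invented for illustration and are not shown in this example.

# Hypothetical usage; `seed_genome`, `trained_network` and the constructor
# signature ComplexStochasticNetwork(genome=...) are assumptions.
trained_network = ComplexStochasticNetwork(genome=seed_genome)
# ... train the variational parameters of trained_network ...
updated_genome = convert_stochastic_network_to_genome(network=trained_network,
                                                      original_genome=seed_genome,
                                                      fix_std=False)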
Example #2
from multiprocessing import Pool

# Assumes process_initialization, evaluate_genome_parallel, get_loss, config,
# logger, genomes and N_PROCESSES are defined elsewhere in the project.
pool = Pool(processes=N_PROCESSES, initializer=process_initialization,
            initargs=(config.dataset, True))

tasks = []
for genome in genomes:
    logger.debug(f'Genome {genome.key}: {genome.get_graph()}')

    # Inputs are loaded inside each worker by process_initialization, so the
    # task tuple only carries the evaluation settings.
    loss = get_loss('classification')
    beta_type = 'other'
    problem_type = 'classification'
    batch_size = 100000
    n_samples = 20
    is_gpu = False
    task = (genome.copy(),
            loss, beta_type, problem_type,
            batch_size, n_samples, is_gpu)

    tasks.append(task)

# TODO: fix logging when using multiprocessing. Easy fix is to disable it.
fitnesses = list(pool.imap(evaluate_genome_parallel, tasks,
                           chunksize=max(len(genomes) // N_PROCESSES, 1)))

pool.close()
pool.join()
for genome, fitness in zip(genomes, fitnesses):
    print(fitness)
    genome.fitness = fitness

print('finished')
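
The body of evaluate_genome_parallel is not shown in this example. A plausible skeleton, inferred only from the task tuple built above (everything inside is an assumption, not the project's actual code):

def evaluate_genome_parallel(task):
    # Skeleton inferred from the task tuple built above; the real project
    # implementation is not shown here and may differ.
    genome, loss, beta_type, problem_type, batch_size, n_samples, is_gpu = task
    # A real implementation would build a network from the genome, draw
    # n_samples stochastic forward passes over the dataset loaded by
    # process_initialization, and score them with `loss`.
    fitness = 0.0  # placeholder
    return fitness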
Example #3
import torch
from multiprocessing import Manager

# Assumes Worker, Task, get_loss, genomes and N_PROCESSES are defined
# elsewhere in the project; `manager` and `task_queue` were not shown in the
# original snippet but are required for the code below to run.
manager = Manager()
task_queue = manager.Queue()
exit_queue = manager.Queue()
exception_queue = manager.Queue()
results_queue = manager.Queue()


# Spin up a fixed pool of worker processes that consume tasks from task_queue.
workers = []
for _ in range(N_PROCESSES):
    worker = Worker(task_queue=task_queue,
                    exit_queue=exit_queue,
                    exception_queue=exception_queue,
                    results_queue=results_queue)
    worker.start()
    workers.append(worker)

for genome in genomes:
    # Dummy input tensors stand in for the real dataset here (the original
    # snippet had the dataset-based x/y commented out).
    task_queue.put(Task(genome=genome.copy(), dataset=None,
                        x=torch.zeros((2, 784)).float(),
                        y=torch.zeros(2).long(),
                        loss=get_loss('classification'),
                        beta_type='other',
                        problem_type='classification',
                        batch_size=100000,
                        n_samples=20,
                        is_gpu=False))

# Busy-wait until the workers have drained the task queue.
while not task_queue.empty():
    print('reading results from workers')
    print(task_queue.qsize())
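
The Worker class itself is not part of this example. A minimal sketch of the consumer loop implied by the four queues above, assuming multiprocessing.Process semantics; every detail beyond the queue names is an assumption:

import queue
from multiprocessing import Process

class Worker(Process):
    # Minimal sketch of the consumer loop implied by the queues above; the
    # project's actual Worker is not shown in this example and may differ.
    def __init__(self, task_queue, exit_queue, exception_queue, results_queue):
        super().__init__()
        self.task_queue = task_queue
        self.exit_queue = exit_queue
        self.exception_queue = exception_queue
        self.results_queue = results_queue

    def run(self):
        # Keep consuming tasks until an exit signal is posted.
        while self.exit_queue.empty():
            try:
                task = self.task_queue.get(timeout=1)
            except queue.Empty:
                continue
            try:
                self.results_queue.put(self.evaluate(task))
            except Exception as e:
                self.exception_queue.put(e)

    def evaluate(self, task):
        # Placeholder; a real worker would run the stochastic evaluation.
        return 0.0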