Example #1
import math

import torch

# Genome, ComplexStochasticNetwork, _prepare_batch_data, _process_output_data and
# get_beta are provided by the surrounding project modules.


def _evaluate_genome_parallel(genome: Genome,
                              dataset,  # assumed parameter: supplies the x / x_train / x_test splits used below
                              loss,
                              beta_type,
                              problem_type,
                              is_testing,
                              batch_size=10000,
                              n_samples=10,
                              is_gpu=False):
    '''
    Calculates: KL-Div(q(w)||p(w|D))
    Uses the VariationalInferenceLoss class (not the alternative)
    '''

    # setup network
    network = ComplexStochasticNetwork(genome=genome)
    if is_gpu:
        network.cuda()
    m = math.ceil(len(dataset.x) / batch_size)
    network.eval()

    # calculate Data log-likelihood (p(y*|x*,D))
    if is_testing:
        x_batch, y_batch = dataset.x_test, dataset.y_test
    else:
        x_batch, y_batch = dataset.x_train, dataset.y_train
    x_batch, y_batch = _prepare_batch_data(x_batch=x_batch,
                                           y_batch=y_batch,
                                           problem_type=problem_type,
                                           is_gpu=is_gpu,
                                           n_input=genome.n_input,
                                           n_output=genome.n_output,
                                           n_samples=n_samples)

    with torch.no_grad():
        # forward pass
        output, kl_qw_pw = network(x_batch)
        output, _, y_batch = _process_output_data(output,
                                                  y_true=y_batch,
                                                  n_samples=n_samples,
                                                  n_output=genome.n_output,
                                                  problem_type=problem_type,
                                                  is_pass=True)
        beta = get_beta(beta_type=beta_type,
                        m=m,
                        batch_idx=0,
                        epoch=1,
                        n_epochs=1)
        kl_posterior = loss(y_pred=output,
                            y_true=y_batch,
                            kl_qw_pw=kl_qw_pw,
                            beta=beta)

    loss_value = kl_posterior.item()
    return loss_value
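For orientation, a minimal sketch of a call site for this variant follows; the dataset object, the way the VariationalInferenceLoss is constructed, and the 'Standard' beta_type string are illustrative assumptions rather than details taken from the source.

# Hypothetical call site; regression_dataset, vi_loss and the beta_type value are placeholders.
vi_loss = VariationalInferenceLoss()
test_loss = _evaluate_genome_parallel(genome=genome,
                                      dataset=regression_dataset,  # exposes x, x_train/y_train, x_test/y_test
                                      loss=vi_loss,
                                      beta_type='Standard',        # whichever schedule get_beta supports
                                      problem_type='regression',
                                      is_testing=True,
                                      n_samples=10,
                                      is_gpu=torch.cuda.is_available())
print(f'held-out loss: {test_loss:.4f}')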
Example #2
    def run(self) -> None:
        '''
        Calculates: KL-Div(q(w)||p(w|D))
        Uses the VariationalInferenceLoss class (not the alternative)
        '''
        kl_posterior = 0

        kl_qw_pw = compute_kl_qw_pw(genome=self.genome)

        # setup network
        network = ComplexStochasticNetwork(genome=self.genome)
        if self.is_gpu:
            # assumed: _prepare_batch_data below places the batch on the GPU,
            # so the network has to live there as well
            network.cuda()

        m = math.ceil(len(self.x) / self.batch_size)

        network.eval()

        # calculate Data log-likelihood (p(y*|x*,D))
        x_batch, y_batch = self.x, self.y
        x_batch, y_batch = _prepare_batch_data(x_batch=x_batch,
                                               y_batch=y_batch,
                                               problem_type=self.problem_type,
                                               is_gpu=self.is_gpu,
                                               n_input=self.genome.n_input,
                                               n_output=self.genome.n_output,
                                               n_samples=self.n_samples)
        print('running forward pass')
        with torch.no_grad():
            # forward pass
            output, _ = network(x_batch)
            print('forward pass completed')
            beta = get_beta(beta_type=self.beta_type,
                            m=m,
                            batch_idx=0,
                            epoch=1,
                            n_epochs=1)
            kl_posterior += self.loss(y_pred=output,
                                      y_true=y_batch,
                                      kl_qw_pw=kl_qw_pw,
                                      beta=beta)

        loss_value = kl_posterior.item()
        self.result = (self.genome.key, loss_value)
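The class that owns this run() method is not shown; the skeleton below is a hedged reconstruction that only lists the attributes run() reads and writes (the class name and constructor defaults are assumptions).

# Hypothetical skeleton of the enclosing worker; attribute names mirror the usages in
# run() above, everything else (class name, defaults) is assumed.
class GenomeEvaluationWorker:
    def __init__(self, genome, x, y, loss, beta_type, problem_type,
                 batch_size=10000, n_samples=10, is_gpu=False):
        self.genome = genome
        self.x, self.y = x, y
        self.loss = loss
        self.beta_type = beta_type
        self.problem_type = problem_type
        self.batch_size = batch_size
        self.n_samples = n_samples
        self.is_gpu = is_gpu
        self.result = None  # set by run() to (genome.key, loss_value)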
Example #3
def evaluate_genome(genome: Genome, loss, beta_type, problem_type,
                    batch_size=10000, n_samples=10, is_gpu=False):
    '''
    Calculates: KL-Div(q(w)||p(w|D))
    Uses the VariationalInferenceLoss class (not the alternative)
    '''
    # build the dataset referenced below (assumed: get_dataset/generate_data supply
    # the evaluation data for this genome's configured dataset)
    dataset = get_dataset(genome.genome_config.dataset, testing=True)
    dataset.generate_data()

    kl_posterior = 0

    kl_qw_pw = compute_kl_qw_pw(genome=genome)

    # setup network
    network = ComplexStochasticNetwork(genome=genome)
    if is_gpu:
        network.cuda()
    m = math.ceil(len(dataset.x) / batch_size)

    network.eval()

    # calculate Data log-likelihood (p(y*|x*,D))
    x_batch, y_batch = dataset.x, dataset.y
    x_batch, y_batch = _prepare_batch_data(x_batch=x_batch,
                                           y_batch=y_batch,
                                           problem_type=problem_type,
                                           is_gpu=is_gpu,
                                           n_input=genome.n_input,
                                           n_output=genome.n_output,
                                           n_samples=n_samples)

    if is_gpu:
        x_batch, y_batch = x_batch.cuda(), y_batch.cuda()

    with torch.no_grad():
        # forward pass
        output, _ = network(x_batch)
        beta = get_beta(beta_type=beta_type, m=m, batch_idx=0, epoch=1, n_epochs=1)
        kl_posterior += loss(y_pred=output, y_true=y_batch, kl_qw_pw=kl_qw_pw, beta=beta)

    loss_value = kl_posterior.item()
    return loss_value
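Because this variant resolves its own dataset through get_dataset, it can be mapped directly over a population of genomes; the loop below is a hedged sketch in which the population dict, the loss construction, and the beta_type string are assumptions.

# Hypothetical evaluation loop; `population` is assumed to map genome keys to Genome objects.
vi_loss = VariationalInferenceLoss()
losses = {}
for key, genome in population.items():
    losses[key] = evaluate_genome(genome=genome,
                                  loss=vi_loss,
                                  beta_type='Standard',  # placeholder schedule name
                                  problem_type='classification',
                                  n_samples=10,
                                  is_gpu=False)
best_key = min(losses, key=losses.get)  # lower loss is better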
Example #4
def evaluate_genome_with_dataloader(genome: Genome,
                                    data_loader,
                                    loss,
                                    beta_type,
                                    problem_type,
                                    batch_size=10000,
                                    n_samples=10,
                                    is_gpu=False,
                                    return_all=False):
    '''
    Calculates: KL-Div(q(w)||p(w|D))
    Uses the VariationalInferenceLoss class (not the alternative)
    '''
    kl_posterior = 0

    kl_qw_pw = compute_kl_qw_pw(genome=genome)

    # setup network
    network = ComplexStochasticNetwork(genome=genome)
    if is_gpu:
        network.cuda()
    # assuming data_loader is a torch DataLoader, len() already gives the number of
    # minibatches, which is what get_beta expects for m
    m = len(data_loader)
    network.eval()

    chunks_x = []
    chunks_y_pred = []
    chunks_y_true = []

    # calculate Data log-likelihood (p(y*|x*,D))
    for batch_idx, (x_batch, y_batch) in enumerate(data_loader):
        x_batch, y_batch = _prepare_batch_data(x_batch=x_batch,
                                               y_batch=y_batch,
                                               problem_type=problem_type,
                                               is_gpu=is_gpu,
                                               n_input=genome.n_input,
                                               n_output=genome.n_output,
                                               n_samples=n_samples)

        with torch.no_grad():
            # forward pass
            output, _ = network(x_batch)
            beta = get_beta(beta_type=beta_type,
                            m=m,
                            batch_idx=batch_idx,
                            epoch=1,
                            n_epochs=1)
            kl_posterior += loss(y_pred=output,
                                 y_true=y_batch,
                                 kl_qw_pw=kl_qw_pw,
                                 beta=beta)
            if return_all:
                chunks_x.append(x_batch)
                chunks_y_pred.append(output)
                chunks_y_true.append(y_batch)

    loss_value = kl_posterior.item()

    if return_all:
        x = torch.cat(chunks_x, dim=0)
        y_pred = torch.cat(chunks_y_pred, dim=0)
        y_true = torch.cat(chunks_y_true, dim=0)
        return x, y_true, y_pred, loss_value
    return loss_value
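A hedged sketch of driving this variant with a standard PyTorch DataLoader and collecting the per-sample predictions via return_all=True; x_data, y_data and the loss construction are assumptions.

# Hypothetical call with a torch DataLoader; x_data/y_data and vi_loss are placeholders.
from torch.utils.data import DataLoader, TensorDataset

loader = DataLoader(TensorDataset(x_data, y_data), batch_size=10000, shuffle=False)
vi_loss = VariationalInferenceLoss()
x, y_true, y_pred, loss_value = evaluate_genome_with_dataloader(genome=genome,
                                                                data_loader=loader,
                                                                loss=vi_loss,
                                                                beta_type='Standard',  # placeholder
                                                                problem_type='regression',
                                                                n_samples=10,
                                                                return_all=True)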