Example 1
def map_comparisons(sigmas=None):
    """Creates a plot comparing various choices of value maps for the crowd analysis case."""
    if sigmas is None:
        sigmas = [0.4, 1, 2, 4, 6, 8]
    sns.set_style('darkgrid')
    figure, axes = plt.subplots(dpi=dpi)
    x_axis = np.arange(0, 4, 0.001)
    normals = []
    for sigma in sigmas:
        normal_ = norm(0, sigma)
        normals.append(normal_)
    mixture = MixtureModel(normals)
    axes.plot(x_axis,
              mixture.pdf(x_axis) / mixture.pdf(x_axis).max(),
              color=sns.color_palette()[0])
    axes.plot(x_axis,
              normals[0].pdf(x_axis) / (normals[0].pdf(x_axis).max()),
              color=sns.color_palette()[1])

    axes.plot(x_axis,
              normals[-1].pdf(x_axis) / (normals[-1].pdf(x_axis).max()),
              color=sns.color_palette()[1])
    axes.plot(x_axis, (1 / (x_axis + 1)) / (1 / (x_axis + 1)).max(),
              color=sns.color_palette()[2])
    axes.set_ylabel('Map value')
    axes.set_xlabel('Distance from head position')
    matplotlib2tikz.save(os.path.join('latex', 'mapcomparisons.tex'))
    plt.show()
    plt.close(figure)
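
All of these examples lean on a project-level MixtureModel class (plus module globals such as dpi and the gpu device handle) that the snippets never show. The following is a minimal sketch of what such a class could look like, assuming it wraps frozen scipy.stats distributions with equal weights; the project's actual implementation may differ.

import numpy as np
from scipy.stats import rv_continuous


class MixtureModel(rv_continuous):
    """Equal-weight mixture of frozen scipy.stats distributions (sketch)."""

    def __init__(self, submodels, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.submodels = submodels

    def _pdf(self, x):
        # The mixture density is the average of the component densities.
        return sum(submodel.pdf(x) for submodel in self.submodels) / len(self.submodels)

    def rvs(self, size):
        # Sample by picking a component uniformly at random for each draw.
        choices = np.random.randint(len(self.submodels), size=size)
        samples = np.stack([submodel.rvs(size=size) for submodel in self.submodels])
        return np.take_along_axis(samples, choices[np.newaxis], axis=0)[0]
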
Example 2
def generate_double_a2_a3_a4_coefficients(number_of_examples):
    """Generates coefficients with a double uniform distribution for a2, a3, and a4."""
    # scipy's uniform(loc, scale) spans [loc, loc + scale]: components on [-2, -1] and [1, 2].
    a2_distribution = MixtureModel([uniform(-2, 1), uniform(1, 1)])
    a2 = a2_distribution.rvs(
        size=[number_of_examples, irrelevant_data_multiplier, 1]).astype(
            dtype=np.float32)
    a3_distribution = MixtureModel([uniform(-2, 1), uniform(1, 1)])
    a3 = a3_distribution.rvs(
        size=[number_of_examples, irrelevant_data_multiplier, 1]).astype(
            dtype=np.float32)
    a4_distribution = MixtureModel([uniform(-2, 1), uniform(1, 1)])
    a4 = a4_distribution.rvs(
        size=[number_of_examples, irrelevant_data_multiplier, 1]).astype(
            dtype=np.float32)
    return a2, a3, a4
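
A quick usage sketch for the function above. irrelevant_data_multiplier is a module-level constant in the original project; the value 5 below is only an assumed placeholder for illustration.

irrelevant_data_multiplier = 5  # Assumed placeholder value, not taken from the project.
a2, a3, a4 = generate_double_a2_a3_a4_coefficients(number_of_examples=100)
print(a2.shape)  # (100, 5, 1): examples x irrelevant_data_multiplier x 1
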
Example 3
 def validation_summaries(self, step):
     """Prepares the summaries that should be run for the given application."""
     settings = self.settings
     dnn_summary_writer = self.dnn_summary_writer
     gan_summary_writer = self.gan_summary_writer
     DNN = self.DNN
     D = self.D
     G = self.G
     train_dataset = self.train_dataset
     validation_dataset = self.validation_dataset
     unlabeled_dataset = self.unlabeled_dataset
     dnn_predicted_train_labels = DNN(torch.tensor(
         train_dataset.examples.astype(np.float32)).to(gpu))[0].to('cpu').detach().numpy()
     dnn_train_label_errors = np.mean(np.abs(dnn_predicted_train_labels - train_dataset.labels))
     dnn_summary_writer.add_scalar('2 Train Error/MAE', dnn_train_label_errors)
     dnn_predicted_validation_labels = DNN(torch.tensor(
         validation_dataset.examples.astype(np.float32)).to(gpu))[0].to('cpu').detach().numpy()
     dnn_validation_label_errors = np.mean(np.abs(dnn_predicted_validation_labels - validation_dataset.labels))
     dnn_summary_writer.add_scalar('1 Validation Error/MAE', dnn_validation_label_errors)
     predicted_train_labels = D(torch.tensor(
         train_dataset.examples.astype(np.float32)).to(gpu))[0].to('cpu').detach().numpy()
     gan_train_label_errors = np.mean(np.abs(predicted_train_labels - train_dataset.labels))
     gan_summary_writer.add_scalar('2 Train Error/MAE', gan_train_label_errors)
     predicted_validation_labels = D(torch.tensor(
         validation_dataset.examples.astype(np.float32)).to(gpu))[0].to('cpu').detach().numpy()
     gan_validation_label_errors = np.mean(np.abs(predicted_validation_labels - validation_dataset.labels))
     gan_summary_writer.add_scalar('1 Validation Error/MAE', gan_validation_label_errors)
     gan_summary_writer.add_scalar('1 Validation Error/Ratio MAE GAN DNN',
                                   gan_validation_label_errors / dnn_validation_label_errors)
     z = torch.tensor(MixtureModel([norm(-settings.mean_offset, 1), norm(settings.mean_offset, 1)]).rvs(
         size=[settings.batch_size, G.input_size]).astype(np.float32)).to(gpu)
     fake_examples = G(z, add_noise=False)
     fake_examples_array = fake_examples.to('cpu').detach().numpy()
     fake_predicted_labels = D(fake_examples)[0]
     fake_predicted_labels_array = fake_predicted_labels.to('cpu').detach().numpy()
     unlabeled_labels_array = unlabeled_dataset.labels[:settings.validation_dataset_size]
     label_wasserstein_distance = wasserstein_distance(fake_predicted_labels_array, unlabeled_labels_array)
     gan_summary_writer.add_scalar('Generator/Predicted Label Wasserstein', label_wasserstein_distance)
     unlabeled_examples_array = unlabeled_dataset.examples[:settings.validation_dataset_size]
     unlabeled_examples = torch.tensor(unlabeled_examples_array.astype(np.float32)).to(gpu)
     unlabeled_predictions = D(unlabeled_examples)[0]
     if dnn_summary_writer.step % settings.summary_step_period == 0:
         unlabeled_predictions_array = unlabeled_predictions.to('cpu').detach().numpy()
         validation_predictions_array = predicted_validation_labels
         train_predictions_array = predicted_train_labels
         dnn_validation_predictions_array = dnn_predicted_validation_labels
         dnn_train_predictions_array = dnn_predicted_train_labels
         distribution_image = generate_display_frame(fake_examples_array, unlabeled_predictions_array,
                                                     validation_predictions_array, dnn_validation_predictions_array,
                                                     train_predictions_array, dnn_train_predictions_array, step)
         distribution_image = standard_image_format_to_tensorboard_image_format(distribution_image)
         gan_summary_writer.add_image('Distributions', distribution_image)
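
The distinctive step in this example is drawing the generator's latent vector z from a two-peak Gaussian mixture instead of a standard normal. Stripped of the surrounding class, and using the MixtureModel sketch from above, the pattern reduces to the following (the batch size, input size, and mean_offset value are arbitrary stand-ins here).

import numpy as np
import torch
from scipy.stats import norm

mean_offset = 3  # Assumed example value; the project reads it from settings.
z_standard = torch.randn(16, 10)  # Baseline: standard normal latents.
z_offset = torch.tensor(
    MixtureModel([norm(-mean_offset, 1), norm(mean_offset, 1)])
    .rvs(size=[16, 10]).astype(np.float32))  # Two peaks at +/- mean_offset.
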
Example 4
    def validation_summaries(self, step):
        """Prepares the summaries that should be run for the given application."""
        settings = self.settings
        dnn_summary_writer = self.dnn_summary_writer
        gan_summary_writer = self.gan_summary_writer
        DNN = self.DNN
        D = self.D
        G = self.G
        train_dataset = self.train_dataset
        validation_dataset = self.validation_dataset

        # DNN training evaluation.
        self.evaluation_epoch(settings, DNN, train_dataset, dnn_summary_writer, '2 Train Error')
        # DNN validation evaluation.
        dnn_validation_count_mae = self.evaluation_epoch(settings, DNN, validation_dataset, dnn_summary_writer,
                                                         '1 Validation Error')
        # GAN training evaluation.
        self.evaluation_epoch(settings, D, train_dataset, gan_summary_writer, '2 Train Error')
        # GAN validation evaluation.
        self.evaluation_epoch(settings, D, validation_dataset, gan_summary_writer, '1 Validation Error',
                              comparison_value=dnn_validation_count_mae)
        # Real images.
        train_iterator = iter(DataLoader(train_dataset, batch_size=settings.batch_size))
        examples, densities = next(train_iterator)
        predicted_densities, _ = D(examples)
        real_comparison_image = self.create_crowd_images_comparison_grid(examples, densities,
                                                                         predicted_densities)
        gan_summary_writer.add_image('Real', real_comparison_image)
        dnn_predicted_densities, _ = DNN(examples)
        dnn_real_comparison_image = self.create_crowd_images_comparison_grid(examples, densities,
                                                                             dnn_predicted_densities)
        dnn_summary_writer.add_image('Real', dnn_real_comparison_image)
        validation_iterator = iter(DataLoader(validation_dataset, batch_size=settings.batch_size))
        examples, densities = next(validation_iterator)
        predicted_densities, _ = D(examples)
        validation_comparison_image = self.create_crowd_images_comparison_grid(examples, densities,
                                                                               predicted_densities)
        gan_summary_writer.add_image('Validation', validation_comparison_image)
        dnn_predicted_densities, _ = DNN(examples)
        dnn_validation_comparison_image = self.create_crowd_images_comparison_grid(examples, densities,
                                                                                   dnn_predicted_densities)
        dnn_summary_writer.add_image('Validation', dnn_validation_comparison_image)
        # Generated images.
        z = torch.randn(settings.batch_size, G.input_size)
        fake_examples = G(z)
        fake_images_image = torchvision.utils.make_grid(to_image_range(fake_examples.data[:9]), nrow=3)
        gan_summary_writer.add_image('Fake/Standard', fake_images_image.numpy().transpose([1, 2, 0]).astype(np.uint8))
        z = torch.from_numpy(MixtureModel([norm(-settings.mean_offset, 1), norm(settings.mean_offset, 1)]
                                         ).rvs(size=[settings.batch_size, G.input_size]).astype(np.float32))
        fake_examples = G(z)
        fake_images_image = torchvision.utils.make_grid(to_image_range(fake_examples.data[:9]), nrow=3)
        gan_summary_writer.add_image('Fake/Offset', fake_images_image.numpy().transpose([1, 2, 0]).astype(np.uint8))
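
Examples 4, 8, and 9 delegate the MAE bookkeeping to an evaluation_epoch method that is never shown. A plausible reconstruction, inferred from the inline logic of Example 3 rather than taken from the project, looks like this.

def evaluation_epoch(self, settings, network, dataset, summary_writer, summary_name,
                     comparison_value=None, shuffle=True):
    # Hypothetical reconstruction: log the mean absolute error of the network's
    # predictions over the dataset, mirroring the inline code in Example 3.
    # shuffle is accepted for interface parity; this sketch scores the whole set at once.
    examples = torch.tensor(dataset.examples.astype(np.float32)).to(gpu)
    predicted_labels = network(examples)[0].to('cpu').detach().numpy()
    mae = np.mean(np.abs(predicted_labels - dataset.labels))
    summary_writer.add_scalar('{}/MAE'.format(summary_name), mae)
    if comparison_value is not None:
        summary_writer.add_scalar('{}/Ratio MAE GAN DNN'.format(summary_name),
                                  mae / comparison_value)
    return mae
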
Example 5
def generate_single_peak_double_peak(mean_offset=3):
    """Creates a display of a single peak normal distribution surrounded by a double peak one."""
    sns.set_style('darkgrid')
    figure, axes = plt.subplots(dpi=dpi)
    x_axis = np.arange(-5, 5, 0.001)
    axes.plot(x_axis, norm(0, 1).pdf(x_axis), color=sns.color_palette()[0])
    axes.plot(x_axis,
              MixtureModel([norm(-mean_offset, 1),
                            norm(mean_offset, 1)]).pdf(x_axis),
              color=sns.color_palette()[1],
              label='HHZ 1')
    matplotlib2tikz.save(os.path.join('latex', 'single_peak_double_peak.tex'))
    plt.show()
    plt.close(figure)
Example 6
def generate_display_frame(fake_examples, unlabeled_predictions, test_predictions, dnn_test_predictions,
                           train_predictions, dnn_train_predictions, step):
    """Generates an image of the distribution predictions during training."""
    sns.set_style('darkgrid')
    bandwidth = 0.1
    fake_a3 = np.transpose(np.polyfit(np.linspace(-1, 1, num=10), np.transpose(fake_examples[:, :10]), 3))
    x_axis_limits = [-4, 4]
    x_axis = np.arange(*x_axis_limits, 0.001)
    figure, axes = plt.subplots(dpi=dpi)
    axes.text(0.98, 0.98, 'Step: {}'.format(step), horizontalalignment='right', verticalalignment='top',
              family='monospace', fontsize=10, transform=axes.transAxes)
    axes.plot(x_axis, MixtureModel([uniform(-2, 1), uniform(1, 1)]).pdf(x_axis), color=sns.color_palette()[0],
              label='Real Data Distribution')
    try:
        axes = sns.kdeplot(fake_a3[0, :], ax=axes, color=sns.color_palette()[4], bw=bandwidth,
                           label='Fake Data Distribution')
    except ValueError:
        pass
    axes = sns.kdeplot(unlabeled_predictions, ax=axes, color=sns.color_palette()[1], bw=bandwidth,
                       label='Unlabeled Predictions')
    axes = sns.kdeplot(test_predictions, ax=axes, color=sns.color_palette()[2], bw=bandwidth,
                       label='GAN Test Predictions')
    axes = sns.kdeplot(train_predictions, ax=axes, color=sns.color_palette()[2], linewidth=0.5, bw=bandwidth,
                       label='GAN Train Predictions')
    axes = sns.kdeplot(dnn_test_predictions, ax=axes, color=sns.color_palette()[3], bw=bandwidth,
                       label='DNN Test Predictions')
    axes = sns.kdeplot(dnn_train_predictions, ax=axes, color=sns.color_palette()[3], linewidth=0.5, bw=bandwidth,
                       label='DNN Train Predictions')
    axes.set_xlim(*x_axis_limits)
    axes.set_ylim(0, 1)
    axes.legend(loc='upper left')
    figure.tight_layout(pad=0)
    figure.canvas.draw()
    image_array = np.frombuffer(figure.canvas.tostring_rgb(), dtype=np.uint8)
    image_array = image_array.reshape(figure.canvas.get_width_height()[::-1] + (3,))
    plt.close(figure)
    return image_array
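
Example 3 passes the array returned here through standard_image_format_to_tensorboard_image_format before logging it. That helper is not shown; assuming TensorBoard's default CHW image layout, it could be as simple as the sketch below.

def standard_image_format_to_tensorboard_image_format(image_array):
    # Hypothetical helper: Matplotlib produces HWC arrays, while TensorBoard's
    # add_image defaults to CHW, so move the channel axis to the front.
    return image_array.transpose([2, 0, 1])
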
Example 7
 def gan_training_step(self, labeled_examples, labels, unlabeled_examples,
                       step):
     """Runs an individual round of GAN training."""
     # Labeled.
     self.gan_summary_writer.step = step
     self.d_optimizer.zero_grad()
     loss = torch.tensor(0, dtype=torch.float)
     labeled_loss = self.labeled_loss_calculation(labeled_examples, labels)
     loss += labeled_loss
     # Unlabeled.
     self.D.apply(disable_batch_norm_updates)  # Make sure only labeled data is used for batch norm statistics.
     unlabeled_loss = self.unlabeled_loss_calculation(unlabeled_examples)
     loss += unlabeled_loss
     # Feature regularization loss.
     if self.settings.regularize_feature_norm:
         feature_regularization_loss = torch.abs(
             self.unlabeled_features.mean(0).norm() - 1)
         loss += feature_regularization_loss
     # Fake.
     z = torch.tensor(
         MixtureModel([
             norm(-self.settings.mean_offset, 1),
             norm(self.settings.mean_offset, 1)
         ]).rvs(
             size=[unlabeled_examples.size(0), self.G.input_size]).astype(
                 np.float32)).to(gpu)
     fake_examples = self.G(z)
     fake_loss = self.fake_loss_calculation(fake_examples)
     loss += fake_loss
     # Gradient penalty.
     alpha = torch.rand(2, device=gpu)
     alpha = alpha / alpha.sum(0)  # Random convex weights shared across the batch.
     interpolates = (
         alpha[0] * unlabeled_examples.detach().requires_grad_() +
         alpha[1] * fake_examples.detach().requires_grad_())
     interpolates_loss = self.interpolate_loss_calculation(interpolates)
     gradients = torch.autograd.grad(outputs=interpolates_loss,
                                     inputs=interpolates,
                                     grad_outputs=torch.ones_like(
                                         interpolates_loss, device=gpu),
                                     create_graph=True,
                                     only_inputs=True)[0]
     gradient_penalty = (
         (gradients.view(unlabeled_examples.size(0), -1).norm(dim=1) - 1)**
         2).mean() * self.settings.gradient_penalty_multiplier
     # Discriminator update.
     loss += gradient_penalty
     loss.backward()
     self.d_optimizer.step()
     # Generator.
     if step % self.settings.generator_training_step_period == 0:
         self.g_optimizer.zero_grad()
         z = torch.randn(unlabeled_examples.size(0),
                         self.G.input_size).to(gpu)
         fake_examples = self.G(z)
         generator_loss = self.generator_loss_calculation(
             fake_examples, unlabeled_examples)
         generator_loss.backward()
         self.g_optimizer.step()
         if self.gan_summary_writer.is_summary_step():
             self.gan_summary_writer.add_scalar('Generator/Loss',
                                                generator_loss.item())
     # Summaries.
     if self.gan_summary_writer.is_summary_step():
         self.gan_summary_writer.add_scalar('Discriminator/Labeled Loss',
                                            labeled_loss.item())
         self.gan_summary_writer.add_scalar('Discriminator/Unlabeled Loss',
                                            unlabeled_loss.item())
         self.gan_summary_writer.add_scalar('Discriminator/Fake Loss',
                                            fake_loss.item())
         if self.labeled_features is not None:
             self.gan_summary_writer.add_scalar(
                 'Feature Norm/Labeled',
                 self.labeled_features.mean(0).norm().item())
             self.gan_summary_writer.add_scalar(
                 'Feature Norm/Unlabeled',
                 self.unlabeled_features.mean(0).norm().item())
     self.D.apply(enable_batch_norm_updates)  # Re-enable batch norm updates now that the discriminator step is done.
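
disable_batch_norm_updates and enable_batch_norm_updates are applied module-by-module through Module.apply. A minimal sketch of what they might do, assuming they simply toggle batch norm layers between train and eval mode so the running statistics freeze and unfreeze:

import torch


def disable_batch_norm_updates(module):
    # Freeze running mean/variance by putting batch norm layers in eval mode.
    if isinstance(module, torch.nn.modules.batchnorm._BatchNorm):
        module.eval()


def enable_batch_norm_updates(module):
    # Resume updating the running statistics.
    if isinstance(module, torch.nn.modules.batchnorm._BatchNorm):
        module.train()
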
Example 8
 def validation_summaries(self, step):
     """Prepares the summaries that should be run for the given application."""
     settings = self.settings
     dnn_summary_writer = self.dnn_summary_writer
     gan_summary_writer = self.gan_summary_writer
     DNN = self.DNN
     D = self.D
     G = self.G
     train_dataset = self.train_dataset
     validation_dataset = self.validation_dataset
     # DNN training evaluation.
     self.evaluation_epoch(settings, DNN, train_dataset, dnn_summary_writer,
                           '2 Train Error')
     # DNN validation evaluation.
     dnn_validation_mae = self.evaluation_epoch(settings, DNN,
                                                validation_dataset,
                                                dnn_summary_writer,
                                                '1 Validation Error')
     # GAN training evaluation.
     self.evaluation_epoch(settings, D, train_dataset, gan_summary_writer,
                           '2 Train Error')
     # GAN validation evaluation.
     self.evaluation_epoch(settings,
                           D,
                           validation_dataset,
                           gan_summary_writer,
                           '1 Validation Error',
                           comparison_value=dnn_validation_mae)
     # Real images.
     train_dataset_loader = DataLoader(train_dataset,
                                       batch_size=settings.batch_size,
                                       shuffle=True)
     train_iterator = iter(train_dataset_loader)
     examples, _ = next(train_iterator)
     images_image = torchvision.utils.make_grid(to_image_range(examples[:9]), nrow=3)
     gan_summary_writer.add_image(
         'Real',
         images_image.numpy().transpose([1, 2, 0]).astype(np.uint8),
     )
     # Generated images.
     z = torch.randn(settings.batch_size, G.input_size).to(gpu)
     fake_examples = G(z).to('cpu')
     fake_images_image = torchvision.utils.make_grid(to_image_range(fake_examples.data[:9]), nrow=3)
     gan_summary_writer.add_image(
         'Fake/Standard',
         fake_images_image.numpy().transpose([1, 2, 0]).astype(np.uint8),
     )
     z = torch.from_numpy(
         MixtureModel([
             norm(-settings.mean_offset, 1),
             norm(settings.mean_offset, 1)
         ]).rvs(size=[settings.batch_size, G.input_size]).astype(
             np.float32)).to(gpu)
     fake_examples = G(z).to('cpu')
     fake_images_image = torchvision.utils.make_grid(to_image_range(fake_examples.data[:9]), nrow=3)
     gan_summary_writer.add_image(
         'Fake/Offset',
         fake_images_image.numpy().transpose([1, 2, 0]).astype(np.uint8),
     )
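
to_image_range is another unshown project helper. Given that Example 9 instead relies on make_grid with normalize=True and range=(-1, 1), a reasonable guess is that it rescales generator outputs from [-1, 1] to the [0, 255] image range, as in this sketch.

def to_image_range(tensor):
    # Hypothetical helper: map values from [-1, 1] to [0, 255] for display.
    return (tensor + 1) * 127.5
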
Example 9
    def validation_summaries(self, step):
        """Prepares the summaries that should be run for the given application."""
        settings = self.settings
        dnn_summary_writer = self.dnn_summary_writer
        gan_summary_writer = self.gan_summary_writer
        DNN = self.DNN
        D = self.D
        G = self.G
        train_dataset = self.train_dataset
        validation_dataset = self.validation_dataset

        # DNN training evaluation.
        self.evaluation_epoch(settings,
                              DNN,
                              train_dataset,
                              dnn_summary_writer,
                              '2 Train Error',
                              shuffle=False)
        # DNN validation evaluation.
        dnn_validation_count_mae = self.evaluation_epoch(settings,
                                                         DNN,
                                                         validation_dataset,
                                                         dnn_summary_writer,
                                                         '1 Validation Error',
                                                         shuffle=False)
        # GAN training evaluation.
        self.evaluation_epoch(settings,
                              D,
                              train_dataset,
                              gan_summary_writer,
                              '2 Train Error',
                              shuffle=False)
        # GAN validation evaluation.
        self.evaluation_epoch(settings,
                              D,
                              validation_dataset,
                              gan_summary_writer,
                              '1 Validation Error',
                              comparison_value=dnn_validation_count_mae,
                              shuffle=False)
        # Real images.
        train_iterator = iter(
            DataLoader(train_dataset, batch_size=settings.batch_size))
        images, densities, maps = next(train_iterator)
        predicted_densities, _, predicted_maps = D(images.to(gpu))
        real_comparison_image = self.create_map_comparison_image(
            images, maps, predicted_maps.to('cpu'))
        gan_summary_writer.add_image('Real', real_comparison_image)
        dnn_predicted_densities, _, predicted_maps = DNN(images.to(gpu))
        dnn_real_comparison_image = self.create_map_comparison_image(
            images, maps, predicted_maps.to('cpu'))
        dnn_summary_writer.add_image('Real', dnn_real_comparison_image)
        validation_iterator = iter(
            DataLoader(validation_dataset, batch_size=settings.batch_size))
        images, densities, maps = next(validation_iterator)
        predicted_densities, _, predicted_maps = D(images.to(gpu))
        validation_comparison_image = self.create_map_comparison_image(
            images, maps, predicted_maps.to('cpu'))
        gan_summary_writer.add_image('Validation', validation_comparison_image)
        dnn_predicted_densities, _, predicted_maps = DNN(images.to(gpu))
        dnn_validation_comparison_image = self.create_map_comparison_image(
            images, maps, predicted_maps.to('cpu'))
        dnn_summary_writer.add_image('Validation',
                                     dnn_validation_comparison_image)
        # Generated images.
        z = torch.randn(settings.batch_size, G.input_size)
        fake_examples = G(z.to(gpu)).to('cpu')
        fake_images_image = torchvision.utils.make_grid(fake_examples.data[:9],
                                                        normalize=True,
                                                        range=(-1, 1),
                                                        nrow=3)
        gan_summary_writer.add_image('Fake/Standard',
                                     fake_images_image.numpy())
        z = torch.as_tensor(
            MixtureModel([
                norm(-settings.mean_offset, 1),
                norm(settings.mean_offset, 1)
            ]).rvs(size=[settings.batch_size, G.input_size]).astype(
                np.float32))
        fake_examples = G(z.to(gpu)).to('cpu')
        fake_images_image = torchvision.utils.make_grid(fake_examples.data[:9],
                                                        normalize=True,
                                                        range=(-1, 1),
                                                        nrow=3)
        gan_summary_writer.add_image('Fake/Offset', fake_images_image.numpy())

        self.test_summaries()
Example 10
 def gan_training_step(self, labeled_examples, labels, unlabeled_examples,
                       step):
     """Runs an individual round of GAN training."""
     # Labeled.
     self.D.apply(disable_batch_norm_updates)  # Freeze batch norm statistics for the discriminator update.
     self.gan_summary_writer.step = step
     self.d_optimizer.zero_grad()
     labeled_loss = self.labeled_loss_calculation(labeled_examples, labels)
     labeled_loss.backward()
     # Unlabeled.
     # self.D.apply(disable_batch_norm_updates)  # Make sure only labeled data is used for batch norm statistics
     unlabeled_loss = self.unlabeled_loss_calculation(
         labeled_examples, unlabeled_examples)
     unlabeled_loss.backward()
     # Fake.
     z = torch.tensor(
         MixtureModel([
             norm(-self.settings.mean_offset, 1),
             norm(self.settings.mean_offset, 1)
         ]).rvs(
             size=[unlabeled_examples.size(0), self.G.input_size]).astype(
                 np.float32)).to(gpu)
     fake_examples = self.G(z)
     fake_loss = self.fake_loss_calculation(unlabeled_examples,
                                            fake_examples)
     fake_loss.backward()
     # Gradient penalty.
     gradient_penalty = self.gradient_penalty_calculation(
         fake_examples, unlabeled_examples)
     gradient_penalty.backward()
     # Discriminator update.
     self.d_optimizer.step()
     # Generator.
     if step % self.settings.generator_training_step_period == 0:
         self.g_optimizer.zero_grad()
         z = torch.randn(unlabeled_examples.size(0),
                         self.G.input_size).to(gpu)
         fake_examples = self.G(z)
         generator_loss = self.generator_loss_calculation(
             fake_examples, unlabeled_examples)
         generator_loss.backward()
         self.g_optimizer.step()
         if self.gan_summary_writer.is_summary_step():
             self.gan_summary_writer.add_scalar('Generator/Loss',
                                                generator_loss.item())
     # Summaries.
     if self.gan_summary_writer.is_summary_step():
         self.gan_summary_writer.add_scalar('Discriminator/Labeled Loss',
                                            labeled_loss.item())
         self.gan_summary_writer.add_scalar('Discriminator/Unlabeled Loss',
                                            unlabeled_loss.item())
         self.gan_summary_writer.add_scalar('Discriminator/Fake Loss',
                                            fake_loss.item())
         self.gan_summary_writer.add_scalar(
             'Discriminator/Gradient Penalty', gradient_penalty.item())
         self.gan_summary_writer.add_scalar(
             'Discriminator/Gradient Norm',
             self.gradient_norm.mean().item())
         if self.labeled_features is not None:
             self.gan_summary_writer.add_scalar(
                 'Feature Norm/Labeled',
                 self.labeled_features.mean(0).norm().item())
             self.gan_summary_writer.add_scalar(
                 'Feature Norm/Unlabeled',
                 self.unlabeled_features.mean(0).norm().item())
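
Unlike Example 7, this version hides the gradient penalty behind self.gradient_penalty_calculation. Reassembling Example 7's inline code into that method shape gives the sketch below; the self.gradient_norm attribute logged above suggests the real method also stores the per-example gradient norms.

def gradient_penalty_calculation(self, fake_examples, unlabeled_examples):
    # Sketch assembled from the inline version in Example 7: penalize the
    # discriminator's gradient norm at random interpolates of real and fake data.
    alpha = torch.rand(2, device=gpu)
    alpha = alpha / alpha.sum(0)  # Random convex weights shared across the batch.
    interpolates = (alpha[0] * unlabeled_examples.detach().requires_grad_()
                    + alpha[1] * fake_examples.detach().requires_grad_())
    interpolates_loss = self.interpolate_loss_calculation(interpolates)
    gradients = torch.autograd.grad(outputs=interpolates_loss, inputs=interpolates,
                                    grad_outputs=torch.ones_like(interpolates_loss),
                                    create_graph=True, only_inputs=True)[0]
    self.gradient_norm = gradients.view(unlabeled_examples.size(0), -1).norm(dim=1)
    return ((self.gradient_norm - 1) ** 2).mean() * self.settings.gradient_penalty_multiplier
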