Example #1
    def train(self, data_loader, epochs):
        """
        Trains Neural Process.

        Parameters
        ----------
        data_loader : torch.utils.data.DataLoader instance

        epochs : int
            Number of epochs to train for.
        """
        for epoch in range(epochs):
            epoch_loss = 0.
            for i, data in enumerate(data_loader):
                self.optimizer.zero_grad()

                # Sample number of context and target points
                num_context = randint(*self.num_context_range)
                num_extra_target = randint(*self.num_extra_target_range)

                # Create context and target points and apply neural process
                if self.is_img:
                    img, _ = data  # data is a tuple (img, label)
                    batch_size = img.size(0)
                    context_mask, target_mask = \
                        batch_context_target_mask(self.neural_process.img_size,
                                                  num_context, num_extra_target,
                                                  batch_size)

                    img = img.to(self.device)
                    context_mask = context_mask.to(self.device)
                    target_mask = target_mask.to(self.device)

                    p_y_pred, q_target, q_context = \
                        self.neural_process(img, context_mask, target_mask)

                    # Calculate y_target as this will be required for loss
                    _, y_target = img_mask_to_np_input(img, target_mask)
                else:
                    x, y = data
                    x_context, y_context, x_target, y_target = \
                        context_target_split(x, y, num_context, num_extra_target)
                    p_y_pred, q_target, q_context = \
                        self.neural_process(x_context, y_context, x_target, y_target)

                loss = self._loss(p_y_pred, y_target, q_target, q_context)
                loss.backward()
                self.optimizer.step()

                epoch_loss += loss.item()

                self.steps += 1

                if self.steps % self.print_freq == 0:
                    print("iteration {}, loss {:.3f}".format(
                        self.steps, loss.item()))

            print("Epoch: {}, Avg_loss: {}".format(
                epoch, epoch_loss / len(data_loader)))
            self.epoch_loss_history.append(epoch_loss / len(data_loader))
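
The `_loss` method is not part of this snippet. A minimal sketch of the standard Neural Process objective that matches its call signature above (the helper name and reduction choices are assumptions, not the author's code):

import torch
from torch.distributions.kl import kl_divergence

def np_elbo_loss(p_y_pred, y_target, q_target, q_context):
    # Expected log-likelihood of the targets under the predictive
    # distribution, averaged over the batch and summed over points and dims
    log_likelihood = p_y_pred.log_prob(y_target).mean(dim=0).sum()
    # KL(q(z | target) || q(z | context)) regularises the latent
    kl = kl_divergence(q_target, q_context).mean(dim=0).sum()
    return -log_likelihood + kl
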
Example #2
    def train(self, data_loader, epochs):
        """
        Trains Neural Process.

        Parameters
        ----------
        data_loader : torch.utils.data.DataLoader instance

        epochs : int
            Number of epochs to train for.
        """
        for epoch in range(epochs):
            epoch_loss = 0.
            for i, data in enumerate(data_loader):
                self.optimizer.zero_grad()

                # Sample number of context and target points
                num_context = randint(*self.num_context_range)
                num_extra_target = randint(*self.num_extra_target_range)

                x, y = data
                x_context, y_context, x_target, y_target = \
                    context_target_split(x, y, num_context, num_extra_target)
                p_y_pred, q_target, q_context = \
                    self.neural_process(x_context, y_context, x_target, y_target)

                loss = self._loss(p_y_pred, y_target, q_target, q_context)
                loss.backward()
                self.optimizer.step()

                epoch_loss += loss.item()

                self.steps += 1

                if self.steps % self.print_freq == 0:
                    print("iteration {}, loss {:.3f}".format(self.steps, loss.item()))

            print("Epoch: {}, Avg_loss: {}".format(epoch, epoch_loss / len(data_loader)))
            self.epoch_loss_history.append(epoch_loss / len(data_loader))
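
`context_target_split` is imported from the project's utilities and not shown here. A plausible sketch of its behaviour, inferred from how it is called in these examples (the `locations` keyword appears in a later snippet; the exact sampling scheme is an assumption):

import torch

def context_target_split(x, y, num_context, num_extra_target, locations=None):
    # x and y have shape (batch_size, num_points, dim)
    if locations is None:
        # Sample context + target indices without replacement
        locations = torch.randperm(x.size(1))[:num_context + num_extra_target]
    x_context = x[:, locations[:num_context], :]
    y_context = y[:, locations[:num_context], :]
    # Targets include the context points, as is standard for Neural Processes
    x_target = x[:, locations, :]
    y_target = y[:, locations, :]
    return x_context, y_context, x_target, y_target

Example #3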
    def train(self, data_loader, epochs, x_context_plot, y_context_plot):
        """
        Trains Neural Process.

        Parameters
        ----------
        data_loader : torch.utils.data.DataLoader instance

        epochs : int
            Number of epochs to train for.

        x_context_plot : torch.Tensor
            Fixed context inputs used to plot predictions after each epoch.

        y_context_plot : torch.Tensor
            Fixed context outputs used to plot predictions after each epoch.
        """
        for epoch in range(epochs):
            epoch_loss = 0.
            for i, data in enumerate(data_loader):
                self.optimizer.zero_grad()

                # Sample number of context and target points
                num_context = randint(*self.num_context_range)
                num_extra_target = randint(*self.num_extra_target_range)

                # Create context and target points and apply neural process
                if self.is_img:
                    img, _ = data  # data is a tuple (img, label)
                    batch_size = img.size(0)
                    context_mask, target_mask = \
                        batch_context_target_mask(self.neural_process.img_size,
                                                  num_context, num_extra_target,
                                                  batch_size)

                    img = img.to(self.device)
                    context_mask = context_mask.to(self.device)
                    target_mask = target_mask.to(self.device)

                    p_y_pred, q_target, q_context = \
                        self.neural_process(img, context_mask, target_mask)

                    # Calculate y_target as this will be required for loss
                    _, y_target = img_mask_to_np_input(img, target_mask)
                else:
                    x, y = data
                    x_context, y_context, x_target, y_target = \
                        context_target_split(x, y, num_context, num_extra_target)
                    p_y_pred, q_target, q_context = \
                        self.neural_process(x_context, y_context, x_target, y_target)

                loss = self._loss(p_y_pred, y_target, q_target, q_context, self.MMD)
                loss.backward()
                self.optimizer.step()

                epoch_loss += loss.item()

                self.steps += 1

                if self.steps % self.print_freq == 0:
                    print("iteration {}, loss {:.3f}".format(
                        self.steps, loss.item()))

            print("Epoch: {}, Avg_loss: {}".format(
                epoch, epoch_loss / len(data_loader)))
            self.epoch_loss_history.append(epoch_loss / len(data_loader))

            # Disable training mode so forward() decodes from q(z | context)
            # and returns only p_y_pred
            self.neural_process.training = False

            for i in range(64):
                # Neural process returns distribution over y_target
                p_y_pred = self.neural_process(x_context_plot, y_context_plot,
                                               self.x_target_plot)
                # Extract mean of distribution
                mu = p_y_pred.loc.detach()
                std = p_y_pred.stddev.detach()
                plt.plot(self.x_target_plot.numpy()[0],
                         mu.numpy()[0],
                         alpha=0.05,
                         c='b')

            plt.scatter(x_context_plot[0].numpy(),
                        y_context_plot[0].numpy(),
                        c='k')

            if self.MMD:
                st = 'MMD'
            else:
                st = 'KLD'

            if self.fixed_sigma:
                ss = 'FIXED_SIGMA={}'.format(self.sig)
            else:
                ss = ''

            plt.savefig('./NewNewPlots/{}{}alpha{}epoch{}.png'.format(
                ss, st, self.alpha, epoch))
            plt.clf()

            # std comes from the last sampled prediction in the loop above
            plt.plot(self.x_target_plot.numpy()[0], std.numpy()[0], c='b')
            plt.savefig('./NewNewPlots/{}{}alpha{}epoch{}_variance.png'.format(
                ss, st, self.alpha, epoch))
            plt.clf()

            # Restore training behaviour for the next epoch
            self.neural_process.training = True
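
This variant passes an `MMD` flag into `_loss` and labels its plots 'MMD' or 'KLD', suggesting the KL term can be swapped for a maximum mean discrepancy penalty on latent samples. A minimal sketch of an RBF-kernel MMD estimator (function name, kernel, and bandwidth are assumptions):

import torch

def mmd_rbf(samples_p, samples_q, gamma=1.0):
    # Biased estimator of MMD^2 between two sample sets of shape
    # (num_samples, latent_dim), using an RBF kernel
    def kernel(a, b):
        return torch.exp(-gamma * torch.cdist(a, b) ** 2)
    return (kernel(samples_p, samples_p).mean()
            + kernel(samples_q, samples_q).mean()
            - 2 * kernel(samples_p, samples_q).mean())

Example #4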
    np_trainer.train(data_loader, 1)

    # Save losses at every epoch
    with open(directory + '/losses.json', 'w') as f:
        json.dump(np_trainer.epoch_loss_history, f)

    # Save model at every epoch
    torch.save(np_trainer.neural_process.state_dict(), directory + '/model.pt')

    if epoch % 50 == 0:

        if epoch == 0:
            for batch in data_loader:
                break
            x, y = batch
            x_context, y_context, _, _ = context_target_split(
                x[0:1], y[0:1], 4, 4)

            x_target = torch.Tensor(np.linspace(-pi, pi, 100))
            x_target = x_target.unsqueeze(1).unsqueeze(0)

        # Put the model (here named input_data) into evaluation mode
        input_data.training = False

        for i in range(64):
            # Neural process returns distribution over y_target
            p_y_pred = input_data(x_context, y_context, x_target)
            # Extract mean of distribution
            mu = p_y_pred.loc.detach()
            plt.plot(x_target.numpy()[0], mu.numpy()[0], alpha=0.05, c='b')

        input_data.training = True
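
This fragment evidently runs inside the epoch loop of a driver script, and it stops after drawing the sampled means. Following the plotting pattern of the previous example, a plausible continuation (file name assumed) would scatter the context points and save the figure:

plt.scatter(x_context[0].numpy(), y_context[0].numpy(), c='k')
plt.savefig(directory + '/epoch_{}.png'.format(epoch))
plt.clf()

Example #5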
# import data
from datasets import FreqSineData
from math import pi

for xp, freq in enumerate(freqs):
    dataset = FreqSineData(amplitude_range=(0.5, 1.),
                           shift_range=(-.0000, .00001),
                           freq_range=(freq, freq + 1e-5),
                           num_samples=100)
    x_min, x_max = -pi, pi
    y_min, y_max = -1.1, 1.1
    initial_x = -3.2
    t = torch.cat([torch.Tensor(t).unsqueeze(0) for t, x in dataset.data])
    x = torch.cat([torch.Tensor(x).unsqueeze(0) for t, x in dataset.data])

    # fix locations so all time series have the same context
    t_context, x_context, t_target, _ = context_target_split(
        t, x, num_context, num_extra_target, locations=locations)

    # compute q(z|C)
    p_y_pred, _, q_context = model(t_context, x_context, t_target, z=None)
    mu_context = q_context.loc
    sigma_context = q_context.scale
    results.append((freq, mu_context, sigma_context))

# Experiment 1: interpolation
# Decode from mu_1 to mu_2 along delta_mu = mu_2 - mu_1.

# select mu for the first frequency (freq = 1)
mu_1 = results[0][1][0]
# select mu for the last frequency (freq = 2)
mu_2 = results[-1][1][0]
delta_mu = mu_2 - mu_1
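
The decoding loop for this experiment is not shown. A sketch consistent with the `model(t_context, x_context, t_target, z=...)` signature used above (step count, return unpacking, and plotting are assumptions):

import torch
import matplotlib.pyplot as plt

# Walk from mu_1 to mu_2 in equal steps and decode each latent
for alpha in torch.linspace(0., 1., steps=10):
    z = (mu_1 + alpha * delta_mu).unsqueeze(0)
    # Assuming the same 3-tuple return convention as the call above
    p_y_pred, _, _ = model(t_context, x_context, t_target, z=z)
    plt.plot(t_target.numpy()[0], p_y_pred.loc.detach().numpy()[0], alpha=0.5)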