Example #1
0
    def test_loop(self, n_support, optimizer=None): # no optimizer needed for GP
        """Evaluate the GP on one randomly chosen test person.

        Conditions the GP on `n_support` randomly chosen frames of that
        person (no gradient step — hence no optimizer), then predicts on
        all 19 frames and returns the MSE against the targets.

        Args:
            n_support: number of support frames (out of 19) to condition on.
            optimizer: unused; kept for signature parity with other models.

        Returns:
            Scalar MSE tensor over all 19 frames of the sampled person.
            NOTE(review): the MSE covers support frames too, not only the
            query split — confirm this is the intended metric.
        """
        inputs, targets = get_batch(test_people)

        # Randomly pick which of the 19 frames per person form the support set.
        support_ind = list(np.random.choice(list(range(19)), replace=False, size=n_support))

        x_all = inputs.cuda()
        y_all = targets.cuda()

        x_support = inputs[:,support_ind,:,:,:].cuda()
        y_support = targets[:,support_ind].cuda()

        # choose a random test person; np.random.randint's upper bound is
        # exclusive, so use len(test_people) — the original
        # len(test_people) - 1 could never select the last person.
        n = np.random.randint(0, len(test_people))

        # Condition the GP on the support embeddings (detached: no
        # feature-extractor gradients at test time).
        z_support = self.feature_extractor(x_support[n]).detach()
        self.model.set_train_data(inputs=z_support, targets=y_support[n], strict=False)

        self.model.eval()
        self.feature_extractor.eval()
        self.likelihood.eval()

        with torch.no_grad():
            z_query = self.feature_extractor(x_all[n]).detach()
            pred    = self.likelihood(self.model(z_query))
            lower, upper = pred.confidence_region() #2 standard deviations above and below the mean

        mse = self.mse(pred.mean, y_all[n])

        return mse
Example #2
0
    def train_loop(self, epoch, optimizer, params, results_logger):
        """Run one training epoch over a meta-batch of tasks.

        Draws a batch either from the QMUL people split or from a fresh
        sinusoid generator, then takes one optimizer step per task and
        logs the loss every 10 epochs.
        """
        if params.dataset != "sines":
            batch, batch_labels = get_batch(train_people)
        else:
            generator = SinusoidalDataGenerator(
                params.update_batch_size * 2, params.meta_batch_size,
                params.num_tasks, params.multidimensional_amp,
                params.multidimensional_phase, params.noise,
                params.out_of_range)
            batch, batch_labels, amp, phase = generator.generate()
            batch = torch.from_numpy(batch)
            batch_labels = torch.from_numpy(batch_labels)

        batch = batch.to(self.device)
        batch_labels = batch_labels.to(self.device)

        should_log = epoch % 10 == 0
        for task_inputs, task_labels in zip(batch, batch_labels):
            optimizer.zero_grad()
            predictions = self.model(self.feature_extractor(task_inputs))
            loss = self.criterion(predictions, task_labels)
            loss.backward()
            optimizer.step()

            if should_log:
                print('[%d] - Loss: %.3f' % (epoch, loss.item()))
                results_logger.log("epoch", epoch)
                results_logger.log("loss", loss.item())
    def test_loop(self, n_support,
                  optimizer):  # we need optimizer to take one gradient step
        """Adapt on one test person's support set, then score on all frames.

        Takes a single gradient step on `n_support` randomly chosen support
        frames of a randomly chosen test person, then evaluates the adapted
        model on all 19 frames of that person.

        Args:
            n_support: number of support frames (out of 19).
            optimizer: optimizer used for the single adaptation step.

        Returns:
            Criterion loss over all 19 frames of the sampled person.
            NOTE(review): the model/extractor are left in eval mode on
            return — confirm callers restore train mode.
        """
        inputs, targets = get_batch(test_people)

        # Randomly pick which of the 19 frames per person form the support set.
        support_ind = list(
            np.random.choice(list(range(19)), replace=False, size=n_support))

        x_all = inputs.cuda()
        y_all = targets.cuda()

        x_support = inputs[:, support_ind, :, :, :].cuda()
        y_support = targets[:, support_ind].cuda()

        # choose a random test person; np.random.randint's upper bound is
        # exclusive, so use len(test_people) — the original
        # len(test_people) - 1 could never select the last person.
        n = np.random.randint(0, len(test_people))

        # One adaptation step on the support set (features detached: only
        # the regression head is updated here).
        optimizer.zero_grad()
        z_support = self.feature_extractor(x_support[n]).detach()
        output_support = self.model(z_support).squeeze()
        loss = self.criterion(output_support, y_support[n])
        loss.backward()
        optimizer.step()

        self.feature_extractor.eval()
        self.model.eval()
        z_all = self.feature_extractor(x_all[n]).detach()
        output_all = self.model(z_all).squeeze()
        return self.criterion(output_all, y_all[n])
Example #4
0
    def train_loop(self, epoch, optimizer, params, results_logger):
        """Run one training epoch of the (optionally flow-augmented) GP model.

        Draws a batch from the QMUL people split or a sinusoid generator,
        then per task: embeds the inputs, optionally perturbs the labels
        with noise and/or maps them through a normalizing flow, conditions
        the exact GP on the embeddings, and minimizes the negative marginal
        log-likelihood. Logs loss/MSE/noise every 10 epochs.
        """
        # print("NUM KERNEL PARAMS {}".format(sum([p.numel() for p in self.model.parameters() if p.requires_grad])))
        # print("NUM TRANSFORM PARAMS {}".format(sum([p.numel() for p in self.feature_extractor.parameters() if p.requires_grad])))

        self.model.train()
        self.feature_extractor.train()
        self.likelihood.train()
        if self.is_flow:
            self.cnf.train()

        if self.dataset != "sines":
            batch, batch_labels = get_batch(train_people)
        else:
            # amp/phase describe the generated sinusoids; unused here.
            batch, batch_labels, amp, phase = SinusoidalDataGenerator(
                params.update_batch_size * 2, params.meta_batch_size,
                params.num_tasks, params.multidimensional_amp,
                params.multidimensional_phase, params.noise,
                params.out_of_range).generate()

            if self.num_tasks == 1:
                batch = torch.from_numpy(batch)
                # Flatten per-task label dims to a vector per task.
                batch_labels = torch.from_numpy(batch_labels).view(
                    batch_labels.shape[0], -1)
            else:
                batch = torch.from_numpy(batch)
                batch_labels = torch.from_numpy(batch_labels)

        batch, batch_labels = batch.to(self.device), batch_labels.to(
            self.device)
        # print(batch.shape, batch_labels.shape)
        for inputs, labels in zip(batch, batch_labels):
            optimizer.zero_grad()
            z = self.feature_extractor(inputs)
            if self.add_noise:
                # Gaussian label noise (std 0.1), matched to labels'
                # device/dtype via .to(labels).
                labels = labels + torch.normal(0, 0.1,
                                               size=labels.shape).to(labels)
            if self.is_flow:
                # Flow maps labels to the GP's target space; delta_log_py
                # is the log-det correction added to the loss below.
                # NOTE(review): apply_flow also returns (possibly rebound)
                # labels, later fed to compute_mse — confirm that is intended.
                delta_log_py, labels, y = self.apply_flow(labels, z)
            else:
                y = labels
            # Condition the exact GP on this task before querying it.
            self.model.set_train_data(inputs=z, targets=y)
            predictions = self.model(z)
            # Negative marginal log-likelihood of the conditioned GP.
            loss = -self.mll(predictions, self.model.train_targets)
            if self.is_flow:
                loss = loss + torch.mean(delta_log_py)
            loss.backward()
            optimizer.step()

            mse, _ = self.compute_mse(labels, predictions, z)

            if epoch % 10 == 0:
                print('[%d] - Loss: %.3f  MSE: %.3f noise: %.3f' %
                      (epoch, loss.item(), mse.item(),
                       self.model.likelihood.noise.item()))
                results_logger.log("epoch", epoch)
                results_logger.log("loss", loss.item())
                results_logger.log("MSE", mse.item())
                results_logger.log("noise", self.model.likelihood.noise.item())
Example #5
0
 def get_support_query_qmul(self, n_support):
     """Sample a random support split of the QMUL test batch.

     Args:
         n_support: number of support frames (out of 19) per person.

     Returns:
         Tuple (x_all, x_support, y_all, y_support), all on self.device.

     NOTE(review): the original also computed x_query/y_query from the
     complement indices but never returned them (and x_query was never
     moved to the device); that dead computation has been removed. If
     callers need the query split, extend the return value.
     """
     inputs, targets = get_batch(test_people)
     support_ind = list(
         np.random.choice(list(range(19)), replace=False, size=n_support))
     x_all = inputs.to(self.device)
     y_all = targets.to(self.device)
     x_support = inputs[:, support_ind, :, :, :].to(self.device)
     y_support = targets[:, support_ind].to(self.device)
     return x_all, x_support, y_all, y_support
    def train_loop(self, epoch, optimizer):
        """One epoch of per-task supervised training on the QMUL train split."""
        batch, batch_labels = get_batch(train_people)
        batch = batch.cuda()
        batch_labels = batch_labels.cuda()

        should_log = epoch % 10 == 0
        for task_inputs, task_labels in zip(batch, batch_labels):
            optimizer.zero_grad()
            predictions = self.model(self.feature_extractor(task_inputs))
            loss = self.criterion(predictions, task_labels)
            loss.backward()
            optimizer.step()

            if should_log:
                print('[%d] - Loss: %.3f' % (epoch, loss.item()))
    def train_loop(self, epoch, optimizer):
        """One epoch of GP training: condition on each task's embeddings and
        minimize the negative marginal log-likelihood."""
        batch, batch_labels = get_batch(train_people)
        batch = batch.cuda()
        batch_labels = batch_labels.cuda()

        should_log = epoch % 10 == 0
        for task_inputs, task_labels in zip(batch, batch_labels):
            optimizer.zero_grad()
            embedding = self.feature_extractor(task_inputs)

            # Condition the exact GP on this task before querying it.
            self.model.set_train_data(inputs=embedding, targets=task_labels)
            preds = self.model(embedding)
            loss = -self.mll(preds, self.model.train_targets)

            loss.backward()
            optimizer.step()
            mse = self.mse(preds.mean, task_labels)

            if should_log:
                print('[%d] - Loss: %.3f  MSE: %.3f noise: %.3f' %
                      (epoch, loss.item(), mse.item(),
                       self.model.likelihood.noise.item()))
Example #8
0
    def test_loop(self, n_support, optimizer,
                  params):  # we need optimizer to take one gradient step
        """Adapt on one person's support set, then score on all their frames.

        Takes a single gradient step on `n_support` randomly chosen support
        frames of a randomly chosen person, then evaluates the adapted model
        on all 19 frames of that person.

        Args:
            n_support: number of support frames (out of 19).
            optimizer: optimizer used for the single adaptation step.
            params: experiment config (dataset selection and sine-generator
                settings).

        Returns:
            Criterion loss over all frames of the sampled person.
        """
        if params.dataset != "sines":
            # NOTE(review): this samples from train_people inside a test
            # loop; sibling test loops sample from test_people — confirm
            # this is intended.
            inputs, targets = get_batch(train_people)
        else:
            batch, batch_labels, amp, phase = SinusoidalDataGenerator(
                params.update_batch_size * 2, params.meta_batch_size,
                params.num_tasks, params.multidimensional_amp,
                params.multidimensional_phase, params.noise,
                params.out_of_range).generate()

            inputs = torch.from_numpy(batch)
            targets = torch.from_numpy(batch_labels)

        inputs, targets = inputs.to(self.device), targets.to(self.device)

        # Randomly pick which of the 19 frames per person form the support set.
        support_ind = list(
            np.random.choice(list(range(19)), replace=False, size=n_support))

        # Already on self.device — no second transfer needed.
        x_all = inputs
        y_all = targets

        # Assumes inputs is 5-D (people, 19, C, H, W) as in the QMUL batch;
        # the sines branch produces a different shape — TODO confirm this
        # indexing is only reached on the QMUL path.
        x_support = inputs[:, support_ind, :, :, :]
        y_support = targets[:, support_ind]

        # choose a random test person; np.random.randint's upper bound is
        # exclusive, so use len(test_people) — the original
        # len(test_people) - 1 could never select the last person.
        n = np.random.randint(0, len(test_people))

        # One adaptation step on the support set (features detached: only
        # the regression head is updated here).
        optimizer.zero_grad()
        z_support = self.feature_extractor(x_support[n]).detach()
        output_support = self.model(z_support).squeeze()
        loss = self.criterion(output_support, y_support[n])
        loss.backward()
        optimizer.step()

        self.feature_extractor.eval()
        self.model.eval()
        z_all = self.feature_extractor(x_all[n]).detach()
        output_all = self.model(z_all).squeeze()
        return self.criterion(output_all, y_all[n])