Example #1
import os

import torch

# Method of the pruning wrapper; `plot_figure` is assumed to be a project
# helper that saves a plot of the per-group norms.
def compute_loss(self, batch, current_epoch, converging):
    """
    loss = ||Y - Yc||^2 + lambda * (||A_1||_{2,1} + ||A_2^T||_{2,1})
    """
    modules = self.find_modules()
    lambda_factor = self.args.regularization_factor
    q = self.args.q
    loss_proj = []
    for l in range(len(modules)):
        projection = self.sparse_param(modules[l])
        # l_{2,q} mixed norm: squared l2 norm of each group (dim=1 of the
        # transposed projection), raised to q / 2, summed, then the 1 / q
        # root; q = 1 recovers the plain l_{2,1} norm of the docstring
        loss_proj.append(
            torch.sum(
                torch.sum(projection.squeeze().t() ** 2,
                          dim=1) ** (q / 2)) ** (1 / q))
        # save the norm distribution during the optimization phase
        if not converging and (batch + 1) % 300 == 0:
            path = os.path.join(self.args.dir_save, self.args.save,
                                'Filter{}_norm_distribution'.format(l))
            os.makedirs(path, exist_ok=True)
            filename = os.path.join(
                path, 'Epoch{}_Batch{}'.format(current_epoch, batch + 1))
            plot_figure(projection.squeeze(), l, filename)
    lossp = sum(loss_proj)
    # plain SGD folds the regularization factor into the loss; other
    # optimizers are left to apply it in their own update step
    if self.args.optimizer == 'SGD':
        lossp *= lambda_factor
    return lossp
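
A minimal standalone sketch (not from the repository) of the regularizer computed above: the nested sums form an l_q norm over per-group l2 norms, and q = 1 recovers exactly the l_{2,1} norm named in the docstring. `A` and `q` here are toy values.

import torch

torch.manual_seed(0)
A = torch.randn(8, 16)  # toy stand-in for a squeezed projection matrix
q = 1.0                 # q = 1 reduces the term to the plain l_{2,1} norm

row_sq = torch.sum(A ** 2, dim=1)               # squared l2 norm per row
term = torch.sum(row_sq ** (q / 2)) ** (1 / q)  # l_{2,q} mixed norm

# cross-check: for q = 1 this is just the sum of per-row l2 norms
assert torch.allclose(term, torch.sqrt(row_sq).sum())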
Example #2
import os

import torch

# Variant where `sparse_param` returns two projection matrices per module
# (e.g. the two ends of a residual block); the penalties stay separate.
def compute_loss(self, batch, current_epoch, converging):
    """
    loss = ||Y - Yc||^2 + lambda * (||A_1||_{2,1} + ||A_2^T||_{2,1})
    """
    lambda_factor = self.args.regularization_factor
    q = self.args.q
    loss_proj1 = []
    loss_proj2 = []
    for l, m in enumerate(self.find_modules()):
        projection1, projection2 = self.sparse_param(m)
        # l_{2,q} mixed norms with the two group structures of the
        # docstring: dim=0 groups for ||A_1||_{2,1}, dim=1 groups for
        # ||A_2^T||_{2,1}
        loss_proj1.append(
            torch.sum(
                torch.sum(projection1.squeeze().t() ** 2,
                          dim=0) ** (q / 2)) ** (1 / q))
        loss_proj2.append(
            torch.sum(
                torch.sum(projection2.squeeze().t() ** 2,
                          dim=1) ** (q / 2)) ** (1 / q))
        # save the norm distribution during the optimization phase
        if not converging and (batch + 1) % 300 == 0:
            path = os.path.join(self.args.dir_save, self.args.save,
                                'ResBlock{}_norm_distribution'.format(l))
            os.makedirs(path, exist_ok=True)
            filename1 = os.path.join(
                path, 'P1_Epoch{}_Batch{}'.format(current_epoch, batch + 1))
            filename2 = os.path.join(
                path, 'P2_Epoch{}_Batch{}'.format(current_epoch, batch + 1))
            plot_figure(projection1.squeeze().t(), l, filename1)
            plot_figure(projection2.squeeze(), l, filename2)
    # sum the per-block terms (averaging over blocks was a commented-out
    # alternative in the original)
    lossp1 = sum(loss_proj1)
    lossp2 = sum(loss_proj2)
    if self.args.optimizer == 'SGD':
        lossp1 *= lambda_factor
        lossp2 *= lambda_factor
    return lossp1, lossp2
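
Below is a hedged, self-contained mock of a call site for this variant. None of these names come from the original: `P1`, `P2`, `y`, `y_c`, and the value of `lambda_factor` are stand-ins, chosen only to show how the two returned penalties would be added to the data term from the docstring.

import torch

torch.manual_seed(0)
q = 1.0
lambda_factor = 1e-3

P1 = torch.randn(4, 6, requires_grad=True)   # stand-ins for the two
P2 = torch.randn(6, 4, requires_grad=True)   # projection matrices
y = torch.randn(10)
y_c = y + 0.1 * torch.randn(10)

data_term = torch.sum((y - y_c) ** 2)        # ||Y - Yc||^2
lossp1 = torch.sum(torch.sum(P1 ** 2, dim=1) ** (q / 2)) ** (1 / q)
lossp2 = torch.sum(torch.sum(P2 ** 2, dim=0) ** (q / 2)) ** (1 / q)

total = data_term + lambda_factor * (lossp1 + lossp2)
total.backward()                             # gradients flow to P1 and P2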
Example #3
import os

import torch

# Variant that applies both group structures to a single projection
# matrix, penalizing its rows and its columns at the same time.
def compute_loss(self, batch, current_epoch, converging):
    """
    loss = ||Y - Yc||^2 + lambda * (||A_1||_{2,1} + ||A_2^T||_{2,1})
    """
    modules = self.find_modules()
    lambda_factor = self.args.regularization_factor
    q = self.args.q
    loss_proj11 = []
    loss_proj12 = []
    for l, m in enumerate(modules):
        projection1 = self.sparse_param(m)
        # the same projection under both group structures: dim=1 and
        # dim=0 groups of the transposed matrix, i.e. ||A||_{2,1} and
        # ||A^T||_{2,1} from the docstring
        loss_proj11.append(
            torch.sum(
                torch.sum(projection1.squeeze().t() ** 2,
                          dim=1) ** (q / 2)) ** (1 / q))
        loss_proj12.append(
            torch.sum(
                torch.sum(projection1.squeeze().t() ** 2,
                          dim=0) ** (q / 2)) ** (1 / q))
        # save the norm distribution during the optimization phase
        if not converging and (batch + 1) % 200 == 0:
            path = os.path.join(
                self.args.dir_save, self.args.save,
                'ResBlock{}_norm_distribution'.format(l))
            os.makedirs(path, exist_ok=True)
            filename1 = os.path.join(
                path,
                'Row_Epoch{}_Batch{}'.format(current_epoch, batch + 1))
            filename2 = os.path.join(
                path,
                'Column_Epoch{}_Batch{}'.format(current_epoch, batch + 1))
            plot_figure(projection1.squeeze().t(), l, filename1)
            plot_figure(projection1.squeeze(), l, filename2)
    # sum the per-block terms (averaging over blocks was a commented-out
    # alternative in the original)
    lossp1 = sum(loss_proj11)
    lossp2 = sum(loss_proj12)
    if self.args.optimizer == 'SGD':
        lossp1 *= lambda_factor
        lossp2 *= lambda_factor
    return lossp1, lossp2
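
A toy illustration (again, not from the repository) of the two-sided penalty in this last variant: both group terms are taken from one matrix, so the regularizer can drive whole rows and whole columns of the projection toward zero at once. Zeroing a row demonstrably lowers the row term.

import torch

torch.manual_seed(0)
q = 1.0
P = torch.randn(4, 6)   # toy projection

row_term = torch.sum(torch.sum(P ** 2, dim=1) ** (q / 2)) ** (1 / q)  # ||P||_{2,1}
col_term = torch.sum(torch.sum(P ** 2, dim=0) ** (q / 2)) ** (1 / q)  # ||P^T||_{2,1}

# zeroing an entire row removes its whole contribution from the row term
P_pruned = P.clone()
P_pruned[2] = 0.0
row_term_pruned = torch.sum(torch.sum(P_pruned ** 2, dim=1) ** (q / 2)) ** (1 / q)
assert row_term_pruned < row_term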