def _process_log_after_step(self, packs, **kwargs):
    # Get the counter used for logging & tensorboard.
    iter_checker = kwargs.get('iter_checker', 'iter')
    # Logging
    if chk_d(self._meters, 'counter_log',
             lambda c: c.check(self._meters['i'][iter_checker])):
        if 'lmd_generate_log' in kwargs:
            kwargs['lmd_generate_log']()
        # (1) Logs
        if 'log_main' in self._logs and 'log' in packs \
                and not chk_d(kwargs, 'disable_log'):
            # Update io & optimize timers.
            if 'io' in self._meters['timers']:
                packs['log']['t_io'] = \
                    self._meters['timers']['io'].get_duration_and_reset()
            if 'opt' in self._meters['timers']:
                packs['log']['t_opt'] = \
                    self._meters['timers']['opt'].get_duration_and_reset()
            # Show information.
            log_kwargs = {'items': packs['log']} if 'lmd_process_log' not in kwargs \
                else kwargs['lmd_process_log'](packs['log'])
            self._logs['log_main'].info_formatted(
                fet_d(self._meters['i'],
                      *self._logs['log_main'].formatted_counters),
                **log_kwargs)
        # (2) Tensorboard
        if 'tfboard' in self._logs and 'tfboard' in packs \
                and not chk_d(kwargs, 'disable_tfboard'):
            tfboard_add_multi_scalars(
                self._logs['tfboard'], packs['tfboard'],
                global_step=self._meters['i'][iter_checker])
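
These hooks lean on a few small utilities from the same repository (chk_d, fet_d, tfboard_add_multi_scalars) whose definitions are not part of this listing. The sketch below gives plausible implementations inferred purely from the call sites above; the repo's actual versions may differ.

from torch.utils.tensorboard import SummaryWriter


def chk_d(d, key, f=None):
    # True iff key is present and, when a predicate is given, f(d[key]) holds.
    if key not in d:
        return False
    return True if f is None else bool(f(d[key]))


def fet_d(d, *keys):
    # Fetch the sub-dict of d restricted to the given keys.
    return {k: d[k] for k in keys if k in d}


def tfboard_add_multi_scalars(writer: SummaryWriter, multi_scalars, global_step):
    # Log a {tag: scalar} or {tag: {subtag: scalar}} dict to tensorboard.
    for tag, value in multi_scalars.items():
        if isinstance(value, dict):
            writer.add_scalars(tag, value, global_step=global_step)
        else:
            writer.add_scalar(tag, value, global_step=global_step)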
Example #2
def _api_eval_out_detection(self, epoch, iter_index, test_dataloader,
                            out_dataloader, out_name):
    torch.cuda.empty_cache()
    # 1. Evaluating
    evaluator = OutDetectionEvaluator(
        self._Enc, self._Dec,
        device=self._cfg.args.device,
        temper=self._cfg.args.eval_odin_temper,
        noise_magnitude=self._cfg.args.eval_odin_noise_mag,
        num_delta=self._cfg.args.eval_odin_num_delta)
    scores = evaluator(in_dataloader=test_dataloader,
                       out_dataloader=out_dataloader)
    # 2. Logging
    self._logs['log_eval_detection'].info_formatted(
        counters={'epoch': epoch, 'iter': iter_index},
        items={'%s_%s' % (out_name, k): v for k, v in scores.items()})
    tfboard_add_multi_scalars(
        self._logs['tfboard'],
        multi_scalars={'eval/out_detection_%s' % out_name: scores},
        global_step=iter_index)
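
The temper and noise_magnitude arguments follow the ODIN recipe for out-of-distribution detection: temperature-scaled softmax plus a small gradient-based input perturbation. OutDetectionEvaluator itself is not shown in this listing; the following is a minimal sketch of the core ODIN score, assuming the encoder and decoder compose into an ordinary classifier.

import torch
import torch.nn.functional as F


def odin_score(model, x, temper=1000.0, noise_magnitude=0.0014):
    # Hypothetical sketch of ODIN scoring, not the repo's evaluator:
    # max softmax probability after temperature scaling and input perturbation.
    x = x.clone().requires_grad_(True)
    logits = model(x) / temper
    # Perturb the input against the gradient of the predicted class's NLL.
    loss = F.nll_loss(F.log_softmax(logits, dim=1), logits.argmax(dim=1))
    loss.backward()
    x_perturbed = (x - noise_magnitude * x.grad.sign()).detach()
    with torch.no_grad():
        probs = F.softmax(model(x_perturbed) / temper, dim=1)
    return probs.max(dim=1).values

Example #3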
def _process_after_batch(self, epoch, batch_index, iter_index, packs,
                         **kwargs):
    # Logging
    if chk_d(self._meters, 'counter_log', lambda c: c.check(iter_index)):
        if 'lmd_generate_log' in kwargs:
            kwargs['lmd_generate_log']()
        # (1) Logs
        if 'log_main' in self._logs and not chk_d(kwargs, 'disable_log'):
            # Update io & optimize timers.
            if 'io' in self._meters['timers']:
                packs['log']['t_io'] = \
                    self._meters['timers']['io'].get_duration_and_reset()
            if 'opt' in self._meters['timers']:
                packs['log']['t_opt'] = \
                    self._meters['timers']['opt'].get_duration_and_reset()
            # Show information.
            log_kwargs = {'items': packs['log']} if 'lmd_process_log' not in kwargs \
                else kwargs['lmd_process_log'](packs['log'])
            self._logs['log_main'].info_formatted(
                [epoch, batch_index, iter_index], **log_kwargs)
        # (2) Tensorboard
        if 'tfboard' in self._logs and not chk_d(kwargs, 'disable_tfboard'):
            tfboard_add_multi_scalars(self._logs['tfboard'], packs['tfboard'],
                                      global_step=iter_index)
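
For context, a hook like this is typically driven from the outer training loop. The loop below is purely illustrative; trainer, train_step, num_epochs, and the pack layout are assumptions, not code from the repo.

# Hypothetical driver: fill the packs each step and let the hook's internal
# counter decide whether this iteration actually gets logged.
for epoch in range(num_epochs):
    for batch_index, batch in enumerate(dataloader):
        packs = {'log': {}, 'tfboard': {}}
        trainer.train_step(batch, packs)  # assumed to fill packs with scalars
        iter_index = epoch * len(dataloader) + batch_index
        trainer._process_after_batch(epoch, batch_index, iter_index, packs)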
Example #4
def _api_eval_mi(self, epoch, iter_index, train_dataloader, test_dataloader):
    def _func_encode(_x):
        _emb = self._Enc(_x)
        return {'emb': _emb, 'params': self._Enc.params}

    evaluator = MIEvaluator(_func_encode, self._Dec,
                            device=self._cfg.args.device)
    # 1. Evaluating on train & test dataloaders
    ret = {}
    for d_name, dataloader in zip(['train', 'test'],
                                  [train_dataloader, test_dataloader]):
        ret['%s_mi_x_class' % d_name] = \
            evaluator.eval_mi_x_z_monte_carlo(dataloader)
        ret['%s_mi_y_class' % d_name] = \
            evaluator.eval_mi_y_z_variational_lb(dataloader)
    # 2. Logging
    self._logs['log_eval_mi'].info_formatted(
        counters={'epoch': epoch, 'iter': iter_index}, items=ret)
    tfboard_add_multi_scalars(
        self._logs['tfboard'],
        multi_scalars={
            'eval/mi_x_class': {d_name: ret['%s_mi_x_class' % d_name]
                                for d_name in ['train', 'test']},
            'eval/mi_y_class': {d_name: ret['%s_mi_y_class' % d_name]
                                for d_name in ['train', 'test']}
        },
        global_step=iter_index)
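
MIEvaluator is external to this listing. For intuition, eval_mi_y_z_variational_lb presumably estimates the standard variational lower bound I(Y;Z) >= H(Y) + E[log q(y|z)], with the decoder playing the role of the variational classifier q(y|z). A minimal, hypothetical version:

import math

import torch
import torch.nn.functional as F


@torch.no_grad()
def mi_y_z_variational_lb(encoder, decoder, dataloader, num_classes):
    # I(Y;Z) >= H(Y) + E[log q(y|z)]; assumes roughly uniform labels,
    # so H(Y) is approximated by log(num_classes).
    total_log_q, n = 0.0, 0
    for x, y in dataloader:
        z = encoder(x)
        log_q = F.log_softmax(decoder(z), dim=1)
        total_log_q += log_q.gather(1, y.unsqueeze(1)).sum().item()
        n += y.size(0)
    return math.log(num_classes) + total_log_q / n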
Example #5
def _api_eval_accuracy(self, epoch, iter_index, train_dataloader,
                       test_dataloader):
    evaluator = AccuracyEvaluator(self._Enc, self._Dec,
                                  device=self._cfg.args.device)
    # 1. Evaluating on train & test dataloaders
    train_acc = evaluator(train_dataloader)
    test_acc = evaluator(test_dataloader)
    # 2. Logging
    self._logs['log_eval_acc'].info_formatted(
        counters={'epoch': epoch, 'iter': iter_index},
        items={'train_acc': train_acc, 'test_acc': test_acc})
    tfboard_add_multi_scalars(
        self._logs['tfboard'],
        multi_scalars={'eval/acc': {'train': train_acc, 'test': test_acc}},
        global_step=iter_index)
    # Return
    return test_acc
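
AccuracyEvaluator is likewise not part of this listing. Inferred from its call sites (constructed from an encoder, a decoder, and a device, then called on a dataloader and returning a scalar), a plausible reconstruction:

import torch


class AccuracyEvaluator:
    # Hypothetical reconstruction: top-1 accuracy of decoder(encoder(x)).
    def __init__(self, encoder, decoder, device):
        self._encoder, self._decoder, self._device = encoder, decoder, device

    @torch.no_grad()
    def __call__(self, dataloader):
        correct, total = 0, 0
        for x, y in dataloader:
            x, y = x.to(self._device), y.to(self._device)
            pred = self._decoder(self._encoder(x)).argmax(dim=1)
            correct += (pred == y).sum().item()
            total += y.size(0)
        return correct / total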
Example #6
def _api_eval_mi(self, train_dataloader, test_dataloader):
    def _func_encode_class(_x):
        _class_emb = self._Enc_class(_x)
        _std = self._cfg.args.class_std * torch.ones(
            size=_class_emb.size(), device=_class_emb.device)
        return {'emb': _class_emb, 'params': (_class_emb, _std)}

    def _func_encode_style(_x):
        _style_emb = self._Enc_style(_x)
        _std = self._cfg.args.style_std * torch.ones(
            size=_style_emb.size(), device=_style_emb.device)
        return {'emb': _style_emb, 'params': (_style_emb, _std)}

    evaluator_class = MIEvaluator(_func_encode_class, self._Dec,
                                  device=self._cfg.args.device)
    evaluator_style = MIEvaluator(_func_encode_style, self._Dec,
                                  device=self._cfg.args.device)
    # 1. Evaluating on train & test dataloaders
    ret = {}
    for d_name, dataloader in zip(['train', 'test'],
                                  [train_dataloader, test_dataloader]):
        ret['%s_mi_x_class' % d_name] = \
            evaluator_class.eval_mi_x_z_monte_carlo(dataloader)
        ret['%s_mi_y_class' % d_name] = \
            evaluator_class.eval_mi_y_z_variational_lb(dataloader)
        ret['%s_mi_x_style' % d_name] = \
            evaluator_style.eval_mi_x_z_monte_carlo(dataloader)
    # 2. Logging
    self._logs['log_eval_mi'].info_formatted(
        fet_d(self._meters['i'], 'epoch', 'step', 'iter'), items=ret)
    tfboard_add_multi_scalars(
        self._logs['tfboard'],
        multi_scalars={
            'eval/mi_x_class': {d_name: ret['%s_mi_x_class' % d_name]
                                for d_name in ['train', 'test']},
            'eval/mi_y_class': {d_name: ret['%s_mi_y_class' % d_name]
                                for d_name in ['train', 'test']},
            'eval/mi_x_style': {d_name: ret['%s_mi_x_style' % d_name]
                                for d_name in ['train', 'test']}
        },
        global_step=self._meters['i']['iter'])
Example #7
def _api_eval_attack_robustness(self, train_dataloader, test_dataloader):
    torch.cuda.empty_cache()
    # 1. Evaluating
    evaluator = AdvAttackEvaluator(
        self._Enc_class, self._Dec,
        device=self._cfg.args.device,
        epsilon_list=self._cfg.args.eval_attack_epsilons)
    train_attack_acc = evaluator(train_dataloader)
    test_attack_acc = evaluator(test_dataloader)
    # 2. Logging
    items = {'train_%s' % k: v for k, v in train_attack_acc.items()}
    items.update({'test_%s' % k: v for k, v in test_attack_acc.items()})
    self._logs['log_eval_attack'].info_formatted(
        fet_d(self._meters['i'], 'epoch', 'step', 'iter'), items=items)
    tfboard_add_multi_scalars(
        self._logs['tfboard'],
        multi_scalars={'eval/attack_robustness': items},
        global_step=self._meters['i']['iter'])
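
AdvAttackEvaluator takes a list of perturbation budgets, which suggests it reports accuracy under a gradient-sign attack per epsilon. A minimal FGSM-style sketch of that measurement (hypothetical; the repo's attack may differ, e.g. PGD):

import torch
import torch.nn.functional as F


def fgsm_accuracy(model, dataloader, epsilon, device='cpu'):
    # Top-1 accuracy on FGSM-perturbed inputs at a single epsilon.
    correct, total = 0, 0
    for x, y in dataloader:
        x, y = x.to(device), y.to(device)
        x.requires_grad_(True)
        loss = F.cross_entropy(model(x), y)
        grad = torch.autograd.grad(loss, x)[0]
        x_adv = (x + epsilon * grad.sign()).clamp(0.0, 1.0)  # assumes [0, 1] inputs
        with torch.no_grad():
            pred = model(x_adv).argmax(dim=1)
        correct += (pred == y).sum().item()
        total += y.size(0)
    return correct / total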
Example #8
def _api_eval_accuracy(self, train_dataloader, test_dataloader):
    evaluator = AccuracyEvaluator(self._Enc_class, self._Dec,
                                  device=self._cfg.args.device)
    # 1. Evaluating on train & test dataloaders
    train_acc = evaluator(train_dataloader)
    test_acc = evaluator(test_dataloader)
    # 2. Logging
    self._logs['log_eval_acc'].info_formatted(
        fet_d(self._meters['i'], 'epoch', 'step', 'iter'),
        items={'train_acc': train_acc, 'test_acc': test_acc})
    tfboard_add_multi_scalars(
        self._logs['tfboard'],
        multi_scalars={'eval/acc': {'train': train_acc, 'test': test_acc}},
        global_step=self._meters['i']['iter'])
    # Return
    return test_acc
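
Taken together, the _api_eval_* methods read like evaluation hooks fired on a schedule from the training loop, roughly along these lines (illustrative only; eval_freq, trainer, and the loader names are not from the repo):

if iter_index % eval_freq == 0:
    trainer._api_eval_accuracy(train_loader, test_loader)
    trainer._api_eval_mi(train_loader, test_loader)
    trainer._api_eval_attack_robustness(train_loader, test_loader)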