Example #1
 def _lmd_generate_log():
     # Collect loss & estimate scalars for tensorboard, then flatten the log pack.
     r_tfboard = {
         'train/losses': fet_d(packs['log'], prefix='loss_', remove=('loss_', '_NO_DISPLAY')),
         'train/est': fet_d(packs['log'], prefix='est_')
     }
     packs['log'] = packs['log'].dict
     packs['tfboard'] = r_tfboard
 def _process_log_after_step(self, packs, **kwargs):
     # Get iteration for log & tfboard
     iter_checker = kwargs.get('iter_checker', 'iter')
     # Logging
     if chk_d(self._meters, 'counter_log',
              lambda c: c.check(self._meters['i'][iter_checker])):
         if 'lmd_generate_log' in kwargs:
             kwargs['lmd_generate_log']()
         # (1) Logs
         if ('log_main' in self._logs and 'log' in packs
                 and not chk_d(kwargs, 'disable_log')):
             # Update io & optimize timers
             if 'io' in self._meters['timers']:
                 packs['log']['t_io'] = \
                     self._meters['timers']['io'].get_duration_and_reset()
             if 'opt' in self._meters['timers']:
                 packs['log']['t_opt'] = \
                     self._meters['timers']['opt'].get_duration_and_reset()
             # Show information
             log_kwargs = {'items': packs['log']} if 'lmd_process_log' not in kwargs \
                 else kwargs['lmd_process_log'](packs['log'])
             self._logs['log_main'].info_formatted(
                 fet_d(self._meters['i'], *self._logs['log_main'].formatted_counters),
                 **log_kwargs)
         # (2) Tensorboard
         if ('tfboard' in self._logs and 'tfboard' in packs
                 and not chk_d(kwargs, 'disable_tfboard')):
             tfboard_add_multi_scalars(self._logs['tfboard'], packs['tfboard'],
                                       self._meters['i'][iter_checker])
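The closure at the top of this example is handed to `_process_log_after_step` through the `lmd_generate_log` keyword, which the method invokes right before emitting logs. A minimal sketch of such a call site, assuming a training-step method with a local `packs` dict (the surrounding method name and its internals are hypothetical, not part of the original code):

 def _train_step(self, packs):
     # ... forward/backward pass fills packs['log'] with loss_* entries ...

     def _lmd_generate_log():
         # Build the tensorboard dict first, then flatten the log pack.
         packs['tfboard'] = {'train/losses': fet_d(packs['log'], prefix='loss_')}
         packs['log'] = packs['log'].dict

     # Hand the closure to the shared logging hook via kwargs.
     self._process_log_after_step(packs, lmd_generate_log=_lmd_generate_log)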
Example #3
    def _api_eval_mi(self, train_dataloader, test_dataloader):
        def _func_encode_class(_x):
            # Class embedding plus distribution params (mean, fixed config-given std).
            _class_emb = self._Enc_class(_x)
            _std = self._cfg.args.class_std * torch.ones(
                size=_class_emb.size(), device=_class_emb.device)
            return {'emb': _class_emb, 'params': (_class_emb, _std)}

        def _func_encode_style(_x):
            # Style embedding plus distribution params (mean, fixed config-given std).
            _style_emb = self._Enc_style(_x)
            _std = self._cfg.args.style_std * torch.ones(
                size=_style_emb.size(), device=_style_emb.device)
            return {'emb': _style_emb, 'params': (_style_emb, _std)}

        evaluator_class = MIEvaluator(_func_encode_class,
                                      self._Dec,
                                      device=self._cfg.args.device)
        evaluator_style = MIEvaluator(_func_encode_style,
                                      self._Dec,
                                      device=self._cfg.args.device)
        # 1. Evaluating on train & test dataloader
        ret = {}
        for d_name, dataloader in zip(['train', 'test'],
                                      [train_dataloader, test_dataloader]):
            ret['%s_mi_x_class' % d_name] = evaluator_class.eval_mi_x_z_monte_carlo(dataloader)
            ret['%s_mi_y_class' % d_name] = evaluator_class.eval_mi_y_z_variational_lb(dataloader)
            ret['%s_mi_x_style' % d_name] = evaluator_style.eval_mi_x_z_monte_carlo(dataloader)
        # 2. Logging
        self._logs['log_eval_mi'].info_formatted(
            fet_d(self._meters['i'], 'epoch', 'step', 'iter'), items=ret)
        tfboard_add_multi_scalars(self._logs['tfboard'],
                                  multi_scalars={
                                      'eval/mi_x_class': {
                                          d_name: ret['%s_mi_x_class' % d_name]
                                          for d_name in ['train', 'test']
                                      },
                                      'eval/mi_y_class': {
                                          d_name: ret['%s_mi_y_class' % d_name]
                                          for d_name in ['train', 'test']
                                      },
                                      'eval/mi_x_style': {
                                          d_name: ret['%s_mi_x_style' % d_name]
                                          for d_name in ['train', 'test']
                                      }
                                  },
                                  global_step=self._meters['i']['iter'])
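`tfboard_add_multi_scalars` expects a nested dict mapping a group tag to `{sub_name: scalar}` pairs, plus a global step. A hypothetical re-implementation on top of `torch.utils.tensorboard.SummaryWriter`, shown only to illustrate the expected input shape (the real helper lives in the project's utilities and may differ):

from torch.utils.tensorboard import SummaryWriter

def tfboard_add_multi_scalars(writer: SummaryWriter, multi_scalars: dict, global_step: int):
    # e.g. multi_scalars = {'eval/acc': {'train': 0.98, 'test': 0.91}}
    for main_tag, scalars in multi_scalars.items():
        for sub_tag, value in scalars.items():
            writer.add_scalar('%s/%s' % (main_tag, sub_tag), value, global_step=global_step)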
Example #4
 def _api_eval_attack_robustness(self, train_dataloader, test_dataloader):
     torch.cuda.empty_cache()
     # 1. Evaluating
     evaluator = AdvAttackEvaluator(
         self._Enc_class,
         self._Dec,
         device=self._cfg.args.device,
         epsilon_list=self._cfg.args.eval_attack_epsilons)
     train_attack_acc = evaluator(train_dataloader)
     test_attack_acc = evaluator(test_dataloader)
     # 2. Logging
     items = {'train_%s' % k: v for k, v in train_attack_acc.items()}
     items.update({'test_%s' % k: v for k, v in test_attack_acc.items()})
     self._logs['log_eval_attack'].info_formatted(
         fet_d(self._meters['i'], 'epoch', 'step', 'iter'), items=items)
     tfboard_add_multi_scalars(
         self._logs['tfboard'],
         multi_scalars={'eval/attack_robustness': items},
         global_step=self._meters['i']['iter'])
Example #5
 def _api_eval_out_detection(self, test_dataloader, out_dataloader,
                             out_name):
     torch.cuda.empty_cache()
     # 1. Evaluating
     evaluator = OutDetectionEvaluator(
         self._Enc_class,
         self._Dec,
         device=self._cfg.args.device,
         temper=self._cfg.args.eval_odin_temper,
         noise_magnitude=self._cfg.args.eval_odin_noise_mag,
         num_delta=self._cfg.args.eval_odin_num_delta)
     scores = evaluator(in_dataloader=test_dataloader,
                        out_dataloader=out_dataloader)
     # 2. Logging
     self._logs['log_eval_detection'].info_formatted(
         fet_d(self._meters['i'], 'epoch', 'step', 'iter'),
         items={'%s_%s' % (out_name, k): v
                for k, v in scores.items()})
     tfboard_add_multi_scalars(
         self._logs['tfboard'],
         multi_scalars={'eval/out_detection_%s' % out_name: scores},
         global_step=self._meters['i']['iter'])
Example #6
 def _api_eval_accuracy(self, train_dataloader, test_dataloader):
     evaluator = AccuracyEvaluator(self._Enc_class,
                                   self._Dec,
                                   device=self._cfg.args.device)
     # 1. Evaluating on train & test dataloader
     train_acc = evaluator(train_dataloader)
     test_acc = evaluator(test_dataloader)
     # 2. Logging
     self._logs['log_eval_acc'].info_formatted(
         fet_d(self._meters['i'], 'epoch', 'step', 'iter'),
         items={'train_acc': train_acc, 'test_acc': test_acc})
     tfboard_add_multi_scalars(
         self._logs['tfboard'],
         multi_scalars={'eval/acc': {
             'train': train_acc,
             'test': test_acc
         }},
         global_step=self._meters['i']['iter'])
     # Return
     return test_acc
Example #7
 def _lmd_generate_log():
     # Collect loss scalars for tensorboard, then flatten the log pack.
     r_tfboard = {'train/losses': fet_d(packs['log'], prefix='loss_')}
     packs['log'] = packs['log'].dict
     packs['tfboard'] = r_tfboard