def test(self):
    """Evaluate the current model on every registered test dataset.

    Temporarily flips ``cfg.model_flow`` to ``'test'``, runs
    ``evaluation_creation`` once per entry of ``self.test_loader``, appends a
    one-line epoch summary to the score file, then restores
    ``cfg.model_flow`` to ``'train'``.

    Returns:
        str: per-dataset score strings joined by newlines.
    """
    self.cfg.model_flow = 'test'
    print('======test======')
    # Source-domain name is reused for every target dataset; hoist it once.
    source_name = self.cfg.dataset.train.source.name
    per_dataset_scores = []
    summary_parts = []
    for dataset_name, loaders in self.test_loader.items():
        # Feature cache is keyed on the (source, target) dataset pair.
        self.cfg.eval.test_feat_cache_file = osp.join(
            self.cfg.log.exp_dir,
            '{}_to_{}_feat_cache.pkl'.format(source_name, dataset_name))
        self.cfg.eval.score_prefix = '{} -> {}'.format(
            source_name, dataset_name).ljust(30)
        # deepcopy so evaluation cannot mutate the live training config.
        result = evaluation_creation(
            self.model_for_eval, loaders['query'], loaders['gallery'],
            deepcopy(self.cfg))
        per_dataset_scores.append(result['scores_str'])
        rank1 = score_str(result['cmc_scores'][0]).replace('%', '')
        map_score = score_str(result['mAP']).replace('%', '')
        summary_parts.append('{}->{}: {} ({})'.format(
            source_name, dataset_name, rank1, map_score))
    joined_scores = join_str(per_dataset_scores, '\n')
    summary_line = ('Epoch {}'.format(self.current_ep)).ljust(12) \
        + ', '.join(summary_parts) + '\n'
    write_to_file(self.cfg.log.score_file, summary_line, append=True)
    self.cfg.model_flow = 'train'
    return joined_scores
def print_log(cfg, current_ep, current_step, optimizer, loss_functions,
              analyze_functions, epoch_start_time):
    """Print a one-line progress log for the current training step.

    The line contains: elapsed epoch time, learning rate(s), averaged loss
    meters, and (if ``analyze_functions`` is provided) averaged analysis
    meters, all comma-separated.

    Args:
        cfg: global config object (forwarded to ``get_optimizer_lr_str``).
        current_ep (int): zero-based epoch index (printed one-based).
        current_step (int): zero-based step index (printed one-based).
        optimizer: optimizer whose learning rate(s) are reported.
        loss_functions (dict): name -> loss object exposing ``meter_dict``
            of meters with an ``avg_str`` attribute.
        analyze_functions (dict or None): same shape as ``loss_functions``;
            skipped entirely when ``None``.
        epoch_start_time (float): ``time.time()`` taken at epoch start.
    """
    time_log = 'Ep {}, Step {}, {:.2f}s'.format(
        current_ep + 1, current_step + 1, time.time() - epoch_start_time)
    lr_log = 'lr {}'.format(get_optimizer_lr_str(cfg, optimizer))
    loss_meter_log = join_str(
        [m.avg_str for lf in loss_functions.values()
         for m in lf.meter_dict.values()], ', ')
    if analyze_functions is not None:
        analyze_meter_log = join_str(
            [m.avg_str for lf in analyze_functions.values()
             for m in lf.meter_dict.values()], ', ')
    else:
        analyze_meter_log = None
    # Assemble only the non-None segments, preserving the original order.
    parts = [time_log, lr_log, loss_meter_log]
    if analyze_meter_log is not None:
        parts.append(analyze_meter_log)
    log = join_str(parts, ', ')
    # Fix: was `return print(log)` — print() returns None, so the explicit
    # return only obscured intent; the function logs and returns nothing.
    print(log)
def test(self):
    """Evaluate the model on each test dataset across GCN/GM configurations.

    For every entry of ``self.test_loader`` this runs ``self._evaluation``
    with GCN enabled (with and without GM); the plain baseline
    (``use_gcn=False, use_gm=False``) is additionally run only when
    ``cfg.only_test`` is ``True``. A one-line epoch summary is appended to
    the score file and ``cfg.model_flow`` is restored to ``'train'``.

    Returns:
        str: per-configuration score strings joined by newlines.
    """
    self.cfg.model_flow = 'test'
    print('======test======')
    # Source-domain name is reused for every target dataset; hoist it once.
    source_name = self.cfg.dataset.train.source.name
    score_strs = []
    summary_parts = []
    for dataset_name, loaders in self.test_loader.items():
        self.cfg.eval.test_feat_cache_file = osp.join(
            self.cfg.log.exp_dir,
            '{}_to_{}_feat_cache.pkl'.format(source_name, dataset_name))
        self.cfg.eval.score_prefix = '{} -> {}'.format(
            source_name, dataset_name).ljust(30)
        # NOTE(review): in the source only the first _evaluation call sits
        # under this `if`; the two GCN runs below are unconditional.
        if self.cfg.only_test is True:
            self._evaluation(score_strs, summary_parts, dataset_name,
                             loaders, use_gcn=False, use_gm=False)
        self._evaluation(score_strs, summary_parts, dataset_name,
                         loaders, use_gcn=True, use_gm=False)
        self._evaluation(score_strs, summary_parts, dataset_name,
                         loaders, use_gcn=True, use_gm=True)
    all_scores = join_str(score_strs, '\n')
    summary_line = ('Epoch {}'.format(self.current_ep)).ljust(12) \
        + ', '.join(summary_parts) + '\n'
    write_to_file(self.cfg.log.score_file, summary_line, append=True)
    self.cfg.model_flow = 'train'
    return all_scores