def predict(self, model, data_loader, output_format='prob', logger=None):
    """Run inference over ``data_loader`` and collect per-sample outputs.

    Args:
        model: model exposing ``test_step`` returning ``{'results': Tensor}``.
        data_loader: iterable of batches; ``len`` gives the batch count.
        output_format (str): ``'prob'`` to return softmax probabilities,
            ``'label'`` to return argmax class indices.
        logger: unused; kept for interface compatibility.

    Returns:
        torch.Tensor: concatenated probabilities ``(N, C)`` or labels ``(N,)``.
    """
    assert output_format in ['prob', 'label']
    num_data = len(data_loader)
    p = []
    bar = Bar('Query Mode', max=(num_data))
    with torch.no_grad():
        for idx, data_batch in enumerate(data_loader):
            output = model.test_step(data_batch)
            results = output['results']
            if output_format == 'prob':
                prob = F.softmax(results, dim=1)
                p.append(prob.cpu())
            else:
                pred = results.max(1)[1]
                # BUG FIX: previously appended `prob`, which is never
                # assigned in this branch (NameError on the first
                # 'label'-mode batch) — append the predicted labels.
                p.append(pred.cpu())
            print_str = '[{}/{}]'.format(idx + 1, num_data)
            Bar.suffix = print_str
            bar.next()
    p = torch.cat(p)
    return p
def test(self, data_loader, mode, **kwargs):
    """Run the model over ``data_loader``, collect argmax predictions per
    batch, and hand them to the dataset's ``evaluate`` for scoring."""
    self.model.set_mode(mode)
    total_batches = len(data_loader)
    bar = Bar('Extracting results ', max=(total_batches // self.vis_interval))
    self.meters.before_epoch()
    predictions = []
    for batch_idx, batch in enumerate(data_loader):
        tic = time.time()
        # Inference only — no gradients needed.
        with torch.no_grad():
            step_out = self.model.test_step(batch, **kwargs)
        predictions.append(step_out['results'].max(1)[1].cpu())
        toc = time.time()
        self.meters.update({'batch_time': toc - tic})
        self.meters.average()
        if batch_idx % self.vis_interval == 0:
            parts = ['[{}/{}]'.format((batch_idx // self.vis_interval) + 1,
                                      total_batches // self.vis_interval)]
            for meter_name, meter_val in self.meters.output.items():
                parts.append(' | {} {:.4f}'.format(meter_name, meter_val))
            print_str = ''.join(parts)
            Bar.suffix = print_str
            self.log.debug('Extracting results :' + print_str)
            bar.next()
            self.meters.clear_output()
    bar.finish()
    predictions = np.array(predictions)
    data_loader.dataset.evaluate(predictions, self.log)
def val(self, data_loader, mode, **kwargs):
    """Run one validation epoch without gradients and record the epoch loss.

    Args:
        data_loader: validation loader; ``len`` gives the batch count.
        mode: mode string forwarded to ``self.model.set_mode``.
        **kwargs: forwarded to ``self.model.val_step``.
    """
    self.model.set_mode(mode)
    val_batches = len(data_loader)
    bar = Bar(
        'Val Epoch {}'.format(self.epoch),
        max=(val_batches // self.vis_interval))
    self.meters.before_epoch()
    for i, data_batch in enumerate(data_loader):
        start_time = time.time()
        self._inner_iter = i
        with torch.no_grad():
            outputs = self.model.val_step(data_batch, **kwargs)
        # NOTE(review): `outputs` is rebound to the 'results' sub-dict and
        # then indexed with 'log_vars'/'num_samples'; the sibling train
        # runner reads those keys from the step output directly — confirm
        # that val_step really nests them under 'results'.
        outputs = outputs['results']
        self.meters.update(outputs['log_vars'], outputs['num_samples'])
        end_time = time.time()
        self.meters.update({'batch_time': end_time - start_time})
        self.meters.during_train_iter(self._inner_iter, self.vis_interval)
        # self._iter += 1
        if i % self.vis_interval == 0:
            # One progress tick every `vis_interval` batches.
            print_str = '[{}/{}]'.format(
                (i // self.vis_interval) + 1,
                val_batches // self.vis_interval)
            for name, val in self.meters.output.items():
                print_str += ' | {} {:.4f}'.format(name, val)
            Bar.suffix = print_str
            bar.next()
    bar.finish()
    self.meters.after_val_epoch()
    # TODO: SUMMARY NOT COMPLETE
    # NOTE(review): `.detach().cpu().data` implies the stored loss is a
    # tensor here, unlike the train runner which stores it raw — verify.
    self.summary[f'epoch_{self._epoch}']['val_loss'] = \
        self.meters.output['loss'].detach().cpu().data
def val(self, data_loader, mode, **kwargs):
    """Validate a binary classifier: accumulate loss, then report AUC,
    precision and recall at a fixed 0.5 threshold.

    Args:
        data_loader: validation loader; batches carry a 'label' tensor.
        mode: mode string forwarded to ``self.model.set_mode``.
        **kwargs: forwarded to ``self.model.test_step``.
    """
    self.model.set_mode(mode)
    val_batches = len(data_loader)
    bar = Bar('Val Epoch {}'.format(self.epoch),
              max=(val_batches // self.vis_interval))
    self.meters.before_epoch()
    preds = []
    labels = []
    for i, data_batch in enumerate(data_loader):
        start_time = time.time()
        self._inner_iter = i
        with torch.no_grad():
            # NOTE(review): test_step receives the optimizer positionally
            # here, unlike the other runners — confirm the model API.
            outputs = self.model.test_step(data_batch, self.optimizer,
                                           **kwargs)
        results = outputs['results']
        pred = results['pred']
        preds.append(pred.detach().cpu().squeeze().numpy())
        labels.append(
            data_batch['label'].detach().cpu().squeeze().numpy())
        end_time = time.time()
        self.meters.update({'loss': results['val_loss'].item()})
        self.meters.update({'batch_time': end_time - start_time})
        self.meters.during_train_iter(self._inner_iter, self.vis_interval)
        if i % self.vis_interval == 0:
            print_str = '[{}/{}]'.format((i // self.vis_interval) + 1,
                                         val_batches // self.vis_interval)
            for name, val in self.meters.output.items():
                print_str += ' | {} {:.6f}'.format(name, val)
            Bar.suffix = print_str
            bar.next()
    bar.finish()
    self.summary[f'epoch_{self._epoch}']['val_loss'] = \
        self.meters.output['loss']
    self.meters.after_val_epoch()
    preds = np.array(preds)
    labels = np.array(labels)
    fpr, tpr, _ = roc_curve(labels, preds, pos_label=1)
    auc_val = auc(fpr, tpr)
    thres = 0.5
    # BUG FIX: `np.int` was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin `int` is the documented replacement (it is exactly what
    # the alias always meant).
    neg_labels = (labels == 0).astype(int)
    pred_labels = (preds >= thres).astype(int)
    tp = sum(labels * (pred_labels == 1))
    fp = sum(neg_labels * (pred_labels == 1))
    fn = sum(labels * (pred_labels == 0))
    # Epsilon guards against division by zero when there are no positive
    # predictions / positives.
    precision = tp / (tp + fp + 1e-6)
    recall = tp / (tp + fn + 1e-6)
    self.log.info(
        f'AUC Value: {auc_val} | Precision: {precision} | Recall: {recall}'
    )
    self.summary[f'epoch_{self._epoch}']['auc'] = auc_val
    self.summary[f'epoch_{self._epoch}']['precision'] = precision
    self.summary[f'epoch_{self._epoch}']['recall'] = recall
def train(self, data_loader, mode, **kwargs):
    """Run one training epoch: per-epoch and per-iter LR scheduling,
    optimizer steps via the model's ``train_step``, progress logging,
    then end-of-epoch bookkeeping and checkpointing."""
    self.model.set_mode(mode)
    self._epoch += 1
    total_batches = len(data_loader)
    # Epoch-level LR adjustment before any batch is consumed.
    self.adjust_lr_by(total_batches, self.epoch, self.cur_iter,
                      phase='epoch')
    bar = Bar('Train Epoch {}'.format(self.epoch),
              max=(total_batches // self.vis_interval))
    self.meters.before_epoch()
    for batch_idx, batch in enumerate(data_loader):
        tic = time.time()
        self._inner_iter = batch_idx
        # Iteration-level LR adjustment (warmup / step schedules).
        self.adjust_lr_by(total_batches, self.epoch, self.cur_iter,
                          phase='iter')
        step_out = self.model.train_step(batch,
                                         optimizer=self.optimizer,
                                         **kwargs)
        self.meters.update(step_out['log_vars'], step_out['num_samples'])
        toc = time.time()
        self.meters.update({'batch_time': toc - tic})
        self.meters.during_train_iter(self._inner_iter, self.vis_interval)
        self._iter += 1
        if batch_idx % self.vis_interval == 0:
            print_str = '[{}/{}]'.format(
                (batch_idx // self.vis_interval) + 1,
                total_batches // self.vis_interval)
            for meter_name, meter_val in self.meters.output.items():
                print_str += ' | {} {:.6f}'.format(meter_name, meter_val)
            Bar.suffix = print_str
            self.log.debug(f'Epoch: {self.epoch}' + print_str)
            bar.next()
    bar.finish()
    # TODO: SUMMARY NOT COMPLETE
    self.summary[f'epoch_{self._epoch}']['train_loss'] = \
        self.meters.output['loss']
    self.meters.after_train_epoch()
    self.checkpointer.after_train_epoch(self)
def test_det(self, data_loader, mode, **kwargs):
    """Collect detection head outputs (heatmap / size / offset) for every
    batch and forward them to the dataset's ``evaluate`` in 'test_det'
    mode."""
    self.model.set_mode(mode=mode)
    total_batches = len(data_loader)
    bar = Bar('Testing Det: ', max=(total_batches // self.vis_interval))
    self.meters.before_epoch()
    all_hm = []
    all_wh = []
    all_reg = []
    all_label = []
    metas = []
    for batch_idx, batch in enumerate(data_loader):
        tic = time.time()
        with torch.no_grad():
            step_out = self.model.test_step(batch, **kwargs)
        det = step_out['results']
        all_hm.append(det['hm'])
        all_wh.append(det['wh'])
        # 'reg' is optional in the head outputs; keep a None placeholder.
        all_reg.append(det.get('reg', None))
        all_label.append(batch['label'])
        metas.append(batch['meta'])
        toc = time.time()
        self.meters.update({'batch_time': toc - tic})
        self.meters.average()
        if batch_idx % self.vis_interval == 0:
            print_str = '[{}/{}]'.format(
                (batch_idx // self.vis_interval) + 1,
                total_batches // self.vis_interval)
            for meter_name, meter_val in self.meters.output.items():
                print_str += ' | {} {:.4f}'.format(meter_name, meter_val)
            Bar.suffix = print_str
            self.log.debug(f'Testing Det: {self.epoch}' + print_str)
            bar.next()
            self.meters.clear_output()
    bar.finish()
    self.log.info('Calculating Evaluation Results ...')
    # Guard clause instead of if/else: bail out when the dataset does not
    # support this mode.
    if not hasattr(data_loader.dataset, mode):
        raise ValueError(f'No {mode} method in dataset class found.')
    data_loader.dataset.evaluate(mode='test_det',
                                 all_hm=all_hm,
                                 all_wh=all_wh,
                                 all_reg=all_reg,
                                 all_label=all_label,
                                 metas=metas,
                                 logger=self.log)
def test_emb(self, data_loader, mode, **kwargs):
    """Extract identity-embedding features and their labels over the whole
    loader, then run the dataset's 'test_emb' evaluation.

    Args:
        data_loader: loader whose batches carry 'ids' and 'reg_mask'.
        mode: mode string; also the dataset attribute checked before
            evaluation.
        **kwargs: forwarded to ``self.model.test_step``.

    Raises:
        ValueError: if the dataset has no ``mode`` method.
    """
    self.model.set_mode(mode=mode)
    embedding, id_labels = [], []
    num_batch = len(data_loader)
    bar = Bar('Extracting Feature: ', max=(num_batch // self.vis_interval))
    self.meters.before_epoch()
    for i, data_batch in enumerate(data_loader):
        start_time = time.time()
        with torch.no_grad():
            outputs = self.model.test_step(data_batch, **kwargs)
        outputs = outputs['results']
        id_head = outputs['id']
        # Keep only targets at positive regression-mask positions.
        id_target = data_batch['ids'][data_batch['reg_mask'] > 0]
        # BUG FIX: the inner loop previously reused `i`, clobbering the
        # outer batch index and corrupting the `i % vis_interval` progress
        # bookkeeping below — use a separate index.
        for j in range(id_head.shape[0]):
            feat, label = id_head[j], id_target[j].long()
            # NOTE(review): filtering on `label != 1` looks unusual —
            # ignore labels are typically -1/0; confirm against the dataset.
            if label != 1:
                embedding.append(feat)
                id_labels.append(label)
        end_time = time.time()
        self.meters.update({'batch_time': end_time - start_time})
        self.meters.average()
        if i % self.vis_interval == 0:
            print_str = '[{}/{}]'.format((i // self.vis_interval) + 1,
                                         num_batch // self.vis_interval)
            for name, val in self.meters.output.items():
                print_str += ' | {} {:.4f}'.format(name, val)
            Bar.suffix = print_str
            self.log.debug(f'Extracting Feature: {self.epoch}' + print_str)
            bar.next()
            self.meters.clear_output()
    bar.finish()
    self.log.info(f'Calculating Evaluation Results ...')
    if hasattr(data_loader.dataset, mode):
        data_loader.dataset.evaluate(mode='test_emb',
                                     embedding=embedding,
                                     id_labels=id_labels,
                                     logger=self.log)
    else:
        raise ValueError(f'No {mode} method in dataset class found.')
def test(self, data_loader, mode, **kwargs):
    """Run image-translation inference and save every output tensor under
    ``<work_dir>/results`` as ``<input-name>_<label>.png``."""
    self.model.set_mode(mode)
    total_batches = len(data_loader)
    bar = Bar('Extracting results ', max=(total_batches // self.vis_interval))
    self.meters.before_epoch()
    for batch_idx, batch in enumerate(data_loader):
        tic = time.time()
        with torch.no_grad():
            step_out = self.model.test_step(batch, **kwargs)
        result_dict = step_out['results']
        filename = batch['filename']
        direction = self.algorithm['direction']
        # The "input" image name depends on the translation direction.
        img_path = [
            filename['A'] if direction == 'AtoB' else filename['B']
        ]
        toc = time.time()
        self.meters.update({'batch_time': toc - tic})
        self.meters.average()
        save_dir = osp.join(self.work_dir, 'results')
        utils.mkdir_or_exist(save_dir)
        short_path = osp.basename(img_path[0])
        name = osp.splitext(short_path)[0]
        for label, output in result_dict.items():
            image = utils.tensor2img(output)
            image_name = '%s_%s.png' % (name, label)
            utils.imwrite(image, osp.join(save_dir, image_name))
        if batch_idx % self.vis_interval == 0:
            print_str = '[{}/{}]'.format(
                (batch_idx // self.vis_interval) + 1,
                total_batches // self.vis_interval)
            for name, val in self.meters.output.items():
                print_str += ' | {} {:.4f}'.format(name, val)
            Bar.suffix = print_str
            self.log.debug('Extracting results :' + print_str)
            bar.next()
            self.meters.clear_output()
    bar.finish()
def test(self, data_loader, mode, **kwargs):
    """Collect per-sample results, optionally dump them to the configured
    output file, then run the dataset's evaluation and log each metric.

    Args:
        data_loader: test loader; its dataset provides ``dump_results``
            and ``evaluate``.
        mode: mode string forwarded to ``self.model.set_mode``.
        **kwargs: forwarded to ``self.model.test_step``.
    """
    self.model.set_mode(mode)
    num_batch = len(data_loader)
    bar = Bar('Extracting results ', max=(num_batch // self.vis_interval))
    self.meters.before_epoch()
    results = []
    for i, data_batch in enumerate(data_loader):
        start_time = time.time()
        with torch.no_grad():
            outputs = self.model.test_step(data_batch, **kwargs)
        results.extend(outputs['results'])
        end_time = time.time()
        self.meters.update({'batch_time': end_time - start_time})
        self.meters.average()
        if i % self.vis_interval == 0:
            print_str = '[{}/{}]'.format((i // self.vis_interval) + 1,
                                         num_batch // self.vis_interval)
            for name, val in self.meters.output.items():
                print_str += ' | {} {:.4f}'.format(name, val)
            Bar.suffix = print_str
            self.log.debug('Extracting results :' + print_str)
            bar.next()
            self.meters.clear_output()
    bar.finish()
    if self.test_cfg.get('output') is not None:
        output_path = self.test_cfg['output']
        # File suffix picks the dump format (e.g. 'json', 'pkl').
        suffix = output_path.split('.')[-1]
        data_loader.dataset.dump_results(results, output_path, suffix)
    eval_config = self.test_cfg.get('eval_config')
    if eval_config is None:
        eval_config = {}
    eval_results = data_loader.dataset.evaluate(results, **eval_config)
    for name, value in eval_results.items():
        # BUG FIX: `self.log` is a logger object, not a callable — calling
        # it raised TypeError; use `.info(...)` as elsewhere in this file.
        self.log.info(f'{name}: {value:.04f}')
def test(self, data_loader, mode, **kwargs):
    """Dump per-batch results through the dataset and run its metric-based
    evaluation once the whole loader has been consumed."""
    self.model.set_mode(mode)
    total_batches = len(data_loader)
    bar = Bar('Extracting results ', max=(total_batches // self.vis_interval))
    self.meters.before_epoch()
    for batch_idx, batch in enumerate(data_loader):
        tic = time.time()
        self._inner_iter = batch_idx
        with torch.no_grad():
            step_out = self.model.test_step(batch, **kwargs)
        results = step_out['results']
        num_samples = step_out['num_samples']
        toc = time.time()
        # Guard clause: dumping requires a configured inference_dir.
        if self.test_cfg['inference_dir'] is None:
            raise ValueError(
                'inference_dir should not be None when using dump_results.'
            )
        data_loader.dataset.dump_results(results, batch, num_samples)
        self.meters.update({'batch_time': toc - tic})
        self.meters.after_val_iter(self._inner_iter, self.vis_interval)
        if batch_idx % self.vis_interval == 0:
            print_str = '[{}/{}]'.format(
                (batch_idx // self.vis_interval) + 1,
                total_batches // self.vis_interval)
            for meter_name, meter_val in self.meters.output.items():
                print_str += ' | {} {:.4f}'.format(meter_name, meter_val)
            Bar.suffix = print_str
            bar.next()
    bar.finish()
    self.meters.after_val_epoch()
    data_loader.dataset.evaluate(metric=self.test_cfg['metric'],
                                 logger=self.log)
    self._epoch += 1
def test(self, data_loader, mode, **kwargs):
    """Extract per-batch results across the loader and run the dataset's
    metric evaluation.

    Args:
        data_loader: test loader; its dataset provides ``evaluate``.
        mode: mode string forwarded to ``self.model.set_mode``.
        **kwargs: forwarded to ``self.model.test_step``.
    """
    self.model.set_mode(mode)
    assert 'test' in self.tasks
    num_batch = len(data_loader)
    bar = Bar('Extracting Feature: ', max=(num_batch // self.vis_interval))
    self.meters.before_epoch()
    results = []
    for i, data_batch in enumerate(data_loader):
        start_time = time.time()
        with torch.no_grad():
            outputs = self.model.test_step(data_batch, **kwargs)
        result = outputs['results']
        # BUG FIX: the original tested `isinstance(results, list)` — the
        # accumulator, which is always a list — so non-list results were
        # always fed to `extend`. Test the per-batch `result` instead.
        if isinstance(result, list):
            results.extend(result)
        else:
            results.append(result)
        end_time = time.time()
        self.meters.update({'batch_time': end_time - start_time})
        self.meters.average()
        if i % self.vis_interval == 0:
            print_str = '[{}/{}]'.format((i // self.vis_interval) + 1,
                                         num_batch // self.vis_interval)
            for name, val in self.meters.output.items():
                print_str += ' | {} {:.4f}'.format(name, val)
            Bar.suffix = print_str
            self.log.debug(f'Extracting Feature: {self.epoch}' + print_str)
            bar.next()
            self.meters.clear_output()
    bar.finish()
    self.log.info(f'Calculating Evaluation Results ...')
    # BUG FIX: a positional `self.log` followed a keyword argument, which
    # is a SyntaxError — pass it as `logger=` like the sibling runners do.
    data_loader.dataset.evaluate(results,
                                 metric=self.test_cfg['evaluate_metric'],
                                 logger=self.log)
def test_track(self, data_loader, mode, **kwargs):
    """Run multi-object tracking over each configured sequence, write
    per-sequence results (and optional images/videos), then render a
    MOT-challenge metric summary across all sequences.

    Args:
        data_loader: loader whose dataset is switched per-sequence via
            ``dataset.seq`` and provides ``evaluate`` /
            ``get_eval_summary`` / ``save_eval_summary``.
        mode: mode string forwarded to ``self.model.set_mode``.
        **kwargs: forwarded to ``self.model.test_track``.
    """
    assert 'test_track' in self.workflow
    self.model.set_mode(mode)
    save_images = self.test_cfg['save_images']
    save_videos = self.test_cfg['save_videos']
    data_prefix = self.data_cfg['data_prefix']
    output_dir = osp.join(self.work_dir, 'outputs')
    utils.mkdir_or_exist(output_dir)
    seqs = self.test_cfg['sequences']
    accs = []
    for seq in seqs:
        frame_id = 0
        save_dir = osp.join(output_dir, seq)
        utils.mkdir_or_exist(save_dir)
        results = []
        self.meters.before_epoch()
        # Point the dataset at this sequence; batch count follows it.
        # (Removed a dead `num_batch = len(data_loader)` that was
        # immediately overwritten by the line below.)
        data_loader.dataset.seq = seq
        num_batch = len(data_loader.dataset)
        bar = Bar(f'Sequence: {seq}',
                  max=(num_batch // self.vis_interval))
        for i, data_batch in enumerate(data_loader):
            start_time = time.time()
            outputs = self.model.test_track(data_batch, **kwargs)
            online_tlwhs = outputs['online_tlwhs']
            online_ids = outputs['online_ids']
            end_time = time.time()
            self.meters.update({'batch_time': end_time - start_time})
            self.meters.average()
            # MOT result rows are 1-indexed by frame.
            results.append((frame_id + 1, online_tlwhs, online_ids))
            if save_images:
                online_im = utils.plot_tracking(
                    data_batch['original_imgs'],
                    online_tlwhs,
                    online_ids,
                    frame_id=frame_id)
                utils.imwrite(
                    online_im,
                    osp.join(save_dir, '{:05d}.jpg'.format(frame_id)))
            if i % self.vis_interval == 0:
                print_str = '[{}/{}]'.format(
                    (i // self.vis_interval) + 1,
                    num_batch // self.vis_interval)
                for name, val in self.meters.output.items():
                    print_str += ' | {} {:.4f}'.format(name, val)
                Bar.suffix = print_str
                # BUG FIX: typo 'Seqence' -> 'Sequence' in the debug tag.
                self.log.debug(f'Sequence: {seq}' + print_str)
                bar.next()
                self.meters.clear_output()
            frame_id += 1
        bar.finish()
        results_path = osp.join(save_dir, 'results.txt')
        self.write_results(results_path, results)
        if save_videos:
            video_path = osp.join(save_dir, '{}.mp4'.format(seq))
            # NOTE(review): shell-string os.system; paths come from config,
            # but subprocess.run([...]) would be safer against spaces and
            # shell metacharacters.
            cmd = 'ffmpeg -f image2 -i {}/%05d.jpg -c:v copy {}'.format(
                save_dir, video_path)
            os.system(cmd)
        gt_path = osp.join(data_prefix, seq, 'gt', 'gt.txt')
        if osp.isfile(gt_path):
            accs.append(data_loader.dataset.evaluate(
                gt_path, results_path))
    metrics = mm.metrics.motchallenge_metrics
    mh = mm.metrics.create()
    summary = data_loader.dataset.get_eval_summary(accs, seqs, metrics)
    strsummary = mm.io.render_summary(
        summary,
        formatters=mh.formatters,
        namemap=mm.io.motchallenge_metric_names)
    self.log.info(strsummary)
    eval_summary_path = osp.join(self.work_dir, 'eval_summary_mot.xlsx')
    data_loader.dataset.save_eval_summary(summary, eval_summary_path)
def test(self, data_loader, mode, **kwargs):
    """Evaluate a depth-estimation model, accumulating the standard depth
    metrics (delta thresholds, RMSE variants, ARD, SRD) over all batches
    and logging the totals at the end.

    Args:
        data_loader: test loader; its dataset's ``evaluate`` returns an
            8-tuple of per-batch metric contributions.
        mode: mode string forwarded to ``self.model.set_mode``.
        **kwargs: forwarded to ``self.model.test_step``.
    """
    self.model.set_mode(mode)
    test_batches = len(data_loader)
    # Running totals for the depth metrics, accumulated batch by batch.
    thresh_1_25 = 0
    thresh_1_25_2 = 0
    thresh_1_25_3 = 0
    rmse_linear = 0.0
    rmse_log = 0.0
    rmse_log_scale_invariant = 0.0
    ard = 0.0
    srd = 0.0
    bar = Bar('Testing ', max=(test_batches // self.vis_interval))
    self.meters.before_epoch()
    self.model.eval()
    for i, data_batch in enumerate(data_loader):
        self._inner_iter = i
        start_time = time.time()
        with torch.no_grad():
            outputs = self.model.test_step(data_batch, **kwargs)
        results = outputs['results']
        # num_samples = outputs['num_samples']
        end_time = time.time()
        self.meters.update({'batch_time': end_time - start_time})
        self.meters.after_val_iter(self._inner_iter, self.vis_interval)
        # NOTE(review): evaluate receives the current totals positionally
        # and returns per-batch contributions that are added below — the
        # argument order is load-bearing; confirm against the dataset's
        # evaluate signature before reordering anything here.
        out = data_loader.dataset.evaluate(
            self._inner_iter + 1, results, data_batch, test_batches,
            thresh_1_25, thresh_1_25_2, thresh_1_25_3, rmse_linear,
            rmse_log, rmse_log_scale_invariant, ard, srd)
        thresh_1_25 += out[0]
        thresh_1_25_2 += out[1]
        thresh_1_25_3 += out[2]
        rmse_linear += out[3]
        rmse_log += out[4]
        rmse_log_scale_invariant += out[5]
        ard += out[6]
        srd += out[7]
        if i % self.vis_interval == 0:
            print_str = '[{}/{}]'.format((i // self.vis_interval) + 1,
                                         test_batches // self.vis_interval)
            for name, val in self.meters.output.items():
                print_str += ' | {} {:.4f}'.format(name, val)
            Bar.suffix = print_str
            bar.next()
    bar.finish()
    self.meters.after_val_epoch()
    # Log the accumulated metrics once the whole loader is consumed.
    self.log.info('\nThreshold_1.25: {}'.format(thresh_1_25))
    self.log.info('\nThreshold_1.25^2: {}'.format(thresh_1_25_2))
    self.log.info('\nThreshold_1.25^3: {}'.format(thresh_1_25_3))
    self.log.info('\nRMSE_linear: {}'.format(rmse_linear))
    self.log.info('\nRMSE_log: {}'.format(rmse_log))
    self.log.info(
        '\nRMSE_log_scale_invariant: {}'.format(rmse_log_scale_invariant))
    self.log.info('\nARD: {}'.format(ard))
    self.log.info('\nSRD: {}'.format(srd))
    self._epoch += 1