def val(self): """ Validation function during the train phase. """ self.pose_net.eval() start_time = time.time() with torch.no_grad(): for i, data_dict in enumerate(self.val_loader): # Forward pass. out = self.pose_net(data_dict) # Compute the loss of the val batch. loss_dict = self.pose_loss(out) self.val_losses.update({key: loss.item() for key, loss in loss_dict.items()}, data_dict['img'].size(0)) # Update the vars of the val phase. self.batch_time.update(time.time() - start_time) start_time = time.time() self.runner_state['val_loss'] = self.val_losses.avg['loss'] RunnerHelper.save_net(self, self.pose_net, val_loss=self.val_losses.avg['loss']) # Print the log info & reset the states. Log.info( 'Test Time {batch_time.sum:.3f}s, ({batch_time.avg:.3f})\t' 'Loss {0}\n'.format(self.val_losses.info(), batch_time=self.batch_time)) self.batch_time.reset() self.val_losses.reset() self.pose_net.train()
def val(self): """ Validation function during the train phase. """ self.gan_net.eval() start_time = time.time() for j, data_dict in enumerate(self.val_loader): with torch.no_grad(): # Forward pass. out_dict = self.gan_net(data_dict) # Compute the loss of the val batch. self.val_losses.update( out_dict['loss_G'].mean().item() + out_dict['loss_D'].mean().item(), len(DCHelper.tolist(data_dict['meta']))) # Update the vars of the val phase. self.batch_time.update(time.time() - start_time) start_time = time.time() RunnerHelper.save_net(self, self.gan_net, val_loss=self.val_losses.avg) # Print the log info & reset the states. Log.info('Test Time {batch_time.sum:.3f}s, ({batch_time.avg:.3f})\t' 'Loss {loss.avg:.8f}\n'.format(batch_time=self.batch_time, loss=self.val_losses)) self.batch_time.reset() self.val_losses.reset() self.gan_net.train()
def train(self): """ Train function of every epoch during train phase. """ self.det_net.train() start_time = time.time() # Adjust the learning rate after every epoch. self.runner_state['epoch'] += 1 for i, data_dict in enumerate(self.train_loader): Trainer.update(self, solver_dict=self.configer.get('solver')) self.data_time.update(time.time() - start_time) # Forward pass. data_dict = RunnerHelper.to_device(self, data_dict) out = self.det_net(data_dict) loss_dict = self.det_loss(out) loss = loss_dict['loss'].mean() self.train_losses.update(loss.item(), len(DCHelper.tolist(data_dict['meta']))) self.optimizer.zero_grad() loss.backward() RunnerHelper.clip_grad(self.det_net, 10.) self.optimizer.step() # Update the vars of the train phase. self.batch_time.update(time.time() - start_time) start_time = time.time() self.runner_state['iters'] += 1 # Print the log info & reset the states. if self.runner_state['iters'] % self.configer.get( 'solver', 'display_iter') == 0: Log.info( 'Train Epoch: {0}\tTrain Iteration: {1}\t' 'Time {batch_time.sum:.3f}s / {2}iters, ({batch_time.avg:.3f})\t' 'Data load {data_time.sum:.3f}s / {2}iters, ({data_time.avg:3f})\n' 'Learning rate = {3}\tLoss = {loss.val:.8f} (ave = {loss.avg:.8f})\n' .format(self.runner_state['epoch'], self.runner_state['iters'], self.configer.get('solver', 'display_iter'), RunnerHelper.get_lr(self.optimizer), batch_time=self.batch_time, data_time=self.data_time, loss=self.train_losses)) self.batch_time.reset() self.data_time.reset() self.train_losses.reset() if self.configer.get('solver', 'lr')['metric'] == 'iters' \ and self.runner_state['iters'] == self.configer.get('solver', 'max_iters'): break # Check to val the current model. if self.runner_state['iters'] % self.configer.get( 'solver', 'test_interval') == 0: self.val()
def train(self): """ Train function of every epoch during train phase. """ self.seg_net.train() start_time = time.time() # Adjust the learning rate after every epoch. for i, data_dict in enumerate(self.train_loader): Trainer.update(self, warm_list=(0,), solver_dict=self.configer.get('solver')) self.data_time.update(time.time() - start_time) # Forward pass. data_dict = RunnerHelper.to_device(self, data_dict) out = self.seg_net(data_dict) # Compute the loss of the train batch & backward. loss_dict = self.loss(out) loss = loss_dict['loss'] self.train_losses.update({key: loss.item() for key, loss in loss_dict.items()}, data_dict['img'].size(0)) self.optimizer.zero_grad() loss.backward() self.optimizer.step() # Update the vars of the train phase. self.batch_time.update(time.time() - start_time) start_time = time.time() self.runner_state['iters'] += 1 # Print the log info & reset the states. if self.runner_state['iters'] % self.configer.get('solver', 'display_iter') == 0: Log.info('Train Epoch: {0}\tTrain Iteration: {1}\t' 'Time {batch_time.sum:.3f}s / {2}iters, ({batch_time.avg:.3f})\t' 'Data load {data_time.sum:.3f}s / {2}iters, ({data_time.avg:3f})\n' 'Learning rate = {4}\tLoss = {3}\n'.format( self.runner_state['epoch'], self.runner_state['iters'], self.configer.get('solver', 'display_iter'), self.train_losses.info(), RunnerHelper.get_lr(self.optimizer), batch_time=self.batch_time, data_time=self.data_time)) self.batch_time.reset() self.data_time.reset() self.train_losses.reset() if self.runner_state['iters'] % self.configer.get('solver.save_iters') == 0 \ and self.configer.get('local_rank') == 0: RunnerHelper.save_net(self, self.seg_net) if self.configer.get('solver', 'lr')['metric'] == 'iters' \ and self.runner_state['iters'] == self.configer.get('solver', 'max_iters'): break # Check to val the current model. if self.runner_state['iters'] % self.configer.get('solver', 'test_interval') == 0 \ and not self.configer.get('network.distributed'): self.val() self.runner_state['epoch'] += 1
def val(self): """ Validation function during the train phase. """ self.det_net.eval() start_time = time.time() with torch.no_grad(): for j, data_dict in enumerate(self.val_loader): # Forward pass. data_dict = RunnerHelper.to_device(self, data_dict) out = self.det_net(data_dict) loss_dict = self.det_loss(out) # Compute the loss of the train batch & backward. loss = loss_dict['loss'].mean() out_dict, _ = RunnerHelper.gather(self, out) self.val_losses.update(loss.item(), len(DCHelper.tolist(data_dict['meta']))) test_indices_and_rois, test_roi_locs, test_roi_scores, test_rois_num = out_dict[ 'test_group'] batch_detections = FastRCNNTest.decode( test_roi_locs, test_roi_scores, test_indices_and_rois, test_rois_num, self.configer, DCHelper.tolist(data_dict['meta'])) batch_pred_bboxes = self.__get_object_list(batch_detections) self.det_running_score.update(batch_pred_bboxes, [ item['ori_bboxes'] for item in DCHelper.tolist(data_dict['meta']) ], [ item['ori_labels'] for item in DCHelper.tolist(data_dict['meta']) ]) # Update the vars of the val phase. self.batch_time.update(time.time() - start_time) start_time = time.time() RunnerHelper.save_net(self, self.det_net, iters=self.runner_state['iters']) # Print the log info & reset the states. Log.info( 'Test Time {batch_time.sum:.3f}s, ({batch_time.avg:.3f})\t' 'Loss {loss.avg:.8f}\n'.format(batch_time=self.batch_time, loss=self.val_losses)) Log.info('Val mAP: {}\n'.format(self.det_running_score.get_mAP())) self.det_running_score.reset() self.batch_time.reset() self.val_losses.reset() self.det_net.train()
def val(self, data_loader=None):
    """ Validation function during the train phase. """
    self.seg_net.eval()
    start_time = time.time()

    data_loader = self.val_loader if data_loader is None else data_loader
    for j, data_dict in enumerate(data_loader):
        data_dict = RunnerHelper.to_device(self, data_dict)
        with torch.no_grad():
            # Forward pass.
            out = self.seg_net(data_dict)
            loss_dict = self.loss(out)
            # Compute the loss of the val batch.
            out_dict, _ = RunnerHelper.gather(self, out)
            self.val_losses.update({key: loss.item() for key, loss in loss_dict.items()},
                                   data_dict['img'].size(0))
            self._update_running_score(out_dict['out'], DCHelper.tolist(data_dict['meta']))

        # Update the vars of the val phase.
        self.batch_time.update(time.time() - start_time)
        start_time = time.time()

    self.runner_state['performance'] = self.seg_running_score.get_mean_iou()
    self.runner_state['val_loss'] = self.val_losses.avg['loss']
    RunnerHelper.save_net(self, self.seg_net,
                          performance=self.seg_running_score.get_mean_iou(),
                          val_loss=self.val_losses.avg['loss'])

    # Print the log info & reset the states.
    Log.info('Test Time {batch_time.sum:.3f}s, ({batch_time.avg:.3f})\t'
             'Loss = {0}\n'.format(self.val_losses.info(), batch_time=self.batch_time))
    Log.info('Mean IOU: {}\n'.format(self.seg_running_score.get_mean_iou()))
    Log.info('Pixel ACC: {}\n'.format(self.seg_running_score.get_pixel_acc()))
    self.batch_time.reset()
    self.val_losses.reset()
    self.seg_running_score.reset()
    self.seg_net.train()
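# The mean IoU and pixel accuracy reported above are typically derived from a class
# confusion matrix. The scorer below is a minimal, self-contained sketch of that idea;
# its name and interface are assumptions for illustration, not the repository's class.
import numpy as np

class SimpleSegScore(object):
    """Illustrative confusion-matrix scorer (assumed semantics)."""

    def __init__(self, num_classes):
        self.num_classes = num_classes
        self.confusion = np.zeros((num_classes, num_classes), dtype=np.int64)

    def update(self, pred, label):
        # pred/label: integer arrays of the same shape holding class ids.
        mask = (label >= 0) & (label < self.num_classes)
        idx = self.num_classes * label[mask].astype(int) + pred[mask].astype(int)
        self.confusion += np.bincount(idx, minlength=self.num_classes ** 2).reshape(
            self.num_classes, self.num_classes)

    def get_pixel_acc(self):
        return np.diag(self.confusion).sum() / max(self.confusion.sum(), 1)

    def get_mean_iou(self):
        inter = np.diag(self.confusion)
        union = self.confusion.sum(axis=1) + self.confusion.sum(axis=0) - inter
        return np.nanmean(inter / np.maximum(union, 1))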
def val(self, data_loader=None):
    """ Validation function during the train phase. """
    self.gan_net.eval()
    start_time = time.time()

    data_loader = self.val_loader if data_loader is None else data_loader
    for j, data_dict in enumerate(data_loader):
        with torch.no_grad():
            # Forward pass.
            out_dict = self.gan_net(data_dict)
            # Compute the loss of the val batch.
            self.val_losses.update(out_dict['loss'].mean().item(),
                                   len(DCHelper.tolist(data_dict['meta'])))

            meta_list = DCHelper.tolist(data_dict['meta'])
            probe_features = []
            gallery_features = []
            probe_labels = []
            gallery_labels = []
            for idx in range(len(meta_list)):
                gallery_features.append(out_dict['featB'][idx].cpu().numpy())
                gallery_labels.append(meta_list[idx]['labelB'])
                probe_features.append(out_dict['featA'][idx].cpu().numpy())
                probe_labels.append(meta_list[idx]['labelA'])

            rank_1, vr_far_001 = FaceGANTest.decode(probe_features, gallery_features,
                                                    probe_labels, gallery_labels)
            Log.info('Rank1 accuracy is {}'.format(rank_1))
            Log.info('VR@FAR=0.1% accuracy is {}'.format(vr_far_001))

        # Update the vars of the val phase.
        self.batch_time.update(time.time() - start_time)
        start_time = time.time()

    RunnerHelper.save_net(self, self.gan_net, val_loss=self.val_losses.avg)

    # Print the log info & reset the states.
    Log.info('Test Time {batch_time.sum:.3f}s, ({batch_time.avg:.3f})\t'
             'Loss {loss.avg:.8f}\n'.format(batch_time=self.batch_time, loss=self.val_losses))
    self.batch_time.reset()
    self.val_losses.reset()
    self.gan_net.train()
def val(self): """ Validation function during the train phase. """ self.det_net.eval() start_time = time.time() with torch.no_grad(): for i, data_dict in enumerate(self.val_loader): # Forward pass. out_dict = self.det_net(data_dict) # Compute the loss of the val batch. loss = out_dict['loss'].mean() self.val_losses.update(loss.item(), len(DCHelper.tolist(data_dict['meta']))) batch_detections = YOLOv3Test.decode( out_dict['dets'], self.configer, DCHelper.tolist(data_dict['meta'])) batch_pred_bboxes = self.__get_object_list(batch_detections) self.det_running_score.update(batch_pred_bboxes, [ item['ori_bboxes'] for item in DCHelper.tolist(data_dict['meta']) ], [ item['ori_labels'] for item in DCHelper.tolist(data_dict['meta']) ]) # Update the vars of the val phase. self.batch_time.update(time.time() - start_time) start_time = time.time() RunnerHelper.save_net(self, self.det_net, iters=self.runner_state['iters']) # Print the log info & reset the states. Log.info( 'Test Time {batch_time.sum:.3f}s, ({batch_time.avg:.3f})\t' 'Loss {loss.avg:.8f}\n'.format(batch_time=self.batch_time, loss=self.val_losses)) Log.info('Val mAP: {}'.format(self.det_running_score.get_mAP())) self.det_running_score.reset() self.batch_time.reset() self.val_losses.reset() self.det_net.train()
def _init_model(self):
    self.gan_net = self.model_manager.gan_model()
    self.gan_net = RunnerHelper.load_net(self, self.gan_net)
    self.optimizer, self.scheduler = Trainer.init(self._get_parameters(), self.configer.get('solver'))
    self.train_loader = self.seg_data_loader.get_trainloader()
    self.val_loader = self.seg_data_loader.get_valloader()
def _init_model(self):
    # torch.multiprocessing.set_sharing_strategy('file_system')
    self.det_net = self.det_model_manager.object_detector()
    self.det_net = RunnerHelper.load_net(self, self.det_net)
    self.optimizer, self.scheduler = Trainer.init(self._get_parameters(), self.configer.get('solver'))
    self.train_loader = self.det_data_loader.get_trainloader()
    self.val_loader = self.det_data_loader.get_valloader()
    self.det_loss = self.det_model_manager.get_det_loss()
def test(self, test_dir, out_dir):
    for _, data_dict in enumerate(self.test_loader.get_testloader(test_dir=test_dir)):
        data_dict['testing'] = True
        data_dict = RunnerHelper.to_device(self, data_dict)
        out_dict = self.det_net(data_dict)
        meta_list = DCHelper.tolist(data_dict['meta'])
        test_indices_and_rois, test_roi_locs, test_roi_scores, test_rois_num = out_dict['test_group']
        batch_detections = self.decode(test_roi_locs, test_roi_scores, test_indices_and_rois,
                                       test_rois_num, self.configer, meta_list)
        for i in range(len(meta_list)):
            ori_img_bgr = ImageHelper.read_image(meta_list[i]['img_path'], tool='cv2', mode='BGR')
            json_dict = self.__get_info_tree(batch_detections[i])
            image_canvas = self.det_parser.draw_bboxes(
                ori_img_bgr.copy(), json_dict,
                conf_threshold=self.configer.get('res', 'vis_conf_thre'))
            ImageHelper.save(image_canvas,
                             save_path=os.path.join(out_dir, 'vis/{}.png'.format(meta_list[i]['filename'])))

            Log.info('Json Path: {}'.format(os.path.join(out_dir, 'json/{}.json'.format(meta_list[i]['filename']))))
            JsonHelper.save_file(json_dict,
                                 save_path=os.path.join(out_dir, 'json/{}.json'.format(meta_list[i]['filename'])))
def __init__(self, configer):
    self.configer = configer
    self.runner_state = dict()

    self.batch_time = AverageMeter()
    self.data_time = AverageMeter()
    self.train_losses = DictAverageMeter()
    self.val_losses = DictAverageMeter()
    self.cls_model_manager = ModelManager(configer)
    self.cls_data_loader = DataLoader(configer)
    self.running_score = ClsRunningScore(configer)

    self.cls_net = self.cls_model_manager.get_cls_model()
    self.solver_dict = self.configer.get('solver')
    self.cls_net = RunnerHelper.load_net(self, self.cls_net)
    self.optimizer, self.scheduler = Trainer.init(self._get_parameters(), self.solver_dict)
    self.train_loader = self.cls_data_loader.get_trainloader()
    self.val_loader = self.cls_data_loader.get_valloader()
    self.loss = self.cls_model_manager.get_cls_loss()
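# The meters instantiated above are used throughout these runners via update/avg/reset
# (plus info() and a per-key avg dict for the dict variant). The classes below are a
# minimal sketch of that assumed interface, reconstructed from how the meters are called
# here; they are not the repository's implementations.
class AverageMeter(object):
    """Running scalar average (assumed interface: update(val, n), val, sum, avg, reset)."""

    def __init__(self):
        self.reset()

    def reset(self):
        self.val, self.sum, self.count, self.avg = 0.0, 0.0, 0, 0.0

    def update(self, val, n=1):
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / max(self.count, 1)


class DictAverageMeter(object):
    """Per-key running averages (assumed interface: update(dict, n), avg, info, reset)."""

    def __init__(self):
        self.reset()

    def reset(self):
        self.sum, self.count = {}, 0

    def update(self, val_dict, n=1):
        for key, val in val_dict.items():
            self.sum[key] = self.sum.get(key, 0.0) + val * n
        self.count += n

    @property
    def avg(self):
        return {key: val / max(self.count, 1) for key, val in self.sum.items()}

    def info(self):
        return ' '.join('{}={:.8f}'.format(key, val) for key, val in self.avg.items())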
def val(self): """ Validation function during the train phase. """ self.cls_net.eval() start_time = time.time() with torch.no_grad(): for j, data_dict in enumerate(self.val_loader): # Forward pass. data_dict = RunnerHelper.to_device(self, data_dict) out = self.cls_net(data_dict) loss_dict = self.loss(out) out_dict, label_dict, _ = RunnerHelper.gather(self, out) self.running_score.update(out_dict, label_dict) self.val_losses.update( {key: loss.item() for key, loss in loss_dict.items()}, data_dict['img'].size(0)) # Update the vars of the val phase. self.batch_time.update(time.time() - start_time) start_time = time.time() RunnerHelper.save_net(self, self.cls_net) # Print the log info & reset the states. Log.info('Test Time {batch_time.sum:.3f}s'.format( batch_time=self.batch_time)) Log.info('TestLoss = {}'.format(self.val_losses.info())) Log.info('Top1 ACC = {}'.format( RunnerHelper.dist_avg(self, self.running_score.get_top1_acc()))) Log.info('Top3 ACC = {}'.format( RunnerHelper.dist_avg(self, self.running_score.get_top3_acc()))) Log.info('Top5 ACC = {}'.format( RunnerHelper.dist_avg(self, self.running_score.get_top5_acc()))) self.batch_time.reset() self.batch_time.reset() self.val_losses.reset() self.running_score.reset() self.cls_net.train()
def _init_model(self):
    self.pose_net = self.pose_model_manager.get_pose_model()
    self.pose_net = RunnerHelper.load_net(self, self.pose_net)
    self.pose_net.eval()
def _init_model(self):
    self.gan_net = self.model_manager.gan_model()
    self.gan_net = RunnerHelper.load_net(self, self.gan_net)
    self.gan_net.eval()
def _init_model(self):
    self.det_net = self.det_model_manager.object_detector()
    self.det_net = RunnerHelper.load_net(self, self.det_net)
    self.det_net.eval()