def _plot_base(self):
    with vplt.set_draw(name='loss_base') as ax:
        ax.plot(list(self.meters.loss_coord_train.keys()),
                np.mean(list(self.meters.loss_coord_train.values()), axis=1),
                label='loss_coord_train')
        ax.plot(list(self.meters.loss_coord_val.keys()),
                list(self.meters.loss_coord_val.values()),
                label='loss_coord_val')
        ax.set_title('loss base')
        ax.set_xlabel('epoch')
        ax.set_ylabel('loss')
        ax.set_xlim(0, self.temps.epochs)
        ax.legend()
        ax.grid(True)
    with vplt.set_draw(name='prec_base') as ax:
        ax.plot(list(self.meters.prec_coord_train.keys()),
                np.mean(list(self.meters.prec_coord_train.values()), axis=1),
                label='prec_coord_train')
        ax.plot(list(self.meters.prec_coord_val.keys()),
                list(self.meters.prec_coord_val.values()),
                label='prec_coord_val')
        ax.set_title('prec base')
        ax.set_xlabel('epoch')
        ax.set_ylabel('prec')
        ax.set_xlim(0, self.temps.epochs)
        ax.legend()
        ax.grid(True)
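# `vplt.set_draw` is not defined in this section. It is assumed here to be a
# context manager that yields a matplotlib Axes and, on exit, pushes the figure
# to a visdom window keyed by `name` so repeated calls update in place. A
# minimal sketch under that assumption (the `viz` handle and `set_draw` body
# below are illustrative, not the project's actual implementation):
#
# import contextlib
# import matplotlib.pyplot as plt
# import visdom
#
# viz = visdom.Visdom()
#
# @contextlib.contextmanager
# def set_draw(name):
#     # Yield a fresh Axes; when the block exits, ship the figure to visdom
#     # under the window name and release the matplotlib figure.
#     fig, ax = plt.subplots()
#     try:
#         yield ax
#     finally:
#         viz.matplot(fig, win=name)
#         plt.close(fig)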
def _train_headpose_epoch(self):
    logger = self.temps.headpose_logger.getChild('epoch')

    # prepare models
    refine_depth = self._prepare_model(self.models.refine_depth)
    depth_loss = self.models.depth_loss
    device = th.device("cuda") if self.is_cuda else th.device("cpu")

    # prepare solvers
    self.temps.headpose_solver = optim.SGD(
        self._group_weight(self.models.refine_depth, lr=self.temps.lr),
        weight_decay=5e-4)

    self.timeit()
    for i, batch in enumerate(self.temps.train_loader):
        self.temps.iter = i

        # prepare data
        face_image = batch['face_image'].to(device)
        face_depth = batch['face_depth'].to(device)

        # measure data loading time
        self.temps.data_time = self._timeit()

        # forward
        head_pose, refined_depth = refine_depth(face_image, face_depth)
        loss_depth = depth_loss(refined_depth, face_depth)

        # update resnet & decoder
        self.temps.headpose_solver.zero_grad()
        loss_depth.backward()
        self.temps.headpose_solver.step()

        # record loss & accuracy
        epoch = self.temps.epoch
        self.meters.loss_depth_train[epoch].append(loss_depth.item())

        # visualize and save results
        face_depth.detach_()
        refined_depth.detach_()
        with th.no_grad():
            depth_grid_gt = make_grid(face_depth).cpu()
            depth_grid_rf = make_grid(refined_depth).cpu()
            with vplt.set_draw(name='train_depth_groundtruth') as ax:
                ax.imshow(depth_grid_gt.numpy().transpose((1, 2, 0)))
            with vplt.set_draw(name='train_depth_refined') as ax:
                ax.imshow(depth_grid_rf.numpy().transpose((1, 2, 0)))
            save_image(depth_grid_gt,
                       os.path.join(self.result_root, "train", "depth",
                                    f"ep{self.temps.epoch:02d}iter{i:04d}_gt.png"))
            save_image(depth_grid_rf,
                       os.path.join(self.result_root, "train", "depth",
                                    f"ep{self.temps.epoch:02d}iter{i:04d}_rf.png"))

        # measure batch time
        self.temps.batch_time = self._timeit()

        # logging
        infofmt = "[{temps.epoch}][{temps.iter}/{temps.num_iters}]\t" \
                  "data_time: {temps.data_time:.2f} batch_time: {temps.batch_time:.2f}\t" \
                  "loss_depth_train: {loss_depth_train:.4f}"
        infodict = dict(
            temps=self.temps,
            loss_depth_train=self.meters.loss_depth_train[epoch][-1],
        )
        logger.info(infofmt.format(**infodict))
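# `self._group_weight(...)` is referenced above but not shown in this section.
# A minimal sketch of the usual parameter-grouping pattern it presumably
# follows (biases and BatchNorm parameters excluded from weight decay, every
# group carrying its own lr so SGD needs no top-level lr) is given below as an
# assumption for illustration, not the project's actual helper:
#
# import torch.nn as nn
#
# def _group_weight_sketch(module, lr):
#     decay, no_decay = [], []
#     for m in module.modules():
#         if isinstance(m, (nn.Conv2d, nn.Linear)):
#             decay.append(m.weight)
#             if m.bias is not None:
#                 no_decay.append(m.bias)
#         elif isinstance(m, nn.modules.batchnorm._BatchNorm):
#             if m.weight is not None:
#                 no_decay.append(m.weight)
#             if m.bias is not None:
#                 no_decay.append(m.bias)
#     # Only the `decay` group inherits the weight decay passed to the optimizer.
#     return [dict(params=decay, lr=lr),
#             dict(params=no_decay, lr=lr, weight_decay=0.0)]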
def _test_headpose(self):
    logger = self.temps.headpose_logger.getChild('val')
    refine_depth = self._prepare_model(self.models.refine_depth)
    depth_loss = self.models.depth_loss
    loss_depth, num_batches = 0, 0
    device = th.device("cuda") if self.is_cuda else th.device("cpu")
    for i, batch in enumerate(self.temps.val_loader):
        self.temps.iter = i

        # prepare data
        face_image = batch['face_image'].to(device)
        face_depth = batch['face_depth'].to(device)

        # measure data loading time
        self.temps.data_time = self._timeit()

        # forward
        with th.no_grad():
            head_pose, refined_depth = refine_depth(face_image, face_depth)
            loss_depth_iter = depth_loss(refined_depth, face_depth)

            # visualize and save results
            depth_grid_gt = make_grid(face_depth).cpu()
            depth_grid_rf = make_grid(refined_depth).cpu()
            with vplt.set_draw(name='val_depth_groundtruth') as ax:
                ax.imshow(depth_grid_gt.numpy().transpose((1, 2, 0)))
            with vplt.set_draw(name='val_depth_refined') as ax:
                ax.imshow(depth_grid_rf.numpy().transpose((1, 2, 0)))
            save_image(depth_grid_gt,
                       os.path.join(self.result_root, "val", "depth",
                                    f"ep{self.temps.epoch:02d}iter{i:04d}_gt.png"))
            save_image(depth_grid_rf,
                       os.path.join(self.result_root, "val", "depth",
                                    f"ep{self.temps.epoch:02d}iter{i:04d}_rf.png"))

        # accumulate meters
        loss_depth += loss_depth_iter.item()
        num_batches += 1

        # logging
        infofmt = "[{temps.epoch}]\t" \
                  "loss_depth: {loss_depth:.4f}"
        infodict = dict(
            temps=self.temps,
            loss_depth=loss_depth_iter.item(),
        )
        logger.info(infofmt.format(**infodict))

    # record meters
    epoch = self.temps.epoch
    self.meters.loss_depth_val[epoch] = loss_depth / num_batches
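# `self.timeit()` and `self._timeit()` are used above to measure data-loading
# and batch times but are not defined in this section. A minimal sketch of such
# a stopwatch pair (mark the clock, then return seconds since the previous
# mark) is given below as an assumption; the project's actual helpers may differ.
#
# import time
#
# class _StopwatchMixinSketch:
#     def timeit(self):
#         # Start (or restart) the stopwatch.
#         self._last_time = time.time()
#
#     def _timeit(self):
#         # Return elapsed seconds since the last mark and reset the mark.
#         now = time.time()
#         elapsed = now - getattr(self, '_last_time', now)
#         self._last_time = now
#         return elapsed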
def _plot_headpose(self):
    with vplt.set_draw(name='loss_depth') as ax:
        ax.plot(list(self.meters.loss_depth_train.keys()),
                np.mean(list(self.meters.loss_depth_train.values()), axis=1),
                label='loss_depth_train')
        ax.plot(list(self.meters.loss_depth_val.keys()),
                list(self.meters.loss_depth_val.values()),
                label='loss_depth_val')
        ax.set_title('loss depth')
        ax.set_xlabel('epoch')
        ax.set_ylabel('loss')
        ax.set_xlim(0, self.temps.epochs)
        ax.legend()
        ax.grid(True)
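# The plotting above assumes `self.meters.loss_depth_train` maps each epoch to
# a list of per-iteration losses (so `np.mean(..., axis=1)` averages over
# iterations), while `loss_depth_val` stores a single averaged value per epoch.
# A minimal sketch of such meter containers, as an assumption for illustration:
#
# from collections import defaultdict
#
# def _make_meters_sketch():
#     return dict(
#         loss_depth_train=defaultdict(list),  # epoch -> [loss per iteration]
#         loss_depth_val=dict(),               # epoch -> averaged validation loss
#     )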