def forward(self, outputs, batch):
    opt = self.opt
    hm_w_loss, hm_h_loss, wh_loss, off_loss = 0, 0, 0, 0
    for s in range(opt.num_stacks):
        output = outputs[s]
        if not opt.mse_loss:
            output['hm_w'] = _sigmoid(output['hm_w'])
            output['hm_h'] = _sigmoid(output['hm_h'])

        if opt.eval_oracle_hm:
            output['hm_w'] = batch['hm_w']
            output['hm_h'] = batch['hm_h']
        if opt.eval_oracle_wh:
            output['wh'] = torch.from_numpy(
                gen_oracle_map(batch['wh'].detach().cpu().numpy(),
                               batch['ind'].detach().cpu().numpy(),
                               output['wh'].shape[3],
                               output['wh'].shape[2])).to(opt.device)
        if opt.eval_oracle_offset:
            output['reg'] = torch.from_numpy(
                gen_oracle_map(batch['reg'].detach().cpu().numpy(),
                               batch['ind'].detach().cpu().numpy(),
                               output['reg'].shape[3],
                               output['reg'].shape[2])).to(opt.device)

        hm_w_loss += self.crit(output['hm_w'], batch['hm_w']) / opt.num_stacks
        hm_h_loss += self.crit(output['hm_h'], batch['hm_h']) / opt.num_stacks
        if opt.wh_weight > 0:
            if opt.dense_wh:
                mask_weight = batch['dense_wh_mask'].sum() + 1e-4
                wh_loss += (self.crit_wh(
                    output['wh'] * batch['dense_wh_mask'],
                    batch['dense_wh'] * batch['dense_wh_mask']) /
                    mask_weight) / opt.num_stacks
            elif opt.cat_spec_wh:
                wh_loss += self.crit_wh(
                    output['wh'], batch['cat_spec_mask'],
                    batch['ind'], batch['cat_spec_wh']) / opt.num_stacks
            else:
                wh_loss += self.crit_reg(output['wh'], batch['reg_mask'],
                                         batch['ind'], batch['wh']) / opt.num_stacks

        if opt.reg_offset and opt.off_weight > 0:
            off_loss += self.crit_reg(output['reg'], batch['reg_mask'],
                                      batch['ind'], batch['reg']) / opt.num_stacks

    loss = opt.hm_weight * hm_w_loss + opt.hm_weight * hm_h_loss + \
           opt.wh_weight * wh_loss + opt.off_weight * off_loss
    loss_stats = {'loss': loss, 'hm_w_loss': hm_w_loss, 'hm_h_loss': hm_h_loss,
                  'wh_loss': wh_loss, 'off_loss': off_loss}
    return loss, loss_stats
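# For reference, a minimal sketch of the `_sigmoid` helper these variants call,
# assuming the stock CenterNet definition: the clamp keeps the focal loss's
# log() terms finite at both ends of the sigmoid.
import torch

def _sigmoid(x):
    return torch.clamp(x.sigmoid_(), min=1e-4, max=1 - 1e-4)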
def forward(self, outputs, batch):
    opt = self.opt
    hm_loss, wh_loss, off_loss = 0, 0, 0
    lm_loss, off_loss, hm_hp_loss, hp_offset_loss = 0, 0, 0, 0
    for s in range(opt.num_stacks):
        output = outputs[s]
        output['hm'] = output['hm']  # sigmoid left disabled in this variant
        # if opt.hm_hp and not opt.mse_loss:
        #     output['hm_hp'] = _sigmoid(output['hm_hp'])

        if opt.eval_oracle_hmhp:
            output['hm_hp'] = batch['hm_hp']
        if opt.eval_oracle_hm:
            output['hm'] = batch['hm']
        if opt.eval_oracle_kps:
            if opt.dense_hp:
                output['hps'] = batch['dense_hps']
            else:
                output['hps'] = torch.from_numpy(gen_oracle_map(
                    batch['hps'].detach().cpu().numpy(),
                    batch['ind'].detach().cpu().numpy(),
                    opt.output_res, opt.output_res)).to(opt.device)
        if opt.eval_oracle_hp_offset:
            output['hp_offset'] = torch.from_numpy(gen_oracle_map(
                batch['hp_offset'].detach().cpu().numpy(),
                batch['hp_ind'].detach().cpu().numpy(),
                opt.output_res, opt.output_res)).to(opt.device)

        # 1. focal loss on the object-center heatmap
        hm_loss += self.crit(output['hm'], batch['hm']) / opt.num_stacks
        if opt.wh_weight > 0:
            # 2. loss on the face bbox width and height
            wh_loss += self.crit_reg(output['wh'], batch['reg_mask'],
                                     batch['ind'], batch['wh'],
                                     batch['wight_mask']) / opt.num_stacks
        if opt.reg_offset and opt.off_weight > 0:
            # 3. offset compensating the downsampling of the face bbox center
            off_loss += self.crit_reg(output['hm_offset'], batch['reg_mask'],
                                      batch['ind'], batch['hm_offset'],
                                      batch['wight_mask']) / opt.num_stacks
        if opt.dense_hp:
            mask_weight = batch['dense_hps_mask'].sum() + 1e-4
            lm_loss += (self.crit_kp(output['hps'] * batch['dense_hps_mask'],
                                     batch['dense_hps'] * batch['dense_hps_mask']) /
                        mask_weight) / opt.num_stacks
        else:
            # 4. landmark offsets
            lm_loss += self.crit_kp(output['landmarks'], batch['hps_mask'],
                                    batch['ind'], batch['landmarks']) / opt.num_stacks
        # if opt.reg_hp_offset and opt.off_weight > 0:  # landmark center offsets
        #     hp_offset_loss += self.crit_reg(
        #         output['hp_offset'], batch['hp_mask'],
        #         batch['hp_ind'], batch['hp_offset']) / opt.num_stacks
        # if opt.hm_hp and opt.hm_hp_weight > 0:  # landmark heatmaps
        #     hm_hp_loss += self.crit_hm_hp(
        #         output['hm_hp'], batch['hm_hp']) / opt.num_stacks

    loss = opt.hm_weight * hm_loss + opt.wh_weight * wh_loss + \
           opt.off_weight * off_loss + opt.lm_weight * lm_loss
    loss_stats = {'loss': loss, 'hm_loss': hm_loss, 'lm_loss': lm_loss,
                  'wh_loss': wh_loss, 'off_loss': off_loss}
    return loss, loss_stats
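# The crit_reg calls above pass an extra batch['wight_mask'] (sic) argument, so
# this variant presumably uses a weighted regression criterion. A plausible
# sketch, assuming the repo's _tranpose_and_gather_feat helper and a per-object
# weight tensor shaped like reg_mask:
import torch
import torch.nn.functional as F

class RegWeightedL1LossSketch(torch.nn.Module):
    def forward(self, output, mask, ind, target, weight=None):
        pred = _tranpose_and_gather_feat(output, ind)   # (B, K, C) predictions at GT centers
        mask = mask.unsqueeze(2).expand_as(pred).float()
        if weight is not None:                          # optional per-object weighting
            mask = mask * weight.unsqueeze(2).expand_as(pred).float()
        loss = F.l1_loss(pred * mask, target * mask, reduction='sum')
        return loss / (mask.sum() + 1e-4)               # normalize by valid entries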
def forward(self, outputs, batch):
    opt = self.opt
    hm_loss, wh_loss, off_loss = 0, 0, 0
    hp_loss, off_loss, hm_hp_loss, hp_offset_loss = 0, 0, 0, 0
    for s in range(opt.num_stacks):
        output = outputs[s]
        output['hm'] = _sigmoid(output['hm'])
        if opt.hm_hp and not opt.mse_loss:
            output['hm_hp'] = _sigmoid(output['hm_hp'])

        if opt.eval_oracle_hmhp:
            output['hm_hp'] = batch['hm_hp']
        if opt.eval_oracle_hm:
            output['hm'] = batch['hm']
        if opt.eval_oracle_kps:
            if opt.dense_hp:
                output['hps'] = batch['dense_hps']
            else:
                output['hps'] = torch.from_numpy(gen_oracle_map(
                    batch['hps'].detach().cpu().numpy(),
                    batch['ind'].detach().cpu().numpy(),
                    opt.output_res, opt.output_res)).to(opt.device)
        if opt.eval_oracle_hp_offset:
            output['hp_offset'] = torch.from_numpy(gen_oracle_map(
                batch['hp_offset'].detach().cpu().numpy(),
                batch['hp_ind'].detach().cpu().numpy(),
                opt.output_res, opt.output_res)).to(opt.device)

        hm_loss += self.crit(output['hm'], batch['hm']) / opt.num_stacks
        if opt.dense_hp:
            mask_weight = batch['dense_hps_mask'].sum() + 1e-4
            hp_loss += (self.crit_kp(output['hps'] * batch['dense_hps_mask'],
                                     batch['dense_hps'] * batch['dense_hps_mask']) /
                        mask_weight) / opt.num_stacks
        else:
            hp_loss += self.crit_kp(output['hps'], batch['hps_mask'],
                                    batch['ind'], batch['hps']) / opt.num_stacks
        if opt.wh_weight > 0:
            wh_loss += self.crit_reg(output['wh'], batch['reg_mask'],
                                     batch['ind'], batch['wh']) / opt.num_stacks
        if opt.reg_offset and opt.off_weight > 0:
            off_loss += self.crit_reg(output['reg'], batch['reg_mask'],
                                      batch['ind'], batch['reg']) / opt.num_stacks
        if opt.reg_hp_offset and opt.off_weight > 0:
            hp_offset_loss += self.crit_reg(
                output['hp_offset'], batch['hp_mask'],
                batch['hp_ind'], batch['hp_offset']) / opt.num_stacks
        if opt.hm_hp and opt.hm_hp_weight > 0:
            hm_hp_loss += self.crit_hm_hp(
                output['hm_hp'], batch['hm_hp']) / opt.num_stacks

    loss = opt.hm_weight * hm_loss + opt.wh_weight * wh_loss + \
           opt.off_weight * off_loss + opt.hp_weight * hp_loss + \
           opt.hm_hp_weight * hm_hp_loss + opt.off_weight * hp_offset_loss
    loss_stats = {'loss': loss, 'hm_loss': hm_loss, 'hp_loss': hp_loss,
                  'hm_hp_loss': hm_hp_loss, 'hp_offset_loss': hp_offset_loss,
                  'wh_loss': wh_loss, 'off_loss': off_loss}
    return loss, loss_stats
def forward(self, outputs, batch):
    opt = self.opt
    hm_loss, wh_loss, off_loss = 0, 0, 0
    for s in range(opt.num_stacks):
        output = outputs[s]
        #### if MSE loss is not used, run hm through a sigmoid
        if not opt.mse_loss:
            output['hm'] = _sigmoid(output['hm'])

        if opt.eval_oracle_hm:
            output['hm'] = batch['hm']
        #### eval_oracle_wh swaps in a wh map rebuilt from the ground truth
        if opt.eval_oracle_wh:
            #### moves the data between CPU and GPU as needed,
            #### converting to numpy.ndarray format in between
            output['wh'] = torch.from_numpy(gen_oracle_map(
                batch['wh'].detach().cpu().numpy(),
                batch['ind'].detach().cpu().numpy(),
                output['wh'].shape[3], output['wh'].shape[2])).to(opt.device)
        if opt.eval_oracle_offset:
            output['reg'] = torch.from_numpy(gen_oracle_map(
                batch['reg'].detach().cpu().numpy(),
                batch['ind'].detach().cpu().numpy(),
                output['reg'].shape[3], output['reg'].shape[2])).to(opt.device)

        ### num_stacks = number of intermediate-supervision stacks (e.g. 2 for Hourglass, 1 otherwise)
        hm_loss += self.crit(output['hm'], batch['hm']) / opt.num_stacks
        ### wh_weight is the loss weight for bounding box size
        ### total loss: loss = opt.hm_weight * hm_loss + opt.wh_weight * wh_loss +
        ###                    opt.off_weight * off_loss
        if opt.wh_weight > 0:
            if opt.dense_wh:
                mask_weight = batch['dense_wh_mask'].sum() + 1e-4
                ### compute the wh loss term
                wh_loss += (
                    self.crit_wh(output['wh'] * batch['dense_wh_mask'],
                                 batch['dense_wh'] * batch['dense_wh_mask']) /
                    mask_weight) / opt.num_stacks
            elif opt.cat_spec_wh:
                wh_loss += self.crit_wh(
                    output['wh'], batch['cat_spec_mask'],
                    batch['ind'], batch['cat_spec_wh']) / opt.num_stacks
            else:
                wh_loss += self.crit_reg(
                    output['wh'], batch['reg_mask'],
                    batch['ind'], batch['wh']) / opt.num_stacks

        if opt.reg_offset and opt.off_weight > 0:
            off_loss += self.crit_reg(output['reg'], batch['reg_mask'],
                                      batch['ind'], batch['reg']) / opt.num_stacks

    #### compute the total loss
    loss = opt.hm_weight * hm_loss + opt.wh_weight * wh_loss + \
           opt.off_weight * off_loss
    loss_stats = {'loss': loss, 'hm_loss': hm_loss,
                  'wh_loss': wh_loss, 'off_loss': off_loss}
    return loss, loss_stats
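# Answering the questions in the comments above: the oracle flags replace a
# predicted map with one rebuilt from the ground truth, which upper-bounds how
# much error that head contributes. A simplified sketch of gen_oracle_map,
# assuming (B, K, C) targets and flattened (B, K) indices (the stock CenterNet
# version additionally fills the remaining pixels from the nearest annotated
# location):
import numpy as np

def gen_oracle_map_sketch(feat, ind, w, h):
    batch, K, C = feat.shape
    out = np.zeros((batch, C, h, w), dtype=np.float32)
    for b in range(batch):
        for k in range(K):
            y, x = ind[b, k] // w, ind[b, k] % w  # recover the 2-D center from the flat index
            out[b, :, y, x] = feat[b, k]
    return out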
def forward(self, outputs, batch, source_grl=None, target_grl=None, DA_switch=False):
    opt = self.opt
    hm_loss, wh_loss, off_loss = 0, 0, 0
    for s in range(opt.num_stacks):
        output = outputs[s]
        if not opt.mse_loss:
            output['hm'] = _sigmoid(output['hm'])

        if opt.eval_oracle_hm:
            output['hm'] = batch['hm']
        if opt.eval_oracle_wh:
            output['wh'] = torch.from_numpy(gen_oracle_map(
                batch['wh'].detach().cpu().numpy(),
                batch['ind'].detach().cpu().numpy(),
                output['wh'].shape[3], output['wh'].shape[2])).to(opt.device)
        if opt.eval_oracle_offset:
            output['reg'] = torch.from_numpy(gen_oracle_map(
                batch['reg'].detach().cpu().numpy(),
                batch['ind'].detach().cpu().numpy(),
                output['reg'].shape[3], output['reg'].shape[2])).to(opt.device)

        hm_loss += self.crit(output['hm'], batch['hm']) / opt.num_stacks
        if opt.wh_weight > 0:
            if opt.dense_wh:
                mask_weight = batch['dense_wh_mask'].sum() + 1e-4
                wh_loss += (
                    self.crit_wh(output['wh'] * batch['dense_wh_mask'],
                                 batch['dense_wh'] * batch['dense_wh_mask']) /
                    mask_weight) / opt.num_stacks
            elif opt.cat_spec_wh:
                wh_loss += self.crit_wh(
                    output['wh'], batch['cat_spec_mask'],
                    batch['ind'], batch['cat_spec_wh']) / opt.num_stacks
            else:
                wh_loss += self.crit_reg(
                    output['wh'], batch['reg_mask'],
                    batch['ind'], batch['wh']) / opt.num_stacks
        if opt.reg_offset and opt.off_weight > 0:
            off_loss += self.crit_reg(output['reg'], batch['reg_mask'],
                                      batch['ind'], batch['reg']) / opt.num_stacks

    # domain-adaptation loss: the domain classifier should score source as 0, target as 1
    da_loss = torch.zeros_like(hm_loss).type(torch.cuda.FloatTensor)
    if DA_switch:
        da_source_label = torch.zeros_like(source_grl).type(torch.cuda.FloatTensor)
        da_target_label = torch.ones_like(target_grl).type(torch.cuda.FloatTensor)
        grl_s_loss = F.binary_cross_entropy_with_logits(source_grl, da_source_label)
        grl_t_loss = F.binary_cross_entropy_with_logits(target_grl, da_target_label)
        da_loss = grl_s_loss + grl_t_loss

    loss = opt.hm_weight * hm_loss + opt.wh_weight * wh_loss + \
           opt.off_weight * off_loss + opt.da_weight * da_loss
    # loss = da_loss
    loss_stats = {'loss': loss, 'hm_loss': hm_loss, 'wh_loss': wh_loss,
                  'off_loss': off_loss, 'da_loss': da_loss}
    return loss, loss_stats
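# The "_grl" in source_grl / target_grl suggests the features are routed
# through a gradient reversal layer before the domain classifier, so that
# minimizing the BCE above adversarially aligns the two domains. A minimal
# sketch of such a layer (hypothetical, not necessarily how this repo builds
# its discriminator):
import torch

class GradReverse(torch.autograd.Function):
    @staticmethod
    def forward(ctx, x, lambd=1.0):
        ctx.lambd = lambd
        return x.view_as(x)                      # identity on the forward pass

    @staticmethod
    def backward(ctx, grad_output):
        return -ctx.lambd * grad_output, None    # flip (and scale) the gradient

def grad_reverse(x, lambd=1.0):
    return GradReverse.apply(x, lambd)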
def forward(self, outputs, targets_heatmaps, targets_scale, targets_offset,
            targets_inds, targets_reg_mask):
    opt = self.opt
    hm_loss, wh_loss, off_loss = 0, 0, 0
    for s in range(opt.num_stacks):
        output = outputs[s]
        if not opt.mse_loss:
            output['hm'] = _sigmoid(output['hm'])

        # Optional: use ground truth for validation
        if opt.eval_oracle_hm:
            output['hm'] = targets_heatmaps
        if opt.eval_oracle_wh:
            output['wh'] = nd.from_numpy(
                gen_oracle_map(targets_scale.asnumpy(),
                               targets_inds.asnumpy(),
                               output['wh'].shape[3],
                               output['wh'].shape[2])).as_in_context(opt.device)
        if opt.eval_oracle_offset:
            output['reg'] = nd.from_numpy(
                gen_oracle_map(targets_offset.asnumpy(),
                               targets_inds.asnumpy(),
                               output['reg'].shape[3],
                               output['reg'].shape[2])).as_in_context(opt.device)

        # 1. heatmap loss
        hm_loss = hm_loss + self.crit(output['hm'], targets_heatmaps) / opt.num_stacks
        # 2. scale loss
        if opt.wh_weight > 0:
            wh_loss = wh_loss + self.crit_reg(
                output['wh'], targets_reg_mask,
                targets_inds, targets_scale) / opt.num_stacks
        # 3. offset loss
        if opt.reg_offset and opt.off_weight > 0:
            off_loss = off_loss + self.crit_reg(
                output['reg'], targets_reg_mask,
                targets_inds, targets_offset) / opt.num_stacks

    # total loss
    loss = opt.hm_weight * hm_loss + opt.wh_weight * wh_loss + \
           opt.off_weight * off_loss
    loss_stats = {'loss': loss, 'hm_loss': hm_loss,
                  'wh_loss': wh_loss, 'off_loss': off_loss}
    return loss
def forward(self, outputs, batch):
    opt = self.opt
    hm_loss, dep_loss, rot_loss, dim_loss = 0, 0, 0, 0
    wh_loss, off_loss = 0, 0
    for s in range(opt.num_stacks):
        output = outputs[s]
        output['hm'] = _sigmoid(output['hm'])
        output['dep'][:, :, :, 0] = 1. / (output['dep'][:, :, :, 0].sigmoid() + 1e-6) - 1.
        output['dep'][:, :, :, 1] = output['dep'][:, :, :, 1].sigmoid()
        if opt.eval_oracle_dep:
            output['dep'] = torch.from_numpy(
                gen_oracle_map(batch['dep'].detach().cpu().numpy(),
                               batch['ind'].detach().cpu().numpy(),
                               opt.output_w, opt.output_h)).to(opt.device)

        hm_loss += self.crit(output['hm'], batch['hm']) / opt.num_stacks
        if opt.dep_weight > 0:
            dep_loss += self.dept_reg(output['dep'], batch['reg_mask'],
                                      batch['ind'], batch['dep']) / opt.num_stacks
        if opt.dim_weight > 0:
            dim_loss += self.crit_reg(output['dim'], batch['reg_mask'],
                                      batch['ind'], batch['dim']) / opt.num_stacks
        if opt.rot_weight > 0:
            rot_loss += self.crit_rot(output['rot'], batch['rot_mask'],
                                      batch['ind'], batch['rotbin'],
                                      batch['rotres']) / opt.num_stacks
        if opt.reg_bbox and opt.wh_weight > 0:
            wh_loss += self.crit_reg(output['wh'], batch['rot_mask'],
                                     batch['ind'], batch['wh']) / opt.num_stacks
        if opt.reg_offset and opt.off_weight > 0:
            off_loss += self.crit_reg(output['reg'], batch['rot_mask'],
                                      batch['ind'], batch['reg']) / opt.num_stacks

    # note the wh term is deliberately zeroed out here
    loss = opt.hm_weight * hm_loss + opt.dep_weight * dep_loss + \
           opt.dim_weight * dim_loss + opt.rot_weight * rot_loss + \
           0 * opt.wh_weight * wh_loss + opt.off_weight * off_loss
    loss_stats = {'loss': loss, 'hm_loss': hm_loss, 'dep_loss': dep_loss,
                  'dim_loss': dim_loss, 'rot_loss': rot_loss,
                  'wh_loss': wh_loss, 'off_loss': off_loss}
    return loss, loss_stats
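# Quick numeric check of the inverse-sigmoid depth transform used above:
# dep = 1 / (sigmoid(x) + 1e-6) - 1 maps unbounded logits to positive depths,
# with large negative logits encoding far objects and large positive logits
# encoding near ones.
import torch

x = torch.tensor([-4.0, 0.0, 4.0])
dep = 1. / (x.sigmoid() + 1e-6) - 1.
print(dep)  # approx. [54.6, 1.0, 0.018]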
def forward(self, outputs,
            targets_center, targets_2d_wh, targets_2d_offset,
            targets_3d_depth, targets_3d_dim, targets_3d_rotbin, targets_3d_rotres,
            targets_inds, targets_2d_wh_mask, targets_3d_rot_mask):
    opt = self.opt
    hm_loss, dep_loss, rot_loss, dim_loss = 0, 0, 0, 0
    wh_loss, off_loss = 0, 0
    for s in range(opt.num_stacks):
        output = outputs[s]
        output['hm'] = _sigmoid(output['hm'])
        output['dep'] = 1. / (output['dep'].sigmoid() + 1e-6) - 1.
        if opt.eval_oracle_dep:
            output['dep'] = nd.array(
                gen_oracle_map(targets_3d_depth.asnumpy(),
                               targets_inds.asnumpy(),
                               opt.output_w, opt.output_h)).as_in_context(opt.device)

        hm_loss = hm_loss + self.crit(output['hm'], targets_center) / opt.num_stacks
        if opt.dep_weight > 0:
            dep_loss = dep_loss + self.crit_reg(
                output['dep'], targets_2d_wh_mask,
                targets_inds, targets_3d_depth) / opt.num_stacks
        if opt.dim_weight > 0:
            dim_loss = dim_loss + self.crit_reg(
                output['dim'], targets_2d_wh_mask,
                targets_inds, targets_3d_dim) / opt.num_stacks
        if opt.rot_weight > 0:
            rot_loss = rot_loss + self.crit_rot(
                output['rot'], targets_3d_rot_mask, targets_inds,
                targets_3d_rotbin, targets_3d_rotres) / opt.num_stacks
        if opt.reg_bbox and opt.wh_weight > 0:
            wh_loss = wh_loss + self.crit_reg(
                output['wh'], targets_3d_rot_mask,
                targets_inds, targets_2d_wh) / opt.num_stacks
        if opt.reg_offset and opt.off_weight > 0:
            off_loss = off_loss + self.crit_reg(
                output['reg'], targets_3d_rot_mask,
                targets_inds, targets_2d_offset) / opt.num_stacks

    loss = opt.hm_weight * hm_loss + opt.dep_weight * dep_loss + \
           opt.dim_weight * dim_loss + opt.rot_weight * rot_loss + \
           opt.wh_weight * wh_loss + opt.off_weight * off_loss
    # print("hm_loss: {}, dep_loss: {}, dim_loss: {}, rot_loss: {}, wh_loss: {}, off_loss: {}"
    #       .format(hm_loss, dep_loss, dim_loss, rot_loss, wh_loss, off_loss))
    # loss_stats = {'loss': loss, 'hm_loss': hm_loss, 'dep_loss': dep_loss,
    #               'dim_loss': dim_loss, 'rot_loss': rot_loss,
    #               'wh_loss': wh_loss, 'off_loss': off_loss}
    return loss
def forward(self, outputs, batch):
    opt = self.opt
    hm_loss, dep_loss, rot_loss = 0, 0, 0
    wh_loss, off_loss, xyz_loss = 0, 0, 0
    for s in range(opt.num_stacks):
        output = outputs[s]
        output['hm'] = _sigmoid(output['hm'])
        output['dep'] = 1. / (output['dep'].sigmoid() + 1e-6) - 1.
        if opt.eval_oracle_dep:
            output['dep'] = torch.from_numpy(
                gen_oracle_map(batch['dep'].detach().cpu().numpy(),
                               batch['ind'].detach().cpu().numpy(),
                               opt.output_w, opt.output_h)).to(opt.device)

        hm_loss += self.crit(output['hm'], batch['hm']) / opt.num_stacks
        if opt.dep_weight > 0:
            dep_loss += self.crit_reg(output['dep'], batch['reg_mask'],
                                      batch['ind'], batch['dep']) / opt.num_stacks
        if opt.rot_weight > 0:
            # project the rotation prediction onto the unit sphere before regression
            rot_pred = F.normalize(output['rot'], p=2, dim=1)
            rot_loss += self.crit_reg(rot_pred, batch['rot_mask'],
                                      batch['ind'], batch['rot']) / opt.num_stacks
        if opt.reg_bbox and opt.wh_weight > 0:
            wh_loss += self.crit_reg(output['wh'], batch['rot_mask'],
                                     batch['ind'], batch['wh']) / opt.num_stacks
        if opt.reg_offset and opt.off_weight > 0:
            off_loss += self.crit_reg(output['reg'], batch['rot_mask'],
                                      batch['ind'], batch['reg']) / opt.num_stacks
        if opt.xyz_mask and opt.xyz_weight > 0:
            xyz_loss += self.crit_xyz(output['xyz'], batch['xyz_mask']) / opt.num_stacks

    loss = (opt.hm_weight * hm_loss + opt.dep_weight * dep_loss +
            opt.rot_weight * rot_loss + opt.wh_weight * wh_loss +
            opt.off_weight * off_loss + opt.xyz_weight * xyz_loss)
    loss_stats = {'loss': loss, 'hm_loss': hm_loss, 'dep_loss': dep_loss,
                  'rot_loss': rot_loss, 'wh_loss': wh_loss, 'off_loss': off_loss}
    if opt.xyz_mask:
        loss_stats.update({'xyz_loss': xyz_loss})
    return loss, loss_stats
def forward(self, outputs, batch):
    opt = self.opt
    center_loss, hm_loss, wh_loss, off_loss = 0, 0, 0, 0
    for s in range(opt.num_stacks):
        output = outputs[s]
        if not opt.mse_loss:
            pass
            # output['hm'] = _sigmoid(output['hm'])
            # print(_tanh(output['hm']))
            # print(output['hm'])
            # exit()

        if opt.eval_oracle_hm:
            output['hm'] = batch['hm']
        if opt.eval_oracle_wh:
            output['wh'] = torch.from_numpy(gen_oracle_map(
                batch['wh'].detach().cpu().numpy(),
                batch['ind'].detach().cpu().numpy(),
                output['wh'].shape[3], output['wh'].shape[2])).to(opt.device)
        if opt.eval_oracle_offset:
            output['reg'] = torch.from_numpy(gen_oracle_map(
                batch['reg'].detach().cpu().numpy(),
                batch['ind'].detach().cpu().numpy(),
                output['reg'].shape[3], output['reg'].shape[2])).to(opt.device)

        # debug prints left from development:
        # print(output['hm']); print(batch['hm'])
        # print(self.crit(output['hm'], batch['hm'])); exit()
        ft_shape = output['ft'].shape
        hm_shape = output['hm'].shape
        # print(ft_shape); print(hm_shape)
        hm_loss += self.crit(output['hm'], batch['hm']) / opt.num_stacks
        # print(hm_loss)
        if opt.center_weight > 0:
            center_loss += self.centerloss(
                output['ft'].view(ft_shape[0], ft_shape[1], -1),
                output['center'],
                output['hm'].view(hm_shape[0], hm_shape[1], -1),
                batch['hm']) / opt.num_stacks
        if opt.wh_weight > 0:
            if opt.dense_wh:
                mask_weight = batch['dense_wh_mask'].sum() + 1e-4
                wh_loss += (
                    self.crit_wh(output['wh'] * batch['dense_wh_mask'],
                                 batch['dense_wh'] * batch['dense_wh_mask']) /
                    mask_weight) / opt.num_stacks
            elif opt.cat_spec_wh:
                wh_loss += self.crit_wh(
                    output['wh'], batch['cat_spec_mask'],
                    batch['ind'], batch['cat_spec_wh']) / opt.num_stacks
            else:
                wh_loss += self.crit_reg(
                    output['wh'], batch['reg_mask'],
                    batch['ind'], batch['wh']) / opt.num_stacks
        if opt.reg_offset and opt.off_weight > 0:
            off_loss += self.crit_reg(output['reg'], batch['reg_mask'],
                                      batch['ind'], batch['reg']) / opt.num_stacks

    loss = opt.hm_weight * hm_loss + opt.wh_weight * wh_loss + \
           opt.off_weight * off_loss + opt.center_weight * center_loss
    loss_stats = {'loss': loss, 'hm_loss': hm_loss, 'wh_loss': wh_loss,
                  'off_loss': off_loss, 'center_loss': center_loss}
    return loss, loss_stats
def forward(self, outputs, batch, fea, feao, sign, model, _means, precision_matrices):
    opt = self.opt
    hm_loss, wh_loss, off_loss = 0, 0, 0
    sign_record = 0
    r1, r3 = [], []
    r2, r4 = [], []
    r5, r7 = [], []
    r6, r8 = [], []
    # fea[0]  = fea[0]  - fea[0].mean(3, True).mean(2, True).mean(1, True)
    # feao[0] = feao[0] - feao[0].mean(3, True).mean(2, True).mean(1, True)
    # fea[1]  = fea[1]  - fea[1].mean(3, True).mean(2, True).mean(1, True)
    # feao[1] = feao[1] - feao[1].mean(3, True).mean(2, True).mean(1, True)

    # EWC penalty on parameters drifting away from their saved means
    ewc_loss = torch.tensor(0).float().to('cuda')
    if sign[0] == 0:
        for n, p in model.named_parameters():
            _loss = 0.1 * (p - _means[n]) ** 2
            ewc_loss += _loss.sum()

    # collect the feature pairs of samples flagged with sign == 0
    for i in range(len(sign)):
        if sign[i] == 0:
            if len(r1) == 0:
                r1, r3 = fea[0][i].unsqueeze(0), fea[1][i].unsqueeze(0)
                r2, r4 = feao[0][i].unsqueeze(0), feao[1][i].unsqueeze(0)
                r5, r7 = fea[2][i].unsqueeze(0), fea[3][i].unsqueeze(0)
                r6, r8 = feao[2][i].unsqueeze(0), feao[3][i].unsqueeze(0)
            else:
                r1, r3 = torch.cat((r1, fea[0][i].unsqueeze(0)), 0), torch.cat((r3, fea[1][i].unsqueeze(0)), 0)
                r2, r4 = torch.cat((r2, feao[0][i].unsqueeze(0)), 0), torch.cat((r4, feao[1][i].unsqueeze(0)), 0)
                r5, r7 = torch.cat((r5, fea[2][i].unsqueeze(0)), 0), torch.cat((r7, fea[3][i].unsqueeze(0)), 0)
                r6, r8 = torch.cat((r6, feao[2][i].unsqueeze(0)), 0), torch.cat((r8, feao[3][i].unsqueeze(0)), 0)
            sign_record = 1

    for s in range(opt.num_stacks):
        output = outputs[s]
        if not opt.mse_loss:
            output['hm'] = _sigmoid(output['hm'])

        if opt.eval_oracle_hm:
            output['hm'] = batch['hm']
        if opt.eval_oracle_wh:
            output['wh'] = torch.from_numpy(
                gen_oracle_map(batch['wh'].detach().cpu().numpy(),
                               batch['ind'].detach().cpu().numpy(),
                               output['wh'].shape[3],
                               output['wh'].shape[2])).to(opt.device)
        if opt.eval_oracle_offset:
            output['reg'] = torch.from_numpy(
                gen_oracle_map(batch['reg'].detach().cpu().numpy(),
                               batch['ind'].detach().cpu().numpy(),
                               output['reg'].shape[3],
                               output['reg'].shape[2])).to(opt.device)

        # feature-distillation term between current and old-model features
        cross_loss = torch.tensor(0).float().to('cuda')
        if sign_record != 0:
            cross_loss = 1 * (torch.sum((r1 - r2) ** 2) / (512 * 32 * 32) +
                              torch.sum((r3 - r4) ** 2) / (256 * 256 * 64) +
                              torch.sum((r5 - r6) ** 2) / (256 * 256 * 1) +
                              torch.sum((r7 - r8) ** 2) / (256 * 256 * 2)) / fea[0].shape[0]

        hm_loss += self.crit(output['hm'], batch['hm']) / opt.num_stacks
        if opt.wh_weight > 0:
            if opt.dense_wh:
                mask_weight = batch['dense_wh_mask'].sum() + 1e-4
                wh_loss += (self.crit_wh(
                    output['wh'] * batch['dense_wh_mask'],
                    batch['dense_wh'] * batch['dense_wh_mask']) /
                    mask_weight) / opt.num_stacks
            elif opt.cat_spec_wh:
                wh_loss += self.crit_wh(
                    output['wh'], batch['cat_spec_mask'],
                    batch['ind'], batch['cat_spec_wh']) / opt.num_stacks
            else:
                wh_loss += self.crit_reg(output['wh'], batch['reg_mask'],
                                         batch['ind'], batch['wh']) / opt.num_stacks
        if opt.reg_offset and opt.off_weight > 0:
            off_loss += self.crit_reg(output['reg'], batch['reg_mask'],
                                      batch['ind'], batch['reg']) / opt.num_stacks

    loss = opt.hm_weight * hm_loss + opt.off_weight * off_loss + ewc_loss + cross_loss
    # loss = opt.hm_weight * hm_loss + opt.wh_weight * wh_loss + \
    #        opt.off_weight * off_loss
    loss_stats = {'loss': loss, 'hm_loss': hm_loss, 'off_loss': off_loss}
    return loss, loss_stats
def forward(self, outputs, batch):
    opt = self.opt
    hm_loss, wh_loss, off_loss, lincomb_mask_loss, segm_loss = 0, 0, 0, 0, 0
    # hm_loss, wh_loss, off_loss, segm_loss = 0, 0, 0, 0
    for s in range(opt.num_stacks):
        output = outputs[s]
        if not opt.mse_loss:
            output['hm'] = _sigmoid(output['hm'])

        if opt.eval_oracle_hm:
            output['hm'] = batch['hm']
        if opt.eval_oracle_wh:
            output['wh'] = torch.from_numpy(
                gen_oracle_map(batch['wh'].detach().cpu().numpy(),
                               batch['ind'].detach().cpu().numpy(),
                               output['wh'].shape[3],
                               output['wh'].shape[2])).to(opt.device)
        if opt.eval_oracle_offset:
            output['reg'] = torch.from_numpy(
                gen_oracle_map(batch['reg'].detach().cpu().numpy(),
                               batch['ind'].detach().cpu().numpy(),
                               output['reg'].shape[3],
                               output['reg'].shape[2])).to(opt.device)

        hm_loss += self.crit(output['hm'], batch['hm']) / opt.num_stacks
        if opt.wh_weight > 0:
            if opt.dense_wh:
                mask_weight = batch['dense_wh_mask'].sum() + 1e-4
                wh_loss += (self.crit_wh(
                    output['wh'] * batch['dense_wh_mask'],
                    batch['dense_wh'] * batch['dense_wh_mask']) /
                    mask_weight) / opt.num_stacks
            elif opt.cat_spec_wh:
                wh_loss += self.crit_wh(
                    output['wh'], batch['cat_spec_mask'],
                    batch['ind'], batch['cat_spec_wh']) / opt.num_stacks
            else:
                wh_loss += self.crit_reg(output['wh'], batch['reg_mask'],
                                         batch['ind'], batch['wh']) / opt.num_stacks
        if opt.reg_offset and opt.off_weight > 0:
            off_loss += self.crit_reg(output['reg'], batch['reg_mask'],
                                      batch['ind'], batch['reg']) / opt.num_stacks
        if opt.use_semantic_segmentation_loss:
            segm_loss += self.semantic_segmentation_loss(
                output['segm'], batch['masks'],
                batch['gt_bbox_lbl']) / opt.num_stacks
        lincomb_mask_loss += self.lincomb_mask_loss(
            output['masks'], output['proto'], batch['reg_mask'],
            batch['masks'], batch['gt_bbox_lbl']) / opt.num_stacks

    loss = opt.hm_weight * hm_loss + opt.wh_weight * wh_loss + \
           opt.off_weight * off_loss + \
           opt.lincomb_mask_weight * lincomb_mask_loss + opt.segm_weight * segm_loss
    loss_stats = {'loss': loss, 'hm_loss': hm_loss, 'wh_loss': wh_loss,
                  'off_loss': off_loss, 'lincomb_mask_loss': lincomb_mask_loss,
                  'segm_loss': segm_loss}
    # loss = opt.hm_weight * hm_loss + opt.wh_weight * wh_loss + \
    #        opt.off_weight * off_loss + opt.segm_weight * segm_loss
    # loss_stats = {'loss': loss, 'hm_loss': hm_loss,
    #               'wh_loss': wh_loss, 'off_loss': off_loss, 'segm_loss': segm_loss}
    return loss, loss_stats
def forward(self, outputs, batch, global_step, tb_writer):
    opt = self.opt
    hm_loss, wh_loss, off_loss = 0, 0, 0
    batch_hm_loss, batch_wh_loss, batch_off_loss = 0, 0, 0  # per-batch losses
    for s in range(opt.num_stacks):
        output = outputs[s]
        output['hm'] = _sigmoid(output['hm'])
        if opt.eval_oracle_hm:
            output['hm'] = batch['hm']
        if opt.eval_oracle_wh:
            output['wh'] = torch.from_numpy(
                gen_oracle_map(batch['wh'].detach().cpu().numpy(),
                               batch['ind'].detach().cpu().numpy(),
                               output['wh'].shape[3],
                               output['wh'].shape[2])).to(opt.device)
        if opt.eval_oracle_offset:
            output['reg'] = torch.from_numpy(
                gen_oracle_map(batch['reg'].detach().cpu().numpy(),
                               batch['ind'].detach().cpu().numpy(),
                               output['reg'].shape[3],
                               output['reg'].shape[2])).to(opt.device)

        tmp = self.crit(output['hm'], batch['hm'])
        hm_loss = hm_loss + tmp[0] / opt.num_stacks
        batch_hm_loss = batch_hm_loss + tmp[1] / opt.num_stacks

        if opt.wh_weight > 0:
            if opt.mdn:
                BS = output['mdn_logits'].shape[0]
                M = opt.mdn_n_comps
                H, W = output['mdn_logits'].shape[-2:]
                K = opt.num_classes if opt.cat_spec_wh else 1
                mdn_logits = output['mdn_logits']
                mdn_logits = mdn_logits.reshape((BS, M, K, H, W)).permute((2, 0, 1, 3, 4))
                # mdn_logits.shape: torch.Size([80, 2, 3, 128, 128])
                mdn_pi = torch.clamp(torch.nn.Softmax(dim=2)(mdn_logits), 1e-4, 1. - 1e-4)
                # mdn_pi.shape: torch.Size([80, 2, 3, 128, 128])
                mdn_sigma = torch.clamp(torch.nn.ELU()(output['mdn_sigma']) + opt.mdn_min_sigma,
                                        1e-4, 1e5)
                mdn_sigma = mdn_sigma.reshape((BS, M * 2, K, H, W)).permute((2, 0, 1, 3, 4))
                # mdn_sigma.shape: torch.Size([80, 2, 6, 128, 128])
                mdn_mu = output['wh']
                mdn_mu = mdn_mu.reshape((BS, M * 2, K, H, W)).permute((2, 0, 1, 3, 4))
                # mdn_mu.shape: torch.Size([80, 2, 6, 128, 128])

                gt = batch['cat_spec_wh'] if opt.cat_spec_wh else batch['wh']
                gt = gt.reshape((BS, -1, opt.num_classes if opt.cat_spec_wh else 1,
                                 2)).permute((2, 0, 1, 3))
                # gt.shape: torch.Size([80, 2, 128, 2])
                if opt.cat_spec_wh:
                    mask = batch['cat_spec_mask'][:, :, 0::2].unsqueeze(-1).permute((2, 0, 1, 3))
                    # mask.shape: torch.Size([80, 2, 128, 1])
                else:
                    mask = batch['reg_mask'].unsqueeze(2).unsqueeze(0)
                    # print("mask.shape:", mask.shape)
                    # mask.shape: torch.Size([1, 2, 128, 1])
                V = torch.Tensor([opt.mdn_V]).cuda()

                I = mask.shape[-2]
                _gt = gt.reshape((K * BS, I, -1))
                _mask = mask.reshape((K * BS, I, -1))
                batch_ind = torch.repeat_interleave(batch['ind'], K, dim=0)
                _mdn_mu = _tranpose_and_gather_feat(mdn_mu.reshape((K * BS, -1, H, W)), batch_ind)
                _mdn_sigma = _tranpose_and_gather_feat(mdn_sigma.reshape((K * BS, -1, H, W)), batch_ind)
                _mdn_pi = _tranpose_and_gather_feat(mdn_pi.reshape((K * BS, -1, H, W)), batch_ind)
                # mdn_n_comps=3
                # batch['ind'].shape: torch.Size([2, 128])
                # gt.shape: torch.Size([1, 2, 128, 2])
                # mask.shape: torch.Size([1, 2, 128, 1])
                # mdn_mu.shape: torch.Size([1, 2, 6, 128, 128])
                # mdn_pi.shape: torch.Size([1, 2, 3, 128, 128])
                # mdn_sigma.shape: torch.Size([1, 2, 6, 128, 128])
                # _gt.shape: torch.Size([2, 128, 2])
                # _mask.shape: torch.Size([2, 128, 1])
                # _mdn_mu.shape: torch.Size([2, 128, 6])
                # _mdn_pi.shape: torch.Size([2, 128, 3])
                # _mdn_sigma.shape: torch.Size([2, 128, 6])
                tmp = self.crit_wh(_gt, _mdn_mu, _mdn_sigma, _mdn_pi, _mask, V, C=1)
                wh_loss += tmp[0] / opt.num_stacks
                batch_wh_loss += tmp[1] / opt.num_stacks

                for _c in range(opt.num_classes if opt.cat_spec_wh else 1):
                    _mdn_pi = _tranpose_and_gather_feat(mdn_pi[_c], batch['ind'])
                    _mdn_sigma = _tranpose_and_gather_feat(mdn_sigma[_c], batch['ind'])
                    _, _max_pi_ind = torch.max(_mdn_pi, -1)
                    if tb_writer is not None:
                        _cat = opt.cls_id_to_cls_name(_c)
                        tb_writer.add_histogram('mdn_pi_max_comp/{}'.format(_cat),
                                                _max_pi_ind + 1, global_step=global_step)
                        for i in range(_mdn_pi.shape[2]):
                            tb_writer.add_histogram('mdn_pi/{}/{}'.format(_cat, i),
                                                    _mdn_pi[:, :, i], global_step=global_step)
                            tb_writer.add_histogram('mdn_sigma/{}/{}'.format(_cat, i),
                                                    _mdn_sigma[:, :, i * 2:i * 2 + 2],
                                                    global_step=global_step)
            else:
                if opt.dense_wh:
                    mask_weight = batch['dense_wh_mask'].sum() + 1e-4
                    wh_loss += (self.crit_wh(
                        output['wh'] * batch['dense_wh_mask'],
                        batch['dense_wh'] * batch['dense_wh_mask']) /
                        mask_weight) / opt.num_stacks
                elif opt.cat_spec_wh:
                    wh_loss += self.crit_wh(
                        output['wh'], batch['cat_spec_mask'],
                        batch['ind'], batch['cat_spec_wh']) / opt.num_stacks
                else:
                    tmp = self.crit_reg(output['wh'], batch['reg_mask'],
                                        batch['ind'], batch['wh'])
                    wh_loss += tmp[0] / opt.num_stacks
                    batch_wh_loss += tmp[1] / opt.num_stacks
                    '''
                    output['wh'].shape: torch.Size([2, 160, 128, 128])
                    batch['ind'].shape: torch.Size([2, 128])
                    batch['cat_spec_mask'].shape: torch.Size([2, 128, 160])
                    '''
        if opt.reg_offset and opt.off_weight > 0:
            tmp = self.crit_reg(output['reg'], batch['reg_mask'],
                                batch['ind'], batch['reg'])
            off_loss += tmp[0] / opt.num_stacks
            batch_off_loss += tmp[1] / opt.num_stacks

    loss_stats = {}
    loss, batch_loss = 0, 0
    loss += opt.hm_weight * hm_loss + opt.wh_weight * wh_loss + \
            opt.off_weight * off_loss
    batch_loss += opt.hm_weight * batch_hm_loss + opt.wh_weight * batch_wh_loss + \
                  opt.off_weight * batch_off_loss
    loss_stats.update({'loss': loss, 'hm_loss': hm_loss, 'wh_loss': wh_loss,
                       'off_loss': off_loss, 'batch_loss': batch_loss,
                       'batch_hm_loss': batch_hm_loss, 'batch_wh_loss': batch_wh_loss,
                       'batch_off_loss': batch_off_loss})
    return loss, loss_stats
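# The crit_wh above is a mixture-density criterion over (w, h). For reference,
# the standard diagonal-Gaussian mixture negative log-likelihood, sketched for
# already-gathered per-object tensors (this is the textbook MDN objective, not
# necessarily the exact loss this fork implements):
import torch

def mdn_nll_sketch(gt, mu, sigma, pi):
    # gt: (N, 2); mu, sigma: (N, M*2); pi: (N, M) mixture weights.
    N, M = pi.shape
    comp = torch.distributions.Normal(mu.reshape(N, M, 2), sigma.reshape(N, M, 2))
    log_prob = comp.log_prob(gt.unsqueeze(1)).sum(-1)        # (N, M) per-component log-likelihood
    return -torch.logsumexp(pi.log() + log_prob, dim=-1).mean()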
def forward(self, outputs, batch):
    opt = self.opt
    hm_loss, wh_loss, off_loss = 0, 0, 0  # hughes
    for s in range(opt.num_stacks):
        output = outputs[s]
        if not opt.mse_loss:
            output['hm'] = _sigmoid(output['hm'])

        if opt.eval_oracle_hm:
            output['hm'] = batch['hm']
        if opt.eval_oracle_wh:
            output['wh'] = torch.from_numpy(
                gen_oracle_map(batch['wh'].detach().cpu().numpy(),
                               batch['ind'].detach().cpu().numpy(),
                               output['wh'].shape[3],
                               output['wh'].shape[2])).to(opt.device)
        if opt.eval_oracle_offset:
            output['reg'] = torch.from_numpy(
                gen_oracle_map(batch['reg'].detach().cpu().numpy(),
                               batch['ind'].detach().cpu().numpy(),
                               output['reg'].shape[3],
                               output['reg'].shape[2])).to(opt.device)

        # STD
        # write_hmgt = np.array(batch['hm'].detach().squeeze(0).squeeze(0))
        # write_hmgt = ((write_hmgt - np.min(write_hmgt) / np.max(write_hmgt)) * 255).astype(np.uint8)
        # cv2.imwrite('/store/datasets/UA-Detrac/exp/tensors/HM/HM_GT.jpg', write_hmgt)
        # write_hmre = np.array(output['hm'].detach().squeeze(0).squeeze(0))
        # write_hmre = ((write_hmre - np.min(write_hmre) / np.max(write_hmre)) * 255).astype(np.uint8)
        # cv2.imwrite('/store/datasets/UA-Detrac/exp/tensors/HM/HM_RE.jpg', write_hmre)
        # exit()

        hm_loss += self.crit(output['hm'], batch['hm']) / opt.num_stacks
        # print('hm loss ' + str(s) + ': ' + str(hm_loss))
        if opt.wh_weight > 0:
            if opt.dense_wh:
                mask_weight = batch['dense_wh_mask'].sum() + 1e-4
                wh_loss += (self.crit_wh(
                    output['wh'] * batch['dense_wh_mask'],
                    batch['dense_wh'] * batch['dense_wh_mask']) /
                    mask_weight) / opt.num_stacks
            elif opt.cat_spec_wh:
                wh_loss += self.crit_wh(
                    output['wh'], batch['cat_spec_mask'],
                    batch['ind'], batch['cat_spec_wh']) / opt.num_stacks
            else:
                wh_loss += self.crit_reg(output['wh'], batch['reg_mask'],
                                         batch['ind'], batch['wh']) / opt.num_stacks
        if opt.reg_offset and opt.off_weight > 0:
            off_loss += self.crit_reg(output['reg'], batch['reg_mask'],
                                      batch['ind'], batch['reg']) / opt.num_stacks

    loss = opt.hm_weight * hm_loss + opt.wh_weight * wh_loss + \
           opt.off_weight * off_loss
    loss_stats = {'loss': loss, 'hm_loss': hm_loss,
                  'wh_loss': wh_loss, 'off_loss': off_loss}  # hughes
    return loss, loss_stats
def forward(self, outputs, batch):
    opt = self.opt
    focal_loss, pull_loss, push_loss, reg_loss = 0, 0, 0, 0
    lm_focal_loss, rm_focal_loss, ct_focal_loss = 0, 0, 0
    lm_reg_loss, rm_reg_loss, ct_reg_loss = 0, 0, 0
    for s in range(opt.num_stacks):
        output = outputs[s]
        if not opt.mse_loss:
            output['lm'] = _sigmoid(output['lm'])
            output['rm'] = _sigmoid(output['rm'])
            output['ct'] = _sigmoid(output['ct'])

        if opt.eval_oracle_lm:
            output['lm'] = batch['lm']
        if opt.eval_oracle_rm:
            output['rm'] = batch['rm']
        if opt.eval_oracle_ct:
            output['ct'] = batch['ct']
        if opt.eval_oracle_ae:
            output['lm_tag'] = torch.from_numpy(
                gen_oracle_map(batch['lm_tag'].detach().cpu().numpy(),
                               batch['lm_tag'].detach().cpu().numpy(),
                               output['lm_tag'].shape[3],
                               output['lm_tag'].shape[2])).to(opt.device)
            output['rm_tag'] = torch.from_numpy(
                gen_oracle_map(batch['rm_tag'].detach().cpu().numpy(),
                               batch['rm_tag'].detach().cpu().numpy(),
                               output['rm_tag'].shape[3],
                               output['rm_tag'].shape[2])).to(opt.device)
        if opt.eval_oracle_offset:
            output['lm_reg'] = torch.from_numpy(
                gen_oracle_map(batch['lm_reg'].detach().cpu().numpy(),
                               batch['lm_tag'].detach().cpu().numpy(),
                               output['lm_reg'].shape[3],
                               output['lm_reg'].shape[2])).to(opt.device)
            output['rm_reg'] = torch.from_numpy(
                gen_oracle_map(batch['rm_reg'].detach().cpu().numpy(),
                               batch['rm_tag'].detach().cpu().numpy(),
                               output['rm_reg'].shape[3],
                               output['rm_reg'].shape[2])).to(opt.device)
            output['ct_reg'] = torch.from_numpy(
                gen_oracle_map(batch['ct_reg'].detach().cpu().numpy(),
                               batch['ct_tag'].detach().cpu().numpy(),
                               output['ct_reg'].shape[3],
                               output['ct_reg'].shape[2])).to(opt.device)

        # focal loss
        lm_focal_loss = self.crit(output['lm'], batch['lm']) / opt.num_stacks
        rm_focal_loss = self.crit(output['rm'], batch['rm']) / opt.num_stacks
        ct_focal_loss = self.crit(output['ct'], batch['ct']) / opt.num_stacks
        focal_loss += lm_focal_loss
        focal_loss += rm_focal_loss
        focal_loss += ct_focal_loss

        # tag loss
        pull, push = self.crit_tag(output['rm_tag'], output['lm_tag'],
                                   batch['rm_tag'], batch['lm_tag'],
                                   batch['reg_mask'])
        pull_loss += opt.pull_weight * pull / opt.num_stacks
        push_loss += opt.push_weight * push / opt.num_stacks

        # reg loss
        lm_reg_loss = opt.regr_weight * self.crit_reg(
            output['lm_reg'], batch['reg_mask'],
            batch['lm_tag'], batch['lm_reg']) / opt.num_stacks
        rm_reg_loss = opt.regr_weight * self.crit_reg(
            output['rm_reg'], batch['reg_mask'],
            batch['rm_tag'], batch['rm_reg']) / opt.num_stacks
        ct_reg_loss = opt.regr_weight * self.crit_reg(
            output['ct_reg'], batch['reg_mask'],
            batch['ct_tag'], batch['ct_reg']) / opt.num_stacks
        reg_loss += lm_reg_loss
        reg_loss += rm_reg_loss
        reg_loss += ct_reg_loss

    loss = focal_loss + pull_loss + push_loss + reg_loss
    loss_stats = {'loss': loss, 'focal_loss': focal_loss,
                  'pull_loss': pull_loss, 'push_loss': push_loss,
                  'reg_loss': reg_loss,
                  'lm_focal_loss': lm_focal_loss, 'rm_focal_loss': rm_focal_loss,
                  'ct_focal_loss': ct_focal_loss,
                  'lm_reg_loss': lm_reg_loss, 'rm_reg_loss': rm_reg_loss,
                  'ct_reg_loss': ct_reg_loss}
    return loss, loss_stats
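# The crit_tag above is an associative-embedding loss: "pull" draws the paired
# corner embeddings of one object together, "push" separates different objects.
# A CornerNet-style sketch for one image with N matched objects (hypothetical
# shapes; the repo's version also applies reg_mask):
import torch

def ae_pull_push_sketch(tag0, tag1, margin=1.0):
    # tag0, tag1: (N,) embeddings of the paired corners of N objects.
    N = tag0.shape[0]
    mean = (tag0 + tag1) / 2
    pull = ((tag0 - mean) ** 2 + (tag1 - mean) ** 2).mean()
    dist = (mean.unsqueeze(0) - mean.unsqueeze(1)).abs()     # (N, N) pairwise distances
    push = torch.relu(margin - dist)
    push = (push.sum() - margin * N) / max(N * (N - 1), 1)   # drop the diagonal terms
    return pull, push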
def forward(self, outputs, batch, global_step, tb_writer):
    opt = self.opt
    hm_loss, wh_loss, off_loss = 0, 0, 0
    hp_loss, off_loss, hm_hp_loss, hp_offset_loss = 0, 0, 0, 0
    loss_stats = {}
    for s in range(opt.num_stacks):
        output = outputs[s]
        output['hm'] = _sigmoid(output['hm'])
        if opt.hm_hp:
            output['hm_hp'] = _sigmoid(output['hm_hp'])

        if opt.eval_oracle_hmhp:
            output['hm_hp'] = batch['hm_hp']
        if opt.eval_oracle_hm:
            output['hm'] = batch['hm']
        if opt.eval_oracle_kps:
            if opt.dense_hp:
                output['hps'] = batch['dense_hps']
            else:
                output['hps'] = torch.from_numpy(gen_oracle_map(
                    batch['hps'].detach().cpu().numpy(),
                    batch['ind'].detach().cpu().numpy(),
                    opt.output_res, opt.output_res)).to(opt.device)
        if opt.eval_oracle_hp_offset:
            output['hp_offset'] = torch.from_numpy(gen_oracle_map(
                batch['hp_offset'].detach().cpu().numpy(),
                batch['hp_ind'].detach().cpu().numpy(),
                opt.output_res, opt.output_res)).to(opt.device)

        if opt.mdn:
            # per-keypoint sigmas, derived from the COCO keypoint similarity constants
            V = torch.Tensor((np.array(
                [.26, .25, .25, .35, .35, .79, .79, .72, .72, .62,
                 .62, 1.07, 1.07, .87, .87, .89, .89]) / 10.0).astype(np.float32)).float().cuda()

        hm_loss += self.crit(output['hm'], batch['hm'])[0] / opt.num_stacks
        if opt.mdn:
            mdn_logits = output['mdn_logits']
            # mdn_logits.shape: torch.Size([2, 3, 128, 128])
            if opt.mdn_dropout > 0 and opt.epoch < opt.mdn_dropout_stop:
                M = opt.mdn_n_comps
                ridx = torch.randperm(M)[:torch.randint(1, 1 + opt.mdn_dropout, (1,))]
                drop_mask = torch.ones((M,))
                drop_mask[ridx] = 0
                drop_mask = torch.reshape(drop_mask, (1, -1, 1, 1)).float().cuda()
                mdn_logits = mdn_logits * drop_mask
                if tb_writer is not None:
                    tb_writer.add_histogram('drop_out_idx', ridx + 1,
                                            global_step=global_step)
            else:
                # NOTE: mdn_pi is only defined on this (no-dropout) path
                mdn_pi = torch.clamp(torch.nn.Softmax(dim=1)(mdn_logits), 1e-4, 1. - 1e-4)
            mdn_sigma = torch.clamp(torch.nn.ELU()(output['mdn_sigma']) + opt.mdn_min_sigma,
                                    1e-4, 1e5)
            mdn_mu = output['hps']
            if tb_writer is not None:
                for i in range(mdn_pi.shape[1]):
                    tb_writer.add_histogram('mdn_pi/{}'.format(i), mdn_pi[:, i],
                                            global_step=global_step)
                    tb_writer.add_histogram('mdn_sigma/{}'.format(i),
                                            mdn_sigma[:, i * 2:i * 2 + 2],
                                            global_step=global_step)
            if opt.dense_hp:
                gt = batch['dense_hps']
                mask = batch['dense_hps_mask'][:, 0::2, :, :]
                _, max_pi_ind = torch.max(mdn_pi, 1)
            else:
                gt = batch['hps']
                mask = batch['hps_mask'][:, :, 0::2]
                mdn_mu = _tranpose_and_gather_feat(mdn_mu, batch['ind'])
                mdn_pi = _tranpose_and_gather_feat(mdn_pi, batch['ind'])
                mdn_sigma = _tranpose_and_gather_feat(mdn_sigma, batch['ind'])
                _, max_pi_ind = torch.max(mdn_pi, -1)
            if tb_writer is not None:
                tb_writer.add_histogram('mdn_pi_max_comp', max_pi_ind + 1,
                                        global_step=global_step)
            '''
            mdn_n_comps=3
            batch['hps'].shape: torch.Size([2, 32, 34])
            batch['hps_mask'].shape: torch.Size([2, 32, 34])
            batch['ind'].shape: torch.Size([2, 32])
            gt.shape: torch.Size([2, 32, 34])
            mask.shape: torch.Size([2, 32, 17])
            before gather, after gather
            mdn_mu.shape: torch.Size([2, 102, 128, 128]), torch.Size([2, 32, 102])
            mdn_pi.shape: torch.Size([2, 3, 128, 128]), torch.Size([2, 32, 3])
            mdn_sigma.shape: torch.Size([2, 6, 128, 128]), torch.Size([2, 32, 6])
            '''
            if opt.mdn_inter:
                hp_loss += self.crit_kp(gt, mdn_mu, mdn_sigma, mdn_pi, mask, V,
                                        debug=opt.debug == 6)[0] / opt.num_stacks
            else:
                hp_loss = self.crit_kp(gt, mdn_mu, mdn_sigma, mdn_pi, mask, V,
                                       debug=opt.debug == 6)[0] / opt.num_stacks
        else:
            if opt.dense_hp:
                mask_weight = batch['dense_hps_mask'].sum() + 1e-4
                hp_loss += (self.crit_kp(output['hps'] * batch['dense_hps_mask'],
                                         batch['dense_hps'] * batch['dense_hps_mask']) /
                            mask_weight) / opt.num_stacks
            else:
                hp_loss += self.crit_kp(output['hps'], batch['hps_mask'],
                                        batch['ind'], batch['hps']) / opt.num_stacks

        if opt.wh_weight > 0:
            wh_loss += self.crit_reg(output['wh'], batch['reg_mask'],
                                     batch['ind'], batch['wh'])[0] / opt.num_stacks
        if opt.reg_offset and opt.off_weight > 0:
            off_loss += self.crit_reg(output['reg'], batch['reg_mask'],
                                      batch['ind'], batch['reg'])[0] / opt.num_stacks
        if opt.reg_hp_offset and opt.off_weight > 0:
            hp_offset_loss += self.crit_reg(
                output['hp_offset'], batch['hp_mask'],
                batch['hp_ind'], batch['hp_offset'])[0] / opt.num_stacks
        if opt.hm_hp and opt.hm_hp_weight > 0:
            hm_hp_loss += self.crit_hm_hp(
                output['hm_hp'], batch['hm_hp'])[0] / opt.num_stacks

    loss = opt.hm_weight * hm_loss + opt.wh_weight * wh_loss + \
           opt.off_weight * off_loss + opt.hp_weight * hp_loss + \
           opt.hm_hp_weight * hm_hp_loss + opt.off_weight * hp_offset_loss
    loss_stats.update({'loss': loss, 'hm_loss': hm_loss, 'hp_loss': hp_loss,
                       'hm_hp_loss': hm_hp_loss, 'hp_offset_loss': hp_offset_loss,
                       'wh_loss': wh_loss, 'off_loss': off_loss})
    return loss, loss_stats
def forward(self, outputs, batch):
    cfg = self.cfg
    hm_loss, wh_loss, off_loss, seg_loss, seg_feat_loss = 0, 0, 0, 0, 0
    hp_loss, off_loss, hm_hp_loss, hp_offset_loss = 0, 0, 0, 0
    hm, wh, hps, reg, hm_hp, hp_offset, seg_feat, seg = outputs
    for s in range(cfg.MODEL.NUM_STACKS):
        hm = _sigmoid(hm)
        if cfg.LOSS.HM_HP and not cfg.LOSS.MSE_LOSS:
            hm_hp = _sigmoid(hm_hp)

        if cfg.TEST.EVAL_ORACLE_HMHP:
            hm_hp = batch['hm_hp']
        if cfg.TEST.EVAL_ORACLE_HM:
            hm = batch['hm']
        if cfg.TEST.EVAL_ORACLE_KPS:
            if cfg.LOSS.DENSE_HP:
                hps = batch['dense_hps']
            else:
                hps = torch.from_numpy(
                    gen_oracle_map(
                        batch['hps'].detach().cpu().numpy(),
                        batch['ind'].detach().cpu().numpy(),
                        cfg.MODEL.OUTPUT_RES, cfg.MODEL.OUTPUT_RES)).to(
                            torch.device('cuda:%d' % self.local_rank))
        if cfg.TEST.EVAL_ORACLE_HP_OFFSET:
            hp_offset = torch.from_numpy(
                gen_oracle_map(
                    hp_offset.detach().cpu().numpy(),
                    batch['hp_ind'].detach().cpu().numpy(),
                    cfg.MODEL.OUTPUT_RES, cfg.MODEL.OUTPUT_RES)).to(
                        torch.device('cuda:%d' % self.local_rank))

        hm_loss += self.crit(hm, batch['hm']) / cfg.MODEL.NUM_STACKS
        if cfg.LOSS.DENSE_HP:
            mask_weight = batch['dense_hps_mask'].sum() + 1e-4
            hp_loss += (self.crit_kp(
                hps * batch['dense_hps_mask'],
                batch['dense_hps'] * batch['dense_hps_mask']) /
                mask_weight) / cfg.MODEL.NUM_STACKS
        else:
            hp_loss += self.crit_kp(hps, batch['hps_mask'],
                                    batch['ind'], batch['hps']) / cfg.MODEL.NUM_STACKS
        if cfg.LOSS.WH_WEIGHT > 0:
            wh_loss += self.crit_reg(wh, batch['reg_mask'],
                                     batch['ind'], batch['wh']) / cfg.MODEL.NUM_STACKS
        if cfg.LOSS.REG_OFFSET and cfg.LOSS.OFF_WEIGHT > 0:
            off_loss += self.crit_reg(reg, batch['reg_mask'],
                                      batch['ind'], batch['reg']) / cfg.MODEL.NUM_STACKS
        if cfg.LOSS.REG_HP_OFFSET and cfg.LOSS.OFF_WEIGHT > 0:
            hp_offset_loss += self.crit_reg(
                hp_offset, batch['hp_mask'],
                batch['hp_ind'], batch['hp_offset']) / cfg.MODEL.NUM_STACKS
        if cfg.LOSS.HM_HP and cfg.LOSS.HM_HP_WEIGHT > 0:
            hm_hp_loss += self.crit_hm_hp(
                hm_hp, batch['hm_hp']) / cfg.MODEL.NUM_STACKS
        seg_loss += self.crit_seg(seg, seg_feat, batch['ind'], batch['seg'])

    loss = cfg.LOSS.HM_WEIGHT * hm_loss + cfg.LOSS.WH_WEIGHT * wh_loss + \
           cfg.LOSS.OFF_WEIGHT * off_loss + cfg.LOSS.HP_WEIGHT * hp_loss + \
           cfg.LOSS.HM_HP_WEIGHT * hm_hp_loss + cfg.LOSS.OFF_WEIGHT * hp_offset_loss + \
           seg_loss
    loss_stats = {'loss': loss, 'hm_loss': hm_loss, 'hp_loss': hp_loss,
                  'hm_hp_loss': hm_hp_loss, 'hp_offset_loss': hp_offset_loss,
                  'wh_loss': wh_loss, 'off_loss': off_loss, 'seg_loss': seg_loss}
    return loss, loss_stats
def forward(self, outputs, batch):
    opt = self.opt
    hm_loss, Dis_loc_loss, Dis_rot_loss, Dis_dim_loss, wh_loss = 0, 0, 0, 0, 0
    for s in range(opt.num_stacks):
        output = outputs[s]
        output['hm'] = _sigmoid(output['hm'])
        output['rot'] = _sigmoid(output['rot'])
        # project the (sin, cos) pair onto the unit circle
        denominator = torch.rsqrt(output['rot'][:, 0:1, :, :] ** 2 +
                                  output['rot'][:, 1:2, :, :] ** 2)
        output['rot'] = output['rot'] * denominator
        output['dim'] = _sigmoid(output['dim']) - 0.5
        if opt.eval_oracle_dep:
            output['dep'] = torch.from_numpy(
                gen_oracle_map(batch['dep'].detach().cpu().numpy(),
                               batch['ind'].detach().cpu().numpy(),
                               opt.output_w, opt.output_h)).to(opt.device)

        hm_loss += self.crit(output['hm'], batch['hm']) / opt.num_stacks
        # todo ex
        Dis_loc_loss += self.bbox_loss(output['dep'], output['reg'], batch, 'loc')
        Dis_rot_loss += self.bbox_loss(output['rot'], output['reg'], batch, 'rot')
        Dis_dim_loss += self.bbox_loss(output['dim'], output['reg'], batch, 'dim')
        if opt.reg_bbox and opt.wh_weight > 0:
            wh_loss += self.crit_reg(output['wh'], batch['rot_mask'],
                                     batch['ind'], batch['wh']) / opt.num_stacks

    loss = 1 * opt.hm_weight * hm_loss + opt.loc_weight * Dis_loc_loss + \
           1 * opt.dim_weight * Dis_dim_loss + 1 * opt.rot_weight * Dis_rot_loss + \
           1 * opt.wh_weight * wh_loss
    # if opt.dep_weight > 0:
    #     dep_loss += self.dept_reg(output['dep'], batch['reg_mask'],
    #                               batch['ind'], batch['dep']) / opt.num_stacks
    # if opt.dim_weight > 0:
    #     dim_loss += self.crit_reg(output['dim'], batch['reg_mask'],
    #                               batch['ind'], batch['dim']) / opt.num_stacks
    # if opt.rot_weight > 0:
    #     rot_loss += self.crit_rot(output['rot'], batch['rot_mask'],
    #                               batch['ind'], batch['rotbin'],
    #                               batch['rotres']) / opt.num_stacks
    # if opt.reg_bbox and opt.wh_weight > 0:
    #     wh_loss += self.crit_reg(output['wh'], batch['rot_mask'],
    #                              batch['ind'], batch['wh']) / opt.num_stacks
    # if opt.reg_offset and opt.off_weight > 0:
    #     off_loss += self.crit_reg(output['reg'], batch['rot_mask'],
    #                               batch['ind'], batch['reg']) / opt.num_stacks
    # loss = opt.hm_weight * hm_loss + opt.dep_weight * dep_loss + \
    #        opt.dim_weight * dim_loss + opt.rot_weight * rot_loss + \
    #        0 * opt.wh_weight * wh_loss + opt.off_weight * off_loss
    loss_stats = {'loss': loss, 'hm_loss': hm_loss,
                  'Dis_loc_loss': Dis_loc_loss, 'Dis_rot_loss': Dis_rot_loss,
                  'Dis_dim_loss': Dis_dim_loss, 'wh_loss': wh_loss}
    # loss_stats = {'loss': loss, 'hm_loss': hm_loss,
    #               'Dis_loc_loss': Dis_loc_loss, 'Dis_rot_loss': Dis_rot_loss}
    return loss, loss_stats
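# Quick check of the rsqrt normalization above (hypothetical values): after
# scaling by the inverse norm, the (sin, cos) pair lies on the unit circle.
import torch

rot = torch.tensor([0.6, 0.9])                     # raw (sin, cos) activations
inv_norm = torch.rsqrt(rot[0] ** 2 + rot[1] ** 2)
unit = rot * inv_norm
print(unit, (unit ** 2).sum())                     # norm-squared is 1.0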
def forward(self, outputs, batch):
    opt = self.opt
    cor_att_names = self.opt.cor_att_names
    hm_loss, wh_loss, off_loss, cor_att_loss, \
        offset_loss, final_wh_loss, final_reg_loss = 0, 0, 0, 0, 0, 0, 0
    for s in range(opt.num_stacks):
        output = outputs[s]
        if not opt.mse_loss:
            output['hm'] = _sigmoid(output['hm'])
            output['cor_att'] = _sigmoid(output['cor_att'])

        if opt.eval_oracle_hm:
            output['hm'] = batch['hm']
        if opt.eval_oracle_wh:
            output['wh'] = torch.from_numpy(
                gen_oracle_map(batch['wh'].detach().cpu().numpy(),
                               batch['ind'].detach().cpu().numpy(),
                               output['wh'].shape[3],
                               output['wh'].shape[2])).to(opt.device)
        if opt.eval_oracle_offset:
            output['reg'] = torch.from_numpy(
                gen_oracle_map(batch['reg'].detach().cpu().numpy(),
                               batch['ind'].detach().cpu().numpy(),
                               output['reg'].shape[3],
                               output['reg'].shape[2])).to(opt.device)

        hm_loss += self.crit(output['hm'], batch['hm']) / opt.num_stacks
        if opt.wh_weight > 0:
            if opt.dense_wh:
                mask_weight = batch['dense_wh_mask'].sum() + 1e-4
                wh_loss += (self.crit_wh(
                    output['wh'] * batch['dense_wh_mask'],
                    batch['dense_wh'] * batch['dense_wh_mask']) /
                    mask_weight) / opt.num_stacks
            elif opt.cat_spec_wh:
                wh_loss += self.crit_wh(
                    output['wh'], batch['cat_spec_mask'],
                    batch['ind'], batch['cat_spec_wh']) / opt.num_stacks
            else:
                wh_loss += self.crit_reg(output['wh'], batch['reg_mask'],
                                         batch['ind'], batch['wh']) / opt.num_stacks
        if opt.use_agg_att:
            final_wh_loss += self.crit_reg(output['final_wh'], batch['reg_mask'],
                                           batch['ind'], batch['wh']) / opt.num_stacks
        if opt.use_agg_att_ctreg:
            final_reg_loss += self.crit_reg(
                output['reg'], batch['reg_mask'],
                batch['ind'], batch['reg']) / opt.num_stacks
        cor_att_loss += self.crit(
            output['cor_att'], batch['cor_att']) / opt.num_stacks / len(cor_att_names)
        if opt.reg_offset and opt.off_weight > 0:
            off_loss += self.crit_reg(output['reg'], batch['reg_mask'],
                                      batch['ind'], batch['reg']) / opt.num_stacks

    loss = opt.hm_weight * hm_loss + \
           opt.off_weight * off_loss + \
           opt.wh_weight * wh_loss + \
           opt.cor_att_weight * cor_att_loss
    loss_stats = {'loss': loss, 'hm_loss': hm_loss, 'wh_loss': wh_loss,
                  'off_loss': off_loss, 'cor_att_loss': cor_att_loss}
    if opt.use_agg_att:
        loss += opt.weight_final_wh_loss * final_wh_loss
        loss_stats.update({'final_wh_loss': final_wh_loss})
    if opt.use_agg_att_ctreg:
        loss += opt.weight_final_reg_loss * final_reg_loss
        loss_stats.update({'final_reg_loss': final_reg_loss})
    return loss, loss_stats
def forward(self, outputs, batch):
    opt = self.opt
    hm_loss, off_loss, off_sigmawh_loss, KL_loss = 0, 0, 0, 0
    for s in range(opt.num_stacks):
        output = outputs[s]
        # if not opt.mse_loss:
        #     output['hm'] = _sigmoid(output['hm'])
        # output['hm'] = torch.relu_(output['hm']) + 1e-20

        if opt.eval_oracle_hm:
            output['hm'] = batch['hm']
        if opt.eval_oracle_offset:
            output['reg'] = torch.from_numpy(
                gen_oracle_map(batch['reg'].detach().cpu().numpy(),
                               batch['ind'].detach().cpu().numpy(),
                               output['reg'].shape[3],
                               output['reg'].shape[2])).to(opt.device)

        Kl, hm = self.crit(output['hm'], batch['hm'], batch['ct_ind'],
                           batch['sigma_wh'], batch['hm_mask'],
                           batch['sigmawh_mask'])
        if Kl is not None:
            hm_loss += hm / opt.num_stacks
            KL_loss += Kl / opt.num_stacks
            if opt.reg_sigma_offset_weight > 0:
                off_sigmawh_loss += self.crit_reg(
                    output['reg_sigmawh_offset'], batch['reg_mask'],
                    batch['ind'], batch['reg_sigmawh']) / opt.num_stacks
            if opt.reg_offset and opt.off_weight > 0:
                off_loss += self.crit_reg(output['reg'], batch['reg_mask'],
                                          batch['ind'], batch['reg']) / opt.num_stacks
            loss = opt.hm_weight * hm_loss + opt.hm_weight * KL_loss + \
                   opt.reg_sigma_offset_weight * off_sigmawh_loss + \
                   opt.off_weight * off_loss
            loss_stats = {'loss': loss, 'hm_loss': hm_loss, 'KL_loss': KL_loss,
                          'off_sigmawh_loss': off_sigmawh_loss,
                          'off_ct_loss': off_loss}
        else:
            hm_loss += hm / opt.num_stacks
            KL_loss = None
            if opt.reg_sigma_offset_weight > 0:
                off_sigmawh_loss += self.crit_reg(
                    output['reg_sigmawh_offset'], batch['reg_mask'],
                    batch['ind'], batch['reg_sigmawh']) / opt.num_stacks
            if opt.reg_offset and opt.off_weight > 0:
                off_loss += self.crit_reg(output['reg'], batch['reg_mask'],
                                          batch['ind'], batch['reg']) / opt.num_stacks
            loss = opt.hm_weight * hm_loss + \
                   opt.reg_sigma_offset_weight * off_sigmawh_loss + \
                   opt.off_weight * off_loss
            loss_stats = {'loss': loss, 'hm_loss': hm_loss, 'KL_loss': KL_loss,
                          'off_sigmawh_loss': off_sigmawh_loss,
                          'off_ct_loss': off_loss}
    return loss, loss_stats
def forward(self, outputs, batch):
    opt = self.opt
    hm_loss, dep_loss, rot_loss, dim_loss = 0, 0, 0, 0
    wh_loss, off_loss = 0, 0
    id_loss = 0
    for s in range(opt.num_stacks):
        output = outputs[s]
        output['hm'] = _sigmoid(output['hm'])
        output['dep'] = 1. / (output['dep'].sigmoid() + 1e-6) - 1.
        if opt.eval_oracle_dep:
            output['dep'] = torch.from_numpy(
                gen_oracle_map(batch['dep'].detach().cpu().numpy(),
                               batch['ind'].detach().cpu().numpy(),
                               opt.output_w, opt.output_h)).to(opt.device)

        hm_loss += self.crit(output['hm'], batch['hm']) / opt.num_stacks
        if opt.dep_weight > 0:
            dep_loss += self.crit_reg(output['dep'], batch['reg_mask'],
                                      batch['ind'], batch['dep']) / opt.num_stacks
        if opt.dim_weight > 0:
            dim_loss += self.crit_reg(output['dim'], batch['reg_mask'],
                                      batch['ind'], batch['dim']) / opt.num_stacks
        if opt.rot_weight > 0:
            rot_loss += self.crit_rot(output['rot'], batch['rot_mask'],
                                      batch['ind'], batch['rotbin'],
                                      batch['rotres']) / opt.num_stacks
        if opt.reg_bbox and opt.wh_weight > 0:
            wh_loss += self.crit_reg(output['wh'], batch['rot_mask'],
                                     batch['ind'], batch['wh']) / opt.num_stacks
        if opt.reg_offset and opt.off_weight > 0:
            off_loss += self.crit_reg(output['reg'], batch['rot_mask'],
                                      batch['ind'], batch['reg']) / opt.num_stacks
        # if opt.reg_id and opt.id_weight > 0:
        #     pass
        #     id_head = _transpose_and_gather_feat(output['id'], batch['ind'])
        #     id_head = id_head[batch['reg_mask'] > 0].contiguous()
        #     id_head = self.emb_scale * F.normalize(id_head)
        #     id_target = batch['ids'][batch['reg_mask'] > 0]
        #     id_output =

    loss = opt.hm_weight * hm_loss + opt.dep_weight * dep_loss + \
           opt.dim_weight * dim_loss + opt.rot_weight * rot_loss + \
           opt.wh_weight * wh_loss + opt.off_weight * off_loss
    # loss = opt.hm_weight * hm_loss + opt.dep_weight * dep_loss + \
    #        opt.dim_weight * dim_loss + opt.rot_weight * rot_loss + \
    #        opt.off_weight * off_loss
    loss_stats = {'loss': loss, 'hm_loss': hm_loss, 'dep_loss': dep_loss,
                  'dim_loss': dim_loss, 'rot_loss': rot_loss}
    # Worth writing down -- I'm still rather proud of cracking this bug.
    # With --not_reg_wh in opt, training crashed with:
    #   File ".../torch/nn/parallel/scatter_gather.py", line 63, in gather_map
    #     return type(out)(map(gather_map, zip(*outputs)))
    #   TypeError: zip argument #1 must support iteration
    # After more than a day of digging, it turned out that when DataParallel
    # merges results from multiple GPUs, every element must be a tensor, and
    # that tensor must live on a GPU. The culprit: when reg_wh is disabled,
    # wh_loss stays a plain Python 0, and embedding it in loss_stats triggers
    # the error; pulling wh_loss out of the always-on part of loss_stats (as
    # done below) fixes it. The maddening part was that everything worked with
    # wh enabled and only failed with --not_reg_..., as if a specific number of
    # network heads were required. Only after the error persisted with the
    # original number of heads did I suspect a hard-coding issue. I nearly gave
    # up halfway, but since I'll be hacking this network further anyway, the
    # problem had to be solved, so I stuck with it. A pat on my own back! 2021.1.27
    if opt.reg_bbox and opt.wh_weight > 0:
        loss_stats.update({'wh_loss': wh_loss})
    if opt.reg_offset and opt.off_weight > 0:
        loss_stats.update({'off_loss': off_loss})
    return loss, loss_stats
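# A minimal illustration of the pitfall described in the comment above:
# nn.DataParallel gathers the returned loss_stats dict from every replica, and
# its gather_map requires each value to be a CUDA tensor, so a plain Python 0
# in the dict raises "TypeError: zip argument #1 must support iteration". A
# robust pattern (hypothetical helper) is to keep only real tensors:
import torch

def safe_loss_stats(loss, **candidates):
    stats = {'loss': loss}
    stats.update({k: v for k, v in candidates.items() if torch.is_tensor(v)})
    return stats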
def forward(self, outputs, batch):
    eps = 1e-6
    opt = self.opt
    hm_loss, wh_loss, off_loss, allmask_loss = 0, 0, 0, 0
    for s in range(opt.num_stacks):
        output = outputs[s]
        if opt.eval_oracle_hm:
            output['hm'] = batch['hm']
        if opt.eval_oracle_wh:
            output['wh'] = torch.from_numpy(
                gen_oracle_map(batch['wh'].detach().cpu().numpy(),
                               batch['ind'].detach().cpu().numpy(),
                               output['wh'].shape[3],
                               output['wh'].shape[2])).to(opt.device)
        if opt.eval_oracle_offset:
            output['reg'] = torch.from_numpy(
                gen_oracle_map(batch['reg'].detach().cpu().numpy(),
                               batch['ind'].detach().cpu().numpy(),
                               output['reg'].shape[3],
                               output['reg'].shape[2])).to(opt.device)

        # dump debug visualizations every third call
        self.count += 1
        if self.count % 3 == 0:
            myhm = output['hm'][0, 0, :, :].detach().cpu().numpy()
            cv2.imwrite("./results/pred.jpg",
                        (myhm * (myhm > 0.) * 255).astype(np.uint8))
            img = (batch['input'].detach().cpu().numpy()[0, :, :, :] * 255).astype(np.uint8)
            img = cv2.resize(img.transpose(1, 2, 0), (myhm.shape[1], myhm.shape[0]))
            cv2.imwrite("./results/input.jpg", img)
            cv2.imwrite("./results/center.jpg",
                        (batch['hm'].detach().cpu().numpy()[0, 0, :, :] * 255).astype(np.uint8))
            # levelnum is assumed to be defined at module scope
            allmask = output['allmask'][
                0, 0:self.opt.num_maskclasses + levelnum, :, :].detach().cpu().numpy().transpose(1, 2, 0)
            assert (self.opt.num_maskclasses == 9)
            cv2.imwrite("./results/output_top.jpg", (allmask[:, :, 0:3] * 255).astype(np.uint8))
            cv2.imwrite("./results/output_middle.jpg", (allmask[:, :, 3:6] * 255).astype(np.uint8))
            cv2.imwrite("./results/output_bottom.jpg", (allmask[:, :, 6:9] * 255).astype(np.uint8))
            cv2.imwrite("./results/output_large.jpg", (allmask[:, :, 9:12] * 255).astype(np.uint8))
            cv2.imwrite("./results/output_small.jpg", (allmask[:, :, 12:15] * 255).astype(np.uint8))

        hm_loss += self.crit(output['hm'], (batch['hm'] > 0.30).float()) / opt.num_stacks
        if opt.wh_weight > 0:
            if opt.dense_wh:
                mask_weight = batch['dense_wh_mask'].sum() + eps
                wh_loss += (self.crit_wh(
                    output['wh'] * batch['dense_wh_mask'],
                    batch['dense_wh'] * batch['dense_wh_mask']) /
                    mask_weight) / opt.num_stacks
            elif opt.cat_spec_wh:
                wh_loss += self.crit_wh(
                    output['wh'], batch['cat_spec_mask'],
                    batch['ind'], batch['cat_spec_wh']) / opt.num_stacks
            else:
                wh_loss += self.crit_reg(output['wh'], batch['reg_mask'],
                                         batch['ind'], batch['wh']) / opt.num_stacks
        if opt.reg_offset and opt.off_weight > 0:
            off_loss += self.crit_reg(output['reg'], batch['reg_mask'],
                                      batch['ind'], batch['reg']) / opt.num_stacks
        for i in range(0, output['allmask'].size()[1]):
            allmask_loss += self.crit_allmask(
                output['allmask'][:, i, :, :],
                batch['allmask'][:, i, :, :]) / opt.num_stacks

    loss = opt.hm_weight * hm_loss + opt.wh_weight * wh_loss + \
           opt.off_weight * off_loss + opt.allmask_weight * allmask_loss
    loss_stats = {'loss': loss, 'hm_loss': hm_loss, 'wh_loss': wh_loss,
                  'off_loss': off_loss, 'allmask_loss': allmask_loss}
    return loss, loss_stats
def forward(self, outputs, batch):
    opt = self.opt
    hm_loss, dep_loss, rot_loss, dim_loss = 0, 0, 0, 0
    wh_loss, off_loss = 0, 0
    tilt_loss = 0
    for s in range(opt.num_stacks):
        output = outputs[s]
        output['hm'] = _sigmoid(output['hm'])
        output['dep'] = 1. / (output['dep'].sigmoid() + 1e-6) - 1.
        if opt.eval_oracle_dep:
            output['dep'] = torch.from_numpy(
                gen_oracle_map(batch['dep'].detach().cpu().numpy(),
                               batch['ind'].detach().cpu().numpy(),
                               opt.output_w, opt.output_h)).to(opt.device)

        hm_loss += self.crit(output['hm'], batch['hm']) / opt.num_stacks
        if opt.dep_weight > 0:  # depth: L1 loss
            dep_loss += self.crit_reg(output['dep'], batch['reg_mask'],
                                      batch['ind'], batch['dep']) / opt.num_stacks
        if opt.dim_weight > 0:  # 3D dimensions: L1 loss
            dim_loss += self.crit_reg(output['dim'], batch['reg_mask'],
                                      batch['ind'], batch['dim']) / opt.num_stacks
        if opt.rot_weight > 0:  # orientation
            rot_loss += self.crit_rot(output['rot'], batch['rot_mask'],
                                      batch['ind'], batch['rotbin'],
                                      batch['rotres']) / opt.num_stacks
        # print('rot_mask')
        # print(batch['rot_mask'])
        # tilt
        # if opt.tilt_weight > 0:
        #     tilt_loss += self.crit_reg(output['tilt'], batch['rot_mask'],
        #                                batch['ind'], batch['tilt']) / opt.num_stacks
        # print('reg_mask')
        # print(batch['reg_mask'])
        if opt.reg_bbox and opt.wh_weight > 0:
            wh_loss += self.crit_reg(output['wh'], batch['rot_mask'],
                                     batch['ind'], batch['wh']) / opt.num_stacks
        if opt.reg_offset and opt.off_weight > 0:
            off_loss += self.crit_reg(output['reg'], batch['rot_mask'],
                                      batch['ind'], batch['reg']) / opt.num_stacks

    if opt.reg_bbox:
        loss = opt.hm_weight * hm_loss + opt.dep_weight * dep_loss + \
               opt.dim_weight * dim_loss + opt.rot_weight * rot_loss + \
               opt.wh_weight * wh_loss + opt.off_weight * off_loss
        loss_stats = {'loss': loss, 'hm_loss': hm_loss, 'dep_loss': dep_loss,
                      'dim_loss': dim_loss, 'rot_loss': rot_loss,
                      'wh_loss': wh_loss, 'off_loss': off_loss,
                      'tilt_loss': tilt_loss}
    else:
        loss = opt.hm_weight * hm_loss + opt.dep_weight * dep_loss + \
               opt.dim_weight * dim_loss + opt.rot_weight * rot_loss + \
               opt.off_weight * off_loss
        loss_stats = {'loss': loss, 'hm_loss': hm_loss, 'dep_loss': dep_loss,
                      'dim_loss': dim_loss, 'rot_loss': rot_loss,
                      'off_loss': off_loss}
    return loss, loss_stats
def forward(self, outputs_1, outputs_2, batch):
    opt = self.opt
    hm_loss_1, wh_loss_1, off_loss_1 = 0, 0, 0
    hm_loss_2, wh_loss_2, off_loss_2 = 0, 0, 0
    for s in range(opt.num_stacks):
        output_1 = outputs_1[s]
        output_2 = outputs_2[s]
        if not opt.mse_loss:
            output_1['hm_1'] = _sigmoid(output_1['hm_1'])
            output_2['hm_2'] = _sigmoid(output_2['hm_2'])

        if opt.eval_oracle_hm:
            output_1['hm_1'] = batch['hm_1']
            output_2['hm_2'] = batch['hm_2']
        if opt.eval_oracle_wh:
            output_1['wh_1'] = torch.from_numpy(gen_oracle_map(
                batch['wh_1'].detach().cpu().numpy(),
                batch['ind_1'].detach().cpu().numpy(),
                output_1['wh_1'].shape[3], output_1['wh_1'].shape[2])).to(opt.device)
            output_2['wh_2'] = torch.from_numpy(gen_oracle_map(
                batch['wh_2'].detach().cpu().numpy(),
                batch['ind_2'].detach().cpu().numpy(),
                output_2['wh_2'].shape[3], output_2['wh_2'].shape[2])).to(opt.device)
        if opt.eval_oracle_offset:
            output_1['reg_1'] = torch.from_numpy(gen_oracle_map(
                batch['reg_1'].detach().cpu().numpy(),
                batch['ind_1'].detach().cpu().numpy(),
                output_1['reg_1'].shape[3], output_1['reg_1'].shape[2])).to(opt.device)
            output_2['reg_2'] = torch.from_numpy(gen_oracle_map(
                batch['reg_2'].detach().cpu().numpy(),
                batch['ind_2'].detach().cpu().numpy(),
                output_2['reg_2'].shape[3], output_2['reg_2'].shape[2])).to(opt.device)

        hm_loss_1 += self.crit(output_1['hm_1'], batch['hm_1']) / opt.num_stacks
        hm_loss_2 += self.crit(output_2['hm_2'], batch['hm_2']) / opt.num_stacks
        if opt.wh_weight > 0:
            if opt.dense_wh:
                mask_weight = batch['dense_wh_mask'].sum() + 1e-4
                # '''
                wh_loss += (
                    self.crit_wh(output['wh'] * batch['dense_wh_mask'],  # not usable, not updated for the multi-task version
                                 batch['dense_wh'] * batch['dense_wh_mask']) /
                    mask_weight) / opt.num_stacks
                # '''
            elif opt.cat_spec_wh:
                wh_loss_1 += self.crit_wh(
                    output_1['wh_1'], batch['cat_spec_mask_1'],
                    batch['ind_1'], batch['cat_spec_wh_1']) / opt.num_stacks
                wh_loss_2 += self.crit_wh(
                    output_2['wh_2'], batch['cat_spec_mask_2'],
                    batch['ind_2'], batch['cat_spec_wh_2']) / opt.num_stacks
            else:
                wh_loss_1 += self.crit_reg(
                    output_1['wh_1'], batch['reg_mask_1'], batch['ind_1'],
                    batch['wh_1'], output_1['hm_1'], batch['hm_1']) / opt.num_stacks
                wh_loss_2 += self.crit_reg(
                    output_2['wh_2'], batch['reg_mask_2'], batch['ind_2'],
                    batch['wh_2'], output_2['hm_2'], batch['hm_2']) / opt.num_stacks
        if opt.reg_offset and opt.off_weight > 0:
            off_loss_1 += self.crit_reg(output_1['reg_1'], batch['reg_mask_1'],
                                        batch['ind_1'], batch['reg_1'],
                                        output_1['hm_1'], batch['hm_1']) / opt.num_stacks
            off_loss_2 += self.crit_reg(output_2['reg_2'], batch['reg_mask_2'],
                                        batch['ind_2'], batch['reg_2'],
                                        output_2['hm_2'], batch['hm_2']) / opt.num_stacks

    loss_1 = opt.hm_weight * hm_loss_1 + opt.wh_weight * wh_loss_1 + \
             opt.off_weight * off_loss_1
    loss_2 = opt.hm_weight * hm_loss_2 + opt.wh_weight * wh_loss_2 + \
             opt.off_weight * off_loss_2

    loss_type = 'normal'
    if loss_type == 'weighted':
        # task_weight is assumed to be provided elsewhere when this mode is used
        loss_weight_1, loss_weight_2 = task_weight[0], task_weight[1]
        loss = (loss_weight_1 * loss_1) + (loss_weight_2 * loss_2)
    elif loss_type == 'geometric':
        # task (the number of tasks) is assumed to be provided elsewhere
        n = task
        loss = (loss_1 * loss_2) ** (1 / n)
    else:
        loss_weight_1, loss_weight_2 = 1.0, 1.0
        loss = (loss_weight_1 * loss_1) + (loss_weight_2 * loss_2)

    loss_stats = {'loss': loss,
                  'loss_1': loss_1, 'hm_loss_1': hm_loss_1,
                  'wh_loss_1': wh_loss_1, 'off_loss_1': off_loss_1,
                  'loss_2': loss_2, 'hm_loss_2': hm_loss_2,
                  'wh_loss_2': wh_loss_2, 'off_loss_2': off_loss_2}
    return loss, loss_stats
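# The 'geometric' branch above combines the two task losses as
# (loss_1 * loss_2)^(1/n). A log-space version is numerically safer and extends
# to any number of tasks (a sketch, not this repo's code):
import torch

def geometric_mean_loss(losses, eps=1e-8):
    # losses: list of scalar task-loss tensors
    logs = torch.stack([torch.log(l + eps) for l in losses])
    return torch.exp(logs.mean())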
def forward(self, outputs, batch):
    opt = self.opt
    hm_loss, wh_loss, off_loss = 0, 0, 0
    for s in range(opt.num_stacks):
        output = outputs[s]
        if not opt.mse_loss:
            output['hm'] = _sigmoid(output['hm'])
        if opt.eval_oracle_hm:
            output['hm'] = batch['hm']
        if opt.eval_oracle_wh:
            output['wh'] = torch.from_numpy(
                gen_oracle_map(batch['wh'].detach().cpu().numpy(),
                               batch['ind'].detach().cpu().numpy(),
                               output['wh'].shape[3],
                               output['wh'].shape[2])).to(opt.device)
        if opt.eval_oracle_offset:
            output['reg'] = torch.from_numpy(
                gen_oracle_map(batch['reg'].detach().cpu().numpy(),
                               batch['ind'].detach().cpu().numpy(),
                               output['reg'].shape[3],
                               output['reg'].shape[2])).to(opt.device)
        hm_loss += self.crit(output['hm'], batch['hm']) / opt.num_stacks
        # Per-category heatmap losses (e.g. bike, car, color_cone, person),
        # computed per channel for logging. Note this dict is re-created on
        # every stack, so only the last stack's values are reported.
        hm_loss_per_category = {}
        for i, category in enumerate(cf.categories):
            hm_loss_per_category[category] = self.crit(
                output['hm'][:, i, :, :],
                batch['hm'][:, i, :, :]) / opt.num_stacks
        if opt.wh_weight > 0:
            if opt.dense_wh:
                mask_weight = batch['dense_wh_mask'].sum() + 1e-4
                wh_loss += (self.crit_wh(
                    output['wh'] * batch['dense_wh_mask'],
                    batch['dense_wh'] * batch['dense_wh_mask']) /
                    mask_weight) / opt.num_stacks
            elif opt.cat_spec_wh:
                wh_loss += self.crit_wh(
                    output['wh'], batch['cat_spec_mask'],
                    batch['ind'], batch['cat_spec_wh']) / opt.num_stacks
            else:
                wh_loss += self.crit_reg(output['wh'], batch['reg_mask'],
                                         batch['ind'], batch['wh']) / opt.num_stacks
        if opt.reg_offset and opt.off_weight > 0:
            off_loss += self.crit_reg(output['reg'], batch['reg_mask'],
                                      batch['ind'], batch['reg']) / opt.num_stacks
    loss = opt.hm_weight * hm_loss + opt.wh_weight * wh_loss + \
           opt.off_weight * off_loss
    loss_stats = {'loss': loss, 'hm_loss': hm_loss,
                  'wh_loss': wh_loss, 'off_loss': off_loss}
    for cat_name, cat_loss in hm_loss_per_category.items():
        loss_stats['hm_loss_' + cat_name] = cat_loss
    return loss, loss_stats
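# --- Aside: per-category heatmap losses are diagnostics only ---
# The per-category terms above go into loss_stats but are never added to
# `loss`. Also, a focal-style criterion normalizes each call by its own
# positive count, so the per-category values need not sum to hm_loss.
# Hypothetical helper (not in the source) with the same slicing:
def per_class_hm_losses(crit, pred_hm, gt_hm, categories, num_stacks=1):
    return {name: crit(pred_hm[:, i, :, :], gt_hm[:, i, :, :]) / num_stacks
            for i, name in enumerate(categories)}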
def forward(self, outputs, batch):
    opt = self.opt
    hm_loss, wh_loss, off_loss = 0, 0, 0
    proposal_loss, proposal_scale_loss = 0, 0
    for s in range(opt.num_stacks):
        output = outputs[s]
        if not opt.mse_loss:
            output['hm'] = _sigmoid(output['hm'])
        if opt.eval_oracle_hm:
            output['hm'] = batch['hm']
        if opt.eval_oracle_wh:
            output['wh'] = torch.from_numpy(
                gen_oracle_map(batch['wh'].detach().cpu().numpy(),
                               batch['ind'].detach().cpu().numpy(),
                               output['wh'].shape[3],
                               output['wh'].shape[2])).to(opt.device)
        if opt.eval_oracle_offset:
            output['reg'] = torch.from_numpy(
                gen_oracle_map(batch['reg'].detach().cpu().numpy(),
                               batch['ind'].detach().cpu().numpy(),
                               output['reg'].shape[3],
                               output['reg'].shape[2])).to(opt.device)
        hm_loss += self.crit(output['hm'], batch['hm']) / opt.num_stacks
        if opt.wh_weight > 0:
            if opt.dense_wh:
                mask_weight = batch['dense_wh_mask'].sum() + 1e-4
                wh_loss += (self.crit_wh(
                    output['wh'] * batch['dense_wh_mask'],
                    batch['dense_wh'] * batch['dense_wh_mask']) /
                    mask_weight) / opt.num_stacks
            elif opt.cat_spec_wh:
                wh_loss += self.crit_wh(
                    output['wh'], batch['cat_spec_mask'],
                    batch['ind'], batch['cat_spec_wh']) / opt.num_stacks
            else:
                if opt.reg_proposal:
                    # Regress wh as a residual on top of the (detached)
                    # proposal scale prediction.
                    out_scale = output['scale'].detach()
                    wh_loss += self.crit_reg(
                        output['wh'] + out_scale, batch['reg_mask'],
                        batch['ind'], batch['wh']) / opt.num_stacks
                else:
                    wh_loss += self.crit_reg(
                        output['wh'], batch['reg_mask'],
                        batch['ind'], batch['wh']) / opt.num_stacks
        if opt.reg_offset and opt.off_weight > 0:
            off_loss += self.crit_reg(output['reg'], batch['reg_mask'],
                                      batch['ind'], batch['reg']) / opt.num_stacks
        if opt.reg_proposal and opt.proposal_weight > 0:
            output['proposal'] = _sigmoid(output['proposal'])  # for focal loss
            ignore_mask = batch['proposal'].gt(-1).float()  # -1 marks ignore pixels
            proposal_loss += self.crit_centerness(
                output['proposal'], batch['proposal'],
                ignore_mask) / opt.num_stacks
            ignore_scale_mask = batch['scale'].gt(0).float()  # supervise only positive scales
            valid_num = ignore_scale_mask.sum()
            proposal_scale_loss += self.crit_scale(
                output['scale'] * ignore_scale_mask,
                batch['scale']) / valid_num / opt.num_stacks
    loss = opt.hm_weight * hm_loss + opt.wh_weight * wh_loss + \
           opt.off_weight * off_loss + opt.proposal_weight * proposal_loss + \
           opt.scale_weight * proposal_scale_loss
    if opt.reg_proposal:
        loss_stats = {'loss': loss, 'proposal_loss': proposal_loss,
                      'scale_loss': proposal_scale_loss, 'hm_loss': hm_loss,
                      'wh_loss': wh_loss, 'off_loss': off_loss}
    else:
        loss_stats = {'loss': loss, 'hm_loss': hm_loss,
                      'wh_loss': wh_loss, 'off_loss': off_loss}
    return loss, loss_stats
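# --- Aside: mask semantics in the proposal branch ---
# A small, self-contained illustration (assumed semantics: -1 marks "ignore"
# pixels in the proposal target, 0 marks unsupervised entries in the scale
# target):
import torch

proposal_gt = torch.tensor([[-1., 0., 0.7]])
scale_gt = torch.tensor([[0., 3.2, 0.]])

ignore_mask = proposal_gt.gt(-1).float()    # tensor([[0., 1., 1.]])
ignore_scale_mask = scale_gt.gt(0).float()  # tensor([[0., 1., 0.]])
valid_num = ignore_scale_mask.sum()         # tensor(1.): supervised scales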
def forward(self, outputs, batches):
    opt = self.opt
    hm_loss, wh_loss, off_loss = 0, 0, 0
    if opt.dataset == 'dota':
        a_loss = 0.
    # An earlier multi-FPN variant of this loop (commented out in the source)
    # flattened `outputs` across opt.num_stacks x opt.num_fpn levels, paired
    # each level with its own batch, and averaged every loss term over
    # num_stacks * num_fpn instead of num_stacks. Only the single-level
    # version below is active.
    outputs = outputs[0]
    batch = batches[0]
    for s in range(opt.num_stacks):
        output = outputs[s]
        if not opt.mse_loss:
            output['hm'] = _sigmoid(output['hm'])
        if opt.dataset == 'dota':
            # Angle head activation depends on the chosen encoding.
            if opt.a_method == 0 or opt.a_method == 2:
                output['a'] = _sigmoid(output['a'])  # target in [0, 1]
            elif opt.a_method == 1:
                output['a'] = 2. * _sigmoid(output['a']) - 1.  # target in [-1, 1]
                # output['a'] = torch.tanh(output['a'])  # alternative encoding
        if opt.eval_oracle_hm:
            output['hm'] = batch['hm']
        if opt.eval_oracle_wh:
            output['wh'] = torch.from_numpy(
                gen_oracle_map(batch['wh'].detach().cpu().numpy(),
                               batch['ind'].detach().cpu().numpy(),
                               output['wh'].shape[3],
                               output['wh'].shape[2])).to(opt.device)
        if opt.eval_oracle_offset:
            output['reg'] = torch.from_numpy(
                gen_oracle_map(batch['reg'].detach().cpu().numpy(),
                               batch['ind'].detach().cpu().numpy(),
                               output['reg'].shape[3],
                               output['reg'].shape[2])).to(opt.device)
        hm_loss += self.crit(output['hm'], batch['hm']) / opt.num_stacks
        if opt.wh_weight > 0:
            if opt.dense_wh:
                mask_weight = batch['dense_wh_mask'].sum() + 1e-4
                wh_loss += (self.crit_wh(
                    output['wh'] * batch['dense_wh_mask'],
                    batch['dense_wh'] * batch['dense_wh_mask']) /
                    mask_weight) / opt.num_stacks
            elif opt.cat_spec_wh:
                wh_loss += self.crit_wh(
                    output['wh'], batch['cat_spec_mask'],
                    batch['ind'], batch['cat_spec_wh']) / opt.num_stacks
            else:
                wh_loss += self.crit_reg(output['wh'], batch['reg_mask'],
                                         batch['ind'], batch['wh']) / opt.num_stacks
        if opt.a_weight > 0:
            a_loss += self.crit_reg(output['a'], batch['reg_mask'],
                                    batch['ind'], batch['a']) / opt.num_stacks
        if opt.reg_offset and opt.off_weight > 0:
            off_loss += self.crit_reg(output['reg'], batch['reg_mask'],
                                      batch['ind'], batch['reg']) / opt.num_stacks
    loss = opt.hm_weight * hm_loss + opt.wh_weight * wh_loss + \
           opt.off_weight * off_loss
    if opt.dataset == 'dota':
        loss += opt.a_weight * a_loss
    loss_stats = {'loss': loss, 'hm_loss': hm_loss,
                  'wh_loss': wh_loss, 'off_loss': off_loss}
    if opt.dataset == 'dota':
        loss_stats['a_loss'] = a_loss
    return loss, loss_stats
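# --- Aside: angle encodings selected by opt.a_method ---
# A sketch (assumption: the label pipeline normalizes batch['a'] to match) of
# the activations applied to the angle head above:
import torch

def activate_angle(a_logits, a_method):
    if a_method in (0, 2):
        return torch.sigmoid(a_logits)            # angle target in [0, 1]
    if a_method == 1:
        return 2. * torch.sigmoid(a_logits) - 1.  # target in [-1, 1], ~tanh
    return a_logits                               # raw regression otherwise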
def forward(self, outputs, batch):
    opt = self.opt
    hm_loss, dep_loss, rot_loss, dim_loss = 0, 0, 0, 0
    wh_loss, off_loss = 0, 0
    # opt.num_stacks = 2 if opt.arch == 'hourglass' else 1
    for s in range(opt.num_stacks):
        output = outputs[s]
        output['hm'] = _sigmoid(output['hm'])
        output['dep'] = 1. / (output['dep'].sigmoid() + 1e-6) - 1.
        if opt.eval_oracle_dep:
            output['dep'] = torch.from_numpy(
                gen_oracle_map(batch['dep'].detach().cpu().numpy(),
                               batch['ind'].detach().cpu().numpy(),
                               opt.output_w, opt.output_h)).to(opt.device)
        # opt.num_stacks: 1; L2 loss
        hm_loss += self.crit(output['hm'], batch['hm']) / opt.num_stacks
        if opt.dep_weight > 0:
            # output['dep']: torch.Size([1, 1, 96, 320])
            # batch['reg_mask']: torch.Size([1, 50])
            # batch['ind']: torch.Size([1, 50])
            # batch['dep']: torch.Size([1, 50, 1])
            dep_loss += self.crit_reg(output['dep'], batch['reg_mask'],
                                      batch['ind'], batch['dep']) / opt.num_stacks
        if opt.dim_weight > 0:  # the L1 loss defined for regression heads
            dim_loss += self.crit_reg(output['dim'], batch['reg_mask'],
                                      batch['ind'], batch['dim']) / opt.num_stacks
        if opt.rot_weight > 0:
            rot_loss += self.crit_rot(output['rot'], batch['rot_mask'],
                                      batch['ind'], batch['rotbin'],
                                      batch['rotres']) / opt.num_stacks
        if opt.reg_bbox and opt.wh_weight > 0:
            wh_loss += self.crit_reg(output['wh'], batch['rot_mask'],
                                     batch['ind'], batch['wh']) / opt.num_stacks
        if opt.reg_offset and opt.off_weight > 0:
            off_loss += self.crit_reg(output['reg'], batch['rot_mask'],
                                      batch['ind'], batch['reg']) / opt.num_stacks
    loss = opt.hm_weight * hm_loss + opt.dep_weight * dep_loss + \
           opt.dim_weight * dim_loss + opt.rot_weight * rot_loss + \
           opt.wh_weight * wh_loss + opt.off_weight * off_loss
    loss_stats = {'loss': loss, 'hm_loss': hm_loss, 'dep_loss': dep_loss,
                  'dim_loss': dim_loss, 'rot_loss': rot_loss,
                  'wh_loss': wh_loss, 'off_loss': off_loss}
    return loss, loss_stats
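# --- Aside: what crit_reg typically does with those shapes ---
# crit_reg is not defined in this excerpt; in CenterNet-style codebases it is
# usually a masked L1 loss that gathers per-object vectors at the flattened
# center indices. A minimal sketch matching the shape comments above
# (output [B, C, H, W], mask/ind [B, K], target [B, K, C]):
import torch
import torch.nn.functional as F

def masked_reg_l1(output, mask, ind, target):
    feat = output.permute(0, 2, 3, 1).contiguous()       # [B, H, W, C]
    feat = feat.view(feat.size(0), -1, feat.size(3))     # [B, H*W, C]
    idx = ind.unsqueeze(2).expand(-1, -1, feat.size(2))  # [B, K, C]
    pred = feat.gather(1, idx)                           # [B, K, C]
    m = mask.unsqueeze(2).expand_as(pred).float()
    return F.l1_loss(pred * m, target * m,
                     reduction='sum') / (m.sum() + 1e-4)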
def forward(self, outputs, batch):
    opt = self.opt
    hm_loss, dep_loss, rot_loss, dim_loss = 0, 0, 0, 0
    wh_loss, off_loss = 0, 0
    vertex_hm_loss, vertex_coordinate_loss, vertex_off_loss = 0, 0, 0
    for s in range(opt.num_stacks):
        output = outputs[s]
        output['hm'] = _sigmoid(output['hm'])
        # vertex heatmap head
        output['vertex_hm'] = _sigmoid(output['vertex_hm'])
        output['dep'] = 1. / (output['dep'].sigmoid() + 1e-6) - 1.
        if opt.eval_oracle_dep:
            output['dep'] = torch.from_numpy(gen_oracle_map(
                batch['dep'].detach().cpu().numpy(),
                batch['ind'].detach().cpu().numpy(),
                opt.output_w, opt.output_h)).to(opt.device)
        hm_loss += self.crit(output['hm'], batch['hm']) / opt.num_stacks
        # vertex heatmap loss (unweighted; TODO: add a weight)
        vertex_hm_loss += self.crit(output['vertex_hm'],
                                    batch['vertex_hm']) / opt.num_stacks
        # 18 channels = 9 (x, y) vertex pairs, regressed pair by pair
        for i in range(0, 18, 2):
            vertex_coordinate_loss += self.crit_reg(
                output['vertex_coordinate'][:, i:i + 2], batch['reg_mask'],
                batch['vertex_ind'][:, :, i // 2],
                batch['vertex_coordinate'][..., i:i + 2]) / opt.num_stacks
        if opt.dep_weight > 0:
            dep_loss += self.crit_reg(output['dep'], batch['reg_mask'],
                                      batch['ind'], batch['dep']) / opt.num_stacks
        if opt.dim_weight > 0:
            dim_loss += self.crit_reg(output['dim'], batch['reg_mask'],
                                      batch['ind'], batch['dim']) / opt.num_stacks
        if opt.rot_weight > 0:
            rot_loss += self.crit_rot(output['rot'], batch['rot_mask'],
                                      batch['ind'], batch['rotbin'],
                                      batch['rotres']) / opt.num_stacks
        if opt.reg_bbox and opt.wh_weight > 0:
            wh_loss += self.crit_reg(output['wh'], batch['rot_mask'],
                                     batch['ind'], batch['wh']) / opt.num_stacks
        if opt.reg_offset and opt.off_weight > 0:
            off_loss += self.crit_reg(output['reg'], batch['rot_mask'],
                                      batch['ind'], batch['reg']) / opt.num_stacks
        if opt.vertex_reg_offset and opt.vertex_off_weight > 0:
            for i in range(0, 18, 2):
                # Note: the full offset map is passed for every pair here,
                # unlike the coordinate loss above, which slices [:, i:i+2].
                vertex_off_loss += self.crit_reg(
                    output['vertex_reg_offset'], batch['rot_mask'],
                    batch['vertex_ind'][:, :, i // 2],
                    batch['vertex_reg_offset'][..., i:i + 2]) / opt.num_stacks
    loss = opt.hm_weight * hm_loss + opt.dep_weight * dep_loss + \
           opt.dim_weight * dim_loss + opt.rot_weight * rot_loss + \
           opt.wh_weight * wh_loss + opt.off_weight * off_loss + \
           vertex_hm_loss + vertex_coordinate_loss + \
           opt.vertex_off_weight * vertex_off_loss
    loss_stats = {'loss': loss, 'hm_loss': hm_loss, 'dep_loss': dep_loss,
                  'dim_loss': dim_loss, 'rot_loss': rot_loss,
                  'wh_loss': wh_loss, 'off_loss': off_loss,
                  'vertex_hm_loss': vertex_hm_loss,
                  'vertex_coordinate_loss': vertex_coordinate_loss,
                  'vertex_off_loss': vertex_off_loss}
    return loss, loss_stats
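# --- Aside: gen_oracle_map, roughly ---
# gen_oracle_map is used by the eval_oracle_* switches to replace a predicted
# head with a map built from ground truth. A rough sketch (an assumption: the
# real helper also propagates values to empty cells, while this one only
# scatters them at the annotated centers), with feat: (B, K, C) ground-truth
# values and ind: (B, K) flattened indices y * w + x:
import numpy as np

def gen_oracle_map_sketch(feat, ind, w, h):
    B, K, C = feat.shape
    out = np.zeros((B, C, h, w), dtype=np.float32)
    for b in range(B):
        for k in range(K):
            y, x = ind[b, k] // w, ind[b, k] % w
            out[b, :, y, x] = feat[b, k]
    return out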
def forward(self, outputs, batch):
    opt = self.opt
    hm_loss, wh_loss, off_loss = 0, 0, 0
    for s in range(opt.num_stacks):
        # extract outputs for loss calculation
        output = outputs[s]
        # The focal loss consumes probabilities, so the heatmap logits are
        # squashed with a (clamped) sigmoid; with MSE loss the raw map is
        # compared directly.
        if not opt.mse_loss:
            output['hm'] = _sigmoid(output['hm'])
        # optionally substitute ground truth to measure oracle upper bounds
        if opt.eval_oracle_hm:
            output['hm'] = batch['hm']
        if opt.eval_oracle_wh:
            output['wh'] = torch.from_numpy(
                gen_oracle_map(batch['wh'].detach().cpu().numpy(),
                               batch['ind'].detach().cpu().numpy(),
                               output['wh'].shape[3],
                               output['wh'].shape[2])).to(opt.device)
        if opt.eval_oracle_offset:
            output['reg'] = torch.from_numpy(
                gen_oracle_map(batch['reg'].detach().cpu().numpy(),
                               batch['ind'].detach().cpu().numpy(),
                               output['reg'].shape[3],
                               output['reg'].shape[2])).to(opt.device)
        # heatmap loss
        hm_loss += self.crit(output['hm'], batch['hm']) / opt.num_stacks
        # size (width/height) loss
        if opt.wh_weight > 0:
            if opt.dense_wh:
                mask_weight = batch['dense_wh_mask'].sum() + 1e-4
                wh_loss += (self.crit_wh(
                    output['wh'] * batch['dense_wh_mask'],
                    batch['dense_wh'] * batch['dense_wh_mask']) /
                    mask_weight) / opt.num_stacks
            elif opt.cat_spec_wh:
                wh_loss += self.crit_wh(
                    output['wh'], batch['cat_spec_mask'],
                    batch['ind'], batch['cat_spec_wh']) / opt.num_stacks
            else:
                wh_loss += self.crit_reg(output['wh'], batch['reg_mask'],
                                         batch['ind'], batch['wh']) / opt.num_stacks
        # center-offset loss
        if opt.reg_offset and opt.off_weight > 0:
            off_loss += self.crit_reg(output['reg'], batch['reg_mask'],
                                      batch['ind'], batch['reg']) / opt.num_stacks
    # weighted overall loss
    loss = opt.hm_weight * hm_loss + \
           opt.wh_weight * wh_loss + \
           opt.off_weight * off_loss
    # construct loss dictionary
    loss_stats = {'loss': loss, 'hm_loss': hm_loss,
                  'wh_loss': wh_loss, 'off_loss': off_loss}
    return loss, loss_stats
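# --- Aside: why the heatmap goes through _sigmoid ---
# The focal-loss criterion takes log(pred) and log(1 - pred), so predictions
# must stay strictly inside (0, 1); with MSE loss the raw map is compared
# directly, hence the `if not opt.mse_loss` guard. In CenterNet-style code
# _sigmoid is typically the clamped sigmoid below (a reference sketch, not
# necessarily this repository's exact definition):
import torch

def _sigmoid(x):
    return torch.clamp(x.sigmoid_(), min=1e-4, max=1 - 1e-4)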