def forward(self, x):
        """Run backbone -> DLA-up -> IDA-up, then apply every detection head.

        Returns:
            A single-element list holding one dict that maps each head name to
            its output tensor; the 'hm' entry is passed through _sigmoid.
        """
        feats = self.dla_up(self.base(x))

        # Clone the (last_level - first_level) pyramid levels that feed the
        # IDA aggregation stage; ida_up modifies the list in place.
        agg = [feats[idx].clone() for idx in range(self.last_level - self.first_level)]
        self.ida_up(agg, 0, len(agg))
        # agg[0] -> agg[1] -> agg[2]: increasingly aggregated; agg[-1] is the
        # feature map closest to the OUT node in the architecture diagram.

        out = {name: self.__getattr__(name)(agg[-1]) for name in self.heads}
        out['hm'] = _sigmoid(out['hm'])

        return [out]
# --- Esempio n. 2 (example separator left by the extraction tool) ---
    def forward(self, outputs, batch):
        """Compute the detection losses (heatmap, wh, offset) over all stacks.

        Args:
            outputs: list of per-stack head dicts ('hm', 'wh', 'reg', ...).
            batch: ground-truth tensors, masks and gather indices.

        Returns:
            Tuple of (weighted total loss, dict of the individual loss terms).
        """
        opt = self.opt
        hm_loss = wh_loss = off_loss = 0
        for stack_idx in range(opt.num_stacks):
            output = outputs[stack_idx]

            if not opt.mse_loss:
                # Focal loss expects clamped probabilities, not raw logits.
                output['hm'] = _sigmoid(output['hm'])

            # Oracle switches: substitute ground truth to measure upper bounds.
            if opt.eval_oracle_hm:
                output['hm'] = batch['hm']
            if opt.eval_oracle_wh:
                wh_np = gen_oracle_map(batch['wh'].detach().cpu().numpy(),
                                       batch['ind'].detach().cpu().numpy(),
                                       output['wh'].shape[3],
                                       output['wh'].shape[2])
                output['wh'] = torch.from_numpy(wh_np).to(opt.device)
            if opt.eval_oracle_offset:
                reg_np = gen_oracle_map(batch['reg'].detach().cpu().numpy(),
                                        batch['ind'].detach().cpu().numpy(),
                                        output['reg'].shape[3],
                                        output['reg'].shape[2])
                output['reg'] = torch.from_numpy(reg_np).to(opt.device)

            hm_loss += self.crit(output['hm'], batch['hm']) / opt.num_stacks

            if opt.wh_weight > 0:
                if opt.dense_wh:
                    # Dense supervision: normalize by the mask mass.
                    mask_weight = batch['dense_wh_mask'].sum() + 1e-4
                    dense_term = self.crit_wh(
                        output['wh'] * batch['dense_wh_mask'],
                        batch['dense_wh'] * batch['dense_wh_mask'])
                    wh_loss += (dense_term / mask_weight) / opt.num_stacks
                elif opt.cat_spec_wh:
                    wh_loss += self.crit_wh(
                        output['wh'], batch['cat_spec_mask'], batch['ind'],
                        batch['cat_spec_wh']) / opt.num_stacks
                else:
                    wh_loss += self.crit_reg(
                        output['wh'], batch['reg_mask'], batch['ind'],
                        batch['wh']) / opt.num_stacks

            if opt.reg_offset and opt.off_weight > 0:
                off_loss += self.crit_reg(
                    output['reg'], batch['reg_mask'], batch['ind'],
                    batch['reg']) / opt.num_stacks

        loss = (opt.hm_weight * hm_loss + opt.wh_weight * wh_loss
                + opt.off_weight * off_loss)
        loss_stats = {
            'loss': loss,
            'hm_loss': hm_loss,
            'wh_loss': wh_loss,
            'off_loss': off_loss,
        }
        return loss, loss_stats
# --- Esempio n. 3 (example separator left by the extraction tool) ---
  def forward(self, outputs, batch):
    """Compute hm / wh / offset detection losses averaged over stacks.

    Args:
      outputs: per-stack dicts of head predictions ('hm', 'wh', 'reg').
      batch: ground-truth maps, regression targets, masks and indices.

    Returns:
      Tuple of (weighted total loss, dict of per-term losses).
    """
    opt = self.opt
    hm_loss, wh_loss, off_loss = 0, 0, 0
    for stack in range(opt.num_stacks):
      pred = outputs[stack]

      if not opt.mse_loss:
        # Focal loss needs clamped probabilities rather than raw logits.
        pred['hm'] = _sigmoid(pred['hm'])

      # (debug visualisation comparing the ground-truth and predicted
      #  person-class heatmaps used to live here)

      # Oracle switches: substitute ground truth to measure upper bounds.
      if opt.eval_oracle_hm:
        pred['hm'] = batch['hm']
      if opt.eval_oracle_wh:
        pred['wh'] = torch.from_numpy(gen_oracle_map(
          batch['wh'].detach().cpu().numpy(),
          batch['ind'].detach().cpu().numpy(),
          pred['wh'].shape[3], pred['wh'].shape[2])).to(opt.device)
      if opt.eval_oracle_offset:
        pred['reg'] = torch.from_numpy(gen_oracle_map(
          batch['reg'].detach().cpu().numpy(),
          batch['ind'].detach().cpu().numpy(),
          pred['reg'].shape[3], pred['reg'].shape[2])).to(opt.device)

      hm_loss += self.crit(pred['hm'], batch['hm']) / opt.num_stacks

      if opt.wh_weight > 0:
        if opt.dense_wh:
          # Dense supervision: normalize by the mask mass.
          mask_weight = batch['dense_wh_mask'].sum() + 1e-4
          wh_loss += (self.crit_wh(pred['wh'] * batch['dense_wh_mask'],
                                   batch['dense_wh'] * batch['dense_wh_mask'])
                      / mask_weight) / opt.num_stacks
        elif opt.cat_spec_wh:
          wh_loss += self.crit_wh(
            pred['wh'], batch['cat_spec_mask'],
            batch['ind'], batch['cat_spec_wh']) / opt.num_stacks
        else:
          wh_loss += self.crit_reg(
            pred['wh'], batch['reg_mask'],
            batch['ind'], batch['wh']) / opt.num_stacks

      if opt.reg_offset and opt.off_weight > 0:
        off_loss += self.crit_reg(pred['reg'], batch['reg_mask'],
                                  batch['ind'], batch['reg']) / opt.num_stacks

    loss = (opt.hm_weight * hm_loss + opt.wh_weight * wh_loss
            + opt.off_weight * off_loss)
    loss_stats = {'loss': loss, 'hm_loss': hm_loss,
                  'wh_loss': wh_loss, 'off_loss': off_loss}
    return loss, loss_stats
# --- Esempio n. 4 (example separator left by the extraction tool) ---
 def forward(self, outputs, batch):
     """Multi-part heatmap + offset loss over all stacks and body parts.

     For every part ``p`` in ``self.parts`` a focal heatmap loss is taken on
     ``hm_<p>``; every part except the center part ``'c'`` also contributes
     an offset regression loss on ``reg_<p>``.

     Returns:
         Tuple of (weighted total loss, dict of per-term losses).
     """
     opt = self.opt
     hm_loss, reg_loss = 0, 0
     for stack in range(opt.num_stacks):
         output = outputs[stack]
         for part in self.parts:
             hm_key = 'hm_{}'.format(part)
             output[hm_key] = _sigmoid(output[hm_key])
             hm_loss += self.crit(output[hm_key], batch[hm_key]) / opt.num_stacks
             # The center part 'c' has no offset head.
             if part != 'c' and opt.reg_offset and opt.off_weight > 0:
                 reg_key = 'reg_{}'.format(part)
                 reg_loss += self.crit_reg(
                     output[reg_key], batch['reg_mask'],
                     batch['ind_{}'.format(part)],
                     batch[reg_key]) / opt.num_stacks
     loss = opt.hm_weight * hm_loss + opt.off_weight * reg_loss
     loss_stats = {'loss': loss, 'off_loss': reg_loss, 'hm_loss': hm_loss}
     return loss, loss_stats
# --- Esempio n. 5 (example separator left by the extraction tool) ---
  def forward(self, outputs, batch):
    """3D detection loss: heatmap, depth, dimension, rotation (+ optional 2D terms).

    Returns:
      Tuple of (weighted total loss, dict of per-term losses).
    """
    opt = self.opt

    hm_loss = dep_loss = rot_loss = dim_loss = 0
    wh_loss = off_loss = 0
    for stack in range(opt.num_stacks):
      out = outputs[stack]
      out['hm'] = _sigmoid(out['hm'])
      # Decode depth with the inverse-sigmoid transform 1/sigmoid(x) - 1,
      # keeping the prediction positive and unbounded above.
      out['dep'] = 1. / (out['dep'].sigmoid() + 1e-6) - 1.

      if opt.eval_oracle_dep:
        # Oracle mode: paint ground-truth depth onto the output map.
        out['dep'] = torch.from_numpy(gen_oracle_map(
          batch['dep'].detach().cpu().numpy(),
          batch['ind'].detach().cpu().numpy(),
          opt.output_w, opt.output_h)).to(opt.device)

      hm_loss += self.crit(out['hm'], batch['hm']) / opt.num_stacks
      if opt.dep_weight > 0:
        dep_loss += self.crit_reg(out['dep'], batch['reg_mask'],
                                  batch['ind'], batch['dep']) / opt.num_stacks
      if opt.dim_weight > 0:
        dim_loss += self.crit_reg(out['dim'], batch['reg_mask'],
                                  batch['ind'], batch['dim']) / opt.num_stacks
      if opt.rot_weight > 0:
        rot_loss += self.crit_rot(out['rot'], batch['rot_mask'],
                                  batch['ind'], batch['rotbin'],
                                  batch['rotres']) / opt.num_stacks
      # NOTE(review): the 2D terms below gate on 'rot_mask' rather than
      # 'reg_mask' — confirm this is intentional.
      if opt.reg_bbox and opt.wh_weight > 0:
        wh_loss += self.crit_reg(out['wh'], batch['rot_mask'],
                                 batch['ind'], batch['wh']) / opt.num_stacks
      if opt.reg_offset and opt.off_weight > 0:
        off_loss += self.crit_reg(out['reg'], batch['rot_mask'],
                                  batch['ind'], batch['reg']) / opt.num_stacks

    loss = (opt.hm_weight * hm_loss + opt.dep_weight * dep_loss
            + opt.dim_weight * dim_loss + opt.rot_weight * rot_loss
            + opt.wh_weight * wh_loss + opt.off_weight * off_loss)

    loss_stats = {'loss': loss, 'hm_loss': hm_loss, 'dep_loss': dep_loss,
                  'dim_loss': dim_loss, 'rot_loss': rot_loss,
                  'wh_loss': wh_loss, 'off_loss': off_loss}
    return loss, loss_stats
# --- Esempio n. 6 (example separator left by the extraction tool) ---
    def forward(self, outputs, batch):
        """Multi-pose loss: detection terms plus keypoint heatmap/offset terms.

        Fix: ``off_loss`` was previously initialized twice on consecutive
        lines (once in each tuple assignment); it is now initialized exactly
        once.

        Args:
            outputs: per-stack dicts with 'hm', 'wh', 'reg', 'hps', 'hm_hp'
                and 'hp_offset' heads.
            batch: ground-truth maps, masks and gather indices.

        Returns:
            Tuple of (weighted total loss, dict of individual loss terms).
        """
        opt = self.opt
        hm_loss, wh_loss, off_loss = 0, 0, 0
        hp_loss, hm_hp_loss, hp_offset_loss = 0, 0, 0
        for s in range(opt.num_stacks):
            output = outputs[s]
            output['hm'] = _sigmoid(output['hm'])
            if opt.hm_hp and not opt.mse_loss:
                output['hm_hp'] = _sigmoid(output['hm_hp'])

            # Oracle switches: substitute ground truth to measure upper bounds.
            if opt.eval_oracle_hmhp:
                output['hm_hp'] = batch['hm_hp']
            if opt.eval_oracle_hm:
                output['hm'] = batch['hm']
            if opt.eval_oracle_kps:
                if opt.dense_hp:
                    output['hps'] = batch['dense_hps']
                else:
                    output['hps'] = torch.from_numpy(
                        gen_oracle_map(batch['hps'].detach().cpu().numpy(),
                                       batch['ind'].detach().cpu().numpy(),
                                       opt.output_res,
                                       opt.output_res)).to(opt.device)
            if opt.eval_oracle_hp_offset:
                output['hp_offset'] = torch.from_numpy(
                    gen_oracle_map(batch['hp_offset'].detach().cpu().numpy(),
                                   batch['hp_ind'].detach().cpu().numpy(),
                                   opt.output_res,
                                   opt.output_res)).to(opt.device)

            hm_loss += self.crit(output['hm'], batch['hm']) / opt.num_stacks
            if opt.dense_hp:
                # Dense keypoint supervision: normalize by the mask mass.
                mask_weight = batch['dense_hps_mask'].sum() + 1e-4
                hp_loss += (self.crit_kp(
                    output['hps'] * batch['dense_hps_mask'], batch['dense_hps']
                    * batch['dense_hps_mask']) / mask_weight) / opt.num_stacks
            else:
                hp_loss += self.crit_kp(output['hps'], batch['hps_mask'],
                                        batch['ind'],
                                        batch['hps']) / opt.num_stacks
            if opt.wh_weight > 0:
                wh_loss += self.crit_reg(output['wh'], batch['reg_mask'],
                                         batch['ind'],
                                         batch['wh']) / opt.num_stacks
            if opt.reg_offset and opt.off_weight > 0:
                off_loss += self.crit_reg(output['reg'], batch['reg_mask'],
                                          batch['ind'],
                                          batch['reg']) / opt.num_stacks
            if opt.reg_hp_offset and opt.off_weight > 0:
                hp_offset_loss += self.crit_reg(
                    output['hp_offset'], batch['hp_mask'], batch['hp_ind'],
                    batch['hp_offset']) / opt.num_stacks
            if opt.hm_hp and opt.hm_hp_weight > 0:
                hm_hp_loss += self.crit_hm_hp(output['hm_hp'],
                                              batch['hm_hp']) / opt.num_stacks
        # Note: off_weight scales both the center offset and the keypoint
        # offset terms.
        loss = opt.hm_weight * hm_loss + opt.wh_weight * wh_loss + \
               opt.off_weight * off_loss + opt.hp_weight * hp_loss + \
               opt.hm_hp_weight * hm_hp_loss + opt.off_weight * hp_offset_loss

        loss_stats = {
            'loss': loss,
            'hm_loss': hm_loss,
            'hp_loss': hp_loss,
            'hm_hp_loss': hm_hp_loss,
            'hp_offset_loss': hp_offset_loss,
            'wh_loss': wh_loss,
            'off_loss': off_loss
        }
        return loss, loss_stats
    def forward(self, x):
        """DLA-based detector forward pass.

        Fix: leftover debug ``print`` calls (input/intermediate tensor sizes)
        have been removed; large commented-out experimental FPN / transconv /
        X-net branches that were never enabled have been dropped for clarity.

        Pipeline: backbone -> DLA-up -> IDA-up aggregation -> one dict of
        head outputs computed from the most aggregated feature map.

        Returns:
            List with a single dict mapping each head name to its output
            tensor; the 'hm' entry is passed through _sigmoid.
        """
        x = self.base(x)
        x = self.dla_up(x)

        # Clone the (last_level - first_level) pyramid levels and aggregate
        # them in place with IDA-up.
        y = []
        for i in range(self.last_level - self.first_level):  # e.g. 5 - 2 = 3
            y.append(x[i].clone())
        self.ida_up(y, 0, len(y))
        # y[0] -> y[1] -> y[2]: increasingly aggregated; y[-1] corresponds to
        # the OUT node in the architecture diagram.

        final_out = []

        dla_z = {}
        for head in self.heads:
            dla_z[head] = self.__getattr__(head)(y[-1])
        dla_z['hm'] = _sigmoid(dla_z['hm'])
        final_out.append(dla_z)  # the single output dict

        return final_out
    def forward(self, x):
        """DLA forward with auxiliary FPN / transconv / X-net feature branches.

        Fix: the leftover debug ``print`` of the output sizes at the end has
        been removed; non-English comments translated.

        Only the standard DLA output dict is appended to ``final_out``; the
        FPN and X-net head outputs are computed but currently discarded (their
        ``append`` calls are commented out). NOTE(review): the auxiliary
        computations are kept in place because calling those submodules may
        update internal state (e.g. BatchNorm running statistics) in training
        mode — confirm before deleting them outright.

        Returns:
            List with a single dict mapping head names to tensors; the 'hm'
            entry is passed through _sigmoid.
        """
        x = self.base(x)
        x = self.dla_up(x)

        # Auxiliary FPN branch over the DLA-up pyramid.
        fpn_in = x[:]
        fpn_out = self.get_fpn_out_layers(fpn_in)

        transconv_fpn0 = fpn_out[0]
        transconv_fpn1 = self.transconv_fpn1(fpn_out[1])
        transconv_fpn2 = self.transconv_fpn2(fpn_out[2])
        transconv_fpn3 = self.transconv_fpn3(fpn_out[3])
        transconv_fpn_out = [
            transconv_fpn0, transconv_fpn1, transconv_fpn2, transconv_fpn3
        ]

        # X-net branch: each of the first three FPN levels goes through an
        # "up" and a "btm" pair of delta convs plus a transposed conv.
        y0_up_1 = self.up_delta_conv_0(fpn_out[0])
        y0_up_2 = self.up_delta_conv_0(y0_up_1)
        y0_up_out = self.transconv_y0_up(y0_up_2)
        y0_btm_1 = self.btm_delta_conv_0(fpn_out[0])
        y0_btm_2 = self.btm_delta_conv_0(y0_btm_1)
        y0_btm_out = self.transconv_y0_btm(y0_btm_2)

        y1_up_1 = self.up_delta_conv_0(fpn_out[1])
        y1_up_2 = self.up_delta_conv_0(y1_up_1)
        y1_up_out = self.transconv_y1_up(y1_up_2)
        y1_btm_1 = self.btm_delta_conv_0(fpn_out[1])
        y1_btm_2 = self.btm_delta_conv_0(y1_btm_1)
        y1_btm_out = self.transconv_y1_btm(y1_btm_2)

        y2_up_1 = self.up_delta_conv_0(fpn_out[2])
        y2_up_2 = self.up_delta_conv_0(y2_up_1)
        y2_up_out = self.transconv_y2_up(y2_up_2)
        y2_btm_1 = self.btm_delta_conv_0(fpn_out[2])
        y2_btm_2 = self.btm_delta_conv_0(y2_btm_1)
        y2_btm_out = self.transconv_y2_btm(y2_btm_2)

        xnet_out = [
            y0_up_out, y0_btm_out, y1_up_out, y1_btm_out, y2_up_out, y2_btm_out
        ]

        # Standard DLA/IDA aggregation path.
        y = []
        for i in range(self.last_level - self.first_level):  # e.g. 5 - 2 = 3
            y.append(x[i].clone())
        self.ida_up(y, 0, len(y))
        # y[0] -> y[1] -> y[2]: increasingly aggregated; y[-1] corresponds to
        # the OUT node in the architecture diagram.

        final_out = []
        dla_z = {}
        for head in self.heads:
            dla_z[head] = self.__getattr__(head)(y[-1])
        dla_z['hm'] = _sigmoid(dla_z['hm'])
        final_out.append(dla_z)  # the single active output

        # Head outputs for the auxiliary branches — computed but currently
        # discarded (the appends are intentionally disabled).
        for out1 in transconv_fpn_out:
            fpn_z = {}
            for head in self.heads:
                fpn_z[head] = self.__getattr__(head)(out1)
            fpn_z['hm'] = _sigmoid(fpn_z['hm'])
            # final_out.append(fpn_z)   # would add 4 outputs

        for out2 in xnet_out:
            xnet_z = {}
            for head in self.heads:
                xnet_z[head] = self.__getattr__(head)(out2)
            xnet_z['hm'] = _sigmoid(xnet_z['hm'])
            # final_out.append(xnet_z)    # would add 6 outputs

        return final_out