Example #1
def forward(self,
            output,
            num_classes,
            top_k=200,
            variance=[0.1, 0.2],
            conf_thresh=0.01,
            nms_thresh=0.45):
    # output = (localization predictions, class confidences, prior boxes)
    loc_data, conf_data, prior_data = output[0], output[1], output[2]
    # Turn the raw class scores into per-class probabilities.
    softmax = nn.Softmax(dim=-1)
    conf_data = softmax(conf_data)
    num = loc_data.size(0)  # batch size
    output = torch.zeros(num, num_classes, top_k, 5)
    conf_preds = conf_data.transpose(2, 1)
    # Decode predictions into bboxes.
    for i in range(num):
        decoded_boxes = decode(loc_data[i], prior_data, variance)
        conf_scores = conf_preds[i].clone()
        # Skip class 0 (background); keep only scores above conf_thresh.
        for cl in range(1, num_classes):
            c_mask = conf_scores[cl].gt(conf_thresh)
            scores = conf_scores[cl][c_mask]
            if scores.size(0) == 0:
                continue
            l_mask = c_mask.unsqueeze(1).expand_as(decoded_boxes)
            boxes = decoded_boxes[l_mask].view(-1, 4)
            # Per-class non-maximum suppression, keeping at most top_k boxes.
            ids, count = nms(boxes, scores, nms_thresh, top_k)
            # Store (score, x1, y1, x2, y2) for each kept box.
            output[i, cl, :count] = \
                torch.cat((scores[ids[:count]].unsqueeze(1),
                           boxes[ids[:count]]), 1)
    return output
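
Example #1 relies on a decode helper that is not shown. In SSD-style detectors it converts the regression output back to corner-form boxes using the prior boxes and the two variance terms; the sketch below follows that common convention (priors stored in center-size form) and may differ from this repository's own implementation.

import torch

def decode(loc, priors, variances):
    # loc:       (num_priors, 4) regression output
    # priors:    (num_priors, 4) prior boxes in (cx, cy, w, h) form
    # variances: two scaling factors, e.g. [0.1, 0.2]
    boxes = torch.cat((
        priors[:, :2] + loc[:, :2] * variances[0] * priors[:, 2:],  # centers
        priors[:, 2:] * torch.exp(loc[:, 2:] * variances[1])), 1)   # sizes
    boxes[:, :2] -= boxes[:, 2:] / 2   # (cx, cy, w, h) -> (xmin, ymin, w, h)
    boxes[:, 2:] += boxes[:, :2]       # -> (xmin, ymin, xmax, ymax)
    return boxes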
Example #2
def p_detect(self, image: str):
    self.pnet.eval()
    pboxes = torch.Tensor([]).to(self.device)
    image = Image.open(image)
    scale = 1  # cumulative pyramid scale relative to the original image
    # Image pyramid: run P-Net on progressively smaller copies of the image.
    while min(image.size) > 12:
        data = functions.transform(image).unsqueeze(0).to(self.device)
        confi, offset, _ = self.pnet(data)
        # (N, C, H, W) -> (N, H, W, C)
        confi = confi.permute(0, 2, 3, 1)
        offset = offset.permute(0, 2, 3, 1)
        mask = confi[..., 0] > cfg.CONFI["pnet"]
        index = torch.nonzero(mask)
        # Map every positive grid cell to its candidate box in the scaled
        # image; columns (index[:, 2]) give x, rows (index[:, 1]) give y.
        side, stride = cfg.NET["pnet"]
        x1, y1 = index[:, 2] * stride, index[:, 1] * stride
        x2, y2 = x1 + side, y1 + side
        roi = torch.stack([x1, y1, x2, y2], dim=-1)
        # Refine the ROI with the regression offsets and map the result
        # back to original-image coordinates.
        confi, offset = confi[mask], offset[mask]
        origin = (roi.float() + offset * side) / scale
        box = torch.cat((confi, origin), dim=-1)
        pboxes = torch.cat((pboxes, box), dim=0)
        # Next pyramid level: shrink the image by a factor of ~1/sqrt(2).
        scale *= 0.707
        image = image.resize((int(image.size[0] * 0.707),
                              int(image.size[1] * 0.707)))
    return functions.nms(pboxes)
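
Examples #2 and #3 both finish with functions.nms(...), whose body is not included here. A typical implementation is greedy, confidence-sorted IoU suppression over rows laid out as (confidence, x1, y1, x2, y2, ...); the following is a self-contained sketch under that assumption, not the repository's actual helper.

import torch

def nms_sketch(boxes, iou_thresh=0.3):
    # boxes: (N, 5+) tensor, columns = (confidence, x1, y1, x2, y2, ...)
    if boxes.size(0) == 0:
        return boxes
    boxes = boxes[boxes[:, 0].argsort(descending=True)]  # best box first
    keep = []
    while boxes.size(0) > 0:
        best, rest = boxes[0], boxes[1:]
        keep.append(best)
        if rest.size(0) == 0:
            break
        # Intersection-over-union between the best box and the remainder.
        x1 = torch.max(best[1], rest[:, 1])
        y1 = torch.max(best[2], rest[:, 2])
        x2 = torch.min(best[3], rest[:, 3])
        y2 = torch.min(best[4], rest[:, 4])
        inter = (x2 - x1).clamp(min=0) * (y2 - y1).clamp(min=0)
        area_best = (best[3] - best[1]) * (best[4] - best[2])
        area_rest = (rest[:, 3] - rest[:, 1]) * (rest[:, 4] - rest[:, 2])
        iou = inter / (area_best + area_rest - inter)
        boxes = rest[iou < iou_thresh]  # drop boxes that overlap too much
    return torch.stack(keep)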
Example #3
def ro_detect(self, prior, data, model):
    prior, data = prior.to(self.device), data.to(self.device)
    confi, offset, landmark = model(data)
    # (N, C, H, W) -> (N, H, W, C)
    confi = confi.permute(0, 2, 3, 1)
    offset = offset.permute(0, 2, 3, 1)
    landmark = landmark.permute(0, 2, 3, 1)
    # The threshold lookup assumes str(model) yields the network name
    # (e.g. "rnet"), matching the keys of cfg.CONFI.
    mask = confi[..., 0] > cfg.CONFI[str(model).lower()]
    # R-Net/O-Net emit a 1x1 map per crop, so the mask selects whole samples;
    # keep the matching prior boxes as well.
    prior = prior[mask.reshape(-1)]
    confi, offset, landmark = confi[mask], offset[mask], landmark[mask]
    # Prior boxes are assumed square, so their width serves as the side length.
    side = prior[:, 2:3] - prior[:, :1]
    coordinate = offset * side + prior
    # Box centers, used to decode the landmark offsets.
    cx = (prior[:, :1] + prior[:, 2:3]) / 2
    cy = (prior[:, 1:2] + prior[:, 3:4]) / 2
    landmark[:, ::2] = landmark[:, ::2] * side + cx
    landmark[:, 1::2] = landmark[:, 1::2] * side + cy
    boxes = torch.cat((confi, coordinate, landmark), dim=-1)
    return functions.nms(boxes)
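
ro_detect treats every prior box as square (only its width is used as the side length). In MTCNN-style cascades the proposals from the previous stage are normally squared off before being cropped for the next network; the helper below is a minimal sketch of that step, assuming the (confidence, x1, y1, x2, y2, ...) layout produced by p_detect, and is not part of the repository.

import torch

def to_square(boxes):
    # boxes: (N, 5+) tensor, columns = (confidence, x1, y1, x2, y2, ...)
    out = boxes.clone()
    w = boxes[:, 3] - boxes[:, 1]
    h = boxes[:, 4] - boxes[:, 2]
    side = torch.max(w, h)          # expand the shorter edge
    cx = boxes[:, 1] + w / 2
    cy = boxes[:, 2] + h / 2
    out[:, 1] = cx - side / 2
    out[:, 2] = cy - side / 2
    out[:, 3] = cx + side / 2
    out[:, 4] = cy + side / 2
    return out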