Code Example #1
File: train.py Project: zhenni/openpose-pytorch
 def summary_image(self, **kwargs):
     step, height, width, image, mask, keypoints, yx_min, yx_max, parts, limbs, index, output = (kwargs[key] for key in 'step, height, width, image, mask, keypoints, yx_min, yx_max, parts, limbs, index, output'.split(', '))
     limit = min(self.config.getint('summary_image', 'limit'), image.shape[0])
     image = image[:limit, :, :, :]
     if self.config.getboolean('summary_image', 'estimate'):
         canvas = np.copy(image)
         fn = pybenchmark.profile('output/estimate')(self.draw_clusters)
         canvas = [fn(canvas, parts[:-1], limbs) for canvas, parts, limbs in zip(canvas, *(output[name] for name in 'parts, limbs'.split(', ')))]
         self.writer.add_image('output/estimate', torchvision.utils.make_grid(torch.from_numpy(np.stack(canvas)).permute(0, 3, 1, 2).float(), normalize=True, scale_each=True), step)
     if self.config.getboolean('summary_image', 'data_keypoints'):
         canvas = np.copy(image)
         fn = pybenchmark.profile('data/keypoints')(self.draw_keypoints)
         canvas = [fn(canvas, mask, keypoints, yx_min, yx_max, index) for canvas, mask, keypoints, yx_min, yx_max, index in zip(canvas, mask, keypoints, yx_min, yx_max, index)]
         self.writer.add_image('data/keypoints', torchvision.utils.make_grid(torch.from_numpy(np.stack(canvas)).permute(0, 3, 1, 2).float(), normalize=True, scale_each=True), step)
     if self.config.getboolean('summary_image', 'data_parts'):
         fn = pybenchmark.profile('data/parts')(self.draw_feature)
         for i in range(parts.shape[1]):
             canvas = np.copy(image)
             canvas = [fn(canvas, feature[i]) for canvas, feature in zip(canvas, parts)]
             self.writer.add_image('data/parts%d' % i, torchvision.utils.make_grid(torch.from_numpy(np.stack(canvas)).permute(0, 3, 1, 2).float(), normalize=True, scale_each=True), step)
     if self.config.getboolean('summary_image', 'data_limbs'):
         fn = pybenchmark.profile('data/limbs')(self.draw_feature)
         for i in range(limbs.shape[1]):
             canvas = np.copy(image)
             canvas = [fn(canvas, feature[i]) for canvas, feature in zip(canvas, limbs)]
             self.writer.add_image('data/limbs%d' % i, torchvision.utils.make_grid(torch.from_numpy(np.stack(canvas)).permute(0, 3, 1, 2).float(), normalize=True, scale_each=True), step)
     for name, feature in output.items():
         fn = pybenchmark.profile('output/' + name)(self.draw_feature)
         for i in range(feature.shape[1]):
             canvas = np.copy(image)
             canvas = [fn(canvas, feature[i]) for canvas, feature in zip(canvas, feature)]
             self.writer.add_image('output/%s%d' % (name, i), torchvision.utils.make_grid(torch.from_numpy(np.stack(canvas)).permute(0, 3, 1, 2).float(), normalize=True, scale_each=True), step)
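All of the snippets on this page share one call pattern: pybenchmark.profile('label') builds a decorator, the decorator wraps a callable (a bound method, a model, a plain function), and invoking the wrapped object runs it while its cost is recorded under that label. Below is a minimal, self-contained sketch of the pattern; expensive_step is a made-up stand-in for the inference/loss/NMS calls profiled in the real examples.

import time

import pybenchmark


def expensive_step(n):
    # made-up stand-in for the inference/loss/nms work profiled in the examples above
    time.sleep(0.01)
    return n * n


# ad hoc decoration at the call site, exactly as in the snippets on this page:
# profile('label') returns a decorator, which wraps the callable, which is then called
result = pybenchmark.profile('expensive_step')(expensive_step)(10)
print(result)  # 100 -- the wrapped call returns whatever the original returns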
Code Example #2
File: train.py Project: zhenni/openpose-pytorch
 def iterate(self, data):
     for key in data:
         t = data[key]
         if torch.is_tensor(t):
             data[key] = t.to(self.device)
     tensor = data['tensor']
     outputs = pybenchmark.profile('inference')(self.inference)(tensor)
     height, width = data['image'].size()[1:3]
     loss = pybenchmark.profile('loss')(model.Loss(self.config, data, self.limbs_index, height, width))
     losses = [loss(**output) for output in outputs]
     losses_hparam = [{name: self.loss_hparam(i, name, l) for name, l in loss.items()} for i, loss in enumerate(losses)]
     loss_total = sum(sum(loss.values()) for loss in losses_hparam)
     self.optimizer.zero_grad()
     loss_total.backward()
     try:
         clip = self.config.getfloat('train', 'clip')
         nn.utils.clip_grad_norm(self.inference.parameters(), clip)
     except configparser.NoOptionError:
         pass
     self.optimizer.step()
     return dict(
         height=height, width=width,
         data=data, outputs=outputs,
         loss_total=loss_total, losses=losses, losses_hparam=losses_hparam,
     )
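Code Example #2 profiles a callable object rather than a plain function: model.Loss(...) is instantiated first, the instance is wrapped with pybenchmark.profile('loss'), and the wrapper is then invoked once per output. Here is a minimal sketch of wrapping an instance that defines __call__; the Loss class below is a hypothetical stand-in, not the project's model.Loss.

import pybenchmark


class Loss:
    # hypothetical stand-in for model.Loss(config, data, limbs_index, height, width)
    def __init__(self, weight):
        self.weight = weight

    def __call__(self, value):
        return self.weight * value


loss = pybenchmark.profile('loss')(Loss(0.5))  # wrap the instance, not the class
losses = [loss(v) for v in (1.0, 2.0, 3.0)]    # each call is recorded under 'loss'
print(losses)  # [0.5, 1.0, 1.5]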
Code Example #3
File: detect.py Project: GRSEB9S/yolo2-pytorch-1
 def __call__(self):
     image_bgr = self.get_image()
     tensor = self.conv_tensor(image_bgr)
     pred = pybenchmark.profile('inference')(model._inference)(
         self.inference, torch.autograd.Variable(tensor))
     rows, cols = pred['feature'].size()[-2:]
     _prob, pred['cls'] = conv_logits(pred)
     pred['prob'] = pred['iou'] * _prob
     self.filter_visible(pred)
     keep = pybenchmark.profile('nms')(utils.postprocess.nms)(
         *(pred[key].data for key in 'yx_min, yx_max, score'.split(', ')),
         self.config.getfloat('detect', 'overlap'))
     image_result = image_bgr.copy()
     if keep:
         yx_min, yx_max, cls = (
             pred[key].data.cpu().numpy()[keep]
             for key in 'yx_min, yx_max, cls'.split(', '))
         scale = np.array(image_result.shape[:2], np.float32) / [rows, cols]
         yx_min, yx_max = ((a * scale).astype(np.int)
                           for a in (yx_min, yx_max))
         image_result = self.draw_bbox(image_result, yx_min, yx_max, cls)
     cv2.imshow('detection', image_result)
     if self.args.output:
         self.writer.write(image_result)
     if cv2.waitKey(0 if self.args.pause else 1) in self.keys:
         root = os.path.join(self.model_dir, 'snapshot')
         os.makedirs(root, exist_ok=True)
         path = os.path.join(root, time.strftime(self.args.format))
         cv2.imwrite(path, image_bgr)
         logging.warning('image dumped into ' + path)
Code Example #4
File: train.py Project: codealphago/yolo2-pytorch
 def step(self, inference, optimizer, data):
     for key in data:
         t = data[key]
         if torch.is_tensor(t):
             data[key] = utils.ensure_device(t)
     tensor = torch.autograd.Variable(data['tensor'])
     pred = pybenchmark.profile('inference')(model._inference)(inference, tensor)
     height, width = data['image'].size()[1:3]
     rows, cols = pred['feature'].size()[-2:]
     loss, debug = pybenchmark.profile('loss')(model.loss)(self.anchors, norm_data(data, height, width, rows, cols), pred, self.config.getfloat('model', 'threshold'))
     loss_hparam = {key: loss[key] * self.config.getfloat('hparam', key) for key in loss}
     loss_total = sum(loss_hparam.values())
     optimizer.zero_grad()
     loss_total.backward()
     try:
         clip = self.config.getfloat('train', 'clip')
         nn.utils.clip_grad_norm(inference.parameters(), clip)
     except configparser.NoOptionError:
         pass
     optimizer.step()
     return dict(
         height=height, width=width, rows=rows, cols=cols,
         data=data, pred=pred, debug=debug,
         loss_total=loss_total, loss=loss, loss_hparam=loss_hparam,
     )
Code Example #5
File: train.py Project: liviust/yolo2-pytorch
 def step(self, inference, optimizer, data):
     for key in data:
         t = data[key]
         if torch.is_tensor(t):
             data[key] = utils.ensure_device(t)
     tensor = torch.autograd.Variable(data['tensor'])
     pred = pybenchmark.profile('inference')(model._inference)(inference, tensor)
     height, width = data['image'].size()[1:3]
     rows, cols = pred['feature'].size()[-2:]
     loss, debug = pybenchmark.profile('loss')(model.loss)(self.anchors, norm_data(data, height, width, rows, cols), pred, self.config.getfloat('model', 'threshold'))
     loss_hparam = {key: loss[key] * self.config.getfloat('hparam', key) for key in loss}
     loss_total = sum(loss_hparam.values())
     optimizer.zero_grad()
     loss_total.backward()
     try:
         clip = self.config.getfloat('train', 'clip')
         nn.utils.clip_grad_norm(inference.parameters(), clip)
     except configparser.NoOptionError:
         pass
     optimizer.step()
     return dict(
         height=height, width=width, rows=rows, cols=cols,
         data=data, pred=pred, debug=debug,
         loss_total=loss_total, loss=loss, loss_hparam=loss_hparam,
     )
Code Example #6
 def stat_ap(self):
     cls_num = [0 for _ in self.category]
     cls_score = [np.array([], dtype=np.float32) for _ in self.category]
     cls_tp = [np.array([], dtype=np.bool) for _ in self.category]
     for data in tqdm.tqdm(self.loader):
         for key in data:
             t = data[key]
             if torch.is_tensor(t):
                 data[key] = utils.ensure_device(t)
         tensor = torch.autograd.Variable(data['tensor'], volatile=True)
         batch_size = tensor.size(0)
         pred = pybenchmark.profile('inference')(model._inference)(
             self.inference, tensor)
         _prob, pred['cls'] = conv_logits(pred)
         pred['iou'] = pred['iou'].contiguous()
         pred['prob'] = pred['iou'] * _prob
         for key in pred:
             pred[key] = pred[key].data
         if self.config.getboolean('eval', 'debug'):
             self.debug_data(data)
             self.debug_pred(pred)
         norm_bbox(data, pred)
         for path, size, difficult, image, data_yx_min, data_yx_max, data_cls, yx_min, yx_max, iou, prob, cls in zip(
                 *(data[key]
                   for key in 'path, size, difficult'.split(', ')),
                 *(torch.unbind(data[key])
                   for key in 'image, yx_min, yx_max, cls'.split(', ')),
                 *(torch.unbind(pred[key].view(batch_size, -1, 2))
                   for key in 'yx_min, yx_max'.split(', ')),
                 *(torch.unbind(pred[key].view(batch_size, -1))
                   for key in 'iou, prob, cls'.split(', '))):
             data_yx_min, data_yx_max, data_cls = filter_valid(
                 data_yx_min, data_yx_max, data_cls, difficult)
             for c in data_cls.cpu().numpy():
                 cls_num[c] += 1
             yx_min, yx_max, cls, score = self.filter_visible(
                 yx_min, yx_max, iou, prob, cls)
             keep = pybenchmark.profile('nms')(utils.postprocess.nms)(
                 yx_min, yx_max, score,
                 self.config.getfloat('detect', 'overlap'))
             if keep:
                 keep = utils.ensure_device(torch.LongTensor(keep))
                 yx_min, yx_max, cls, score = (t[keep]
                                               for t in (yx_min, yx_max,
                                                         cls, score))
                 for c in set(cls.cpu().numpy()):
                     c = int(c)  # PyTorch's bug
                     _score, tp = self.filter_cls(c, path, data_yx_min,
                                                  data_yx_max, data_cls,
                                                  yx_min, yx_max, cls,
                                                  score)
                     cls_score[c] = np.append(cls_score[c],
                                              _score.cpu().numpy())
                     cls_tp[c] = np.append(cls_tp[c], tp)
     return cls_num, cls_score, cls_tp
Code Example #7
File: detect.py Project: codealphago/yolo2-pytorch
 def __call__(self):
     image_bgr = self.get_image()
     tensor = self.conv_tensor(image_bgr)
     pred = pybenchmark.profile('inference')(model._inference)(self.inference, torch.autograd.Variable(tensor, volatile=True))
     rows, cols = pred['feature'].size()[-2:]
     iou = pred['iou'].data.contiguous().view(-1)
     yx_min, yx_max = (pred[key].data.view(-1, 2) for key in 'yx_min, yx_max'.split(', '))
     logits = get_logits(pred)
     prob = F.softmax(logits, -1).data.view(-1, logits.size(-1))
     ret = postprocess(self.config, iou, yx_min, yx_max, prob)
     image_result = image_bgr.copy()
     if ret is not None:
         iou, yx_min, yx_max, cls, score = ret
         try:
             scale = self.scale
         except AttributeError:
             scale = utils.ensure_device(torch.from_numpy(np.array(image_result.shape[:2], np.float32) / np.array([rows, cols], np.float32)))
             self.scale = scale
         yx_min, yx_max = ((t * scale).cpu().numpy().astype(np.int) for t in (yx_min, yx_max))
         image_result = self.draw_bbox(image_result, yx_min, yx_max, cls)
     cv2.imshow('detection', image_result)
     if self.args.output:
         self.writer.write(image_result)
     if cv2.waitKey(0 if self.args.pause else 1) in self.keys:
         root = os.path.join(self.model_dir, 'snapshot')
         os.makedirs(root, exist_ok=True)
         path = os.path.join(root, time.strftime(self.args.format))
         cv2.imwrite(path, image_bgr)
         logging.warning('image dumped into ' + path)
Code Example #8
File: detect.py Project: strategist922/yolo2-pytorch
 def __call__(self):
     image_bgr = self.get_image()
     image_resized = self.resize(image_bgr, self.height, self.width)
     image = self.transform_image(image_resized)
     tensor = self.transform_tensor(image)
     tensor = utils.ensure_device(tensor.unsqueeze(0))
     pred = pybenchmark.profile('inference')(model._inference)(self.inference, torch.autograd.Variable(tensor, volatile=True))
     rows, cols = pred['feature'].size()[-2:]
     iou = pred['iou'].data.contiguous().view(-1)
     yx_min, yx_max = (pred[key].data.view(-1, 2) for key in 'yx_min, yx_max'.split(', '))
     logits = get_logits(pred)
     prob = F.softmax(logits, -1).data.view(-1, logits.size(-1))
     ret = postprocess(self.config, iou, yx_min, yx_max, prob)
     image_result = image_bgr.copy()
     if ret is not None:
         iou, yx_min, yx_max, cls, score = ret
         try:
             scale = self.scale
         except AttributeError:
             scale = utils.ensure_device(torch.from_numpy(np.array(image_result.shape[:2], np.float32) / np.array([rows, cols], np.float32)))
             self.scale = scale
         yx_min, yx_max = ((t * scale).cpu().numpy().astype(np.int) for t in (yx_min, yx_max))
         image_result = self.draw_bbox(image_result, yx_min, yx_max, cls)
     if self.args.output:
         self.writer.write(image_result)
     else:
         cv2.imshow('detection', image_result)
     if cv2.waitKey(0 if self.args.pause else 1) in self.keys:
         root = os.path.join(self.model_dir, 'snapshot')
         os.makedirs(root, exist_ok=True)
         path = os.path.join(root, time.strftime(self.args.format))
         cv2.imwrite(path, image_bgr)
         logging.warning('image dumped into ' + path)
Code Example #9
File: eval.py Project: codealphago/yolo2-pytorch
 def filter_cls(self, c, path, data_yx_min, data_yx_max, data_cls, yx_min, yx_max, cls, score):
     data_yx_min, data_yx_max = filter_cls_data(data_yx_min, data_yx_max, data_cls == c)
     yx_min, yx_max, score = filter_cls_pred(yx_min, yx_max, score, cls == c)
     tp = pybenchmark.profile('matching')(matching)(data_yx_min, data_yx_max, yx_min, yx_max, self.config.getfloat('eval', 'iou'))
     if self.config.getboolean('eval', 'debug'):
         self.debug_visualize(data_yx_min, data_yx_max, yx_min, yx_max, c, tp, path)
     return score, tp
Code Example #10
 def filter_cls(self, c, path, data_yx_min, data_yx_max, data_cls, yx_min,
                yx_max, cls, score):
     data_yx_min, data_yx_max = filter_cls_data(data_yx_min, data_yx_max,
                                                data_cls == c)
     yx_min, yx_max, score = filter_cls_pred(yx_min, yx_max, score,
                                             cls == c)
     tp = pybenchmark.profile('matching')(matching)(
         data_yx_min, data_yx_max, yx_min, yx_max,
         self.config.getfloat('eval', 'iou'))
     if self.config.getboolean('eval', 'debug'):
         self.debug(data_yx_min, data_yx_max, yx_min, yx_max, c, tp, path)
     return score, tp
Code Example #11
File: train.py Project: liviust/yolo2-pytorch
 def draw_bbox_pred(self, canvas, yx_min, yx_max, cls, iou, colors=None, nms=False):
     batch_size = len(canvas)
     mask = iou > self.config.getfloat('detect', 'threshold')
     yx_min, yx_max = (np.reshape(a, [a.shape[0], -1, 2]) for a in (yx_min, yx_max))
     cls, iou, mask = (np.reshape(a, [a.shape[0], -1]) for a in (cls, iou, mask))
     yx_min, yx_max, cls, iou, mask = ([a[b] for b in range(batch_size)] for a in (yx_min, yx_max, cls, iou, mask))
     yx_min, yx_max, cls, iou = ([a[m] for a, m in zip(l, mask)] for l in (yx_min, yx_max, cls, iou))
     if nms:
         overlap = self.config.getfloat('detect', 'overlap')
         keep = [pybenchmark.profile('nms')(utils.postprocess.nms)(torch.Tensor(yx_min), torch.Tensor(yx_max), torch.Tensor(iou), overlap) if iou.shape[0] > 0 else [] for yx_min, yx_max, iou in zip(yx_min, yx_max, iou)]
         keep = [np.array(k, np.int) for k in keep]
         yx_min, yx_max, cls = ([a[k] for a, k in zip(l, keep)] for l in (yx_min, yx_max, cls))
     return [self.draw_bbox(canvas, yx_min.astype(np.int), yx_max.astype(np.int), cls, colors=colors) for canvas, yx_min, yx_max, cls in zip(canvas, yx_min, yx_max, cls)]
Code Example #12
File: train.py Project: codealphago/yolo2-pytorch
 def draw_bbox_pred(self, canvas, yx_min, yx_max, cls, iou, colors=None, nms=False):
     batch_size = len(canvas)
     mask = iou > self.config.getfloat('detect', 'threshold')
     yx_min, yx_max = (np.reshape(a, [a.shape[0], -1, 2]) for a in (yx_min, yx_max))
     cls, iou, mask = (np.reshape(a, [a.shape[0], -1]) for a in (cls, iou, mask))
     yx_min, yx_max, cls, iou, mask = ([a[b] for b in range(batch_size)] for a in (yx_min, yx_max, cls, iou, mask))
     yx_min, yx_max, cls, iou = ([a[m] for a, m in zip(l, mask)] for l in (yx_min, yx_max, cls, iou))
     if nms:
         overlap = self.config.getfloat('detect', 'overlap')
         keep = [pybenchmark.profile('nms')(utils.postprocess.nms)(torch.Tensor(iou), torch.Tensor(yx_min), torch.Tensor(yx_max), overlap) if iou.shape[0] > 0 else [] for yx_min, yx_max, iou in zip(yx_min, yx_max, iou)]
         keep = [np.array(k, np.int) for k in keep]
         yx_min, yx_max, cls = ([a[k] for a, k in zip(l, keep)] for l in (yx_min, yx_max, cls))
     return [self.draw_bbox(canvas, yx_min.astype(np.int), yx_max.astype(np.int), cls, colors=colors) for canvas, yx_min, yx_max, cls in zip(canvas, yx_min, yx_max, cls)]
Code Example #13
File: train.py Project: liviust/yolo2-pytorch
 def summary_image(self, **kwargs):
     step, height, width, rows, cols, data, pred, matching = (kwargs[key] for key in 'step, height, width, rows, cols, data, pred, matching'.split(', '))
     image = data['image']
     limit = min(self.config.getint('summary_image', 'limit'), image.shape[0])
     image = image[:limit, :, :, :]
     yx_min, yx_max, iou = (pred[key] for key in 'yx_min, yx_max, iou'.split(', '))
     scale = [height / rows, width / cols]
     yx_min, yx_max = (a * scale for a in (yx_min, yx_max))
     if 'logits' in pred:
         cls = np.argmax(F.softmax(torch.autograd.Variable(torch.from_numpy(pred['logits'])), -1).data.cpu().numpy(), -1)
     else:
         cls = np.zeros(iou.shape, np.int)
     if self.config.getboolean('summary_image', 'bbox'):
         # data
         canvas = np.copy(image)
         canvas = pybenchmark.profile('bbox/data')(self.draw_bbox_data)(canvas, *(data[key] for key in 'yx_min, yx_max, cls'.split(', ')))
         self.writer.add_image('bbox/data', torchvision.utils.make_grid(torch.from_numpy(np.stack(canvas)).permute(0, 3, 1, 2).float(), normalize=True, scale_each=True), step)
         # pred
         canvas = np.copy(image)
         canvas = pybenchmark.profile('bbox/pred')(self.draw_bbox_pred)(canvas, yx_min, yx_max, cls, iou, nms=True)
         self.writer.add_image('bbox/pred', torchvision.utils.make_grid(torch.from_numpy(np.stack(canvas)).permute(0, 3, 1, 2).float(), normalize=True, scale_each=True), step)
     if self.config.getboolean('summary_image', 'iou'):
         # bbox
         canvas = np.copy(image)
         canvas_data = self.draw_bbox_data(canvas, *(data[key] for key in 'yx_min, yx_max, cls'.split(', ')), colors=['g'])
         # data
         for i, canvas in enumerate(pybenchmark.profile('iou/data')(self.draw_bbox_iou)(list(map(np.copy, canvas_data)), yx_min, yx_max, cls, matching, rows, cols, colors=['w'])):
             canvas = np.stack(canvas)
             canvas = torch.from_numpy(canvas).permute(0, 3, 1, 2)
             canvas = torchvision.utils.make_grid(canvas.float(), normalize=True, scale_each=True)
             self.writer.add_image('iou/data%d' % i, canvas, step)
         # pred
         for i, canvas in enumerate(pybenchmark.profile('iou/pred')(self.draw_bbox_iou)(list(map(np.copy, canvas_data)), yx_min, yx_max, cls, iou, rows, cols, colors=['w'])):
             canvas = np.stack(canvas)
             canvas = torch.from_numpy(canvas).permute(0, 3, 1, 2)
             canvas = torchvision.utils.make_grid(canvas.float(), normalize=True, scale_each=True)
             self.writer.add_image('iou/pred%d' % i, canvas, step)
Code Example #14
File: train.py Project: codealphago/yolo2-pytorch
 def summary_image(self, **kwargs):
     step, height, width, rows, cols, data, pred, matching = (kwargs[key] for key in 'step, height, width, rows, cols, data, pred, matching'.split(', '))
     image = data['image']
     limit = min(self.config.getint('summary_image', 'limit'), image.shape[0])
     image = image[:limit, :, :, :]
     yx_min, yx_max, iou = (pred[key] for key in 'yx_min, yx_max, iou'.split(', '))
     scale = [height / rows, width / cols]
     yx_min, yx_max = (a * scale for a in (yx_min, yx_max))
     if 'logits' in pred:
         cls = np.argmax(F.softmax(torch.autograd.Variable(torch.from_numpy(pred['logits'])), -1).data.cpu().numpy(), -1)
     else:
         cls = np.zeros(iou.shape, np.int)
     if self.config.getboolean('summary_image', 'bbox'):
         # data
         canvas = np.copy(image)
         canvas = pybenchmark.profile('bbox/data')(self.draw_bbox_data)(canvas, *(data[key] for key in 'yx_min, yx_max, cls'.split(', ')))
         self.writer.add_image('bbox/data', torchvision.utils.make_grid(torch.from_numpy(np.stack(canvas)).permute(0, 3, 1, 2).float(), normalize=True, scale_each=True), step)
         # pred
         canvas = np.copy(image)
         canvas = pybenchmark.profile('bbox/pred')(self.draw_bbox_pred)(canvas, yx_min, yx_max, cls, iou, nms=True)
         self.writer.add_image('bbox/pred', torchvision.utils.make_grid(torch.from_numpy(np.stack(canvas)).permute(0, 3, 1, 2).float(), normalize=True, scale_each=True), step)
     if self.config.getboolean('summary_image', 'iou'):
         # bbox
         canvas = np.copy(image)
         canvas_data = self.draw_bbox_data(canvas, *(data[key] for key in 'yx_min, yx_max, cls'.split(', ')), colors=['g'])
         # data
         for i, canvas in enumerate(pybenchmark.profile('iou/data')(self.draw_bbox_iou)(list(map(np.copy, canvas_data)), yx_min, yx_max, cls, matching, rows, cols, colors=['w'])):
             canvas = np.stack(canvas)
             canvas = torch.from_numpy(canvas).permute(0, 3, 1, 2)
             canvas = torchvision.utils.make_grid(canvas.float(), normalize=True, scale_each=True)
             self.writer.add_image('iou/data%d' % i, canvas, step)
         # pred
         for i, canvas in enumerate(pybenchmark.profile('iou/pred')(self.draw_bbox_iou)(list(map(np.copy, canvas_data)), yx_min, yx_max, cls, iou, rows, cols, colors=['w'])):
             canvas = np.stack(canvas)
             canvas = torch.from_numpy(canvas).permute(0, 3, 1, 2)
             canvas = torchvision.utils.make_grid(canvas.float(), normalize=True, scale_each=True)
             self.writer.add_image('iou/pred%d' % i, canvas, step)
Code Example #15
File: detect.py Project: strategist922/yolo2-pytorch
def postprocess(config, iou, yx_min, yx_max, prob):
    iou, yx_min, yx_max, prob, prob_cls, cls = filter_visible(config, iou, yx_min, yx_max, prob)
    keep = pybenchmark.profile('nms')(utils.postprocess.nms)(iou, yx_min, yx_max, config.getfloat('detect', 'overlap'))
    if keep:
        keep = utils.ensure_device(torch.LongTensor(keep))
        iou, yx_min, yx_max, prob, prob_cls, cls = (t[keep] for t in (iou, yx_min, yx_max, prob, prob_cls, cls))
        if config.getboolean('detect', 'fix'):
            score = torch.unsqueeze(iou, -1) * prob
            mask = score > config.getfloat('detect', 'threshold_cls')
            indices, cls = torch.unbind(mask.nonzero(), -1)
            yx_min, yx_max = (t[indices] for t in (yx_min, yx_max))
            score = score[mask]
        else:
            score = iou
        return iou, yx_min, yx_max, cls, score
Code Example #16
File: detect.py Project: codealphago/yolo2-pytorch
def postprocess(config, iou, yx_min, yx_max, prob):
    iou, yx_min, yx_max, prob, prob_cls, cls = filter_visible(config, iou, yx_min, yx_max, prob)
    keep = pybenchmark.profile('nms')(utils.postprocess.nms)(iou, yx_min, yx_max, config.getfloat('detect', 'overlap'))
    if keep:
        keep = utils.ensure_device(torch.LongTensor(keep))
        iou, yx_min, yx_max, prob, prob_cls, cls = (t[keep] for t in (iou, yx_min, yx_max, prob, prob_cls, cls))
        if config.getboolean('detect', 'fix'):
            score = torch.unsqueeze(iou, -1) * prob
            mask = score > config.getfloat('detect', 'threshold_cls')
            indices, cls = torch.unbind(mask.nonzero(), -1)
            yx_min, yx_max = (t[indices] for t in (yx_min, yx_max))
            score = score[mask]
        else:
            score = iou
        return iou, yx_min, yx_max, cls, score
Code Example #17
 def stat_ap(self):
     cls_num = [0 for _ in self.category]
     cls_score = [np.array([], dtype=np.float32) for _ in self.category]
     cls_tp = [np.array([], dtype=np.bool) for _ in self.category]
     for data in tqdm.tqdm(self.loader):
         for key in data:
             t = data[key]
             if torch.is_tensor(t):
                 data[key] = utils.ensure_device(t)
         tensor = torch.autograd.Variable(data['tensor'], volatile=True)
         pred = pybenchmark.profile('inference')(model._inference)(
             self.inference, tensor)
         pred['iou'] = pred['iou'].contiguous()
         logits = get_logits(pred)
         pred['prob'] = F.softmax(logits, -1)
         for key in pred:
             pred[key] = pred[key].data
         if self.config.getboolean('eval', 'debug'):
             self.debug_data(data)
             self.debug_pred(pred)
         norm_bbox_data(data)
         norm_bbox_pred(pred)
         for path, difficult, image, data_yx_min, data_yx_max, data_cls, iou, yx_min, yx_max, prob in zip(
                 *(data[key] for key in 'path, difficult'.split(', ')),
                 *(torch.unbind(data[key])
                   for key in 'image, yx_min, yx_max, cls'.split(', ')),
                 *(torch.unbind(pred[key])
                   for key in 'iou, yx_min, yx_max, prob'.split(', '))):
             data_yx_min, data_yx_max, data_cls = filter_valid(
                 data_yx_min, data_yx_max, data_cls, difficult)
             for c in data_cls.cpu().numpy():
                 cls_num[c] += 1
             iou = iou.view(-1)
             yx_min, yx_max, prob = (t.view(-1, t.size(-1))
                                     for t in (yx_min, yx_max, prob))
             ret = postprocess(self.config, iou, yx_min, yx_max, prob)
             if ret is not None:
                 iou, yx_min, yx_max, cls, score = ret
                 for c in set(cls.cpu().numpy()):
                     c = int(c)  # PyTorch's bug
                     _score, tp = self.filter_cls(c, path, data_yx_min,
                                                  data_yx_max, data_cls,
                                                  yx_min, yx_max, cls,
                                                  score)
                     cls_score[c] = np.append(cls_score[c],
                                              _score.cpu().numpy())
                     cls_tp[c] = np.append(cls_tp[c], tp)
     return cls_num, cls_score, cls_tp
Code Example #18
File: tests.py Project: valabojub79/algoholic
    def runTest(self):
        funcs = inspect.getmembers(solution, inspect.isfunction)
        solutions = [(name, f) for name, f in funcs if 'solution' in name]

        for name, f in solutions:
            self.data.solution = f
            self.data.actual_result = profile(name)(
                lambda: self.data.solution(*self.data.input_data))()
            self.data.actual_kstones = stats[name]['kstones']

            self.assertEqual(self.data.expected_result,
                             self.data.actual_result,
                             msg=pformat(self.data.__dict__))
            # check that performance is lower than expected UPPER LIMIT of algorithm
            if self.data.expected_kstones:
                self.assertLess(self.data.actual_kstones,
                                self.data.expected_kstones,
                                msg=pformat(self.data.__dict__))
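Code Example #18 also shows how measurements are read back after a profiled call: the test looks up stats[name]['kstones'] under the same name that was passed to profile. Below is a hedged sketch of that read-back step; the example omits its imports, so the `from pybenchmark import profile, stats` line is an assumption, as is the exact content of a stats entry beyond the 'kstones' key the test uses.

from pybenchmark import profile, stats  # assumed import path; Code Example #18 omits its imports


def solution_sorted(data):
    return sorted(data)


# run the candidate solution through the profiler under its own name
result = profile('solution_sorted')(lambda: solution_sorted([3, 1, 2]))()
assert result == [1, 2, 3]

# measurements for that run are keyed by the profile label;
# 'kstones' is the cost figure the test compares against its expected upper limit
print(stats['solution_sorted']['kstones'])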
Code Example #19
File: eval.py Project: codealphago/yolo2-pytorch
 def stat_ap(self):
     cls_num = [0 for _ in self.category]
     cls_score = [np.array([], dtype=np.float32) for _ in self.category]
     cls_tp = [np.array([], dtype=np.bool) for _ in self.category]
     for data in tqdm.tqdm(self.loader):
         for key in data:
             t = data[key]
             if torch.is_tensor(t):
                 data[key] = utils.ensure_device(t)
         tensor = torch.autograd.Variable(data['tensor'], volatile=True)
         pred = pybenchmark.profile('inference')(model._inference)(self.inference, tensor)
         pred['iou'] = pred['iou'].contiguous()
         logits = get_logits(pred)
         pred['prob'] = F.softmax(logits, -1)
         for key in pred:
             pred[key] = pred[key].data
         if self.config.getboolean('eval', 'debug'):
             self.debug_data(data)
             self.debug_pred(pred)
         norm_bbox_data(data)
         norm_bbox_pred(pred)
         for path, difficult, image, data_yx_min, data_yx_max, data_cls, iou, yx_min, yx_max, prob in zip(*(data[key] for key in 'path, difficult'.split(', ')), *(torch.unbind(data[key]) for key in 'image, yx_min, yx_max, cls'.split(', ')), *(torch.unbind(pred[key]) for key in 'iou, yx_min, yx_max, prob'.split(', '))):
             data_yx_min, data_yx_max, data_cls = filter_valid(data_yx_min, data_yx_max, data_cls, difficult)
             for c in data_cls.cpu().numpy():
                 cls_num[c] += 1
             iou = iou.view(-1)
             yx_min, yx_max, prob = (t.view(-1, t.size(-1)) for t in (yx_min, yx_max, prob))
             ret = postprocess(self.config, iou, yx_min, yx_max, prob)
             if ret is not None:
                 iou, yx_min, yx_max, cls, score = ret
                 for c in set(cls.cpu().numpy()):
                     c = int(c)  # PyTorch's bug
                     _score, tp = self.filter_cls(c, path, data_yx_min, data_yx_max, data_cls, yx_min, yx_max, cls, score)
                     cls_score[c] = np.append(cls_score[c], _score.cpu().numpy())
                     cls_tp[c] = np.append(cls_tp[c], tp)
     return cls_num, cls_score, cls_tp
Code Example #20
 def __call__(self):
     image_bgr = self.get_image()
     image_resized = self.resize(image_bgr, self.height, self.width)
     image = self.transform_image(image_resized)
     tensor = self.transform_tensor(image)
     tensor = tensor.unsqueeze(0).to(self.device)
     outputs = pybenchmark.profile('inference')(self.inference)(tensor)
     if hasattr(self, 'draw_cluster'):
         output = outputs[-1]
         parts, limbs = (output[name][0]
                         for name in 'parts, limbs'.split(', '))
         parts = parts[:-1]
         parts, limbs = (t.detach().cpu().numpy() for t in (parts, limbs))
         try:
             interpolation = getattr(
                 cv2, 'INTER_' +
                 self.config.get('estimate', 'interpolation').upper())
             parts, limbs = (np.stack([
                 cv2.resize(feature, (self.width, self.height),
                            interpolation=interpolation) for feature in a
             ]) for a in (parts, limbs))
         except configparser.NoOptionError:
             pass
         clusters = pyopenpose.estimate(
             parts,
             limbs,
             self.limbs_index,
             self.config.getfloat('nms', 'threshold'),
             self.config.getfloat('integration', 'step'),
             tuple(
                 map(int,
                     self.config.get('integration',
                                     'step_limits').split())),
             self.config.getfloat('integration', 'min_score'),
             self.config.getint('integration', 'min_count'),
             self.config.getfloat('cluster', 'min_score'),
             self.config.getint('cluster', 'min_count'),
         )
         scale_y, scale_x = self.resize.scale(parts.shape[-2:],
                                              image_bgr.shape[:2])
         image_result = image_bgr.copy()
         for cluster in clusters:
             cluster = [((i1, int(y1 * scale_y), int(x1 * scale_x)),
                         (i2, int(y2 * scale_y), int(x2 * scale_x)))
                        for (i1, y1, x1), (i2, y2, x2) in cluster]
             image_result = self.draw_cluster(image_result, cluster)
     else:
         image_result = image_resized.copy()
         feature = self.get_feature(outputs).detach().cpu().numpy()
         image_result = self.draw_feature(image_result, feature)
     if self.args.output:
         if not hasattr(self, 'writer'):
             self.writer = self.create_writer(*image_result.shape[:2])
         self.writer.write(image_result)
     else:
         cv2.imshow('estimate', image_result)
     if cv2.waitKey(0 if self.args.pause else 1) in self.keys:
         root = os.path.join(self.model_dir, 'snapshot')
         os.makedirs(root, exist_ok=True)
         path = os.path.join(root, time.strftime(self.args.format))
         cv2.imwrite(path, image_bgr)
         logging.warning('image dumped into ' + path)
Code Example #21
def setup_positive_fixture():
    # callable that will be decorated and measured below
    some_code = lambda: time.sleep(POSITIVE_BENCHMARK_TIME)
    decorated = profile('test')(some_code)  # a la-carte decoration
    return_value = decorated()  # actual run/call of decorated callable
Code Example #22
def setup_memory_fixture():
    # callable that will be decorated and measured below
    some_code = lambda: [[]] * 100000
    decorated = profile('test_neg')(some_code)  # a la-carte decoration
    return_value = decorated()  # actual run/call of decorated callable
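The "a la-carte decoration" in Code Examples #21 and #22 (wrapping an existing lambda at the call site) is interchangeable with decorating a function at definition time, since profile('label') simply returns an ordinary decorator. A small sketch of the definition-time form, assuming only the behaviour the fixtures above rely on (the import path is again an assumption):

from pybenchmark import profile  # assumed import path, as in Code Examples #18, #21 and #22


@profile('bulk_alloc')  # equivalent to bulk_alloc = profile('bulk_alloc')(bulk_alloc)
def bulk_alloc():
    # same workload as Code Example #22: build a large list of repeated references
    return [[]] * 100000


return_value = bulk_alloc()  # each call is measured under the 'bulk_alloc' label
assert len(return_value) == 100000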