Example #1
def __init__(self, weights):
    # build the default options, then point them at the given checkpoint
    opt = opts().init()
    opt.load_model = weights
    os.environ['CUDA_VISIBLE_DEVICES'] = opt.gpus_str
    # pick the task-specific detector class and instantiate it
    Detector = detector_factory[opt.task]
    self.detector = Detector(opt)
    self.opt = opt
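
This `__init__` belongs to a small wrapper class around the CenterNet detector; the snippet omits the class declaration. A minimal usage sketch, assuming the enclosing class is called `FaceDetector` (a hypothetical name) and that the checkpoint path exists:

# Hypothetical usage; FaceDetector is an assumed name for the enclosing class,
# and the paths are illustrative.
wrapper = FaceDetector('../exp/multi_pose/model_best.pth')
ret = wrapper.detector.run('../readme/test.png')['results']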
Example #2
def main(opt):
    # init model; opt is rebuilt below from a command-line style string, so the
    # opt passed in is only used for its GPU selection here
    os.environ['CUDA_VISIBLE_DEVICES'] = opt.gpus_str

    debug = 0  # return the detection result without displaying it
    threshold = 0.05
    TASK = 'multi_pose'  # the multi_pose task covers face boxes plus landmarks
    input_h, input_w = 800, 800
    MODEL_PATH = '/home/deepblue/deepbluetwo/chenjun/4_face_detect/centerface/exp/multi_pose/mobilev2_10/model_15.pth'
    opt = opts().init(
        '--task {} --load_model {} --debug {} --vis_thresh {} --input_h {} --input_w {}'
        .format(TASK, MODEL_PATH, debug, threshold, input_h,
                input_w).split(' '))
    detector = detector_factory[opt.task](opt)

    out_onnx_path = "../output/onnx_model/mobilev2_aspaper.onnx"
    image = cv2.imread('../readme/test.png')
    torch_input, meta = detector.pre_process(image, scale=1)
    torch_input = torch_input.cuda()
    # keep the PyTorch output so it can be compared with the exported model
    torch_output = detector.model(torch_input)
    torch.onnx.export(detector.model,
                      torch_input,
                      out_onnx_path,
                      verbose=False)
    print('save done')

    # run the exported model through ONNX Runtime (imported as nxrun)
    sess = nxrun.InferenceSession(out_onnx_path)
    input_name = sess.get_inputs()[0].name
    output_onnx = sess.run(None, {input_name: torch_input.cpu().data.numpy()})
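
A natural follow-up is to check that ONNX Runtime reproduces the PyTorch output. A minimal sketch, assuming the model returns a single dict of output heads (as CenterNet-style models do) and that the exported output order matches the dict order; the tolerances are illustrative:

import numpy as np

torch_heads = torch_output[-1]  # assumed: a dict of head tensors
for onnx_head, (name, torch_head) in zip(output_onnx, torch_heads.items()):
    np.testing.assert_allclose(onnx_head,
                               torch_head.detach().cpu().numpy(),
                               rtol=1e-3, atol=1e-5)
    print('head {} matches'.format(name))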
Example #3
def test_img(MODEL_PATH):
    debug = 1  # draw and show the result image
    TASK = 'multi_pose'
    input_h, input_w = 800, 800
    # note the argument order: height first, then width
    opt = opts().init(
        '--task {} --load_model {} --debug {} --input_h {} --input_w {}'.
        format(TASK, MODEL_PATH, debug, input_h, input_w).split(' '))

    detector = detector_factory[opt.task](opt)

    img = '../readme/000388.jpg'
    ret = detector.run(img)['results']
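
The returned `results` is a dict keyed by class id; in these face snippets the detections live under key 1 (see Example #4), each row holding `[x1, y1, x2, y2, score, ...]`. A minimal sketch of filtering them, with an illustrative 0.3 confidence cut-off:

for det in ret[1]:
    x1, y1, x2, y2, score = det[:5]
    if score > 0.3:  # illustrative threshold
        print('face ({:.0f}, {:.0f}, {:.0f}, {:.0f}) score {:.2f}'.format(
            x1, y1, x2, y2, score))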
Example #4
def test_wider_Face(model_path):
    Path = '/home/deepblue/deepbluetwo/data/WIDER_val/images/'
    wider_face_mat = sio.loadmat(
        '/home/deepblue/deepbluetwo/data/wider_face_split/wider_face_val.mat')
    event_list = wider_face_mat['event_list']
    file_list = wider_face_mat['file_list']
    save_path = '../output/widerface/'

    debug = 0  # return the detection result without displaying it
    threshold = 0.05
    TASK = 'multi_pose'
    input_h, input_w = 800, 800
    opt = opts().init(
        '--task {} --load_model {} --debug {} --vis_thresh {} --input_h {} --input_w {}'
        .format(TASK, model_path, debug, threshold, input_h,
                input_w).split(' '))
    detector = detector_factory[opt.task](opt)

    for index, event in enumerate(event_list):
        file_list_item = file_list[index][0]
        im_dir = event[0][0]
        if not os.path.exists(save_path + im_dir):
            os.makedirs(save_path + im_dir)
        for num, file in enumerate(file_list_item):
            im_name = file[0][0]
            zip_name = '%s/%s.jpg' % (im_dir, im_name)
            img_path = os.path.join(Path, zip_name)
            print(img_path)
            dets = detector.run(img_path)['results']
            # WIDER FACE evaluation format: image name, face count, then one
            # "x y w h score" line per detection
            with open(save_path + im_dir + '/' + im_name + '.txt', 'w') as f:
                f.write('{:s}\n'.format(zip_name))
                f.write('{:d}\n'.format(len(dets[1])))
                for b in dets[1]:
                    x1, y1, x2, y2, s = b[0], b[1], b[2], b[3], b[4]
                    f.write('{:.1f} {:.1f} {:.1f} {:.1f} {:.3f}\n'.format(
                        x1, y1, (x2 - x1 + 1), (y2 - y1 + 1), s))
            print('event:%d num:%d' % (index + 1, num + 1))
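
Usage is a single call with a trained checkpoint; the path below is illustrative:

test_wider_Face('../exp/multi_pose/mobilev2_10/model_15.pth')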
Example #5
def test_video(model_path, video_path=None):
    debug = -1  # return the result image with the detections drawn on it
    TASK = 'multi_pose'
    vis_thresh = 0.45
    input_h, input_w = 800, 800
    opt = opts().init(
        '--task {} --load_model {} --debug {} --input_h {} --input_w {} --vis_thresh {}'
        .format(TASK, model_path, debug, input_h, input_w,
                vis_thresh).split(' '))
    detector = detector_factory[opt.task](opt)

    # fall back to the default camera (device 0) when no video path is given
    video = video_path if video_path else 0
    cap = cv2.VideoCapture(video)
    while cap.isOpened():
        grabbed = cap.grab()
        if grabbed:
            flag, frame = cap.retrieve()
            if not flag:
                continue
            res = detector.run(frame)
            cv2.imshow('face detect', res['plot_img'])

        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    cap.release()
    cv2.destroyAllWindows()
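
Usage mirrors the image test; pass a file path, or omit it to read from the default webcam (paths illustrative):

test_video('../exp/multi_pose/model_15.pth', video_path='../readme/demo.mp4')
test_video('../exp/multi_pose/model_15.pth')  # webcam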
Example #6
            # NOTE: this excerpt starts inside the video branch of demo(opt);
            # ret, i, start_time, time_str, write_cap and time_stats are
            # defined earlier in the full file.
            for stat in time_stats:
                time_str = time_str + '{} {:.3f}s |'.format(stat, ret[stat])
            if opt.output_video:
                write_cap.write(ret['plot_img'])
            print('fps:{:.3f}'.format(i / (time.time() - start_time)),
                  time_str)
            if cv2.waitKey(1) == 27:
                return  # esc to quit
    else:
        if os.path.isdir(opt.demo):
            image_names = []
            ls = os.listdir(opt.demo)
            for file_name in sorted(ls):
                ext = file_name[file_name.rfind('.') + 1:].lower()
                if ext in image_ext:
                    image_names.append(os.path.join(opt.demo, file_name))
        else:
            image_names = [opt.demo]

        for image_name in image_names:
            ret = detector.run(image_name)
            time_str = ''
            for stat in time_stats:
                time_str = time_str + '{} {:.3f}s |'.format(stat, ret[stat])
            print(time_str)


if __name__ == '__main__':
    opt = opts().init()
    demo(opt)
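
The fragment leans on module-level names (`image_ext`, `time_stats`) that the excerpt does not show. A sketch of plausible definitions, consistent with how they are used above; the exact values are assumptions:

# Assumed module-level constants; values are illustrative, not from the source.
image_ext = ['jpg', 'jpeg', 'png', 'webp']
time_stats = ['tot', 'load', 'pre', 'net', 'dec', 'post', 'merge']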
Example #7
                # (truncated in the original snippet: the tail of a cv2.putText
                # call from the preceding function)
                # font, 0.5, (0, 255, 0), thickness=1, lineType=cv2.LINE_AA)

def add_coco_hp(image, points, keypoints_prob):
    # draw the five facial landmarks whose confidence exceeds 0.5
    for j in range(5):
        if keypoints_prob[j] > 0.5:
            cv2.circle(image, (points[j, 0], points[j, 1]), 2, (255, 255, 0), -1)
    return image


if __name__ == '__main__':
    # 0. build the TensorRT engine (convert the ONNX model to TensorRT)
    # onnx_path = '../output/onnx_model/mobilev2_large.onnx'
    trt_path = '../output/onnx_model/mobilev2.trt'
    # build_engine(onnx_path, trt_path, 'fp32', 1)
    # print('build TensorRT engine done')
    config = opts().init()

    # 1. load the TensorRT engine
    body_engine = CenterNetTensorRTEngine(weight_file=trt_path, config=config)
    print('load TensorRT engine done')

    # 2. open the video used for tracking
    cap = cv2.VideoCapture('../readme/唐人街网剧.mp4')

    # 3. write the result images into an output video
    if config.output_video:
        fourcc = cv2.VideoWriter_fourcc(*'mp4v')  # mp4 files need the mp4v codec
        im_width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
        im_height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
        write_cap = cv2.VideoWriter(config.output_video, fourcc, 50, (im_width, im_height))
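
The snippet stops before the frame loop. A minimal sketch of what could follow, assuming `body_engine.run(frame)` returns detections shaped like the other examples (`{1: [[x1, y1, x2, y2, score, ...], ...]}`); the method name and return shape are assumptions:

# Hypothetical frame loop; body_engine.run() and its return shape are assumed.
while cap.isOpened():
    flag, frame = cap.read()
    if not flag:
        break
    dets = body_engine.run(frame)
    for b in dets[1]:
        if b[4] > config.vis_thresh:
            cv2.rectangle(frame, (int(b[0]), int(b[1])),
                          (int(b[2]), int(b[3])), (0, 255, 0), 2)
    if config.output_video:
        write_cap.write(frame)
    cv2.imshow('face detect', frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
cap.release()
cv2.destroyAllWindows()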
Example #8
def main(opt, qtepoch=[0,]):
  # NOTE: a mutable default argument is normally a Python pitfall; here it
  # appears deliberate, letting an external caller observe epoch progress.
  torch.manual_seed(opt.seed)
  torch.backends.cudnn.benchmark = not opt.not_cuda_benchmark and not opt.test
  Dataset = get_dataset(opt.dataset, opt.task)
  opt = opts().update_dataset_info_and_set_heads(opt, Dataset)
  print(opt)

  logger = Logger(opt)

  os.environ['CUDA_VISIBLE_DEVICES'] = opt.gpus_str
  opt.device = torch.device('cuda' if opt.gpus[0] >= 0 else 'cpu')
  
  print('Creating model...')
  model = create_model(opt.arch, opt.heads, opt.head_conv)

  optimizer = torch.optim.Adam(model.parameters(), opt.lr)
  # optimizer = torch.optim.SGD(model.parameters(), opt.lr)
  start_epoch = 0
  if opt.load_model != '':
    model, optimizer, start_epoch = load_model(
      model, opt.load_model, optimizer, opt.resume, opt.lr, opt.lr_step)

  Trainer = train_factory[opt.task]
  trainer = Trainer(opt, model, optimizer)
  trainer.set_device(opt.gpus, opt.chunk_sizes, opt.device)

  print('Setting up data...')
  val_loader = torch.utils.data.DataLoader(
      Dataset(opt, 'val'), 
      batch_size=1, 
      shuffle=False,
      num_workers=1,
      pin_memory=True
  )

  if opt.test:
    _, preds = trainer.val(0, val_loader)
    val_loader.dataset.run_eval(preds, opt.save_dir)
    return

  train_loader = torch.utils.data.DataLoader(
      Dataset(opt, 'train'), 
      batch_size=opt.batch_size, 
      shuffle=True,
      num_workers=opt.num_workers,
      pin_memory=True,
      drop_last=True,
      collate_fn=Multiposebatch
  )

  print('Starting training...')
  best = 1e10
  for epoch in range(start_epoch + 1, opt.num_epochs + 1):
    qtepoch.append(epoch)
    mark = epoch if opt.save_all else 'last'
    log_dict_train, _ = trainer.train(epoch, train_loader)
    logger.write('epoch: {} |'.format(epoch))
    for k, v in log_dict_train.items():
      logger.scalar_summary('train_{}'.format(k), v, epoch)
      logger.write('{} {:8f} | '.format(k, v))
    if opt.val_intervals > 0 and epoch % opt.val_intervals == 0:
      save_model(os.path.join(opt.save_dir, 'model_{}.pth'.format(mark)), 
                 epoch, model, optimizer)
      with torch.no_grad():
        log_dict_val, preds = trainer.val(epoch, val_loader)
      for k, v in log_dict_val.items():
        logger.scalar_summary('val_{}'.format(k), v, epoch)
        logger.write('{} {:8f} | '.format(k, v))
      if log_dict_val[opt.metric] < best:
        best = log_dict_val[opt.metric]
        save_model(os.path.join(opt.save_dir, 'model_best.pth'), 
                   epoch, model)
    else:
      save_model(os.path.join(opt.save_dir, 'model_last.pth'), 
                 epoch, model, optimizer)
    logger.write('\n')
    if epoch in opt.lr_step:
      save_model(os.path.join(opt.save_dir, 'model_{}.pth'.format(epoch)), 
                 epoch, model, optimizer)
      lr = opt.lr * (0.1 ** (opt.lr_step.index(epoch) + 1))
      print('Drop LR to', lr)
      for param_group in optimizer.param_groups:
          param_group['lr'] = lr
  logger.close()
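
The manual schedule at the bottom multiplies the base LR by 0.1 for each milestone passed. A worked example with illustrative values `opt.lr = 5e-4` and `opt.lr_step = [90, 120]`:

# Illustrative values; the actual defaults depend on the experiment config.
lr, lr_step = 5e-4, [90, 120]
for epoch in (90, 120):
    print('epoch {}: drop LR to {:g}'.format(
        epoch, lr * (0.1 ** (lr_step.index(epoch) + 1))))
# epoch 90: drop LR to 5e-05
# epoch 120: drop LR to 5e-06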
Example #9
(The body of this example repeats the training loop from Example #8 verbatim; only the entry point below differs.)

if __name__ == '__main__':
  opt = opts().parse()
  main(opt)