def inference():
    input_size = [int(v.strip()) for v in args.input_size.split(",")]
    priors = define_img_size(input_size)
    result_path = args.results_path
    imgs_path = args.imgs_path
    if not os.path.exists(result_path):
        os.makedirs(result_path)
    listdir = os.listdir(imgs_path)
    # Create the MNN interpreter and session once, outside the image loop,
    # so the model is not reloaded for every file.
    interpreter = MNN.Interpreter(args.model_path)
    session = interpreter.createSession()
    input_tensor = interpreter.getSessionInput(session)
    for file_path in tqdm(listdir):
        try:
            img_path = os.path.join(imgs_path, file_path)
            image_ori = cv2.imread(img_path)
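            # Preprocess: BGR -> RGB, resize to the network input size,
            # normalize with the dataset mean/std, and reorder HWC -> CHW.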
            image = cv2.cvtColor(image_ori, cv2.COLOR_BGR2RGB)
            image = cv2.resize(image, tuple(input_size))
            image = image.astype(float)
            image = (image - image_mean) / image_std
            image = image.transpose((2, 0, 1))
            image = image.astype(np.float32)
            tmp_input = MNN.Tensor((1, 3, input_size[1], input_size[0]),
                                   MNN.Halide_Type_Float, image,
                                   MNN.Tensor_DimensionType_Caffe)
            input_tensor.copyFrom(tmp_input)
            time_time = time.time()
            interpreter.runSession(session)
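            # Fetch the two output tensors and reshape them into
            # (1, num_priors, 2) class scores and (1, num_priors, 4) box offsets.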
            scores = interpreter.getSessionOutput(session, "scores").getData()
            boxes = interpreter.getSessionOutput(session, "boxes").getData()
            boxes = np.expand_dims(np.reshape(boxes, (-1, 4)), axis=0)
            scores = np.expand_dims(np.reshape(scores, (-1, 2)), axis=0)
            print("inference time: {} s".format(
                round(time.time() - time_time, 4)))
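            # Decode the SSD-style offsets against the priors into center-form
            # boxes, then convert them to corner form (x1, y1, x2, y2).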
            boxes = box_utils.convert_locations_to_boxes(
                boxes, priors, center_variance, size_variance)
            boxes = box_utils.center_form_to_corner_form(boxes)
            boxes, labels, probs = predict(image_ori.shape[1],
                                           image_ori.shape[0], scores, boxes,
                                           args.threshold)
            if boxes.shape[0] > 0:
                for i in range(boxes.shape[0]):
                    box = boxes[i, :]
                    cv2.rectangle(image_ori, (int(box[0]), int(box[1])),
                                  (int(box[2]), int(box[3])), (0, 255, 0), 2)
                    label = labels[i]
                    result_path_label = os.path.join(result_path, str(label))
                    if not os.path.exists(result_path_label):
                        os.makedirs(result_path_label)
                    cv2.imwrite(os.path.join(result_path_label, file_path),
                                image_ori)
                # cv2.imwrite(os.path.join(result_path, file_path), image_ori)
                print("result_pic is written to {}".format(
                    os.path.join(result_path, file_path)))
                # cv2.imshow("UltraFace_mnn_py", image_ori)
                # cv2.waitKey(-1)
        except Exception as e:
            print(e)
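

# Note: the predict() helper called above is not defined in this snippet. The
# sketch below shows the post-processing it typically performs in the UltraFace
# demos: per-class confidence thresholding, hard NMS, then scaling the
# normalized corner-form boxes to pixel coordinates. The box_utils.hard_nms
# helper and the default iou_threshold=0.3 are assumptions, not taken from
# this snippet.
def predict(width, height, confidences, boxes, prob_threshold,
            iou_threshold=0.3, top_k=-1):
    boxes = boxes[0]  # drop the batch dimension
    confidences = confidences[0]
    picked_box_probs = []
    picked_labels = []
    for class_index in range(1, confidences.shape[1]):  # class 0 is background
        probs = confidences[:, class_index]
        mask = probs > prob_threshold
        probs = probs[mask]
        if probs.shape[0] == 0:
            continue
        subset_boxes = boxes[mask, :]
        box_probs = np.concatenate([subset_boxes, probs.reshape(-1, 1)], axis=1)
        # Assumed NMS helper; keeps the highest-scoring, non-overlapping boxes.
        box_probs = box_utils.hard_nms(box_probs, iou_threshold=iou_threshold,
                                       top_k=top_k)
        picked_box_probs.append(box_probs)
        picked_labels.extend([class_index] * box_probs.shape[0])
    if not picked_box_probs:
        return np.array([]), np.array([]), np.array([])
    picked_box_probs = np.concatenate(picked_box_probs)
    # Scale normalized coordinates back to the original image size.
    picked_box_probs[:, 0] *= width
    picked_box_probs[:, 1] *= height
    picked_box_probs[:, 2] *= width
    picked_box_probs[:, 3] *= height
    return (picked_box_probs[:, :4].astype(np.int32),
            np.array(picked_labels),
            picked_box_probs[:, 4])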
Code Example #2
    image = cv2.cvtColor(orig_image, cv2.COLOR_BGR2RGB)
    #image = cv2.resize(image, (320, 240))
    image = cv2.resize(image, (640, 480))
    image_mean = np.array([127, 127, 127])
    image = (image - image_mean) / 128
    image = np.transpose(image, [2, 0, 1])
    image = np.expand_dims(image, axis=0)
    image = image.astype(np.float32)
    # confidences, boxes = predictor.run(image)
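    # Single forward pass through the exported ONNX model; the session returns
    # the class confidences and the raw box regressions together.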
    time_time = time.time()
    confidences, boxes = ort_session.run(None, {input_name: image})

    ############
    boxes = box_utils.convert_locations_to_boxes(
        boxes, fd_config.priors, fd_config.center_variance,
        fd_config.size_variance
        #torch.from_numpy(boxes), fd_config.priors, fd_config.center_variance, fd_config.size_variance
    )
    boxes = box_utils.center_form_to_corner_form(boxes)
    ############

    print("cost time:{}".format(time.time() - time_time))
    boxes, labels, probs = predict(orig_image.shape[1], orig_image.shape[0],
                                   confidences, boxes, threshold)

    for i in range(boxes.shape[0]):
        box = boxes[i, :]
        label = f"{class_names[labels[i]]}: {probs[i]:.2f}"

        color = random.choice(colors)
        c1 = (box[0], box[1])
Code Example #3
for file_path in listdir:
    img_path = os.path.join(path, file_path)
    time_time = time.time()
    orig_image = cv2.imread(img_path)
    image = cv2.cvtColor(orig_image, cv2.COLOR_BGR2RGB)
    image = cv2.resize(image, (320, 240))
    # image = cv2.resize(image, (640, 480))
    image_mean = np.array([127, 127, 127])
    image = (image - image_mean) / 128
    image = np.transpose(image, [2, 0, 1])
    image = np.expand_dims(image, axis=0)
    image = image.astype(np.float32)
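    # Forward pass: per-prior class confidences and raw box locations
    # (SSD offsets relative to the priors).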
    confidences, locations = ort_session.run(None, {input_name: image})
    # print(locations)
    boxes = box_utils.convert_locations_to_boxes(locations,
                                                 config.priors.to('cpu'),
                                                 config.center_variance,
                                                 config.size_variance)
    boxes = box_utils.center_form_to_corner_form(boxes)

    boxes, labels, probs = predict(orig_image.shape[1], orig_image.shape[0],
                                   confidences, boxes, threshold)
    print("FPS:{}".format(1 / (time.time() - time_time)))
    for i in range(boxes.shape[0]):
        box = boxes[i, :]
        label = f"{class_names[labels[i]]}: {probs[i]:.2f}"

        cv2.rectangle(orig_image, (int(box[0]), int(box[1])),
                      (int(box[2]), int(box[3])), (255, 255, 0), 4)

        cv2.putText(
            orig_image,