Example #1
 def __init__(self, devid, frameBuffer, results, camera_width,
              camera_height, number_of_ncs, vidfps):
     self.devid = devid
     self.frameBuffer = frameBuffer
     #self.model_xml = "./pretrained-models/frozen_yolo_v3.xml"
     #self.model_bin = "./pretrained-models/frozen_yolo_v3.bin"
     self.model_xml = "../public/yolo-v3-tf/FP16/yolo-v3-tf.xml"
     self.model_bin = "../public/yolo-v3-tf/FP16/yolo-v3-tf.bin"
     self.camera_width = camera_width
     self.camera_height = camera_height
     self.m_input_size = 416
     self.threshold = 0.7
     self.num_requests = 4
     self.inferred_request = [0] * self.num_requests
     self.heap_request = []
     self.inferred_cnt = 0
     self.iecore = IECore()
     self.net = self.iecore.read_network(model=self.model_xml,
                                         weights=self.model_bin)
     self.input_blob = next(iter(self.net.input_info))
     self.exec_net = self.iecore.load_network(
         network=self.net,
         device_name="MYRIAD",
         num_requests=self.num_requests)
     self.results = results
     self.number_of_ncs = number_of_ncs
     self.predict_async_time = 800
     self.skip_frame = 0
     self.roop_frame = 0
     self.vidfps = vidfps
     self.new_w = int(camera_width * min(self.m_input_size / camera_width,
                                         self.m_input_size / camera_height))
     self.new_h = int(camera_height *
                      min(self.m_input_size / camera_width,
                          self.m_input_size / camera_height))
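For reference, assuming a typical 640x480 camera frame (480 is the usual default height; the 640 width is an assumption here), the letterbox dimensions computed at the end of __init__ work out as follows:

m_input_size, camera_width, camera_height = 416, 640, 480
scale = min(m_input_size / camera_width, m_input_size / camera_height)  # min(0.65, 0.87) = 0.65
new_w = int(camera_width * scale)   # 416
new_h = int(camera_height * scale)  # 312, later centered on a 416x416 canvas (see Example #5)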
Example #2
 def __init__(self, devid, device, model_xml, frameBuffer, results,
              camera_width, camera_height, number_of_ncs, vidfps, nPoints,
              w, h, new_w, new_h):
     self.devid = devid
     self.frameBuffer = frameBuffer
     self.model_xml = model_xml
     self.model_bin = os.path.splitext(model_xml)[0] + ".bin"
     self.camera_width = camera_width
     self.camera_height = camera_height
     self.threshold = 0.1
     self.nPoints = nPoints
     self.num_requests = 4
     self.inferred_request = [0] * self.num_requests
     self.heap_request = []
     self.inferred_cnt = 0
     self.iecore = IECore()
     if device == "CPU" and platform.processor() == "x86_64":
         # CPU layer extensions are only needed on older OpenVINO releases
         self.iecore.add_extension("lib/libcpu_extension.so", "CPU")
     self.net = self.iecore.read_network(
         model=self.model_xml, weights=self.model_bin)
     self.input_blob = next(iter(self.net.inputs))
     self.exec_net = self.iecore.load_network(
         network=self.net,
         device_name=device,
         num_requests=self.num_requests)
     self.results = results
     self.number_of_ncs = number_of_ncs
     self.predict_async_time = 250
     self.skip_frame = 0
     self.roop_frame = 0
     self.vidfps = vidfps
     self.w = w  #432
     self.h = h  #368
     self.new_w = new_w
     self.new_h = new_h
Example #3
    def __init__(self):
        try:
            model_xml = "models/train/test/openvino/mobilenet_v2_0.5_224/FP32/frozen-model.xml"

            model_bin = os.path.splitext(model_xml)[0] + ".bin"
            ie = IECore()
            net = ie.read_network(model=model_xml, weights=model_bin)
            self.input_blob = next(iter(net.inputs))
            self.exec_net = ie.load_network(network=net, device_name="CPU")
            inputs = net.inputs["image"]

            self.h = inputs.shape[2]  # e.g. 224 for mobilenet_v2_0.5_224
            self.w = inputs.shape[3]  # e.g. 224

            logger.info("Loaded model")
        except:
            logger.error("Error loading model", exc_info=True)
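Example #3 only loads the network; a method like the one below (a sketch, not part of the original class, assuming cv2 and numpy as np are imported) would run one synchronous inference with it:

    def classify(self, frame):
        # Resize a BGR frame to the network input, reorder HWC -> NCHW, add a batch axis
        blob = cv2.resize(frame, (self.w, self.h))
        blob = blob.transpose((2, 0, 1))[np.newaxis, ...]
        # Synchronous inference returns a dict of output name -> ndarray
        res = self.exec_net.infer(inputs={self.input_blob: blob})
        return int(np.argmax(next(iter(res.values()))))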
Example #4
    parser.add_argument('--camera_height', type=int, default=480, help='USB Camera resolution (height). (Default=480)')
    parser.add_argument('--vidfps', type=int, default=30, help='FPS of Video. (Default=30)')
    parser.add_argument('--device', type=str, default='CPU',
                        help='Specify the target device to infer on; CPU, GPU, FPGA or MYRIAD is acceptable. '
                             'Sample will look for a suitable plugin for the device specified (CPU by default).')
    args = parser.parse_args()
    deep_model    = args.deep_model
    usbcamno      = args.usbcamno
    camera_width  = args.camera_width
    camera_height = args.camera_height
    vidfps        = args.vidfps
    device        = args.device

    model_xml = deep_model
    model_bin = os.path.splitext(model_xml)[0] + ".bin"

    ie = IECore()
    net = ie.read_network(model_xml, model_bin)
    input_info = net.input_info
    input_blob = next(iter(input_info))
    exec_net = ie.load_network(network=net, device_name=device)

    cam = cv2.VideoCapture(usbcamno)
    cam.set(cv2.CAP_PROP_FPS, vidfps)
    cam.set(cv2.CAP_PROP_FRAME_WIDTH, camera_width)
    cam.set(cv2.CAP_PROP_FRAME_HEIGHT, camera_height)
    waittime = 1
    window_name = "USB Camera"

    cv2.namedWindow(window_name, cv2.WINDOW_AUTOSIZE)

    while True:
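Example #4 stops at the top of its capture loop. A minimal loop body might look like the sketch below (not from the original program, numpy as np assumed imported); it assumes the model takes a single NCHW image input and only displays the raw frame, since postprocessing is model-specific:

    n, c, h, w = net.input_info[input_blob].input_data.shape
    while True:
        ret, frame = cam.read()
        if not ret:
            break
        # Resize the frame and reorder HWC -> NCHW for the network
        blob = cv2.resize(frame, (w, h)).transpose((2, 0, 1))[np.newaxis, ...]
        res = exec_net.infer(inputs={input_blob: blob})  # dict of output blobs
        cv2.imshow(window_name, frame)
        if cv2.waitKey(waittime) & 0xFF == 27:  # ESC to quit
            break
    cv2.destroyAllWindows()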
Example #5
class NcsWorker(object):
    def __init__(self, devid, frameBuffer, results, camera_width,
                 camera_height, number_of_ncs, vidfps):
        self.devid = devid
        self.frameBuffer = frameBuffer
        #self.model_xml = "./pretrained-models/frozen_yolo_v3.xml"
        #self.model_bin = "./pretrained-models/frozen_yolo_v3.bin"
        self.model_xml = "../public/yolo-v3-tf/FP16/yolo-v3-tf.xml"
        self.model_bin = "../public/yolo-v3-tf/FP16/yolo-v3-tf.bin"
        self.camera_width = camera_width
        self.camera_height = camera_height
        self.m_input_size = 416
        self.threshold = 0.7
        self.num_requests = 4
        self.inferred_request = [0] * self.num_requests
        self.heap_request = []
        self.inferred_cnt = 0
        self.iecore = IECore()
        self.net = self.iecore.read_network(model=self.model_xml,
                                            weights=self.model_bin)
        self.input_blob = next(iter(self.net.input_info))
        self.exec_net = self.iecore.load_network(
            network=self.net,
            device_name="MYRIAD",
            num_requests=self.num_requests)
        self.results = results
        self.number_of_ncs = number_of_ncs
        self.predict_async_time = 800
        self.skip_frame = 0
        self.roop_frame = 0
        self.vidfps = vidfps
        self.new_w = int(camera_width * min(self.m_input_size / camera_width,
                                            self.m_input_size / camera_height))
        self.new_h = int(camera_height *
                         min(self.m_input_size / camera_width,
                             self.m_input_size / camera_height))

    def image_preprocessing(self, color_image):
        resized_image = cv2.resize(color_image, (self.new_w, self.new_h),
                                   interpolation=cv2.INTER_CUBIC)
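        # Letterbox: paste the aspect-preserving resize onto a gray 416x416 canvas (value 128)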
        canvas = np.full((self.m_input_size, self.m_input_size, 3), 128)
        canvas[(self.m_input_size - self.new_h) //
               2:(self.m_input_size - self.new_h) // 2 + self.new_h,
               (self.m_input_size - self.new_w) //
               2:(self.m_input_size - self.new_w) // 2 +
               self.new_w, :] = resized_image
        prepimg = canvas
        prepimg = prepimg[np.newaxis, :, :, :]  # Add batch axis
        prepimg = prepimg.transpose((0, 3, 1, 2))  # NHWC to NCHW
        return prepimg

    def skip_frame_measurement(self):
        surplustime_per_second = (1000 - self.predict_async_time)
        if surplustime_per_second > 0.0:
            frame_per_millisecond = (1000 / self.vidfps)
            total_skip_frame = surplustime_per_second / frame_per_millisecond
            self.skip_frame = int(total_skip_frame / self.num_requests)
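            # e.g. with the defaults above: 1000 - 800 = 200 ms of slack per second,
            # one frame every 1000 / 30 = 33.3 ms, so about 6 skippable frames
            # and skip_frame = int(6 / 4) = 1 per request slot.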
        else:
            self.skip_frame = 0

    def predict_async(self):
        try:
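            # Strategy: take a frame, start an async request in a free slot
            # (inferred_request flags which of the num_requests slots are busy),
            # then check whether the oldest in-flight request has finished.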

            if self.frameBuffer.empty():
                return

            self.roop_frame += 1
            if self.roop_frame <= self.skip_frame:
                self.frameBuffer.get()
                return
            self.roop_frame = 0

            prepimg = self.image_preprocessing(self.frameBuffer.get())
            reqnum = searchlist(self.inferred_request, 0)

            if reqnum > -1:
                self.exec_net.start_async(request_id=reqnum,
                                          inputs={self.input_blob: prepimg})
                self.inferred_request[reqnum] = 1
                self.inferred_cnt += 1
                if self.inferred_cnt == sys.maxsize:
                    self.inferred_request = [0] * self.num_requests
                    self.heap_request = []
                    self.inferred_cnt = 0
                heapq.heappush(self.heap_request, (self.inferred_cnt, reqnum))

            cnt, dev = heapq.heappop(self.heap_request)

            if self.exec_net.requests[dev].wait(0) == 0:
                self.exec_net.requests[dev].wait(-1)

                objects = []
                outputs = self.exec_net.requests[dev].outputs
                for output in outputs.values():
                    objects = ParseYOLOV3Output(output, self.new_h, self.new_w,
                                                self.camera_height,
                                                self.camera_width,
                                                self.threshold, objects)

                objlen = len(objects)
                for i in range(objlen):
                    if (objects[i].confidence == 0.0):
                        continue
                    for j in range(i + 1, objlen):
                        if (IntersectionOverUnion(objects[i], objects[j]) >=
                                0.4):
                            objects[j].confidence = 0

                self.results.put(objects)
                self.inferred_request[dev] = 0
            else:
                heapq.heappush(self.heap_request, (cnt, dev))
        except:
            import traceback
            traceback.print_exc()
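Examples #5 and #6 call helpers that are defined elsewhere in their source files (searchlist, ParseYOLOV3Output, IntersectionOverUnion). Judging from how searchlist is used above (finding a free inference-request slot), it presumably behaves like the following sketch:

def searchlist(l, x, notfoundvalue=-1):
    # Index of the first element equal to x, or notfoundvalue if x is absent.
    if x in l:
        return l.index(x)
    return notfoundvalue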
Example #6
class NcsWorker(object):
    def __init__(self, devid, device, model_xml, frameBuffer, results,
                 camera_width, camera_height, number_of_ncs, vidfps, nPoints,
                 w, h, new_w, new_h):
        self.devid = devid
        self.frameBuffer = frameBuffer
        self.model_xml = model_xml
        self.model_bin = os.path.splitext(model_xml)[0] + ".bin"
        self.camera_width = camera_width
        self.camera_height = camera_height
        self.threshold = 0.1
        self.nPoints = nPoints
        self.num_requests = 4
        self.inferred_request = [0] * self.num_requests
        self.heap_request = []
        self.inferred_cnt = 0
        self.iecore = IECore()
        if device == "CPU" and platform.processor() == "x86_64":
            # CPU layer extensions are only needed on older OpenVINO releases
            self.iecore.add_extension("lib/libcpu_extension.so", "CPU")
        self.net = self.iecore.read_network(
            model=self.model_xml, weights=self.model_bin)
        self.input_blob = next(iter(self.net.inputs))
        self.exec_net = self.iecore.load_network(
            network=self.net,
            device_name=device,
            num_requests=self.num_requests)
        self.results = results
        self.number_of_ncs = number_of_ncs
        self.predict_async_time = 250
        self.skip_frame = 0
        self.roop_frame = 0
        self.vidfps = vidfps
        self.w = w  #432
        self.h = h  #368
        self.new_w = new_w
        self.new_h = new_h

    def skip_frame_measurement(self):
        surplustime_per_second = (1000 - self.predict_async_time)
        if surplustime_per_second > 0.0:
            frame_per_millisecond = (1000 / self.vidfps)
            total_skip_frame = surplustime_per_second / frame_per_millisecond
            self.skip_frame = int(total_skip_frame / self.num_requests)
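            # e.g. with the defaults above: 1000 - 250 = 750 ms of slack per second,
            # one frame every 1000 / 30 = 33.3 ms, so about 22 skippable frames
            # and skip_frame = int(22.5 / 4) = 5 per request slot.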
        else:
            self.skip_frame = 0

    def predict_async(self):
        try:

            if self.frameBuffer.empty():
                return

            self.roop_frame += 1
            if self.roop_frame <= self.skip_frame:
                self.frameBuffer.get()
                return
            self.roop_frame = 0

            prepimg = self.frameBuffer.get()
            reqnum = searchlist(self.inferred_request, 0)

            if reqnum > -1:
                prepimg = prepimg[np.newaxis, :, :, :]  # Add batch axis
                prepimg = prepimg.transpose(
                    (0, 3, 1, 2))  # NHWC to NCHW, (1, 3, 368, 432)
                self.exec_net.start_async(request_id=reqnum,
                                          inputs={self.input_blob: prepimg})
                self.inferred_request[reqnum] = 1
                self.inferred_cnt += 1
                if self.inferred_cnt == sys.maxsize:
                    self.inferred_request = [0] * self.num_requests
                    self.heap_request = []
                    self.inferred_cnt = 0
                heapq.heappush(self.heap_request, (self.inferred_cnt, reqnum))

            try:
                cnt, dev = heapq.heappop(self.heap_request)
            except IndexError:
                # No inference request has been issued yet
                return

            if self.exec_net.requests[dev].wait(0) == 0:
                self.exec_net.requests[dev].wait(-1)

                detected_keypoints = []
                keypoints_list = np.zeros((0, 3))
                keypoint_id = 0

                outputs = self.exec_net.requests[dev].outputs[
                    "Openpose/concat_stage7"]
                for part in range(self.nPoints):
                    probMap = outputs[0, part, :, :]
                    probMap = cv2.resize(probMap,
                                         (self.w, self.h))  # (432, 368)
                    keypoints = getKeypoints(probMap, self.threshold)
                    keypoints_with_id = []

                    for i in range(len(keypoints)):
                        keypoints_with_id.append(keypoints[i] +
                                                 (keypoint_id, ))
                        keypoints_list = np.vstack(
                            [keypoints_list, keypoints[i]])
                        keypoint_id += 1

                    detected_keypoints.append(keypoints_with_id)

                self.results.put([detected_keypoints, outputs, keypoints_list])
                self.inferred_request[dev] = 0
            else:
                heapq.heappush(self.heap_request, (cnt, dev))
        except:
            import traceback
            traceback.print_exc()
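getKeypoints is likewise defined elsewhere in the source. Neither example shows how the workers are driven; one plausible wiring, assuming frameBuffer and results are multiprocessing queues that other processes fill and drain (async_infer and start_workers are illustrative names, not from the original code):

import threading

def async_infer(ncsworker):
    # Measure the skip rate once, then keep draining the frame queue.
    ncsworker.skip_frame_measurement()
    while True:
        ncsworker.predict_async()

def start_workers(frameBuffer, results, number_of_ncs,
                  camera_width, camera_height, vidfps):
    # One NcsWorker (Example #5 signature) per Neural Compute Stick,
    # each polling the shared frameBuffer from its own thread.
    for devid in range(number_of_ncs):
        worker = NcsWorker(devid, frameBuffer, results, camera_width,
                           camera_height, number_of_ncs, vidfps)
        threading.Thread(target=async_infer, args=(worker,), daemon=True).start()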