def read_video():
    while True:
        ret, image = cap.read()
        if not ret:
            continue  # no frame available; retry
        # push a copy of the frame into the detector's input pipe, block
        # until a result is ready, then forward the detections to the
        # tracker's input pipe
        detector_ip.push(Inference(image.copy()))
        detector_op.wait()
        ret, inference = detector_op.pull()
        if ret:
            i_dets = inference.get_result()
            trk_ip.push(Inference(i_dets))
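
The globals used by these reader loops (cap, detector_ip, detector_op, trk_ip) are created outside the snippets. A minimal setup sketch, following the detector construction shown in the last example on this page; the capture source and the tracker pipe are assumptions, not from the original source:

cap = cv2.VideoCapture(0)  # assumed source: default camera
ret, image = cap.read()    # one frame to obtain the input shape

session_runner = SessionRunner()
detection = TFObjectDetectionAPI(
    PRETRAINED_faster_rcnn_inception_v2_coco_2018_01_28, image.shape,
    'tf_api', True)
detector_ip = detection.get_in_pipe()
detector_op = detection.get_out_pipe()
detection.use_session_runner(session_runner)
session_runner.start()
detection.run()

trk_ip = Pipe()  # assumed: input pipe of a separately constructed tracker stage
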
def read():
    while True:
        ret, image = cap.read()
        if not ret:
            continue

        image = image.copy()
        stock_image_in.push(image)  # raw frame for the stock pipeline
        # image = cv2.resize(image, (int(image.shape[1] / 2), int(image.shape[0] / 2)))
        detector_ip.push(Inference(image))
        detector_op.wait()
        ret, inference = detector_op.pull()
        if ret:
            i_dets = inference.get_result()
            trk_ip.push(Inference(i_dets))
Example #3
def read_video():
    # start = time.time()
    while True:
        ret, image = cap.read()
        if not ret:
            continue
        # this reader only feeds the detector; results are pulled elsewhere
        detector_ip.push(Inference(image.copy()))
Example #4
    def detect_age_gender(trackers):
        if not Tracker.__age_inference:
            print("initializing Age Model")
            Tracker.__age_inference = AgeApiRunner(SessionRunner())
        detector_ip = Tracker.__age_inference.get_detector_ip()
        detector_op = Tracker.__age_inference.get_detector_op()
        for i, trk in enumerate(trackers):
            trk_trail = trk.get_trail()
            person = trk_trail.get_person()
            # skip trackers that are not due for age detection, already
            # have enough age/gender samples, or have no image patch
            if (not trk.detect_age()
                    or len(person.get_age_list()) >= 10
                    or len(person.get_gender_list()) >= 10
                    or trk.get_image() is None):
                continue
            # print("len of tracker", i, " " ,len(trk.get_patches()))
            detector_ip.push(Inference(trk.get_image().copy()))
            # ret, inference = detector_op.pull(True)

            while True:
                detector_op.wait()
                ret, inference = detector_op.pull(True)
                if ret:
                    result = inference.get_result()
                    if result.get_genders() is None or result.get_ages() is None:
                        break
                    print(result.get_genders())
                    print(result.get_ages())
                    # the gender head yields one confidence score: below 0.5
                    # is taken as male, otherwise female
                    gender_confidence = result.get_genders()[0][0]
                    gender = 'M' if gender_confidence < 0.5 else 'F'
                    age = int(result.get_ages()[0])
                    person.add_age(age)
                    person.add_gender(gender, gender_confidence)
                    print("ages", person.get_age_list())
                    print("genders", person.get_gender_list())
                    break
Example #5
def infer_yolo(timestamp, margin, points):
    while True:
        yolo_input.wait()
        ret, image = yolo_input.pull(flush=True)
        if not ret:
            continue

        # ret, flag = zone_image_update.pull()
        # if ret and flag:
        #     cv2.imwrite('../../../Angular-Dashboard-master/src/assets/rack_image.jpg', image)
        #     print("After Update")

        inference = Inference(image)
        img_shape = image.shape
        ret, point_set_dict = point_set.pull()
        if ret:
            point_set_dict["point_set_2"] = [
                [0,
                 0.6 * margin], [img_shape[1] - 4.25 * margin, 0.6 * margin],
                [img_shape[1] - 4.25 * margin, img_shape[0] - 0.6 * margin],
                [0, img_shape[0] - 0.6 * margin]
            ]
            print("updated points")
            points = point_set_dict['point_set_2']
            retail_an_object.rack_dict = point_set_dict
            retail_an_object.global_init()

        inference.get_meta_dict()['warp_points'] = retail_an_object.rack_dict
        yolo_ip.push(inference)
        yolo_op.wait()
        ret, inference = yolo_op.pull()
        ret1, zones = zone_detection_in_pipe.pull()
        if ret:
            i_dets = inference.get_result()
            boxes = tracker_stock.update(i_dets)
            image = i_dets.get_annotated(boxes)
            if ret1 and not zones:
                image = retail_an_object.print_shelfNo(image)
                image = retail_an_object.misplacedBoxes(boxes, image)
                image = retail_an_object.draw_empty_space(boxes, image)
                flag = retail_an_object.change_of_state()
                # cv2.rectangle(image, (margin, margin), (img_shape[1]-margin, img_shape[0]-margin), (0, 0, 255), 3)

                current_time = time.time()
                elapsed_seconds = current_time - timestamp
                # print(elapsed_seconds)
                if elapsed_seconds > 3:
                    if flag == 1:
                        # POST the state change from a background thread,
                        # throttled to at most one request every 3 seconds
                        Thread(target=retail_an_object.postdata).start()
                        timestamp = time.time()

            # print("left corner: ",points[0][1])
            # crop the annotated frame to the region of interest
            image = image[
                int(points[0][1] - margin / 2):int(points[2][1] + margin / 2),
                int(points[0][0]):int(points[2][0] + margin / 2), :]
            stock_in_pipe.push(image)
            cv2.imshow("retail_out", image)
            cv2.waitKey(1)
Example #6
def start_cam():
    while True:
        ret, image = cap.read()
        if not ret:
            continue
        # results for this inference return on ret_pipe rather than on
        # a shared detector output pipe
        inference = Inference(image.copy(), return_pipe=ret_pipe)
        detector.get_in_pipe().push_wait()
        detector.get_in_pipe().push(inference)
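
The loop above only produces inferences; a matching consumer would drain ret_pipe. A minimal sketch following the wait/pull pattern used elsewhere on this page; the function name and the display step are illustrative assumptions:

def show_results():
    while True:
        ret_pipe.wait()  # block until the detector returns a result
        ret, inference = ret_pipe.pull()
        if not ret:
            continue
        i_dets = inference.get_result()
        cv2.imshow("detections", i_dets.get_annotated())
        cv2.waitKey(1)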
Example #7
def readvideo():
    while True:
        ret, image = cap0.read()
        if ret:
            ip0.push(Inference(image.copy()))
        else:
            break  # end of the video stream
        time.sleep(0.025)  # pace reads at roughly 40 fps
def extract_features(patch, ip, op):
    # equalize each plane; cv2.equalizeHist expects a single-channel
    # 8-bit image, so this assumes a channel-first patch layout
    patch[0] = cv2.equalizeHist(patch[0])
    patch[1] = cv2.equalizeHist(patch[1])
    patch[2] = cv2.equalizeHist(patch[2])
    ip.push(Inference(patch, meta_dict={}))
    op.wait()
    ret, feature_inference = op.pull()
    if ret:
        return feature_inference.get_result()
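
The ip and op arguments are the in/out pipes of a feature extractor. A minimal wiring sketch whose setup calls mirror the ResNet50ExtractorAPI example later on this page; the patch source and the channel-first transpose are assumptions:

session_runner = SessionRunner()
extractor = ResNet50ExtractorAPI('rn50_api', True)
extractor.use_session_runner(session_runner)
session_runner.start()
extractor.run()

# channel-first layout, since extract_features equalizes per plane
patch = cv2.imread('patch.jpg').transpose(2, 0, 1)
features = extract_features(patch, extractor.get_in_pipe(), extractor.get_out_pipe())
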
def load():
    # print("LOADED")
    while True:
        ret, image = cap.read()
        print(ret)
        if not ret:
            continue
        detector_ip.push(Inference(image.copy()))
        # print("#################################")
        sleep(0.05)
Example #10
def read():
    while True:
        ret, image = cap.read()
        if not ret:
            continue

        image = image.copy()
        yolo_input.push(image)
        # print("Image read count: ", count)
        # count+=1
        # image = cv2.resize(image, (int(image.shape[1] / 2), int(image.shape[0] / 2)))
        detector_ip.push(Inference(image))
        detector_op.wait()
        ret, inference = detector_op.pull()
        if ret:
            i_dets = inference.get_result()
            infer_idets = Inference(i_dets)
            # expose the zone pipe to downstream stages via the metadata
            infer_idets.get_meta_dict()['zone_pipe'] = zone_pipe
            trk_ip.push(infer_idets)
Example #11
def read():
    while True:
        ret, image = cap.read()
        # image = cv2.resize(image, (int(image.shape[1]/2), int(image.shape[0]/2)))
        # if count == 100:
        #     detector_ip.close()
        # print("breaking...")
        # trk_ip.close()
        # break
        if not ret:
            continue
        detector_ip.push(Inference(image.copy()))
        # print('waiting')
        detector_op.pull_wait()
        # print('done')
        ret, inference = detector_op.pull()
        if ret:
            i_dets = inference.get_result()
            trk_ip.push(Inference(i_dets))
Example #12
def read():
    skip = 2  # process every 2nd frame
    count = 0
    while True:
        count += 1
        ret, image = cap.read()
        if not ret or count % skip != 0:
            continue

        detector_ip.push_wait()
        inference = Inference(image)
        detector_ip.push(inference)
Example #13
    image = cv2.resize(image, tuple(image_shape[:2][::-1]))
    image = np.expand_dims(image, axis=0)

    # K.set_session(session)

    extractor = ResNet50ExtractorAPI('rn50_api', True)
    ip = extractor.get_in_pipe()
    # op = extractor.get_out_pipe()
    extractor.use_session_runner(session_runner)

    session_runner.start()
    extractor.run()

    ret_pipe = Pipe()

    # for i in range(1000):
    while True:
        ret, image = cap.read()
        if not ret:
            continue
        ip.push(Inference(image, ret_pipe, {}))

        # block until the extractor returns the features on ret_pipe
        ret_pipe.wait()
        ret, feature_inference = ret_pipe.pull()
        if ret:
            print(feature_inference.get_result().shape)

    session_runner.stop()
# grab one frame first so the detector can be built with the input
# shape (cap is assumed to be an already-open cv2.VideoCapture)
ret, image = cap.read()
detection = TFObjectDetectionAPI(
    PRETRAINED_faster_rcnn_inception_v2_coco_2018_01_28, image.shape,
    'tf_api', True)
detector_ip = detection.get_in_pipe()
detector_op = detection.get_out_pipe()
detection.use_session_runner(session_runner)
detection.use_threading()
session_runner.start()
detection.run()

frame_no = 0
while True:
    ret, image = cap.read()
    if not ret:
        continue
    detector_ip.push(Inference(image.copy()))
    detector_op.wait()
    ret, inference = detector_op.pull(True)
    if ret:
        i_dets = inference.get_result()
        # print(i_dets.get_masks()[0].shape)
        frame = i_dets.get_annotated()
        cv2.imshow("annotated", frame)
        # cv2.imshow("annotated", i_dets.extract_patch(0))
        cv2.waitKey(1)
        # person = i_dets.get_category('person')
        # for i in range(i_dets.get_length()):
        #     if i_dets.get_classes(i) == 1 and i_dets.get_scores(i) > 0.7:
        #         cv2.imwrite("/home/uniquetrij/PycharmProjects/SecureIt/data/obj_tracking/outputs/patches/" + (
        #             str(frame_no).zfill(5)) + (str(i).zfill(2)) + ".jpg", i_dets.extract_patches(i))
        frame_no += 1
        print(frame_no)
    def __job(self, inference):
        # forward the payload to the encoder's input pipe, stashing the
        # original inference in the metadata and routing the result back
        # to this component's output pipe
        self.__enc_in_pipe.push(
            Inference(inference.get_data(),
                      meta_dict={'inference': inference},
                      return_pipe=self.__out_pipe))
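
A companion consumer would drain the return pipe and recover the original inference from the metadata. A minimal sketch; the method name is an assumption, and only accessors that appear elsewhere on this page are used:

    def __result_job(self):
        while True:
            self.__out_pipe.wait()
            ret, enc_inference = self.__out_pipe.pull()
            if not ret:
                continue
            # recover the inference stashed by __job above
            original = enc_inference.get_meta_dict()['inference']
            print(original.get_data(), enc_inference.get_result())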