print("press 'q' to finish the data collecting")

feature_vectors = []

while (cap.isOpened()):
    ret, frame = cap.read()
    if (not ret):
        break

    yolo.set_image(frame)
    yolo.start()
    cv2.imshow('k-means training data collection', yolo.img_detection)
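    # keep the HOG feature vector of every detection that matches the target class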
    for i in range(len(yolo.final_objects)):
        if (yolo.final_classes_names[i] == class_name):
            feature_vectors.append(
                nhog.HOG(im.fromarray(yolo.final_objects[i])).vector)

    if (cv2.waitKey(1) & 0xFF == ord('q')):
        break

cap.release()
cv2.destroyAllWindows()

if (len(feature_vectors) == 0):
    print(
        "Nothing you want detected! Pleas adjust your camera or the object position!"
    )
else:

    samples = []
    for vector in feature_vectors:
        # assumption: each collected HOG vector becomes one k-means training sample
        samples.append(vector)
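
A minimal sketch, not part of the original example, of how the HOG vectors collected in samples could be clustered with OpenCV's k-means; the cluster count and termination criteria below are illustrative values, not settings taken from this project.

import numpy as np
import cv2

# stack the HOG descriptors into the float32 matrix that cv2.kmeans expects
data = np.asarray(samples, dtype=np.float32)
k = 3  # illustrative cluster count
criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 100, 0.2)
compactness, labels, centers = cv2.kmeans(data, k, None, criteria, 10,
                                          cv2.KMEANS_RANDOM_CENTERS)
print("clustered %d feature vectors into %d groups" % (len(labels), k))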

Example #2

    def start(self):
        is_on = False
        cap = cv2.VideoCapture(0)
        ret, last_frame = cap.read()
        # set up the two YOLO model objects
        yolov2 = YOLONetv2()
        yolo = YOLONet()

        while(cap.isOpened()):
            ret, frame = cap.read()
            if (not ret):
                break
            
            mean_difference = self.diffimage(last_frame, frame)
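            # run the detectors only when the frame changed enough from the previous one (a key frame) and detection has been toggled on with 's'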
            if(mean_difference > self.key_frame_threshold and is_on):
                detect_time = self.detected_time()
                yolo.set_image(frame)
                yolov2.set_image(frame)
                yolo.detect_from_image()
                yolov2.start()
                cv2.imshow('yolov2', yolov2.img_detection)
                cv2.imshow('yolov1', yolo.final_with_bounding_box)
               
                if(not self.last_key_flag):
                    self.last_key_flag = True
                    last_key_features = []
                    
                    for detected_object in yolov2.final_objects:
                        try:
                            last_key_features.append(nhog.HOG(im.fromarray(detected_object)).vector)
                        except ValueError:
                            # undo everything
                            last_key_features = []
                            self.last_key_flag = False
                            break
                else:
                    curr_features = []
                    for frame_object in yolov2.final_objects:
                        try:
                            curr_features.append(nhog.HOG(im.fromarray(frame_object)).vector)  
                        except ValueError:
                            curr_features = []
                            self.last_key_flag = False
                            break
                    compare_times = min(len(last_key_features), len(curr_features))
                    similarities = []
                    for i in range(compare_times):
                        similarities.append(nhog.calc_similarity(last_key_features[i], curr_features[i]))
                        #print("similarity of object %d is %f" % (i, similarities[i]))
                    # compare every object against the last key frame, then update the reference features after the loop
                    last_key_features = curr_features
                
            else:
                self.last_key_flag = False
                cv2.imshow('yolov2', frame)
                cv2.imshow('yolov1', frame)

            last_frame = frame
            # read the key once; calling cv2.waitKey twice could miss the 's' press
            key = cv2.waitKey(1) & 0xFF
            if (key == ord('q')):
                print("pressed q")
                break
            elif (key == ord('s')):
                print("pressed s")
                is_on = not is_on

        cap.release()
        cv2.destroyAllWindows()
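
The diffimage method and key_frame_threshold used above are not shown in these examples. A minimal sketch, assuming the check is simply the mean absolute grayscale difference between consecutive frames (the body below is an assumption, not the project's implementation):

import cv2
import numpy as np

def diffimage(frame_a, frame_b):
    # assumed key-frame test: mean absolute grayscale difference between two frames
    gray_a = cv2.cvtColor(frame_a, cv2.COLOR_BGR2GRAY)
    gray_b = cv2.cvtColor(frame_b, cv2.COLOR_BGR2GRAY)
    return float(np.mean(cv2.absdiff(gray_a, gray_b)))

A frame whose difference exceeds key_frame_threshold is treated as a key frame and run through the detectors; otherwise the raw frame is shown unchanged.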
Example #3
 def show_frame_rd(self):
     _, last_frame = self.cap.read()
     is_last_keyframe = False
     last_key_features = []
     while (self.is_switch_on):
         if (self.is_video_on_3):
             try:
                 _, frame = self.cap.read()
             except BaseException:
                 continue
             mean_difference = self.diffimage(last_frame, frame)
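             # run YOLO v3 only on key frames: detection must be enabled and the mean difference from the previous frame must exceed the threshold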
             if (self.is_detection_on_3
                     and mean_difference > self.key_frame_threshold):
                 self.yolo_v3.set_image(frame)
                 self.yolo_v3.start()
                 detected_time = self.detected_time()
                 # compare the objects with those in the last key frame
                 if (not is_last_keyframe):
                     is_last_keyframe = True
                     num_objects = len(self.yolo_v3.final_objects)
                     for i in range(num_objects):
                         try:
                             last_key_features.append(
                                 nhog.HOG(
                                     im.fromarray(self.yolo_v3.
                                                  final_objects[i])).vector)
                             self.db.insert(
                                 detected_time, self.yolo_v3.final_classes_names[i],
                                 'yolo v3',
                                 self.yolo_v3.final_relative_positions[i][0],
                                 self.yolo_v3.final_relative_positions[i][1],
                                 self.yolo_v3.final_relative_positions[i][2],
                                 self.yolo_v3.final_relative_positions[i][3],
                                 nhog.HOG(im.fromarray(
                                     self.yolo_v3.final_objects[i])).vector)
                             self.message_listbox.insert(
                                 'end',
                                 "[%s]yolo v3 stored an object tuple in the database"
                                 % detected_time)
                         except ValueError:
                             # undo everything
                             last_key_features = []
                             is_last_keyframe = False
                             break
                 else:
                     curr_features = []
                     for frame_object in self.yolo_v3.final_objects:
                         try:
                             curr_features.append(
                                 nhog.HOG(
                                     im.fromarray(frame_object)).vector)
                         except ValueError:
                             curr_features = []
                             is_last_keyframe = False
                             break
                     compare_times = min(len(last_key_features),
                                         len(curr_features))
                     similarities = []
                     for i in range(compare_times):
                         similarities.append(
                             nhog.calc_similarity(last_key_features[i],
                                                  curr_features[i]))
                         # print("similarity of object %d is %f" % (i, similarities[i]))
                         self.message_listbox.insert(
                             'end',
                             "[%s]yolo v3 detected %s %d, similarity is %f"
                             % (detected_time,
                                self.yolo_v3.final_classes_names[i], i,
                                similarities[i]))
                         if (similarities[i] < 0.5):
                             self.db.insert(
                                 detected_time, self.yolo_v3.final_classes_names[i],
                                 'yolo v3',
                                 self.yolo_v3.final_relative_positions[i][0],
                                 self.yolo_v3.final_relative_positions[i][1],
                                 self.yolo_v3.final_relative_positions[i][2],
                                 self.yolo_v3.final_relative_positions[i][3],
                                 nhog.HOG(im.fromarray(
                                     self.yolo_v3.final_objects[i])).vector)
                             self.message_listbox.insert(
                                 'end',
                                 "[%s]yolo v3 stored an object tuple in the database"
                                 % detected_time)
                     last_key_features = copy.deepcopy(curr_features)
                 # show the annotated detection image for every key frame, including the first one after a gap
                 cv2image = cv2.cvtColor(self.yolo_v3.img_detection,
                                         cv2.COLOR_BGR2RGBA)
             else:
                 cv2image = cv2.cvtColor(frame, cv2.COLOR_BGR2RGBA)
                 is_last_keyframe = False
                 last_key_features = []
             img = Image.fromarray(cv2image)
             imgtk = ImageTk.PhotoImage(image=img)
             self.video_rd.imgtk = imgtk
             self.video_rd.configure(image=imgtk)
             last_frame = frame
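
nhog is a module from this project whose source is not shown in these examples. As a rough sketch of the role that nhog.HOG(...).vector and nhog.calc_similarity(...) play above, the code below computes a HOG descriptor with scikit-image and compares two descriptors by cosine similarity; the function names and parameter values are illustrative assumptions, not the project's actual implementation.

import numpy as np
from skimage.feature import hog

def hog_vector(pil_image, size=(128, 128)):
    # pil_image: a PIL image, e.g. Image.fromarray(detected_object) as in the snippets above;
    # resize to a fixed size so every descriptor has the same length
    gray = np.asarray(pil_image.convert('L').resize(size))
    return hog(gray, orientations=9, pixels_per_cell=(8, 8),
               cells_per_block=(2, 2), feature_vector=True)

def calc_similarity(vec_a, vec_b):
    # cosine similarity between two descriptors; HOG values are non-negative,
    # so the result lies between 0 and 1
    denom = np.linalg.norm(vec_a) * np.linalg.norm(vec_b)
    return float(np.dot(vec_a, vec_b) / denom) if denom else 0.0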