# Shared imports for the examples below (TensorFlow 1.x API). The Reid
# comparator is assumed to come from a local `reid` module.
import os
import sys
import time

import cv2
import numpy as np
import tensorflow as tf

from reid import Reid  # assumed local re-identification module


class DetectorAPI:
    def __init__(self, path_to_ckpt):
        self.reid = Reid()
        self.path_to_ckpt = path_to_ckpt
        self.detection_graph = tf.Graph()
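        # Load the frozen inference graph (a serialized GraphDef, as exported
        # by the TensorFlow Object Detection API).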
        with self.detection_graph.as_default():
            od_graph_def = tf.GraphDef()
            with tf.gfile.GFile(self.path_to_ckpt, 'rb') as fid:
                serialized_graph = fid.read()
                od_graph_def.ParseFromString(serialized_graph)
                tf.import_graph_def(od_graph_def, name='')

        self.sess = tf.Session(graph=self.detection_graph)

        # Input and output tensors for detection_graph.
        self.image_tensor = self.detection_graph.get_tensor_by_name(
            'image_tensor:0')
        # Each box represents a part of the image where a particular object was detected.
        self.detection_boxes = self.detection_graph.get_tensor_by_name(
            'detection_boxes:0')
        # Each score represents the confidence for the corresponding object.
        # Scores are shown on the result image together with the class label.
        self.detection_scores = self.detection_graph.get_tensor_by_name(
            'detection_scores:0')
        self.detection_classes = self.detection_graph.get_tensor_by_name(
            'detection_classes:0')
        self.num_detections = self.detection_graph.get_tensor_by_name(
            'num_detections:0')

    def processFrame(self, image):
        # Expand dimensions since the trained model expects images with shape [1, None, None, 3].
        image_np_expanded = np.expand_dims(image, axis=0)
        # Actual detection.
        start_time = time.time()
        (boxes, scores, classes,
         num) = self.sess.run([
             self.detection_boxes, self.detection_scores,
             self.detection_classes, self.num_detections
         ],
                              feed_dict={self.image_tensor: image_np_expanded})
        end_time = time.time()

        print("Elapsed Time:", end_time - start_time)

        im_height, im_width, _ = image.shape
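        # detection_boxes are normalized [ymin, xmin, ymax, xmax]; convert
        # them back to pixel coordinates.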
        boxes_list = [None for _ in range(boxes.shape[1])]
        for i in range(boxes.shape[1]):
            boxes_list[i] = (int(boxes[0, i, 0] * im_height),
                             int(boxes[0, i, 1] * im_width),
                             int(boxes[0, i, 2] * im_height),
                             int(boxes[0, i, 3] * im_width))

        return boxes_list, scores[0].tolist(), [
            int(x) for x in classes[0].tolist()
        ], int(num[0])

    def close(self):
        self.sess.close()

    def find(self, img, box):
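        # Compare the crop against every stored image of every known person;
        # file it under the first identity that matches, or start a new
        # folder if none do.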
        cv2.imwrite('./temporaryImg.jpg', img)

        past_ppl = './past_ppl'
        folders = os.listdir(past_ppl)

        for folder in folders:
            files = os.listdir(past_ppl + '/' + folder)
            for f in files:
                ret = self.reid.compare('./temporaryImg.jpg',
                                        './past_ppl/' + folder + '/' + f)

                if ret:
                    person_no = len(files) + 1
                    cv2.imwrite(
                        past_ppl + '/' + folder + '/' + str(person_no) +
                        '.jpg', img)
                    return

        new_folder = len(folders)
        os.makedirs(past_ppl + '/' + str(new_folder))
        cv2.imwrite(past_ppl + '/' + str(new_folder) + '/1.jpg', img)
        return
    def annotate(self, image, objects):
        # Reconstructed header (assumed signature): draw each detection on
        # the image and collect its corner points. Each entry of ``objects``
        # is assumed to be (top-left point, bottom-right point, score,
        # class id).
        box0 = []
        box1 = []
        for obj in objects:
            cv2.rectangle(image, obj[0], obj[1], (0, 255, 0), 2)
            cv2.putText(image, '{}: {:.2f}'.format(obj[3], obj[2]),
                        (obj[0][0], obj[0][1] - 5), cv2.FONT_HERSHEY_PLAIN,
                        1, (255, 0, 0), 1)

            box0.append(obj[0])
            box1.append(obj[1])

        return image, box0, box1
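

# A minimal usage sketch (not part of the original listing): filter the raw
# processFrame() output down to confident "person" detections. COCO class
# id 1 is "person"; the 0.7 confidence threshold is an assumption.
def detect_people(detector, frame, threshold=0.7):
    boxes, scores, classes, num = detector.processFrame(frame)
    return [boxes[i] for i in range(num)
            if classes[i] == 1 and scores[i] > threshold]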


if __name__ == '__main__':
    detector = DetectorAPI('frozen_inference_graph.pb')  # placeholder checkpoint path (assumption)
    source = 0
    # source = '/home/varat/myPERSON_RE_ID/dataset_videos/video2.mp4'
    reid = Reid()
    if len(sys.argv) > 1:
        source = sys.argv[1]

    cap = cv2.VideoCapture(source)
    # vs = webcamVideoStream(source).start()

    frame_count = 0
    tt_opencvDnn = 0

    while True:
        t = time.time()
        hasFrame, frame = cap.read()
        if not hasFrame:
            break
        # Assumed completion of the truncated loop: run detection and track
        # the total processing time.
        detector.processFrame(frame)
        frame_count += 1
        tt_opencvDnn += time.time() - t

    cap.release()
Example #5
class DetectorAPI:
    def __init__(self, path_to_ckpt):
        self.reid = Reid()
        self.path_to_ckpt = path_to_ckpt
        self.detection_graph = tf.Graph()
        with self.detection_graph.as_default():
            od_graph_def = tf.GraphDef()
            with tf.gfile.GFile(self.path_to_ckpt, 'rb') as fid:
                serialized_graph = fid.read()
                od_graph_def.ParseFromString(serialized_graph)
                tf.import_graph_def(od_graph_def, name='')

        self.sess = tf.Session(graph=self.detection_graph)
        self.image_tensor = self.detection_graph.get_tensor_by_name(
            'image_tensor:0')
        self.detection_boxes = self.detection_graph.get_tensor_by_name(
            'detection_boxes:0')
        self.detection_scores = self.detection_graph.get_tensor_by_name(
            'detection_scores:0')
        self.detection_classes = self.detection_graph.get_tensor_by_name(
            'detection_classes:0')
        self.num_detections = self.detection_graph.get_tensor_by_name(
            'num_detections:0')

    def process_frame(self, image):
        image_np_expanded = np.expand_dims(image, axis=0)
        (boxes, scores, classes,
         num) = self.sess.run([
             self.detection_boxes, self.detection_scores,
             self.detection_classes, self.num_detections
         ],
                              feed_dict={self.image_tensor: image_np_expanded})

        im_height, im_width, _ = image.shape
        boxes_list = [None for _ in range(boxes.shape[1])]
        for i in range(boxes.shape[1]):
            boxes_list[i] = (int(boxes[0, i, 0] * im_height),
                             int(boxes[0, i, 1] * im_width),
                             int(boxes[0, i, 2] * im_height),
                             int(boxes[0, i, 3] * im_width))
        return boxes_list, scores[0].tolist(), [
            int(x) for x in classes[0].tolist()
        ], int(num[0])

    def close(self):
        self.sess.close()

    def find(self, img, boxes_cur, boxes_prev, box):
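        # Voting variant of find(): sample every 10th stored crop of each
        # identity and accept a match when more than 70% of the sampled
        # comparisons agree.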
        cv2.imwrite('./temporaryImg.jpg', img)

        past_ppl = './past_ppl'
        folders = os.listdir(past_ppl)

        for folder in folders:
            files = os.listdir(past_ppl + '/' + folder)
            same = 0
            diff = 0
            num_of_files = len(files)
            for f in range(num_of_files):
                if f % 10 != 0:
                    continue
                ret = self.reid.compare(
                    './temporaryImg.jpg',
                    './past_ppl/' + folder + '/' + str(f + 1) + '.jpg')

                if ret:
                    same += 1
                else:
                    diff += 1

            if same + diff == 0:
                continue
            p = 100.0 * same / (same + diff)
            if p > 70:
                person_no = len(files) + 1
                cv2.imwrite(
                    past_ppl + '/' + folder + '/' + str(person_no) + '.jpg',
                    img)
                boxes_cur[int(folder)][0] = box
                boxes_prev[int(folder)] = -1
                return folder

        person_no = len(folders)
        os.makedirs(past_ppl + '/' + str(person_no))
        cv2.imwrite(past_ppl + '/' + str(person_no) + '/1.jpg', img)
        boxes_cur.append([box])

        return person_no
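

# Standalone sketch of the matching rule used by find() above: sample every
# 10th stored crop and declare a match when more than 70% of the sampled
# comparisons agree. `compare` is assumed to behave like Reid.compare,
# returning a boolean for a pair of image paths.
def majority_match(compare, query_path, gallery_paths,
                   sample_every=10, threshold=70.0):
    votes = [compare(query_path, path)
             for i, path in enumerate(gallery_paths)
             if i % sample_every == 0]
    if not votes:
        return False
    return 100.0 * sum(votes) / len(votes) > threshold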
class DetectorAPI:
    def __init__(self, path_to_ckpt):
        self.reid = Reid()
        self.path_to_ckpt = path_to_ckpt
        self.detection_graph = tf.Graph()
        with self.detection_graph.as_default():
            od_graph_def = tf.GraphDef()
            with tf.gfile.GFile(self.path_to_ckpt, 'rb') as fid:
                serialized_graph = fid.read()
                od_graph_def.ParseFromString(serialized_graph)
                tf.import_graph_def(od_graph_def, name='')

        self.sess = tf.Session(graph=self.detection_graph)

        # Input and output tensors for detection_graph.
        self.image_tensor = self.detection_graph.get_tensor_by_name('image_tensor:0')
        # Each box represents a part of the image where a particular object was detected.
        self.detection_boxes = self.detection_graph.get_tensor_by_name('detection_boxes:0')
        # Each score represents the confidence for the corresponding object.
        # Scores are shown on the result image together with the class label.
        self.detection_scores = self.detection_graph.get_tensor_by_name('detection_scores:0')
        self.detection_classes = self.detection_graph.get_tensor_by_name('detection_classes:0')
        self.num_detections = self.detection_graph.get_tensor_by_name('num_detections:0')

    def processFrame(self, image):
        # Expand dimensions since the trained model expects images with shape [1, None, None, 3].
        image_np_expanded = np.expand_dims(image, axis=0)
        # Actual detection.
        start_time = time.time()
        (boxes, scores, classes, num) = self.sess.run(
            [self.detection_boxes, self.detection_scores, self.detection_classes, self.num_detections],
            feed_dict={self.image_tensor: image_np_expanded})
        end_time = time.time()

        #print("Elapsed Time:", end_time-start_time)

        im_height, im_width, _ = image.shape
        boxes_list = [None for _ in range(boxes.shape[1])]
        for i in range(boxes.shape[1]):
            boxes_list[i] = (int(boxes[0, i, 0] * im_height),
                             int(boxes[0, i, 1] * im_width),
                             int(boxes[0, i, 2] * im_height),
                             int(boxes[0, i, 3] * im_width))

        return boxes_list, scores[0].tolist(), [int(x) for x in classes[0].tolist()], int(num[0])

    def close(self):
        self.sess.close()
        self.default_graph.close()

    '''
    version0 serves as the ground truth: it finds a match for the person and
    returns the matching folder number. If no match is found, it creates a
    new folder and returns that folder's number.
    '''
    def version0(self, img):
        cv2.imwrite('./temporaryImg_test0.jpg', img)

        past_ppl = './past_ppl_test_version0'
        folders = os.listdir(past_ppl)

        for folder in folders:
            files = os.listdir(past_ppl + '/' + folder)
            for f in files:
                ret = self.reid.compare('./temporaryImg_test0.jpg',
                                        past_ppl + '/' + folder + '/' + f)

                if ret:
                    person_no = len(files) + 1
                    cv2.imwrite(past_ppl + '/' + folder + '/' +
                                str(person_no) + '.jpg', img)
                    return int(folder)

        new_folder = len(folders)
        os.makedirs(past_ppl + '/' + str(new_folder))
        cv2.imwrite(past_ppl + '/' + str(new_folder) + '/1.jpg', img)
        return new_folder
     
    '''
    Ground-truth helper: directly compares two crops with the re-id model.
    '''
    def isSamePerson(self, img1, img2):
        cv2.imwrite('./temporaryImg_test0.jpg', img1)
        cv2.imwrite('./temporaryImg_test1.jpg', img2)
        return self.reid.compare('./temporaryImg_test0.jpg',
                                 './temporaryImg_test1.jpg')

    def find(self, img, boxes_cur, box):
        cv2.imwrite('./temporaryImg_test1.jpg', img)

        past_ppl = './past_ppl_test_version1'
        folders = os.listdir(past_ppl)

        for folder in folders:
            files = os.listdir(past_ppl + '/' + folder)
            for f in files:
                ret = self.reid.compare('./temporaryImg_test1.jpg',
                                        past_ppl + '/' + folder + '/' + f)

                if ret:
                    person_no = len(files) + 1
                    cv2.imwrite(past_ppl + '/' + folder + '/' +
                                str(person_no) + '.jpg', img)
                    boxes_cur[int(folder)] = box
                    return int(folder)

        new_folder = len(folders)
        os.makedirs(past_ppl + '/' + str(new_folder))
        cv2.imwrite(past_ppl + '/' + str(new_folder) + '/1.jpg', img)
        boxes_cur.append(box)
        return -1
        
    '''
    Called after the tracking mechanism has tracked a person; checks whether
    the track was correct.
    '''
    def tester(self, img_cur, img_prev):
        cv2.imwrite('./temporaryImg_cur.jpg', img_cur)
        cv2.imwrite('./temporaryImg_prev.jpg', img_prev)
        return self.reid.compare('./temporaryImg_cur.jpg',
                                 './temporaryImg_prev.jpg')
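

# Hypothetical evaluation helper (not from the original listing): compare the
# tracker-assisted match (find) against the exhaustive ground truth
# (version0), and validate the raw track with tester().
def evaluate_track(api, crop_cur, crop_prev, boxes_cur, box):
    truth = api.version0(crop_cur)              # folder id, exhaustive search
    guess = api.find(crop_cur, boxes_cur, box)  # -1 signals a new identity
    track_ok = api.tester(crop_cur, crop_prev)
    return truth, guess, track_ok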