Example #1
def use_session_runner(self, session_runner):
    self.__session_runner = session_runner
    self.__encoder = MarsExtractorAPI(flush_pipe_on_read=True)
    self.__encoder.use_session_runner(session_runner)
    self.__enc_in_pipe = self.__encoder.get_in_pipe()
    self.__enc_out_pipe = self.__encoder.get_out_pipe()
    self.__encoder.run()
def use_session_runner(self, session_runner):
    self.__session_runner = session_runner
    # self.__encoder = ResNet50ExtractorAPI("", True)
    self.__encoder = MarsExtractorAPI(flush_pipe_on_read=True)
    self.__encoder.use_session_runner(session_runner)
    self.__image_shape = self.__encoder.get_input_shape()
    self.__enc_in_pipe = self.__encoder.get_in_pipe()
    self.__enc_out_pipe = self.__encoder.get_out_pipe()
    self.__encoder.run()
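
Both variants wire a MarsExtractorAPI encoder into the host object's pipes; the second also caches the encoder's expected input shape. A minimal sketch of the calling pattern, assuming the SessionRunner/pipe API shown in the classes below (the SessionRunner must be started before inferences are pushed):

# Hedged wiring sketch; OFISTObjectTrackingAPI is defined further down.
session_runner = SessionRunner()
tracker = OFISTObjectTrackingAPI(flush_pipe_on_read=True)
tracker.use_session_runner(session_runner)  # builds and wires the MARS encoder
session_runner.start()                      # start the shared session loop
tracker.run()                               # start the tracker's worker thread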
def test():
    model = SiameseComparator()()
    model.load_weights(model_path.get() + '/siamese-mars-small128.h5')
    model.summary()
    feature_vector = FeatureVector()
    session_runner = SessionRunner()
    extractor = MarsExtractorAPI('mars_api', True)
    ip = extractor.get_in_pipe()
    op = extractor.get_out_pipe()
    extractor.use_session_runner(session_runner)
    session_runner.start()
    extractor.run()
    image_files = []
    for id in range(1, 5):
        image_files.append(
            glob.glob(input_path.get() + '/patches/{}/*.jpg'.format(id)))
    print(len(image_files))
    # random.randint is inclusive on both ends, so the upper bound must be
    # len(...) - 1 to avoid an IndexError.
    patch0 = [
        cv2.imread(image_files[0][randint(0, len(image_files[0]) - 1)])
        for _ in range(10)
    ]
    # patch0_1 = [cv2.imread(image_files[0][randint(0, len(image_files[0]) - 1)]) for _ in range(10)]
    patch1 = [
        cv2.imread(image_files[1][randint(0, len(image_files[1]) - 1)])
        for _ in range(10)
    ]
    patch2 = [
        cv2.imread(image_files[2][randint(0, len(image_files[2]) - 1)])
        for _ in range(10)
    ]
    patch3 = [
        cv2.imread(image_files[3][randint(0, len(image_files[3]) - 1)])
        for _ in range(10)
    ]
    #patch_pair = [_ for _ in itertools.combinations_with_replacement([patch0[0], patch1[0], patch2[0], patch3[0]], 2)]
    #patch_pair = [_ for _ in itertools.combinations_with_replacement([patch0[0], patch1[0], patch2[0], patch3[0]], 2)]

    f_vec0 = np.array([extract_features(patch, ip, op)[0] for patch in patch0])
    # f_vec0_1 = np.array(extract_features(patch0_1, ip, op))
    f_vec1 = np.array([extract_features(patch, ip, op)[0] for patch in patch1])
    f_vec2 = np.array([extract_features(patch, ip, op)[0] for patch in patch2])
    f_vec3 = np.array([extract_features(patch, ip, op)[0] for patch in patch3])
    #print(f_vec1)

    output = model.predict(
        [np.expand_dims(f_vec1, 0),
         np.expand_dims(f_vec3, 0)])
    print(output)
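
test() and train() both call an extract_features helper that is not part of the excerpt. A minimal sketch, assuming the Inference/pipe API used by the classes below (push one patch, block on the out-pipe, return the result whose first element is the feature vector):

def extract_features(patch, ip, op):
    # Hypothetical helper (not in the original excerpt).
    ip.push(Inference(patch))
    while True:
        ret, inference = op.pull(False)
        if ret:
            return inference.get_result()
        op.wait()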
def train():
    feature_vector = FeatureVector()
    session_runner = SessionRunner()
    extractor = MarsExtractorAPI('mars_api', True)
    ip = extractor.get_in_pipe()
    op = extractor.get_out_pipe()
    extractor.use_session_runner(session_runner)
    session_runner.start()
    extractor.run()

    for id in range(1, 5):
        image_files = glob.glob(
            '/home/allahbaksh/Tailgating_detection/SecureIt/data/obj_tracking/outputs/patches/{}/*.jpg'
            .format(id))
        for image_file in image_files:
            patch = cv2.imread(image_file)
            f_vec = extract_features(patch, ip, op)
            # print(f_vec.shape)
            # print(f_vec[])
            # break
            feature_vector.add_vector(id, f_vec[0])

    # for x in range(200):
    #     feature_vector.add_vector(randint(0, 30), [randint(0, 128) for _ in range(128)])
    samples = create_samples(feature_vector.get_vector_dict())
    print(count_0)
    print(count_1)
    # print(feature_vector.get_vector_dict())
    model = SiameseComparator()()
    samples = sklearn.utils.shuffle(samples)  # shuffle returns a copy; the result must be reassigned
    # print()
    # print(samples[1])
    # print(len(samples))
    train_samples, val_samples = train_test_split(samples, test_size=0.2)

    train_generator = generator(train_samples, batch_size=16)
    validation_generator = generator(val_samples, batch_size=16)
    epoch = 10
    saved_weights_name = 'model.h5'
    early_stop = EarlyStopping(monitor='val_loss',
                               min_delta=0.001,
                               patience=3,
                               mode='min',
                               verbose=1)
    checkpoint = ModelCheckpoint(saved_weights_name,
                                 monitor='val_loss',
                                 verbose=1,
                                 save_best_only=True,
                                 mode='min',
                                 period=1)
    tensorboard = TensorBoard(log_dir=os.path.expanduser('~/logs/'),
                              histogram_freq=0,
                              write_graph=True,
                              write_images=False)
    model.compile(loss='binary_crossentropy',
                  optimizer='adam',
                  metrics=['mae', 'acc'])
    history = model.fit_generator(
        generator=train_generator,
        steps_per_epoch=len(train_samples) // 16,  # steps are counted in batches, not samples
        epochs=epoch,
        verbose=1,
        validation_data=validation_generator,
        validation_steps=len(val_samples) // 16,  # Keras 2 name; nb_val_samples is the Keras 1 argument
        callbacks=[early_stop, checkpoint, tensorboard])
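
train() also depends on generator and create_samples helpers that are not shown. A hedged sketch of generator, assuming each sample is a ((left_vec, right_vec), label) tuple:

def generator(samples, batch_size=16):
    # Hypothetical sketch; the real helper is not in the excerpt.
    while True:
        samples = sklearn.utils.shuffle(samples)
        for offset in range(0, len(samples) - batch_size + 1, batch_size):
            batch = samples[offset:offset + batch_size]
            left = np.array([s[0][0] for s in batch])
            right = np.array([s[0][1] for s in batch])
            labels = np.array([s[1] for s in batch])
            yield [left, right], labels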
class OFISTObjectTrackingAPI:
    def __init__(self, max_age=10000, min_hits=5, flush_pipe_on_read=False):
        self.max_age = max_age
        self.min_hits = min_hits
        self.trackers = []
        self.frame_count = 0
        self.__bg_frame = None
        self.__bg_gray = None

        self.__flush_pipe_on_read = flush_pipe_on_read

        self.__feature_dim = 2048  # note: (2048) is a plain int, not a tuple
        self.__image_shape = (224, 224, 3)

        self.__thread = None
        self.__in_pipe = Pipe(self.__in_pipe_process)
        self.__out_pipe = Pipe(self.__out_pipe_process)

    number = 0

    def __extract_image_patch(self, image, bbox, patch_shape):

        sx, sy, ex, ey = np.array(bbox).astype(int)  # np.int was removed in NumPy 1.24

        # dx = ex-sx
        # dx = int(.25*dx)

        # dy = ey-sy
        # dy = int(.6*dy)

        dx = 0
        dy = 0

        image = image[sy:ey - dy, sx + dx:ex - dx]
        image = cv2.resize(image, tuple(patch_shape[::-1]))

        # img_yuv = cv2.cvtColor(image, cv2.COLOR_BGR2YUV)
        # img_yuv[:, :, 0] = cv2.equalizeHist(img_yuv[:, :, 0])
        # image = cv2.cvtColor(img_yuv, cv2.COLOR_YUV2BGR)

        # Equalize each colour channel; image[0] would index the first row of
        # pixels, not a channel.
        image[:, :, 0] = cv2.equalizeHist(image[:, :, 0])
        image[:, :, 1] = cv2.equalizeHist(image[:, :, 1])
        image[:, :, 2] = cv2.equalizeHist(image[:, :, 2])

        return image

    def __in_pipe_process(self, inference):
        i_dets = inference.get_input()
        frame = i_dets.get_image()
        classes = i_dets.get_classes()
        boxes = i_dets.get_boxes_tlbr(normalized=False)
        masks = i_dets.get_masks()
        bboxes = []

        scores = i_dets.get_scores()
        for i in range(len(classes)):
            if classes[i] == i_dets.get_category('person') and scores[i] > .75:
                bboxes.append(
                    [boxes[i][1], boxes[i][0], boxes[i][3], boxes[i][2]])
        patches = []

        # if self.__bg_frame is None:
        #     self.__bg_frame = frame
        #     self.__bg_gray = cv2.cvtColor(self.__bg_frame, cv2.COLOR_BGR2GRAY)
        #     self.__bg_gray = cv2.GaussianBlur(self.__bg_gray, (5, 5), 0)
        #
        # gray_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        # gray_frame = cv2.GaussianBlur(gray_frame, (5, 5), 0)
        # difference = cv2.absdiff(self.__bg_gray, gray_frame)
        # ret, mask = cv2.threshold(difference, 50, 255, cv2.THRESH_BINARY)
        # mask = np.stack((mask, mask, mask), axis=2)
        #
        # image = np.multiply(frame, mask)
        #
        # inference.get_meta_dict()['mask'] = mask
        # inference.get_meta_dict()['diff_img']=image

        image = frame
        # blur = cv2.GaussianBlur(image, (5, 5), 0)
        # image = cv2.addWeighted(image, 1.5, image, -0.5, 0)
        # image = cv2.cvtColor(frame, cv2.COLOR_BGR2HLS)
        for i in range(len(bboxes)):
            box = bboxes[i]
            # mask = masks[i]
            # mask = np.stack((mask, mask, mask), axis=2)
            # image = np.multiply(frame, mask)
            patch = self.__extract_image_patch(image, box,
                                               self.__image_shape[:2])
            if patch is None:
                print("WARNING: Failed to extract image patch: %s." % str(box))
                patch = np.random.uniform(0., 255.,
                                          self.__image_shape).astype(np.uint8)
            patches.append(patch)

        inference.set_data(patches)
        inference.get_meta_dict()['bboxes'] = bboxes
        return inference

    def __out_pipe_process(self, inference):
        f_vecs = inference.get_result()

        # print(f_vecs.shape)
        inference = inference.get_meta_dict()['inference']
        bboxes = inference.get_meta_dict()['bboxes']
        self.frame_count = min(self.frame_count + 1, 1000)

        matched, unmatched_dets, unmatched_trks = KNNTracker.associate_detections_to_trackers(
            f_vecs, self.trackers, bboxes, distance_threshold=0.575)
        if bboxes:

            # # update matched trackers with assigned detections
            # for t, trk in enumerate(self.trackers):
            #     if (t not in unmatched_trks):
            #         d = matched[np.where(matched[:, 1] == t)[0], 0][0]
            #         trk.update(bboxes[d], f_vecs[d])  ## for dlib re-intialize the trackers ?!

            # update matched trackers with assigned detections
            for trk in self.trackers:
                if trk.get_id() not in unmatched_trks:
                    d = matched[np.where(matched[:, 1] == trk.get_id())[0], 0][0]
                    trk.update(bboxes[d], f_vecs[d])  # for dlib, re-initialize the trackers?

            # create and initialise new trackers for unmatched detections
            for i in unmatched_dets:
                trk = KNNTracker(bboxes[i], f_vecs[i])
                self.trackers.append(trk)

        i = len(self.trackers)
        ret = []
        for trk in reversed(self.trackers):
            d = trk.get_bbox()
            if trk.get_hit_streak() >= self.min_hits:  # or self.frame_count <= self.min_hits
                ret.append(
                    np.concatenate(([int(v) for v in d],
                                    [trk.get_id()])).reshape(1, -1))  # +1 as MOT benchmark requires positive
            i -= 1
            # remove dead tracklet
            if (trk.get_time_since_update() > self.max_age):
                self.trackers.pop(i)

        if (len(ret) > 0):
            inference.set_result(np.concatenate(ret))
        else:
            inference.set_result(np.empty((0, 5)))
        return inference

    def get_in_pipe(self):
        return self.__in_pipe

    def get_out_pipe(self):
        return self.__out_pipe

    def use_session_runner(self, session_runner):
        self.__session_runner = session_runner
        # self.__encoder = ResNet50ExtractorAPI("", True)
        self.__encoder = MarsExtractorAPI(flush_pipe_on_read=True)
        self.__encoder.use_session_runner(session_runner)
        self.__enc_in_pipe = self.__encoder.get_in_pipe()
        self.__enc_out_pipe = self.__encoder.get_out_pipe()
        self.__encoder.run()

    def run(self):
        if self.__thread is None:
            self.__thread = Thread(target=self.__run)
            self.__thread.start()

    def __run(self):
        while self.__thread:

            if self.__in_pipe.is_closed():
                self.__out_pipe.close()
                return

            ret, inference = self.__in_pipe.pull(self.__flush_pipe_on_read)
            if ret:
                self.__job(inference)
            else:
                self.__in_pipe.wait()

    def __job(self, inference):
        self.__enc_in_pipe.push(
            Inference(inference.get_data(),
                      meta_dict={'inference': inference},
                      return_pipe=self.__out_pipe))
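
End to end, this tracker consumes detection Inference objects on its in-pipe and emits an N x 5 array of [x1, y1, x2, y2, track_id] rows on its out-pipe. A hedged consumer sketch using only the pipe calls shown above (`tracker` as wired in the earlier sketch; `detection_inference` stands in for whatever the upstream detector produces):

track_in = tracker.get_in_pipe()
track_out = tracker.get_out_pipe()
track_in.push(detection_inference)
while True:
    ret, inference = track_out.pull(False)
    if ret:
        print(inference.get_result())  # N x 5 track array
        break
    track_out.wait()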
Example #6
if __name__ == '__main__':
    # `cap` (a cv2.VideoCapture) is created elsewhere in the original file.
    session_runner = SessionRunner()
    while True:
        ret, image = cap.read()
        if ret:
            break
    image_shape = (224, 224, 3)

    session = tf.Session()

    image = cv2.resize(image, tuple(image_shape[:2][::-1]))
    image = np.expand_dims(image, axis=0)

    # K.set_session(session)

    extractor = MarsExtractorAPI('mars_api', True)
    ip = extractor.get_in_pipe()
    # op = extractor.get_out_pipe()
    extractor.use_session_runner(session_runner)

    session_runner.start()
    extractor.run()

    ret_pipe = Pipe()

    # for i in range(1000):

    i = 0
    while True:
        ret, image = cap.read()
        if not ret:
            break  # stop when no frame is available (the original snippet breaks off here)
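        # Hypothetical continuation (not in the original, which is truncated
        # above): ret_pipe was presumably meant as the per-frame return_pipe,
        # following the pipe pattern used throughout these examples.
        image = cv2.resize(image, tuple(image_shape[:2][::-1]))
        ip.push(Inference(np.expand_dims(image, axis=0), return_pipe=ret_pipe))
        ret, inference = ret_pipe.pull(False)
        if ret:
            print(i, inference.get_result().shape)
        i += 1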
class OFISTObjectTrackingAPI:

    def __init__(self, max_age=10000, min_hits=5, flush_pipe_on_read=False, use_detection_mask=False, conf_path=None):
        self.max_age = max_age
        self.min_hits = min_hits
        self.trackers = []
        self.frame_count = 0
        self.__bg_frame = None
        self.__bg_gray = None
        self.__conf_path = conf_path
        self.__flush_pipe_on_read = flush_pipe_on_read

        # self.__feature_dim = (128)
        # self.__image_shape = (128, 64, 3)

        self.__thread = None
        self.__in_pipe = Pipe(self.__in_pipe_process)
        self.__out_pipe = Pipe(self.__out_pipe_process)

        self.__use_detection_mask = use_detection_mask
        self.__zones = None
        if self.__conf_path is not None:
            self.__zones = Zone.create_zones_from_conf(self.__conf_path)

    number = 0

    def __extract_image_patch(self, image, bbox, patch_shape):

        sx, sy, ex, ey = np.array(bbox).astype(int)  # np.int was removed in NumPy 1.24

        dx = ex - sx
        dy = ey - sy

        mx = int((sx + ex) / 2)
        my = int((sy + ey) / 2)

        dx = int(min(40, dx / 2))
        dy = int(min(50, dy / 2))
        # image = image[sy:my + dy, mx - dx:mx + dx]
        # image = ImageEnhancer.gaussian_blurr(image, sigma=1.75)
        # image = ImageEnhancer.lab_enhancement(image, l=0.75)
        # image = ImageEnhancer.hsv_enhancement(image, s=10, v=5)
        # image = ImageEnhancer.hls_enhancement(image, l=2)
        # image = ImageEnhancer.lab_enhancement(image, l=1.25)
        # image = ImageEnhancer.gamma_correction(image, gamma=3)
        # image = ImageEnhancer.gaussian_blurr(image, sigma=1.1)

        # dx = int(min(60, dx / 2))
        # dy = int(min(90, dy / 2))
        image = image[sy:my + dy, mx - dx:mx + dx]
        image = ImageEnhancer.gaussian_blurr(image, sigma=1.75)
        image = ImageEnhancer.lab_enhancement(image, l=0.125)
        image = ImageEnhancer.hsv_enhancement(image, s=5, v=5)
        image = ImageEnhancer.hls_enhancement(image, l=2)
        # image = ImageEnhancer.lab_enhancement(image, l=1)
        # image = ImageEnhancer.gamma_correction(image, gamma=3)
        image = ImageEnhancer.gaussian_blurr(image, sigma=1.25)

        # image = image[sy:ey, sx:ex]


        # image = ImageEnhancer.gaussian_blurr(image, sigma=2)
        # image = ImageEnhancer.lab_enhancement(image, l=0.75)
        # image = ImageEnhancer.hsv_enhancement(image, s=3, v=2)
        # image = ImageEnhancer.lab_enhancement(image, l=1.25)
        # image = ImageEnhancer.gamma_correction(image, gamma=3)

        # image = ImageEnhancer.preprocess_retinex(image)

        image = cv2.resize(image, tuple(patch_shape[::-1]))

        return image

    def __in_pipe_process(self, inference):
        i_dets = inference.get_input()
        frame = i_dets.get_image()
        classes = i_dets.get_classes()
        boxes = i_dets.get_boxes_tlbr(normalized=False)
        masks = i_dets.get_masks()
        bboxes = []

        scores = i_dets.get_scores()
        for i in range(len(classes)):
            if classes[i] == i_dets.get_category('person') and scores[i] > .95:
                bboxes.append([boxes[i][1], boxes[i][0], boxes[i][3], boxes[i][2]])
        patches = [None] * len(bboxes)  # placeholders filled in by the worker threads
        # flips = [0 for x in bboxes]
        threads = []

        for i in range(len(bboxes)):
            box = bboxes[i]
            if self.__use_detection_mask:
                mask = masks[i]
                mask = np.stack((mask, mask, mask), axis=2)
                image = np.multiply(frame, mask)
            else:
                image = frame

            def extract_patch(patches, index, image, box):
                # Worker run on a thread; image/box/index are passed explicitly
                # so the closure does not pick up values from later loop
                # iterations (and `exec` no longer shadows the builtin).
                patch = self.__extract_image_patch(image, box, self.__image_shape[:2])
                if patch is None:
                    print("WARNING: Failed to extract image patch: %s." % str(box))
                    patch = np.random.uniform(0., 255., self.__image_shape).astype(np.uint8)
                # Randomly mirror patches as lightweight augmentation.
                if bool(random.getrandbits(1)):
                    patches[index] = patch
                else:
                    patches[index] = cv2.flip(patch, 1)

            threads.append(Thread(target=extract_patch, args=(patches, i, image, box)))
            threads[-1].start()

        for thread in threads:
            thread.join()

        inference.set_data(patches)
        inference.get_meta_dict()['bboxes'] = bboxes
        return inference

    def __out_pipe_process(self, inference):
        f_vecs = inference.get_result()
        inference = inference.get_meta_dict()['inference']
        bboxes = inference.get_meta_dict()['bboxes']
        patches = inference.get_data()
        self.frame_count += 1

        matched, unmatched_dets, unmatched_trks = Tracker.associate_detections_to_trackers(f_vecs, self.trackers,
                                                                                           bboxes)

        if bboxes:
            for trk in self.trackers:
                if (trk.get_id() not in unmatched_trks):
                    d = matched[np.where(matched[:, 1] == trk.get_id())[0], 0][0]
                    trk.update(bboxes[d], f_vecs[d], patches[d])

            for i in unmatched_dets:
                trk = Tracker(bboxes[i], f_vecs[i], patches[i], self.frame_count, zones=self.__zones)
                self.trackers.append(trk)

        i = len(self.trackers)
        ret = []
        trails = {}
        for trk in reversed(self.trackers):
            if (trk.get_hit_streak() >= self.min_hits):  # or self.frame_count <= self.min_hits):
                ret.append(trk)
            i -= 1

            # remove dead or never-confident tracklets; elif prevents popping
            # the same index twice, which would drop an unrelated tracker
            if trk.get_time_since_update() > self.max_age:
                self.trackers.pop(i)
            elif self.frame_count - trk.get_creation_time() >= 30 and not trk.is_confident():
                self.trackers.pop(i)
            trails[trk.get_id()] = trk.get_trail()
        inference.get_meta_dict()['trails'] = trails

        if (len(ret) > 0):
            inference.set_result(ret)
        else:
            inference.set_result(np.empty((0, 5)))
        return inference

    def get_in_pipe(self):
        return self.__in_pipe

    def get_out_pipe(self):
        return self.__out_pipe

    def get_zones(self):
        return self.__zones

    def use_session_runner(self, session_runner):
        self.__session_runner = session_runner
        # self.__encoder = ResNet50ExtractorAPI("", True)
        self.__encoder = MarsExtractorAPI(flush_pipe_on_read=True)
        self.__encoder.use_session_runner(session_runner)
        self.__image_shape = self.__encoder.get_input_shape()
        self.__enc_in_pipe = self.__encoder.get_in_pipe()
        self.__enc_out_pipe = self.__encoder.get_out_pipe()
        self.__encoder.run()

    def run(self):
        if self.__thread is None:
            self.__thread = Thread(target=self.__run)
            self.__thread.start()

    def __run(self):
        while self.__thread:

            if self.__in_pipe.is_closed():
                self.__enc_in_pipe.close()
                self.__out_pipe.close()

                return

            ret, inference = self.__in_pipe.pull(self.__flush_pipe_on_read)
            if ret:
                self.__job(inference)
            else:
                self.__in_pipe.wait()

    def __job(self, inference):
        self.__enc_in_pipe.push(
            Inference(inference.get_data(), meta_dict={'inference': inference}, return_pipe=self.__out_pipe))
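
Unlike the first variant, this tracker's out-pipe result is a list of confident Tracker objects, with per-track trails exposed through the inference's meta dict. A hedged consumer sketch, assuming the same pipe API as above:

out_pipe = tracker.get_out_pipe()
while True:
    ret, inference = out_pipe.pull(False)
    if not ret:
        out_pipe.wait()
        continue
    for trk in inference.get_result():  # confident Tracker objects
        print(trk.get_id())
    trails = inference.get_meta_dict().get('trails', {})  # track id -> trail points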