Example #1
    def detect_age_gender(trackers):
        if not Tracker.__age_inference:
            print("initializing Age Model")
            Tracker.__age_inference = AgeApiRunner(SessionRunner())
        detector_ip = Tracker.__age_inference.get_detector_ip()
        detector_op = Tracker.__age_inference.get_detector_op()
        for i, trk in enumerate(trackers):
            trk_trail = trk.get_trail()
            person = trk_trail.get_person()
            # Skip trackers that have opted out of age detection, already
            # hold enough age/gender samples, or have no image patch yet.
            if (not trk.detect_age()
                    or len(person.get_age_list()) >= 10
                    or len(person.get_gender_list()) >= 10
                    or trk.get_image() is None):
                continue
            detector_ip.push(Inference(trk.get_image().copy()))

            # Block until the age/gender detector returns a result.
            while True:
                detector_op.wait()
                ret, inference = detector_op.pull(True)
                if ret:
                    result = inference.get_result()
                    # The detector may return nothing for this image; give up
                    # on this tracker until the next pass.
                    if result.get_genders() is None or result.get_ages() is None:
                        break
                    gender_confidence = result.get_genders()[0][0]
                    gender = 'M' if gender_confidence < 0.5 else 'F'
                    age = int(result.get_ages()[0])
                    person.add_age(age)
                    person.add_gender(gender, gender_confidence)
                    print("ages", person.get_age_list())
                    print("genders", person.get_gender_list())
                    break
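As Example #7 below shows, this function is a static method of the Tracker class; a frame-processing loop would drive it with one call per frame on the live tracker list, e.g. (a sketch, assuming trackers comes from the surrounding tracking loop):

Tracker.detect_age_gender(trackers)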
Example #2
def test():
    # SiameseComparator() returns a builder; calling the instance yields the model.
    model = SiameseComparator()()
    model.load_weights(model_path.get() + '/siamese-mars-small128.h5')
    model.summary()
    feature_vector = FeatureVector()
    session_runner = SessionRunner()
    extractor = MarsExtractorAPI('mars_api', True)
    ip = extractor.get_in_pipe()
    op = extractor.get_out_pipe()
    extractor.use_session_runner(session_runner)
    session_runner.start()
    extractor.run()
    # Collect the patch file lists for identities 1-4.
    image_files = []
    for id in range(1, 5):
        image_files.append(
            glob.glob(input_path.get() + '/patches/{}/*.jpg'.format(id)))
    print(len(image_files))
    # Sample 10 random patches per identity. Note that randint is inclusive
    # on both ends, so the upper bound must be len(files) - 1.
    patch0, patch1, patch2, patch3 = [
        [cv2.imread(files[randint(0, len(files) - 1)]) for _ in range(10)]
        for files in image_files
    ]

    # Extract a MARS appearance feature for every sampled patch.
    f_vec0 = np.array([extract_features(patch, ip, op)[0] for patch in patch0])
    f_vec1 = np.array([extract_features(patch, ip, op)[0] for patch in patch1])
    f_vec2 = np.array([extract_features(patch, ip, op)[0] for patch in patch2])
    f_vec3 = np.array([extract_features(patch, ip, op)[0] for patch in patch3])

    # Similarity score between identity 1 and identity 3.
    output = model.predict(
        [np.expand_dims(f_vec1, 0),
         np.expand_dims(f_vec3, 0)])
    print(output)
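
    # A hedged extra check (not in the original): compare the same feature
    # batches with plain cosine similarity, mirroring the
    # get_cosine_similarity helper in Example #7.
    def mean_cosine(a, b):
        a, b = a.mean(axis=0), b.mean(axis=0)
        return float(np.dot(a, b) / (np.linalg.norm(a) * np.linalg.norm(b)))

    print(mean_cosine(f_vec1, f_vec3))  # cross-identity pair
    print(mean_cosine(f_vec1, f_vec1))  # self pair, exactly 1.0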
Example #3
import time
from threading import Thread
import cv2
from age_detection_api.age_detection.age_api import AgeDetection
from age_detection_api.age_detection.sort import Sort
from tf_session.tf_session_runner import SessionRunner
from tf_session.tf_session_utils import Inference
import numpy as np

cap = cv2.VideoCapture(-1)
# cap = cv2.VideoCapture(videos_path.get()+'/Hitman Agent 47 - car chase scene HD.mp4')

session_runner = SessionRunner()
# Wait for the first valid frame before wiring up the detector.
while True:
    ret, image = cap.read()
    if ret:
        break

detection = AgeDetection()
detector_ip = detection.get_in_pipe()
detector_op = detection.get_out_pipe()
detection.use_session_runner(session_runner)
detection.use_threading()
session_runner.start()
detection.run()
tracker = Sort()

frame_no = 0


def read_video():
    pass
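
# Hedged sketch (not the original body): the example cuts off before
# read_video is filled in. A reader thread under this API would plausibly
# grab frames and push them into the detector's input pipe, reusing
# cap/frame_no/detector_ip/Inference from above.
def read_video_sketch():
    global frame_no
    while True:
        ret, image = cap.read()
        if not ret:
            break
        frame_no += 1
        detector_ip.push(Inference(image.copy()))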
Example #4
def train():
    feature_vector = FeatureVector()
    session_runner = SessionRunner()
    extractor = MarsExtractorAPI('mars_api', True)
    ip = extractor.get_in_pipe()
    op = extractor.get_out_pipe()
    extractor.use_session_runner(session_runner)
    session_runner.start()
    extractor.run()

    for id in range(1, 5):
        image_files = glob.glob(
            '/home/allahbaksh/Tailgating_detection/SecureIt/data/obj_tracking/outputs/patches/{}/*.jpg'
            .format(id))
        for image_file in image_files:
            patch = cv2.imread(image_file)
            f_vec = extract_features(patch, ip, op)
            feature_vector.add_vector(id, f_vec[0])

    samples = create_samples(feature_vector.get_vector_dict())
    print(count_0)
    print(count_1)
    model = SiameseComparator()()
    # sklearn.utils.shuffle returns the shuffled sequence rather than
    # shuffling in place, so the result must be reassigned.
    samples = sklearn.utils.shuffle(samples)
    train_samples, val_samples = train_test_split(samples, test_size=0.2)

    train_generator = generator(train_samples, batch_size=16)
    validation_generator = generator(val_samples, batch_size=16)
    epoch = 10
    saved_weights_name = 'model.h5'
    early_stop = EarlyStopping(monitor='val_loss',
                               min_delta=0.001,
                               patience=3,
                               mode='min',
                               verbose=1)
    checkpoint = ModelCheckpoint(saved_weights_name,
                                 monitor='val_loss',
                                 verbose=1,
                                 save_best_only=True,
                                 mode='min',
                                 period=1)
    tensorboard = TensorBoard(log_dir=os.path.expanduser('~/logs/'),
                              histogram_freq=0,
                              write_graph=True,
                              write_images=False)
    model.compile(loss='binary_crossentropy',
                  optimizer='adam',
                  metrics=['mae', 'acc'])
    history = model.fit_generator(
        generator=train_generator,
        # steps_per_epoch and validation_steps count batches, not samples.
        steps_per_epoch=len(train_samples) // 16,
        epochs=epoch,
        verbose=1,
        validation_data=validation_generator,
        validation_steps=len(val_samples) // 16,
        callbacks=[early_stop, checkpoint, tensorboard])
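
# Hedged sketch (the real helper is not shown in this excerpt): a
# Keras-style batch generator matching the generator(...) calls above.
# The per-sample layout (feature_a, feature_b, label) is an assumption.
from random import shuffle

def generator(samples, batch_size=16):
    while True:
        shuffle(samples)  # reshuffle once per pass over the data
        for i in range(0, len(samples) - batch_size + 1, batch_size):
            batch = samples[i:i + batch_size]
            a = np.array([s[0] for s in batch])
            b = np.array([s[1] for s in batch])
            y = np.array([s[2] for s in batch])
            yield [a, b], y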
Example #5
from threading import Thread
from time import sleep

import cv2

from tf_session.tf_session_runner import SessionRunner
from tf_session.tf_session_utils import Pipe, Inference
from weapon_detection.tf_api.tf_weapon_detection_api import TFWeaponDetectionAPI

#cap = cv2.VideoCapture(-1)
cap = cv2.VideoCapture(
    "/home/developer/PycharmProjects/weapon_detection/test_images/video4.mp4")
if __name__ == '__main__':
    session_runner = SessionRunner(threading=True)
    # Wait for the first valid frame before wiring up the detector.
    while True:
        ret, image = cap.read()
        if ret:
            break

    detection = TFWeaponDetectionAPI(image.shape, 'tf_api', False)
    ip = detection.get_in_pipe()
    # op = detection.get_out_pipe()
    detection.use_session_runner(session_runner)

    session_runner.start()
    detection.run()

    ret_pipe = Pipe()

    count = 0
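
    # Hedged sketch of the loop this sets up (the original example stops
    # here): push frames into the detector following the push pattern used
    # in the other examples. ret_pipe's role is not shown in this excerpt.
    while True:
        ret, image = cap.read()
        if not ret:
            break
        count += 1
        ip.push(Inference(image.copy()))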
Example #6
    # Serve the feed app in a background thread so capture can proceed.
    Thread(target=fs.get_app().run, args=("0.0.0.0", )).start()

    session_runner = {}
    detector = {}

    cap = {}
    pipe = {}
    # Map feed ids to capture sources: local device indices plus one RTSP camera.
    video_inputs = {
        0: 0,
        1: 1,
        2: 2,
        3: 3,
        4: 4,
        6: 'rtsp://*****:*****@192.168.0.6'
    }

    for i in video_inputs.keys():
        session_runner[i] = SessionRunner(skip=True)
        session_runner[i].start()
        detector[i] = TFObjectDetectionAPI(
            PRETRAINED_faster_rcnn_inception_v2_coco_2018_01_28, None,
            'tf_api_' + str(i), True)
        detector[i].use_session_runner(session_runner[i])
        detector[i].run()

        cap[i] = cv2.VideoCapture(video_inputs[i])
        pipe[i] = Pipe()
        fs.create('feed_' + str(i), pipe[i])
        Thread(target=detect_objects,
               args=(cap[i], pipe[i], detector[i], False)).start()
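
    # Hedged sketch of the detect_objects worker each thread runs (the real
    # function is defined elsewhere and not shown in this excerpt); built
    # from the push/pull pattern in the other examples. The get_result()
    # call is an assumption.
    def detect_objects(cap, pipe, detector, show):
        ip, op = detector.get_in_pipe(), detector.get_out_pipe()
        while True:
            ret, image = cap.read()
            if not ret:
                continue
            ip.push(Inference(image.copy()))
            op.wait()
            ret, inference = op.pull(True)
            if ret:
                pipe.push(inference.get_result())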
Example #7
class Tracker(object):
    num_tracks = 0
    __age_inference = AgeApiRunner(SessionRunner())

    @staticmethod
    def __get_next_id():
        Tracker.num_tracks += 1
        return Tracker.num_tracks

    def __init__(self, bbox, features, patch, frame_no, hit_streak_threshold=10, zones=None):
        self.__zones = zones
        self.__patches = [patch]
        self.__id = self.__get_next_id()
        self.__bbox = bbox
        self.__features_fixed = [features]
        self.__features_update = []
        self.__hit_streak = 0
        self.__time_since_update = 0
        self.__hit_streak_threshold = hit_streak_threshold
        self.__hits = 1
        self.__creation_time = frame_no
        self.__patch_update_timestamp = time.time()
        self.__trail = Trail(self.__zones, self.__id)

        # age detection components
        self.__detect_age = True
        self.__image = None

    def set_image(self, image):
        self.__image = image

    def get_image(self):
        return self.__image

    def detect_age(self):
        return self.__detect_age

    def update_zones(self, zones):
        self.__zones = zones
        if zones is not None:
            self.__trail.update_zones(self.__zones)

    def get_creation_time(self):
        return self.__creation_time

    def get_patches(self):
        return self.__patches

    def get_hits(self):
        return self.__hits

    def get_features(self):
        return self.__features_fixed + self.__features_update

    def get_bbox(self):
        """
        Returns the current bounding box estimate.
        """
        return self.__bbox

    def get_time_since_update(self):
        return self.__time_since_update

    def get_id(self):
        return self.__id

    def is_confident(self):
        return self.__hits > 20

    def update(self, bbox, f_vec, patch):
        timestamp = time.time()
        self.__hits += 1
        if timestamp - self.__patch_update_timestamp > 1:
            if len(self.__features_fixed) < 50:
                self.__features_fixed.append(f_vec)
            self.__patches.append(patch)

            self.__patch_update_timestamp = timestamp
            if len(self.__patches) > 10:
                self.__patches.pop(0)

            if len(self.__features_fixed) > 50:
                self.__features_update.append(f_vec)
                if len(self.__features_update) > 50:
                    self.__features_update.pop(0)

        self.__time_since_update = 0
        self.__hit_streak = min(self.__hit_streak_threshold, self.__hit_streak + 1)

        if bbox:
            self.__bbox = bbox
            self.__trail.update_track(bbox)

    def get_trail(self):
        return self.__trail

    def get_hit_streak(self):
        return self.__hit_streak

    @staticmethod
    def associate_detections_to_trackers(f_vecs, trackers, graph, min_similarity_threshold=0.625):
        """
        Assigns detections to tracked object (both represented as bounding boxes)

        Returns 3 lists of matches, unmatched_detections and unmatched_trackers
        """

        if (len(trackers) == 0):
            return np.empty((0, 2), dtype=int), np.arange(len(f_vecs)), np.empty((0, 4), dtype=int)

        similarity_matrix = np.zeros((len(f_vecs), len(trackers)), dtype=np.float32)

        for d, det in enumerate(f_vecs):
            for t, trk in enumerate(trackers):
                similarity_matrix[d, t] = Tracker.siamese_comparator(trk, det, graph)

        # The linear-assignment step minimises the total assignment cost, so
        # we pass -similarity_matrix to maximise the total similarity between
        # track features and the frame detections.
        matched_indices = linear_assignment(-similarity_matrix)

        unmatched_detections = []
        for d, det in enumerate(f_vecs):
            if d not in matched_indices[:, 0]:
                unmatched_detections.append(d)
        unmatched_trackers = []
        for t, trk in enumerate(trackers):
            if t not in matched_indices[:, 1]:
                unmatched_trackers.append(trk.get_id())
                trk.__hit_streak = max(0, trk.__hit_streak - 1)
                trk.__time_since_update += 1

        # Filter out matches with low similarity.
        matches = []
        for m in matched_indices:
            trk_index = m[1]
            trk_id = trackers[trk_index].get_id()
            if similarity_matrix[m[0], m[1]] < min_similarity_threshold:
                unmatched_detections.append(m[0])
                unmatched_trackers.append(trk_id)
                trk = trackers[trk_index]
                trk.__hit_streak = max(0, trk.__hit_streak - 1)
                trk.__time_since_update += 1
            else:
                matches.append(np.array([m[0], trk_id]).reshape(1, 2))
        if len(matches) == 0:
            matches = np.empty((0, 2), dtype=int)
        else:
            matches = np.concatenate(matches, axis=0)

        return matches, np.array(unmatched_detections), np.array(unmatched_trackers)

    @staticmethod
    def get_cosine_similarity(tracker, f_vec):
        # Maximum cosine similarity between f_vec and any stored feature.
        maximum = 0
        for a in tracker.get_features():
            a = np.expand_dims(a, axis=0)
            b = np.expand_dims(f_vec, axis=0)
            a = a / np.linalg.norm(a, axis=1, keepdims=True)
            b = b / np.linalg.norm(b, axis=1, keepdims=True)
            maximum = max(maximum, np.dot(a, b.T)[0][0])
        return maximum
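
    # Hedged, vectorized equivalent of get_cosine_similarity (added for
    # illustration, not part of the original class): one matrix product
    # replaces the Python loop over stored features.
    @staticmethod
    def get_cosine_similarity_vectorized(tracker, f_vec):
        feats = np.asarray(tracker.get_features())
        feats = feats / np.linalg.norm(feats, axis=1, keepdims=True)
        v = np.asarray(f_vec) / np.linalg.norm(f_vec)
        return float(np.max(feats @ v))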

    __siamese_model = None

    @staticmethod
    def siamese_comparator(tracker, f_vec, graph):
        # Lazily load the Siamese model on first use.
        if not Tracker.__siamese_model:
            print("Initializing...")
            with graph.as_default():
                Tracker.__siamese_model = SiameseComparator()()
                Tracker.__siamese_model.load_weights(model_path.get() + '/model_12_28_2018_12_02_56.h5')

        # Average the pairwise similarity between f_vec and every stored
        # feature of the tracker.
        total = 0
        lst = tracker.get_features()
        for a in lst:
            a = np.expand_dims(a, axis=0)
            b = np.expand_dims(f_vec, axis=0)
            total += Tracker.__siamese_model.predict([a, b])[0][0]
        return total / len(lst)
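
    # Hedged optimization sketch (not in the original class): the
    # per-feature predict() calls above can be batched into a single
    # forward pass. Assumes the model has already been initialized by
    # siamese_comparator.
    @staticmethod
    def siamese_comparator_batched(tracker, f_vec, graph):
        feats = np.asarray(tracker.get_features())
        pairs = np.tile(np.asarray(f_vec), (len(feats), 1))
        with graph.as_default():
            scores = Tracker.__siamese_model.predict([feats, pairs])
        return float(np.mean(scores))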

    @staticmethod
    def detect_age_gender(trackers):
        if not Tracker.__age_inference:
            print("initializing Age Model")
            Tracker.__age_inference = AgeApiRunner(SessionRunner())
        detector_ip = Tracker.__age_inference.get_detector_ip()
        detector_op = Tracker.__age_inference.get_detector_op()
        for i, trk in enumerate(trackers):
            trk_trail = trk.get_trail()
            person = trk_trail.get_person()
            # Skip trackers that have opted out of age detection, already
            # hold enough age/gender samples, or have no image patch yet.
            if (not trk.detect_age()
                    or len(person.get_age_list()) >= 10
                    or len(person.get_gender_list()) >= 10
                    or trk.get_image() is None):
                continue
            detector_ip.push(Inference(trk.get_image().copy()))

            # Block until the age/gender detector returns a result.
            while True:
                detector_op.wait()
                ret, inference = detector_op.pull(True)
                if ret:
                    result = inference.get_result()
                    # The detector may return nothing for this image; give up
                    # on this tracker until the next pass.
                    if result.get_genders() is None or result.get_ages() is None:
                        break
                    gender_confidence = result.get_genders()[0][0]
                    gender = 'M' if gender_confidence < 0.5 else 'F'
                    age = int(result.get_ages()[0])
                    person.add_age(age)
                    person.add_gender(gender, gender_confidence)
                    print("ages", person.get_age_list())
                    print("genders", person.get_gender_list())
                    break
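
# Portability note (a hedged addition, not part of the original module):
# the linear_assignment used by associate_detections_to_trackers is the
# old sklearn.utils.linear_assignment_ helper, which newer scikit-learn
# releases no longer ship. A drop-in SciPy shim that preserves the
# [row, col] pair layout the method indexes into:
import numpy as np
from scipy.optimize import linear_sum_assignment

def linear_assignment(cost_matrix):
    rows, cols = linear_sum_assignment(cost_matrix)
    return np.stack([rows, cols], axis=1)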