# Third-party imports used by the snippets below; project-specific classes
# (SiameseComparator, SessionRunner, MarsExtractorAPI, FeatureVector, AgeDetection,
# Inference, Sort) and helpers (extract_features, create_samples, generator,
# model_path, input_path, videos_path, count_0, count_1) are assumed to come
# from this repository's own modules.
import glob
import os
from random import randint

import cv2
import numpy as np
import sklearn.utils
from sklearn.model_selection import train_test_split
from keras.callbacks import EarlyStopping, ModelCheckpoint, TensorBoard


def test():
    model = SiameseComparator()()
    model.load_weights(model_path.get() + '/siamese-mars-small128.h5')
    model.summary()
    feature_vector = FeatureVector()
    session_runner = SessionRunner()
    extractor = MarsExtractorAPI('mars_api', True)
    ip = extractor.get_in_pipe()
    op = extractor.get_out_pipe()
    extractor.use_session_runner(session_runner)
    session_runner.start()
    extractor.run()
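    # With the session runner and extractor running, patches pushed into `ip` come back as embeddings on `op`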
    image_files = []
    for id in range(1, 5):
        image_files.append(
            glob.glob(input_path.get() + '/patches/{}/*.jpg'.format(id)))
    print(len(image_files))
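    # Draw 10 random patches from each identity's folder (list indices 0-3 map to folders 1-4)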
    patch0 = [
        cv2.imread(image_files[0][randint(0, len(image_files[0]) - 1)])
        for _ in range(10)
    ]
    # patch0_1 = [cv2.imread(image_files[0][randint(0, len(image_files[0]))]) for _ in range(10)]
    patch1 = [
        cv2.imread(image_files[1][randint(0, len(image_files[1]) - 1)])
        for _ in range(10)
    ]
    patch2 = [
        cv2.imread(image_files[2][randint(0, len(image_files[2]) - 1)])
        for _ in range(10)
    ]
    patch3 = [
        cv2.imread(image_files[3][randint(0, len(image_files[3]) - 1)])
        for _ in range(10)
    ]
    #patch_pair = [_ for _ in itertools.combinations_with_replacement([patch0[0], patch1[0], patch2[0], patch3[0]], 2)]

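    # Run each sampled patch through the MARS extractor pipe to get its appearance feature vector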
    f_vec0 = np.array([extract_features(patch, ip, op)[0] for patch in patch0])
    # f_vec0_1 = np.array(extract_features(patch0_1, ip, op))
    f_vec1 = np.array([extract_features(patch, ip, op)[0] for patch in patch1])
    f_vec2 = np.array([extract_features(patch, ip, op)[0] for patch in patch2])
    f_vec3 = np.array([extract_features(patch, ip, op)[0] for patch in patch3])
    #print(f_vec1)

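    # Compare identity 1 against identity 3; expand_dims adds the batch dimension expected by the Siamese model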
    output = model.predict(
        [np.expand_dims(f_vec1, 0),
         np.expand_dims(f_vec3, 0)])
    print(output)
Example #2
cap = cv2.VideoCapture(-1)
# cap = cv2.VideoCapture(videos_path.get()+'/Hitman Agent 47 - car chase scene HD.mp4')

session_runner = SessionRunner()
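# Grab frames until the capture returns a valid first frame before wiring up the detector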
while True:
    ret, image = cap.read()
    if ret:
        break

detection = AgeDetection()
detector_ip = detection.get_in_pipe()
detector_op = detection.get_out_pipe()
detection.use_session_runner(session_runner)
detection.use_threading()
session_runner.start()
detection.run()
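# SORT tracker for associating detections across frames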
tracker = Sort()

frame_no = 0


def read_video():
    # start = time.time()
    while True:
        ret, image = cap.read()
        if not ret:
            continue
        detector_ip.push(Inference(image.copy()))

def train():
    feature_vector = FeatureVector()
    session_runner = SessionRunner()
    extractor = MarsExtractorAPI('mars_api', True)
    ip = extractor.get_in_pipe()
    op = extractor.get_out_pipe()
    extractor.use_session_runner(session_runner)
    session_runner.start()
    extractor.run()

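    # Build labelled feature vectors from each identity's patch folder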
    for id in range(1, 5):
        image_files = glob.glob(
            '/home/allahbaksh/Tailgating_detection/SecureIt/data/obj_tracking/outputs/patches/{}/*.jpg'
            .format(id))
        for image_file in image_files:
            patch = cv2.imread(image_file)
            f_vec = extract_features(patch, ip, op)
            # print(f_vec.shape)
            # print(f_vec[])
            # break
            feature_vector.add_vector(id, f_vec[0])

    # for x in range(200):
    #     feature_vector.add_vector(randint(0, 30), [randint(0, 128) for _ in range(128)])
    samples = create_samples(feature_vector.get_vector_dict())
    print(count_0)
    print(count_1)
    # print(feature_vector.get_vector_dict())
    model = SiameseComparator()()
    samples = sklearn.utils.shuffle(samples)  # sklearn's shuffle returns a shuffled copy; it does not shuffle in place
    # print()
    # print(samples[1])
    # print(len(samples))
    train_samples, val_samples = train_test_split(samples, test_size=0.2)
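    # create_samples/generator are project helpers; they presumably yield batches of feature-vector pairs with same/different labels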

    train_generator = generator(train_samples, batch_size=16)
    validation_generator = generator(val_samples, batch_size=16)
    epoch = 10
    saved_weights_name = 'model.h5'
    early_stop = EarlyStopping(monitor='val_loss',
                               min_delta=0.001,
                               patience=3,
                               mode='min',
                               verbose=1)
    checkpoint = ModelCheckpoint(saved_weights_name,
                                 monitor='val_loss',
                                 verbose=1,
                                 save_best_only=True,
                                 mode='min',
                                 period=1)
    tensorboard = TensorBoard(log_dir=os.path.expanduser('~/logs/'),
                              histogram_freq=0,
                              write_graph=True,
                              write_images=False)
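    # Binary cross-entropy fits the comparator's (presumably same/different) probability output; MAE and accuracy are tracked as extra metrics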
    model.compile(loss='binary_crossentropy',
                  optimizer='adam',
                  metrics=['mae', 'acc'])
    history = model.fit_generator(
        generator=train_generator,
        # steps are batch counts in Keras 2: the generators yield batches of 16 samples
        steps_per_epoch=len(train_samples) // 16,
        epochs=epoch,
        verbose=1,
        validation_data=validation_generator,
        validation_steps=len(val_samples) // 16,  # Keras 2 name for the old nb_val_samples
        callbacks=[early_stop, checkpoint, tensorboard])