import logging
import os

def main(run_config):
    # Initialize (imports above reconstructed for this snippet; file_utils and
    # timer are project helpers -- see the sketch after this example)
    logger = logging.getLogger(__name__)
    logger.info(f"Run configuration file: {run_config}")

    # Read configuration file
    run_cfg = file_utils.read_yaml(run_config)
    train_path = run_cfg["train"]
    test_path = run_cfg["test"]
    validation_path = run_cfg["validation"]
    test_videos = run_cfg["test_videos"]
    validation_videos = run_cfg["validation_videos"]
    start_time = timer(None)

    # Collect the names of the frames to move to the test and validation folders
    test_frames = []
    for video in test_videos:
        for subdir in ("1_crop", "0_crop"):
            test_frames.extend(f for f in os.listdir(f"{train_path}/{subdir}")
                               if f"_{video}_" in f)

    validation_frames = []
    for video in validation_videos:
        for subdir in ("1_crop", "0_crop"):
            validation_frames.extend(
                f for f in os.listdir(f"{train_path}/{subdir}")
                if f"_{video}_" in f)

    # Move test and validation frames to test and validation folders
    logger.info(f"Moving {len(test_frames)} test files...")
    for frame in test_frames:
        try:
            os.rename(f"{train_path}/1_crop/{frame}",
                      f"{test_path}/1_crop/{frame}")
        except FileNotFoundError:
            os.rename(f"{train_path}/0_crop/{frame}",
                      f"{test_path}/0_crop/{frame}")
    logger.info(f"{len(test_frames)} files moved successfully.")

    logger.info(f"Moving {len(validation_frames)} validation files...")
    for frame in validation_frames:
        try:
            os.rename(f"{train_path}/1_crop/{frame}",
                      f"{validation_path}/1_crop/{frame}")
        except FileNotFoundError:
            os.rename(f"{train_path}/0_crop/{frame}",
                      f"{validation_path}/0_crop/{frame}")
    logger.info(f"{len(validation_frames)} files moved successfully.")
    timer(start_time)
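The snippets in this listing all lean on two project helpers, file_utils.read_yaml and timer, that are not shown. A minimal sketch of what they presumably look like, assuming read_yaml wraps PyYAML and timer is the usual start/elapsed utility:

import yaml
from datetime import datetime

def read_yaml(path):
    # Assumed behavior of file_utils.read_yaml: parse a YAML file into a dict
    with open(path) as f:
        return yaml.safe_load(f)

def timer(start_time=None):
    # timer(None) returns a start timestamp; timer(start) prints the elapsed time
    if start_time is None:
        return datetime.now()
    print(f"Time taken: {datetime.now() - start_time}")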
Example no. 2
def get_token(self):
    try:
        d = file_utils.read_yaml(
            file_utils.resolve_path(self.token_file_name, self.token_dir))
    except IOError:
        raise self.TokenNotFound
    if 'token' not in d:
        raise self.TokenNotFound
    return d['token']
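A hedged usage sketch (get_token and the TokenNotFound exception come from the snippet; `store` is a hypothetical instance of the enclosing class):

try:
    token = store.get_token()
except store.TokenNotFound:
    token = None  # e.g. fall back to re-authenticating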
Example no. 3
def get_config(self):
    config_path = file_utils.resolve_path(self.config_file_name,
                                          self.config_dir)
    try:
        conf = file_utils.read_yaml(config_path)
    except IOError:
        conf = self.default_config
    else:
        if not conf:
            conf = self.default_config
    return conf
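The else branch covers a file that exists but is empty: PyYAML-style loaders return None for an empty document, which is falsy, so the defaults are used. A quick illustration under that assumption:

import yaml

print(yaml.safe_load(""))  # None -- an empty YAML document parses to None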
Example no. 4
    def test_create_and_validate_new_application_credentials(self):
        """ Test to generate application credentials for a test app
        1. Set up test variables
        2. Generate the credentials
        3. Validate response
        4. Raw read the file
        5. Validate the password is encrypted
        6. Run the validate application credentials
        7. Validate response
        """
        # 1. Set up test variables
        application = 'test_app' + str(randint(0, 99))
        user_dict = {
            'user': '******',
            'password': '******'
        }

        # 2. Generate the credentials
        response = self.app_creds.create_update_application_credentials(application, user_dict)

        # 3. Validate response
        self.assertEqual(response['code'], 201)
        self.assertTrue(response['success'])

        # 4. Raw read the file
        file = file_utils.read_yaml('/usr/local/beringersolutions/app_configs/LOCAL/{application}.yaml'.format(
            application=application))

        # 5. Validate the password is encrypted
        self.assertNotEqual(user_dict['password'], file['profiles'][user_dict['user']]['password'])
        self.assertEqual(user_dict['user'], file['profiles'][user_dict['user']]['user'])

        # 6. Run the validate application credentials
        response = self.app_creds.validate_application_credentials(application, user_dict)

        # 7. Validate response
        self.assertEqual(response['code'], 202)
        self.assertTrue(response['success'])
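From the assertions in steps 4-5, the YAML read back for the application presumably deserializes to a structure like this (values hypothetical; only the password is stored in encrypted form):

file = {
    "profiles": {
        "<user>": {
            "user": "<user>",
            "password": "<encrypted password>",
        }
    }
}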
Example no. 5
import logging
import pickle
from collections import deque

import cv2
import numpy as np
from keras.models import load_model

def main(run_config, input_video, output_video):
    # Initialize (imports above reconstructed for this snippet; load_model is
    # assumed to come from Keras, and file_utils/timer are project helpers)
    logger = logging.getLogger(__name__)

    # Read configuration file
    run_cfg = file_utils.read_yaml(run_config)
    image_size = tuple(run_cfg["modelling"]["image_size"])

    # Load trained model and label binarizer
    logger.info("Loading model and label binarizer...")
    model = load_model("../models/VGGFaces_16/estimator.model")
    with open("../models/VGGFaces_16/label_binarizer.pickle", "rb") as f:
        lb = pickle.load(f)
    lb.classes_ = np.char.replace(lb.classes_, "0_crop", "Lie")
    lb.classes_ = np.char.replace(lb.classes_, "1_crop", "Truth")

    # Initialize the predictions queue
    q = deque(maxlen=128)
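    # maxlen=128 keeps only the most recent frame predictions, so the averaged
    # label below reflects a rolling window rather than the whole video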

    # Loop over the frames in the video
    vs = cv2.VideoCapture(input_video)
    writer = None
    (w, h) = (None, None)
    start_time = timer(None)
    while True:
        # read the next frame from the file
        (grabbed, frame) = vs.read()

        # if the frame was not grabbed, then we have reached the end of the stream
        if not grabbed:
            break

        # If the frame dimensions are not set yet, grab them
        if w is None or h is None:
            (h, w) = frame.shape[:2]
        output = frame.copy()
        frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        frame = cv2.resize(frame, image_size[0:2])

        # Make predictions on the frame and update queue
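        # Note (assumption from the training snippet below, which rescales by
        # 1/255): frame = frame.astype("float32") / 255.0 would match that here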
        pred = model.predict(np.expand_dims(frame, axis=0))[0]
        q.append(pred)

        # Perform prediction averaging
        results = np.array(q).mean(axis=0)
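        # Caution: if the loaded model ends in a single sigmoid unit (as in the
        # training snippet below), results has length 1 and argmax is always 0;
        # a 0.5 threshold on results[0] would be needed instead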
        r = np.argmax(results)
        label = lb.classes_[r]

        # Draw the activity on the output frame
        text = f"{label}"
        cv2.putText(output, text, (35, 50), cv2.FONT_HERSHEY_SIMPLEX, 0.5,
                    (0, 255, 0))

        # Check if the video writer is None
        if writer is None:
            # Initialize video writer
            fourcc = cv2.VideoWriter_fourcc(*"MJPG")
            writer = cv2.VideoWriter(output_video, fourcc, 30, (w, h))

        # Write the output frame to disk
        writer.write(output)

    # Clean up and done
    logger.info("Cleaning up...")
    if writer is not None:  # writer stays None if no frame was ever grabbed
        writer.release()
    vs.release()
    logger.info("Done")
    timer(start_time)
Example no. 6
import logging
import os
import pickle

import matplotlib.pyplot as plt
import numpy as np
from keras.callbacks import CSVLogger, EarlyStopping
from keras.layers import AveragePooling2D, Dense, Dropout, Flatten, Input
from keras.models import Model
from keras.optimizers import SGD
from keras.preprocessing.image import ImageDataGenerator
from keras_vggface.vggface import VGGFace
from sklearn.metrics import classification_report
from sklearn.preprocessing import LabelBinarizer

def main(run_config):
    # Initialize (imports above reconstructed for this snippet; the project's
    # file_utils/timer helpers and its initialize() loader are assumed)
    logger = logging.getLogger(__name__)
    logger.info(f"Run configuration file: {run_config}")

    # Read configuration file
    run_cfg = file_utils.read_yaml(run_config)
    train = run_cfg["train"]
    test = run_cfg["test"]
    model_name = run_cfg["modelling"]["model_name"]
    sample_mode = run_cfg["modelling"]["sample_mode"]
    data_augmentation = run_cfg["modelling"]["data_augmentation"]
    image_size = tuple(run_cfg["modelling"]["image_size"])
    vgg_model = run_cfg["modelling"]["vgg_model"]

    # Load train and test images
    X_train, y_train, epochs = initialize(train, run_cfg, image_size,
                                          sample_mode)
    X_test, y_test, epochs = initialize(test, run_cfg, image_size, sample_mode)

    # Perform one-hot encoding on the labels
    lb = LabelBinarizer()
    y_train = lb.fit_transform(y_train)
    lb = LabelBinarizer()
    y_test = lb.fit_transform(y_test)
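    # Note: lb is re-fit on the test labels here, so the binarizer pickled at
    # the end reflects that second fit; re-using the train-fit binarizer is the
    # more common pattern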

    # Initialize training data augmentation object
    if data_augmentation:
        logger.info("Performing data augmentation...")
        train_aug = ImageDataGenerator(rescale=1. / 255,
                                       rotation_range=30,
                                       zoom_range=0.15,
                                       width_shift_range=0.2,
                                       height_shift_range=0.2,
                                       shear_range=0.15,
                                       fill_mode="nearest")
    else:
        logger.info("Data augmentation not performed")
        train_aug = ImageDataGenerator(rescale=1. / 255)
    test_aug = ImageDataGenerator(rescale=1. / 255)

    # Load VGGFace network for fine tuning
    logger.info("Building the model...")
    base_model = VGGFace(model=vgg_model,
                         weights="vggface",
                         include_top=False,
                         input_tensor=Input(shape=image_size[0:3]))
    head_model = base_model.output
    head_model = AveragePooling2D(pool_size=image_size[3:5])(head_model)
    head_model = Flatten(name="flatten")(head_model)
    head_model = Dense(512, activation="relu")(head_model)
    head_model = Dropout(0.5)(head_model)
    head_model = Dense(1, activation="sigmoid")(head_model)
    model = Model(inputs=base_model.input, outputs=head_model)

    # Freeze the base model to prevent it from being updated during the training process
    for layer in base_model.layers:
        layer.trainable = False

    # Compile the model
    logger.info("Compiling model...")
    opt = SGD(lr=1e-4, momentum=0.9, decay=1e-4 / epochs)
    model.compile(loss="binary_crossentropy",
                  optimizer=opt,
                  metrics=["accuracy"])

    # Callbacks: early stopping and CSVLogger
    earlystop = EarlyStopping(monitor='val_accuracy',  # matches the history keys plotted below
                              min_delta=0.001,
                              patience=10,
                              verbose=1,
                              mode='auto')
    os.makedirs(f"../models/{model_name}", exist_ok=True)
    csv_logger = CSVLogger(f"../models/{model_name}/CSVLogger.log")

    # Train the head of the network (fine tuning)
    logger.info("Training head of the model...")
    start_time = timer(None)
    head = model.fit_generator(train_aug.flow(X_train, y_train, batch_size=20),
                               steps_per_epoch=len(X_train) // 20,
                               validation_data=test_aug.flow(X_test, y_test),
                               validation_steps=len(X_test) // 20,
                               epochs=epochs,
                               callbacks=[earlystop, csv_logger])
    timer(start_time)

    # Evaluate the network
    logger.info("Evaluating network...")
    predictions = model.predict(X_test, batch_size=20)
    predictions[predictions >= 0.5] = 1
    predictions[predictions < 0.5] = 0
    print(classification_report(y_test, predictions))

    # Plot train loss and accuracy; use the number of epochs actually run,
    # since early stopping may finish before `epochs`
    n = len(head.history["loss"])
    plt.style.use("ggplot")
    plt.figure()
    plt.plot(np.arange(0, n), head.history["loss"], label="train_loss")
    plt.plot(np.arange(0, n), head.history["val_loss"], label="test_loss")
    plt.plot(np.arange(0, n), head.history["accuracy"], label="train_acc")
    plt.plot(np.arange(0, n), head.history["val_accuracy"], label="test_acc")
    plt.title("Train and Test Loss and Accuracy")
    plt.xlabel("Number of epochs")
    plt.ylabel("Loss/Accuracy")
    plt.legend(loc="lower left")

    # Save the model
    if not sample_mode:
        logger.info("Saving model...")
        model.save(f"../models/{model_name}/estimator.model")

        # Save the label binarizer
        with open(f"../models/{model_name}/label_binarizer.pickle", "wb") as f:
            pickle.dump(lb, f)

        # Save plot
        plt.savefig(f"../models/{model_name}/loss_acc.png")
        logger.info("Model saved successfully.")
Example no. 7
import glob
import logging
import pickle

import cv2
import numpy as np
import pandas as pd
from keras.models import load_model

def main(run_config):
    # Read configuration file (imports above reconstructed for this snippet;
    # file_utils and timer are the project helpers sketched earlier)
    logger = logging.getLogger(__name__)
    logger.info(f"Run configuration file: {run_config}")
    run_cfg = file_utils.read_yaml(run_config)
    test_videos = run_cfg["test_videos"]
    validation_videos = run_cfg["validation_videos"]
    image_size = tuple(run_cfg["modelling"]["image_size"])

    # Load true labels
    logger.info("Loading true labels")
    df = pd.read_csv("../data/Annotations.csv")
    y_test = df.loc[test_videos, "truth"]
    y_vali = df.loc[validation_videos, "truth"]

    # Load trained model and label binarizer
    logger.info("Loading model and label binarizer...")
    model = load_model("../models/VGGFaces_16/estimator.model")
    with open("../models/VGGFaces_16/label_binarizer.pickle", "rb") as f:
        lb = pickle.load(f)

    # Predict a label for each test video from its frames
    logger.info("Making predictions")
    start_time = timer(None)
    preds = dict()

    # Average frame-level predictions per video, checking both class folders
    # (each video's frames apparently live in one of 0_crop and 1_crop,
    # hence the if-guard below)
    for i in test_videos:
        predictions = []
        for subdir in ("0_crop", "1_crop"):
            for filename in glob.glob(f'../data/test/{subdir}/*_{i}_*.jpg'):
                img = cv2.imread(filename)
                img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
                img = cv2.resize(img, image_size[0:2])

                # Make a prediction on the frame
                # (note: training rescaled inputs by 1/255; img / 255.0 here
                # would match that preprocessing)
                pred = model.predict(np.expand_dims(img, axis=0))[0]
                predictions.append(pred)

        if predictions:
            # Perform prediction averaging
            # (caution: a single-sigmoid model yields length-1 results, making
            # argmax always 0; a 0.5 threshold would be needed in that case)
            results = np.array(predictions).mean(axis=0)
            r = np.argmax(results)
            label = lb.classes_[r]

            preds[f"{i}"] = label
            logger.info(f"Prediction for video {i} done")
        timer(start_time)

    # pd.DataFrame() on a dict of scalar labels raises; build from a Series instead
    preds = pd.Series(preds, name="prediction").to_frame()
    preds.to_csv("predictions.csv")