Code example #1
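All six examples below share the same standard-library and third-party imports; a plausible preamble (the remaining helpers, such as load_trajectories, scale_trajectories and the model classes, come from the project's own modules) would be:

import os
from copy import deepcopy

import joblib
import numpy as np
from sklearn.metrics import average_precision_score, roc_auc_score
from sklearn.utils import shuffle
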
def train_complete_rnn_ae(args):
    # General
    trajectories_path = args.trajectories
    camera_id = os.path.basename(trajectories_path)
    video_resolution = [
        float(measurement) for measurement in args.video_resolution.split('x')
    ]
    video_resolution = np.array(video_resolution, dtype=np.float32)
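    # e.g. args.video_resolution == '856x480' (hypothetical) -> array([856., 480.], dtype=float32)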
    # Architecture
    input_length = args.input_length
    pred_length = args.pred_length
    global_hidden_dims = args.global_hidden_dims
    local_hidden_dims = args.local_hidden_dims
    extra_hidden_dims = args.extra_hidden_dims
    cell_type = args.cell_type
    reconstruct_reverse = args.reconstruct_reverse
    # Training
    optimiser = args.optimiser
    learning_rate = args.learning_rate
    loss = args.loss
    epochs = args.epochs
    batch_size = args.batch_size
    global_normalisation_strategy = args.global_normalisation_strategy
    local_normalisation_strategy = args.local_normalisation_strategy
    # Logging
    root_log_dir = args.root_log_dir
    resume_training = args.resume_training

    _, trajectories_coordinates = load_trajectories(trajectories_path)
    print('\nLoaded %d trajectories.' % len(trajectories_coordinates))

    trajectories_coordinates = remove_short_trajectories(
        trajectories_coordinates,
        input_length=input_length,
        input_gap=0,
        pred_length=pred_length)
    print('\nRemoved short trajectories. Number of trajectories left: %d.' %
          len(trajectories_coordinates))

    trajectories_coordinates_train, trajectories_coordinates_val = \
        train_test_split_through_time(trajectories_coordinates, input_length=input_length, pred_length=pred_length,
                                      train_ratio=0.8)

    # Global pre-processing
    global_features_train = extract_global_features(
        trajectories_coordinates_train, video_resolution=video_resolution)
    global_features_val = extract_global_features(
        trajectories_coordinates_val, video_resolution=video_resolution)

    global_features_train = change_coordinate_system(
        global_features_train,
        video_resolution=video_resolution,
        coordinate_system='global',
        invert=False)
    global_features_val = change_coordinate_system(
        global_features_val,
        video_resolution=video_resolution,
        coordinate_system='global',
        invert=False)

    global_features_train, global_scaler = scale_trajectories(
        global_features_train, strategy=global_normalisation_strategy)
    global_features_val, _ = scale_trajectories(
        global_features_val,
        scaler=global_scaler,
        strategy=global_normalisation_strategy)

    # Local pre-processing
    local_features_train = deepcopy(trajectories_coordinates_train)
    local_features_val = deepcopy(trajectories_coordinates_val)

    local_features_train = change_coordinate_system(
        local_features_train,
        video_resolution=video_resolution,
        coordinate_system='bounding_box_centre',
        invert=False)
    local_features_val = change_coordinate_system(
        local_features_val,
        video_resolution=video_resolution,
        coordinate_system='bounding_box_centre',
        invert=False)

    local_features_train, local_scaler = scale_trajectories(
        local_features_train, strategy=local_normalisation_strategy)
    local_features_val, _ = scale_trajectories(
        local_features_val,
        scaler=local_scaler,
        strategy=local_normalisation_strategy)

    # Anomaly Model
    global_input_dim = extract_input_dim(global_features_train)
    local_input_dim = extract_input_dim(local_features_train)
    anomaly_model = CombinedEncoderDecoder(
        input_length=input_length,
        global_input_dim=global_input_dim,
        local_input_dim=local_input_dim,
        prediction_length=pred_length,
        global_hidden_dims=global_hidden_dims,
        local_hidden_dims=local_hidden_dims,
        extra_hidden_dims=extra_hidden_dims,
        cell_type=cell_type,
        reconstruct_reverse=reconstruct_reverse,
        optimiser=optimiser,
        learning_rate=learning_rate,
        loss=loss)

    # Set up training logging (optional)
    log_dir = set_up_logging(camera_id=camera_id,
                             root_log_dir=root_log_dir,
                             resume_training=resume_training)

    # Resume training (optional)
    last_epoch = resume_training_from_last_epoch(
        model=anomaly_model, resume_training=resume_training)

    if pred_length > 0:
        # Global
        X_global_train, y_global_train = list(
            zip(*[
                collect_trajectories(global_trajectory, input_length, 0,
                                     pred_length)
                for global_trajectory in global_features_train.values()
            ]))
        X_global_train, y_global_train = np.vstack(X_global_train), np.vstack(
            y_global_train)

        X_global_val, y_global_val = list(
            zip(*[
                collect_trajectories(global_trajectory, input_length, 0,
                                     pred_length)
                for global_trajectory in global_features_val.values()
            ]))
        X_global_val, y_global_val = np.vstack(X_global_val), np.vstack(
            y_global_val)

        # Local
        X_local_train, y_local_train = list(
            zip(*[
                collect_trajectories(local_trajectory, input_length, 0,
                                     pred_length)
                for local_trajectory in local_features_train.values()
            ]))
        X_local_train, y_local_train = np.vstack(X_local_train), np.vstack(
            y_local_train)

        X_local_val, y_local_val = list(
            zip(*[
                collect_trajectories(local_trajectory, input_length, 0,
                                     pred_length)
                for local_trajectory in local_features_val.values()
            ]))
        X_local_val, y_local_val = np.vstack(X_local_val), np.vstack(
            y_local_val)

        X_global_train, X_local_train, y_global_train, y_local_train = \
            shuffle(X_global_train, X_local_train, y_global_train, y_local_train, random_state=42)

        X_train = [X_global_train, X_local_train]
        y_train = [y_global_train, y_local_train]
        val_data = ([X_global_val, X_local_val], [y_global_val, y_local_val])
        anomaly_model.train(X_train,
                            y_train,
                            epochs=epochs,
                            initial_epoch=last_epoch,
                            batch_size=batch_size,
                            val_data=val_data,
                            log_dir=log_dir)
    else:
        # Global
        X_global_train = [
            collect_trajectories(global_trajectory, input_length, 0,
                                 pred_length)
            for global_trajectory in global_features_train.values()
        ]
        X_global_train = np.vstack(X_global_train)

        X_global_val = [
            collect_trajectories(global_trajectory, input_length, 0,
                                 pred_length)
            for global_trajectory in global_features_val.values()
        ]
        X_global_val = np.vstack(X_global_val)

        # Local
        X_local_train = [
            collect_trajectories(local_trajectory, input_length, 0,
                                 pred_length)
            for local_trajectory in local_features_train.values()
        ]
        X_local_train = np.vstack(X_local_train)

        X_local_val = [
            collect_trajectories(local_trajectory, input_length, 0,
                                 pred_length)
            for local_trajectory in local_features_val.values()
        ]
        X_local_val = np.vstack(X_local_val)

        X_global_train, X_local_train = shuffle(X_global_train,
                                                X_local_train,
                                                random_state=42)
        X_train = [X_global_train, X_local_train]
        val_data = ([X_global_val, X_local_val], )
        anomaly_model.train(X_train,
                            epochs=epochs,
                            initial_epoch=last_epoch,
                            batch_size=batch_size,
                            val_data=val_data,
                            log_dir=log_dir)

    print('Combined global and local anomaly model successfully trained.')
    if log_dir is not None:
        joblib.dump(global_scaler,
                    filename=os.path.join(log_dir, 'global_scaler.pkl'))
        joblib.dump(local_scaler,
                    filename=os.path.join(log_dir, 'local_scaler.pkl'))
        print('Log files were written to: %s' % log_dir)

    return None
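
A minimal invocation sketch for the function above, using hypothetical argument values; the real project builds args with argparse, and the attribute names here simply mirror those read at the top of the function:

from argparse import Namespace

args = Namespace(
    trajectories='data/trajectories/training/01',  # hypothetical path; its basename becomes the camera id
    video_resolution='856x480',                    # hypothetical 'WxH' resolution string
    input_length=16, pred_length=2,
    global_hidden_dims=[8], local_hidden_dims=[16], extra_hidden_dims=[],
    cell_type='gru', reconstruct_reverse=True,
    optimiser='adam', learning_rate=0.001, loss='mse',
    epochs=5, batch_size=256,
    global_normalisation_strategy='zero_one', local_normalisation_strategy='zero_one',
    root_log_dir=None, resume_training=False,
)
train_complete_rnn_ae(args)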
Code example #2
def train_ae(args):
    # General
    trajectories_path = args.trajectories  # e.g. .../03
    camera_id = os.path.basename(trajectories_path)
    video_resolution = [
        float(measurement) for measurement in args.video_resolution.split('x')
    ]
    video_resolution = np.array(video_resolution, dtype=np.float32)
    # Architecture
    global_model = args.global_model
    hidden_dims = args.hidden_dims
    coordinate_system = args.coordinate_system
    normalisation_strategy = args.normalisation_strategy
    # Training
    optimiser = args.optimiser
    learning_rate = args.learning_rate
    loss = args.loss
    epochs = args.epochs
    batch_size = args.batch_size
    # Logging
    root_log_dir = args.root_log_dir
    resume_training = args.resume_training

    trajectories_frames, trajectories_coordinates = load_trajectories(
        trajectories_path)
    print('\nLoaded %d trajectories.' % len(trajectories_coordinates))

    if global_model:
        trajectories_coordinates = extract_global_features(
            trajectories_coordinates, video_resolution=video_resolution)
        coordinate_system = 'global'
        print(
            '\nExtracted global features from input skeletons. In addition, the coordinate system has been set '
            'to global.')

    trajectories_coordinates = change_coordinate_system(
        trajectories_coordinates,
        video_resolution=video_resolution,
        coordinate_system=coordinate_system,
        invert=False)
    print('\nChanged coordinate system to %s.' % coordinate_system)

    trajectories_frames_train, trajectories_frames_val, trajectories_coordinates_train, trajectories_coordinates_val = \
        train_test_split_trajectories(trajectories_frames, trajectories_coordinates, train_ratio=0.8, seed=42)

    trajectories_coordinates_train, scaler = scale_trajectories(
        trajectories_coordinates_train, strategy=normalisation_strategy)
    trajectories_coordinates_val, _ = scale_trajectories(
        trajectories_coordinates_val,
        scaler=scaler,
        strategy=normalisation_strategy)
    print('\nNormalised input features using the %s normalisation strategy.' %
          normalisation_strategy)

    input_dim = extract_input_dim(trajectories_coordinates_train)
    ae_model = Autoencoder(input_dim=input_dim,
                           hidden_dims=hidden_dims,
                           optimiser=optimiser,
                           learning_rate=learning_rate,
                           loss=loss)

    log_dir = set_up_logging(camera_id=camera_id,
                             root_log_dir=root_log_dir,
                             resume_training=resume_training)
    last_epoch = resume_training_from_last_epoch(
        model=ae_model, resume_training=resume_training)

    _, X_train = collect_skeletons(trajectories_frames_train,
                                   trajectories_coordinates_train)
    _, X_val = collect_skeletons(trajectories_frames_val,
                                 trajectories_coordinates_val)

    X_train = shuffle(X_train, random_state=42)
    ae_model.train(X_train,
                   X_train,
                   epochs=epochs,
                   initial_epoch=last_epoch,
                   batch_size=batch_size,
                   val_data=(X_val, X_val),
                   log_dir=log_dir)

    print('\nAutoencoder model successfully trained.')
    if log_dir is not None:
        joblib.dump(scaler, filename=os.path.join(log_dir, 'scaler.pkl'))
        print('Log files were written to: %s' % log_dir)

    return None
Code example #3
def train_rnn_ae(args):
    # General
    trajectories_path = args.trajectories  # e.g. .../11
    camera_id = os.path.basename(trajectories_path)
    video_resolution = [
        float(measurement) for measurement in args.video_resolution.split('x')
    ]
    video_resolution = np.array(video_resolution, dtype=np.float32)
    # Architecture
    global_model = args.global_model
    input_length = args.input_length
    input_gap = args.input_gap
    pred_length = args.pred_length
    hidden_dims = args.hidden_dims
    cell_type = args.cell_type
    disable_reconstruction_branch = args.disable_reconstruction_branch
    reconstruct_reverse = args.reconstruct_reverse
    conditional_reconstruction = args.conditional_reconstruction
    conditional_prediction = args.conditional_prediction
    # Training
    optimiser = args.optimiser
    learning_rate = args.learning_rate
    loss = args.loss
    epochs = args.epochs
    batch_size = args.batch_size
    input_missing_steps = args.input_missing_steps
    coordinate_system = args.coordinate_system
    normalisation_strategy = args.normalisation_strategy
    # Logging
    root_log_dir = args.root_log_dir
    resume_training = args.resume_training

    # trajectories_coordinates is a dictionary where the keys uniquely identify each person in each video and the values
    # are float32 tensors. Each tensor represents the detected trajectory of a single person and has shape
    # (trajectory_length, input_dim). trajectory_length is the total number of frames for which the person was tracked
    # and each detection is composed of k key points (17 for now), each represented by a pair of (x, y) coordinates.
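    # For illustration only (hypothetical id and trajectory length):
    #   {'01_0014': <float32 array of shape (t, 34)>, ...}  # 34 = 17 key points x 2 coordinates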
    _, trajectories_coordinates = load_trajectories(trajectories_path)
    print('\nLoaded %d trajectories.' % len(trajectories_coordinates))

    # Filter-out short trajectories
    trajectories_coordinates = remove_short_trajectories(
        trajectories_coordinates,
        input_length=input_length,
        input_gap=input_gap,
        pred_length=pred_length)
    print('\nRemoved short trajectories. Number of trajectories left: %d.' %
          len(trajectories_coordinates))

    # Global model (optional)
    if global_model:
        trajectories_coordinates = extract_global_features(
            trajectories_coordinates, video_resolution=video_resolution)
        coordinate_system = 'global'
        print(
            '\nExtracted global features from input trajectories. In addition, the coordinate system has been set '
            'to global.')

    # Change coordinate system
    trajectories_coordinates = change_coordinate_system(
        trajectories_coordinates,
        video_resolution=video_resolution,
        coordinate_system=coordinate_system,
        invert=False)
    print('\nChanged coordinate system to %s.' % coordinate_system)

    # Split into training and validation sets
    trajectories_coordinates_train, trajectories_coordinates_val = \
        train_test_split_through_time(trajectories_coordinates, input_length=input_length, pred_length=pred_length,
                                      train_ratio=0.8)

    # Input missing steps (optional)
    if input_missing_steps:
        trajectories_coordinates_train = input_trajectories_missing_steps(
            trajectories_coordinates_train)

    # Normalise the data
    trajectories_coordinates_train, scaler = scale_trajectories(
        trajectories_coordinates_train, strategy=normalisation_strategy)
    trajectories_coordinates_val, _ = scale_trajectories(
        trajectories_coordinates_val,
        scaler=scaler,
        strategy=normalisation_strategy)
    print('\nInput features normalised using the %s normalisation strategy.' %
          normalisation_strategy)

    print('\nInstantiating anomaly model ...')
    input_dim = extract_input_dim(trajectories_coordinates)
    anomaly_model = RNNEncoderDecoder(
        input_length=input_length,
        input_dim=input_dim,
        prediction_length=pred_length,
        hidden_dims=hidden_dims,
        cell_type=cell_type,
        reconstruction_branch=disable_reconstruction_branch,
        reconstruct_reverse=reconstruct_reverse,
        conditional_reconstruction=conditional_reconstruction,
        conditional_prediction=conditional_prediction,
        optimiser=optimiser,
        learning_rate=learning_rate,
        loss=loss)

    # Set up training logging (optional)
    log_dir = set_up_logging(camera_id=camera_id,
                             root_log_dir=root_log_dir,
                             resume_training=resume_training)

    # Resume training (optional)
    last_epoch = resume_training_from_last_epoch(
        model=anomaly_model, resume_training=resume_training)

    # Train the anomaly model
    if pred_length > 0:
        X_train, y_train = list(
            zip(*[
                collect_trajectories(trajectory_coordinates, input_length,
                                     input_gap, pred_length)
                for trajectory_coordinates in
                trajectories_coordinates_train.values()
            ]))
        X_train, y_train = np.vstack(X_train), np.vstack(y_train)
        print('\nTrain trajectories\' shape: (%d, %d, %d).' %
              (X_train.shape[0], X_train.shape[1], X_train.shape[2]))
        print('Train future trajectories\' shape: (%d, %d, %d).' %
              (y_train.shape[0], y_train.shape[1], y_train.shape[2]))

        X_val, y_val = list(
            zip(*[
                collect_trajectories(trajectory_coordinates, input_length,
                                     input_gap, pred_length)
                for trajectory_coordinates in
                trajectories_coordinates_val.values()
            ]))
        X_val, y_val = np.vstack(X_val), np.vstack(y_val)
        print('\nVal trajectories\' shape: (%d, %d, %d).' %
              (X_val.shape[0], X_val.shape[1], X_val.shape[2]))
        print('Val future trajectories\' shape: (%d, %d, %d).' %
              (y_val.shape[0], y_val.shape[1], y_val.shape[2]))

        X_train, y_train = shuffle(X_train, y_train, random_state=42)
        anomaly_model.train(X_train,
                            y_train,
                            epochs=epochs,
                            initial_epoch=last_epoch,
                            batch_size=batch_size,
                            val_data=(X_val, y_val),
                            log_dir=log_dir)
    else:
        X_train = [
            collect_trajectories(trajectory_coordinates, input_length,
                                 input_gap, pred_length) for
            trajectory_coordinates in trajectories_coordinates_train.values()
        ]
        X_train = np.vstack(X_train)
        print('\nTrain trajectories\' shape: (%d, %d, %d).' %
              (X_train.shape[0], X_train.shape[1], X_train.shape[2]))

        X_val = [
            collect_trajectories(trajectory_coordinates, input_length,
                                 input_gap, pred_length) for
            trajectory_coordinates in trajectories_coordinates_val.values()
        ]
        X_val = np.vstack(X_val)
        print('\nVal trajectories\' shape: (%d, %d, %d).' %
              (X_val.shape[0], X_val.shape[1], X_val.shape[2]))

        X_train = shuffle(X_train, random_state=42)
        anomaly_model.train(X_train,
                            epochs=epochs,
                            initial_epoch=last_epoch,
                            batch_size=batch_size,
                            val_data=(X_val, ),
                            log_dir=log_dir)

    print('\nRNN Autoencoder model successfully trained.')
    if log_dir is not None:
        joblib.dump(scaler, filename=os.path.join(log_dir, 'scaler.pkl'))
        print('Log files were written to: %s' % log_dir)

    return None
Code example #4
def eval_complete_rnn_ae_models(args):
    # Extract command line arguments
    video_resolution = [
        float(measurement) for measurement in args.video_resolution.split('x')
    ]
    video_resolution = np.array(video_resolution, dtype=np.float32)
    all_trajectories_path = args.trajectories
    all_pretrained_models_path = args.pretrained_models  # e.g. .../16_0_2_rrs_mse
    frame_level_anomaly_masks_path = args.frame_level_anomaly_masks
    overlapping_trajectories = args.overlapping_trajectories

    # Extract information about the models
    data_type = 'training' if 'training' in all_trajectories_path else 'testing'
    model_info = os.path.basename(all_pretrained_models_path)
    reconstruct_reverse = 'rrs' in model_info
    global_normalisation_strategy = 'three_stds' if 'G3stds' in model_info else 'zero_one'
    local_normalisation_strategy = 'three_stds' if 'L3stds' in model_info else 'zero_one'
    model_info = model_info.split('_')
    input_length, input_gap, pred_length = int(model_info[0]), int(
        model_info[1]), int(model_info[2])
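    # e.g. a directory named '16_0_2_rrs_mse' yields input_length=16, input_gap=0, pred_length=2,
    # reversed reconstruction and zero_one normalisation for both the global and the local features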

    # Dictionaries keyed by camera id, holding the pre-trained models and their fitted global/local scalers
    pretrained_models, global_scalers, local_scalers = \
        load_complete_rnn_ae_pretrained_models(all_pretrained_models_path)
    camera_ids = set(pretrained_models.keys())

    trajectories = {}
    for camera_id in camera_ids:
        trajectories_path = os.path.join(all_trajectories_path, camera_id)
        trajectories_frames, trajectories_coordinates = load_trajectories(
            trajectories_path)

        trajectories_coordinates = remove_short_trajectories(
            trajectories_coordinates,
            input_length=input_length,
            input_gap=input_gap,
            pred_length=pred_length)
        # Iterate over a copy of the keys, since entries are deleted inside the loop
        for trajectory_id in set(trajectories_frames.keys()):
            if trajectory_id not in trajectories_coordinates:
                del trajectories_frames[trajectory_id]

        global_features = extract_global_features(
            trajectories_coordinates, video_resolution=video_resolution)
        global_features = change_coordinate_system(
            global_features,
            video_resolution=video_resolution,
            coordinate_system='global',
            invert=False)
        global_features, _ = scale_trajectories(
            global_features,
            scaler=global_scalers[camera_id],
            strategy=global_normalisation_strategy)

        local_features = deepcopy(trajectories_coordinates)
        local_features = change_coordinate_system(
            local_features,
            video_resolution=video_resolution,
            coordinate_system='bounding_box_centre',
            invert=False)
        local_features, _ = scale_trajectories(
            local_features,
            scaler=local_scalers[camera_id],
            strategy=local_normalisation_strategy)

        trajectories[camera_id] = [
            trajectories_frames, global_features, local_features
        ]

    all_anomaly_masks = []
    all_reconstruction_errors = []
    all_reconstructed_trajectories = {}
    all_original_lengths = {}
    aurocs, auprs = {}, {}
    worst_false_positives, worst_false_negatives = {}, {}
    for camera_id in sorted(camera_ids):
        anomaly_model = pretrained_models[camera_id]
        global_scaler = global_scalers[camera_id]
        local_scaler = local_scalers[camera_id]
        trajectories_frames, global_features, local_features = trajectories[
            camera_id]
        original_lengths = {
            trajectory_id: len(trajectory_coordinates)
            for trajectory_id, trajectory_coordinates in
            local_features.items()
        }
        all_original_lengths[camera_id] = original_lengths

        test_frames, global_features_test = assemble_trajectories(
            trajectories_frames,
            global_features,
            overlapping=overlapping_trajectories,
            input_length=input_length,
            input_gap=input_gap,
            pred_length=pred_length)
        _, local_features_test = assemble_trajectories(
            trajectories_frames,
            local_features,
            overlapping=overlapping_trajectories,
            input_length=input_length,
            input_gap=input_gap,
            pred_length=pred_length)
        features_test = concatenate_features(global_features_test,
                                             local_features_test)

        reconstructed_features = anomaly_model.reconstruct(
            global_features_test, local_features_test)
        if reconstruct_reverse:
            reconstructed_features = reverse_trajectories(
                reconstructed_features)
        reconstruction_errors = compute_reconstruction_errors(
            features_test, reconstructed_features, loss=anomaly_model.loss)
        reconstruction_errors = summarise_reconstruction_errors(
            reconstruction_errors, test_frames)
        reconstruction_errors = discard_errors_from_padded_frames(
            reconstruction_errors, original_lengths)

        reconstructed_features = inverse_scale_trajectories(
            reconstructed_features,
            global_scaler=global_scaler,
            local_scaler=local_scaler)
        all_reconstructed_trajectories[camera_id] = [
            test_frames, reconstructed_features
        ]

        anomaly_masks = load_anomaly_masks(frame_level_anomaly_masks_path,
                                           camera_id=camera_id)
        y_true, y_hat, video_ids = ground_truth_and_reconstructions(
            anomaly_masks, reconstruction_errors, trajectories_frames)
        worst_false_positives[camera_id] = compute_worst_mistakes(
            y_true, y_hat, video_ids, type='false_positives', top=10)
        worst_false_negatives[camera_id] = compute_worst_mistakes(
            y_true, y_hat, video_ids, type='false_negatives', top=10)
        if data_type == 'training':
            # This hack is necessary because the training set has no anomalies
            y_true[0] = 1
        aurocs[camera_id], auprs[camera_id] = roc_auc_score(
            y_true, y_hat), average_precision_score(y_true, y_hat)
        all_anomaly_masks.append(y_true)
        all_reconstruction_errors.append(y_hat)

    # Dump the reconstruction errors for computation of the AUROC across all cameras
    reconstruction_errors_save_path = os.path.join(
        all_pretrained_models_path, data_type + '_reconstruction_errors')
    np.savez(reconstruction_errors_save_path, *all_reconstruction_errors)

    all_anomaly_masks = np.concatenate(all_anomaly_masks)
    anomaly_masks_save_path = os.path.join(all_pretrained_models_path,
                                           'anomaly_masks.npy')
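    # np.save appends the '.npy' extension itself, hence it is stripped from the path below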
    if data_type == 'testing':
        np.save(file=anomaly_masks_save_path[:-4], arr=all_anomaly_masks)

    for camera_id in sorted(aurocs.keys()):
        print('\nAUROC for camera %s: %.4f' % (camera_id, aurocs[camera_id]))
        print('AUPR for camera %s: %.4f' % (camera_id, auprs[camera_id]))

    # Dump the worst mistakes in a .txt file
    if data_type == 'testing':
        write_all_worst_mistakes(all_pretrained_models_path,
                                 worst_false_positives, worst_false_negatives)
        print('All mistakes were written to %s.' %
              os.path.join(all_pretrained_models_path, 'mistakes.txt'))

    if args.write_reconstructions is not None:
        all_image_trajectories = local_to_global_coordinates(
            all_reconstructed_trajectories, video_resolution=video_resolution)
        all_image_trajectories = uniquify_reconstructions(
            all_image_trajectories)
        all_image_trajectories = discard_steps_from_padded_frames(
            all_image_trajectories, all_original_lengths)
        write_all_reconstructed_trajectories(
            all_image_trajectories, write_path=args.write_reconstructions)

        print('All reconstructed trajectories were written to %s.' %
              args.write_reconstructions)

    if args.write_bounding_boxes is not None:
        all_reconstructed_trajectories = uniquify_reconstructions(
            all_reconstructed_trajectories)
        all_reconstructed_trajectories = discard_steps_from_padded_frames(
            all_reconstructed_trajectories, all_original_lengths)
        all_reconstructed_trajectories = pull_global_features(
            all_reconstructed_trajectories)
        all_reconstructed_trajectories = from_global_to_image_all_cameras(
            all_reconstructed_trajectories, video_resolution=video_resolution)
        all_bounding_boxes = compute_bounding_boxes_from_global_features(
            all_reconstructed_trajectories)
        write_all_reconstructed_trajectories(
            all_bounding_boxes, write_path=args.write_bounding_boxes)

        print('All reconstructed bounding boxes were written to %s.' %
              args.write_bounding_boxes)

    return None
Code example #5
def eval_ae_models(args):
    # General
    video_resolution = [
        float(measurement) for measurement in args.video_resolution.split('x')
    ]
    video_resolution = np.array(video_resolution, dtype=np.float32)
    all_trajectories_path = args.trajectories
    all_pretrained_models_path = args.pretrained_models  # e.g. .../adam_bb-tl_mse
    frame_level_anomaly_masks_path = args.frame_level_anomaly_masks

    # Extract information about the models
    data_type = 'training' if 'training' in all_trajectories_path else 'testing'
    model_info = os.path.basename(all_pretrained_models_path)
    if 'bb-tl' in model_info:
        coordinate_system = 'bounding_box_top_left'
    elif 'bb-c' in model_info:
        coordinate_system = 'bounding_box_centre'
    else:
        coordinate_system = 'global'
    normalisation_strategy = 'three_stds' if '3stds' in model_info else 'zero_one'
    global_model = 'gm' in model_info
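    # e.g. a directory named 'adam_bb-tl_mse' yields bounding_box_top_left coordinates,
    # zero_one normalisation and global_model == False ('gm' is absent from the name)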

    pretrained_models, scalers = load_ae_pretrained_models(
        all_pretrained_models_path)
    camera_ids = set(pretrained_models.keys())

    skeletons = {}
    for camera_id in camera_ids:
        trajectories_path = os.path.join(all_trajectories_path, camera_id)
        trajectories_frames, trajectories_coordinates = load_trajectories(
            trajectories_path)
        if global_model:
            trajectories_coordinates = extract_global_features(
                trajectories_coordinates, video_resolution=video_resolution)
            coordinate_system = 'global'
        trajectories_coordinates = change_coordinate_system(
            trajectories_coordinates,
            video_resolution=video_resolution,
            coordinate_system=coordinate_system,
            invert=False)
        trajectories_coordinates, _ = scale_trajectories(
            trajectories_coordinates,
            scaler=scalers[camera_id],
            strategy=normalisation_strategy)
        trajectories_frames, trajectories_coordinates = remove_missing_skeletons(
            trajectories_frames, trajectories_coordinates)
        skeletons[camera_id] = [trajectories_frames, trajectories_coordinates]

    all_anomaly_masks, all_reconstruction_errors = [], []
    aurocs, auprs = {}, {}
    for camera_id in sorted(camera_ids):
        anomaly_model = pretrained_models[camera_id]
        trajectories_frames, trajectories_coordinates = skeletons[camera_id]

        trajectories_coordinates_reconstructed = reconstruct_skeletons(
            anomaly_model, trajectories_coordinates)
        reconstruction_errors = compute_ae_reconstruction_errors(
            trajectories_coordinates,
            trajectories_coordinates_reconstructed,
            loss=anomaly_model.loss)

        anomaly_masks = load_anomaly_masks(frame_level_anomaly_masks_path,
                                           camera_id=camera_id)
        y_true, y_hat = ground_truth_and_reconstructions(
            anomaly_masks, reconstruction_errors, trajectories_frames)
        if data_type == 'training':
            # This hack is necessary because the training set has no anomalies
            y_true[0] = 1
        aurocs[camera_id], auprs[camera_id] = roc_auc_score(
            y_true, y_hat), average_precision_score(y_true, y_hat)
        all_anomaly_masks.append(y_true)
        all_reconstruction_errors.append(y_hat)

    # Dump the reconstruction errors for computation of the AUROC/AUPR across all cameras
    reconstruction_errors_save_path = os.path.join(
        all_pretrained_models_path, data_type + '_reconstruction_errors')
    np.savez(reconstruction_errors_save_path, *all_reconstruction_errors)

    all_anomaly_masks = np.concatenate(all_anomaly_masks)
    anomaly_masks_save_path = os.path.join(all_pretrained_models_path,
                                           'anomaly_masks.npy')
    if not os.path.exists(anomaly_masks_save_path) and data_type == 'testing':
        np.save(file=anomaly_masks_save_path[:-4], arr=all_anomaly_masks)

    for camera_id in sorted(aurocs.keys()):
        print('\nAUROC for camera %s: %.4f' % (camera_id, aurocs[camera_id]))
        print('AUPR for camera %s: %.4f' % (camera_id, auprs[camera_id]))

    return None
Code example #6
def eval_rnn_ae_models(args):
    # General
    video_resolution = [
        float(measurement) for measurement in args.video_resolution.split('x')
    ]
    video_resolution = np.array(video_resolution, dtype=np.float32)
    all_trajectories_path = args.trajectories
    all_pretrained_models_path = args.pretrained_models  # e.g. .../16_0_2_rrs_bb-c_3stds_mse
    frame_level_anomaly_masks_path = args.frame_level_anomaly_masks

    # Extract information about the models
    data_type = 'training' if 'training' in all_trajectories_path else 'testing'
    model_info = os.path.basename(all_pretrained_models_path)
    global_model = 'gm' in model_info
    reconstruct_reverse = 'rrs' in model_info
    input_missing = 'ims' in model_info
    if 'bb-tl' in model_info:
        coordinate_system = 'bounding_box_top_left'
    elif 'bb-c' in model_info:
        coordinate_system = 'bounding_box_centre'
    else:
        coordinate_system = 'global'
    normalisation_strategy = 'three_stds' if '3stds' in model_info else 'zero_one'
    model_info = model_info.split('_')
    input_length, input_gap, pred_length = int(model_info[0]), int(
        model_info[1]), int(model_info[2])
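    # e.g. a directory named '16_0_2_rrs_bb-c_3stds_mse' yields input_length=16, input_gap=0,
    # pred_length=2, reversed reconstruction, bounding_box_centre coordinates and three_stds scaling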

    # Dictionaries keyed by camera id, holding the pre-trained models and their fitted scalers
    camera_ids = set()
    pretrained_models = {}
    scalers = {}
    for pretrained_model_name in os.listdir(all_pretrained_models_path):
        if pretrained_model_name.endswith(
                '.npy') or pretrained_model_name.endswith('.npz'):
            continue
        camera_id = pretrained_model_name.split('_')[0]
        camera_ids.add(camera_id)
        pretrained_model_path = os.path.join(all_pretrained_models_path,
                                             pretrained_model_name)
        pretrained_models[camera_id], scalers[
            camera_id] = load_pretrained_rnn_ae(pretrained_model_path)

    # A dictionary where the keys are the ids of the cameras and the values are lists containing each
    # trajectory's frames and coordinates
    trajectories = {}
    for camera_id in camera_ids:
        trajectories_path = os.path.join(all_trajectories_path, camera_id)
        trajectories_frames, trajectories_coordinates = load_trajectories(
            trajectories_path)

        # Remove short trajectories
        trajectories_coordinates = remove_short_trajectories(
            trajectories_coordinates,
            input_length=input_length,
            input_gap=input_gap,
            pred_length=pred_length)
        # Iterate over a copy of the keys, since entries are deleted inside the loop
        for trajectory_id in set(trajectories_frames.keys()):
            if trajectory_id not in trajectories_coordinates:
                del trajectories_frames[trajectory_id]

        # Input missing steps (optional)
        if input_missing:
            trajectories_coordinates = input_trajectories_missing_steps(
                trajectories_coordinates)

        if global_model:
            trajectories_coordinates = extract_global_features(
                trajectories_coordinates, video_resolution=video_resolution)
            coordinate_system = 'global'

        trajectories_coordinates = change_coordinate_system(
            trajectories_coordinates,
            video_resolution=video_resolution,
            coordinate_system=coordinate_system,
            invert=False)

        trajectories_coordinates, _ = scale_trajectories(
            trajectories_coordinates,
            scaler=scalers[camera_id],
            strategy=normalisation_strategy)

        trajectories[camera_id] = [
            trajectories_frames, trajectories_coordinates
        ]

    all_anomaly_masks = []
    all_reconstruction_errors = []
    all_reconstructed_trajectories = {}
    all_original_lengths = {}
    aurocs, auprs = {}, {}
    for camera_id in sorted(camera_ids):
        anomaly_model = pretrained_models[camera_id]
        scaler = scalers[camera_id]
        trajectories_frames, trajectories_coordinates = trajectories[camera_id]
        original_lengths = {
            trajectory_id: len(trajectory_coordinates)
            for trajectory_id, trajectory_coordinates in
            trajectories_coordinates.items()
        }
        all_original_lengths[camera_id] = original_lengths

        test_frames, test_coordinates = assemble_trajectories(
            trajectories_frames,
            trajectories_coordinates,
            overlapping=args.overlapping_trajectories,
            input_length=input_length,
            input_gap=input_gap,
            pred_length=pred_length)
        reconstructed_coordinates = reconstruct_trajectories(
            anomaly_model, test_coordinates)
        if reconstruct_reverse:
            reconstructed_coordinates = reverse_trajectories(
                reconstructed_coordinates)

        reconstruction_errors = compute_reconstruction_errors(
            test_coordinates,
            reconstructed_coordinates,
            loss=anomaly_model.loss)
        reconstruction_errors = summarise_reconstruction_errors(
            reconstruction_errors, test_frames)
        reconstruction_errors = discard_errors_from_padded_frames(
            reconstruction_errors, original_lengths)

        reconstructed_coordinates = inverse_single_scale_trajectories(
            reconstructed_coordinates, scaler=scaler)
        all_reconstructed_trajectories[camera_id] = [
            test_frames, reconstructed_coordinates
        ]

        anomaly_masks = load_anomaly_masks(frame_level_anomaly_masks_path,
                                           camera_id=camera_id)
        y_true, y_hat, _ = ground_truth_and_reconstructions(
            anomaly_masks, reconstruction_errors, trajectories_frames)
        if data_type == 'training':
            # This hack is necessary because the training set has no anomalies
            y_true[0] = 1
        aurocs[camera_id], auprs[camera_id] = roc_auc_score(
            y_true, y_hat), average_precision_score(y_true, y_hat)
        all_anomaly_masks.append(y_true)
        all_reconstruction_errors.append(y_hat)

    # Dump the reconstruction errors for computation of the AUROC across all cameras
    reconstruction_errors_save_path = os.path.join(
        all_pretrained_models_path, data_type + '_reconstruction_errors')
    np.savez(reconstruction_errors_save_path, *all_reconstruction_errors)

    all_anomaly_masks = np.concatenate(all_anomaly_masks)
    anomaly_masks_save_path = os.path.join(all_pretrained_models_path,
                                           'anomaly_masks.npy')
    if not os.path.exists(anomaly_masks_save_path) and data_type == 'testing':
        np.save(file=anomaly_masks_save_path[:-4], arr=all_anomaly_masks)

    for camera_id in sorted(aurocs.keys()):
        print('\nAUROC for camera %s: %.4f' % (camera_id, aurocs[camera_id]))
        print('AUPR for camera %s: %.4f' % (camera_id, auprs[camera_id]))

    if args.write_reconstructions is not None and not global_model:
        all_reconstructed_trajectories = uniquify_reconstructions(
            all_reconstructed_trajectories)
        all_reconstructed_trajectories = discard_steps_from_padded_frames(
            all_reconstructed_trajectories, all_original_lengths)
        all_reconstructed_trajectories = from_global_to_image_all_cameras(
            all_reconstructed_trajectories, video_resolution=video_resolution)
        write_all_reconstructed_trajectories(
            all_reconstructed_trajectories,
            write_path=args.write_reconstructions)
        print('All reconstructed trajectories were written to %s.' %
              args.write_reconstructions)

    return None