예제 #1
0
def predict_most_salient_cb_point(most_salient_points, current_point):
    """Pick, for each future frame, the salient point closest to the current position.

    Parameters
    ----------
    most_salient_points : iterable of per-frame candidate collections; each
        entry holds the candidate salient points for one future frame.
    current_point : current viewing position, compared against each candidate
        via compute_orthodromic_distance (great-circle distance).

    Returns
    -------
    list
        One point per future frame: the candidate minimizing the orthodromic
        distance to current_point.
    """
    pred_window_predicted_closest_sal_point = []
    # The original enumerated the frames but never used the index (and named
    # it `id`, shadowing the builtin); a plain loop is all that is needed.
    for most_salient_points_per_fut_frame in most_salient_points:
        distances = np.array([compute_orthodromic_distance(current_point, most_sal_pt)
                              for most_sal_pt in most_salient_points_per_fut_frame])
        closest_sal_point = np.argmin(distances)
        pred_window_predicted_closest_sal_point.append(
            most_salient_points_per_fut_frame[closest_sal_point])
    return pred_window_predicted_closest_sal_point
예제 #2
0
def compute_no_motion_baseline_error_xyz(dataset, videos_list, history_window, prediction_horizon):
    """Error of the no-motion baseline over all traces in `videos_list`.

    The baseline predicts that the viewer stays at the current position; for
    every sample and every step of the prediction horizon, the orthodromic
    distance to the ground-truth future position is recorded in degrees.

    Returns a flat list of angular errors (degrees) across all users, videos,
    timestamps and horizon steps.
    """
    angle_errors = []
    total_users = len(dataset.keys())
    for user_idx, user in enumerate(dataset.keys()):
        user_videos = dataset[user]
        for video_idx, video in enumerate(user_videos.keys()):
            if video not in videos_list:
                continue
            print('computing error for trace', 'user', user_idx, '/', total_users, 'video', video_idx, '/', len(user_videos.keys()))
            xyz_trace = user_videos[video]
            # Columns 1: are the xyz coordinates; column 0 is skipped.
            for t in range(history_window, len(xyz_trace) - prediction_horizon):
                anchor = xyz_trace[t, 1:]
                for step in range(prediction_horizon):
                    ground_truth = xyz_trace[t + step + 1, 1:]
                    angle_errors.append(
                        radian_to_degrees(compute_orthodromic_distance(anchor, ground_truth)))
    return angle_errors
예제 #3
0
def compute_pretrained_model_error_xyz(dataset, videos_list, model, model_name, history_window, prediction_horizon):
    """Evaluate a pretrained position-only model on every trace in `videos_list`.

    For each timestamp, the model receives the past `history_window` positions
    (encoder) and the current position (decoder), both transformed to
    normalized eulerian coordinates; its predictions are mapped back to
    cartesian and compared to the ground truth with the orthodromic distance.

    Returns a flat list of angular errors in degrees.
    """
    angle_errors = []
    total_users = len(dataset.keys())
    for user_idx, user in enumerate(dataset.keys()):
        for video_idx, video in enumerate(dataset[user].keys()):
            if video not in videos_list:
                continue
            print('computing error for trace', 'user', user_idx, '/', total_users, 'video', video_idx, '/', len(dataset[user].keys()))
            xyz_trace = dataset[user][video]
            for t in range(history_window, len(xyz_trace) - prediction_horizon):
                encoder_input = transform_batches_cartesian_to_normalized_eulerian(
                    [xyz_trace[t - history_window:t, 1:]])
                decoder_input = transform_batches_cartesian_to_normalized_eulerian(
                    [xyz_trace[t:t + 1, 1:]])
                prediction = model.predict([encoder_input, decoder_input])
                for step in range(prediction_horizon):
                    predicted = normalized_eulerian_to_cartesian(
                        prediction[0, step, 0], prediction[0, step, 1])
                    ground_truth = xyz_trace[t + step + 1, 1:]
                    angle_errors.append(
                        radian_to_degrees(compute_orthodromic_distance(predicted, ground_truth)))
    return angle_errors
예제 #4
0
def compute_no_motion_baseline_error_xyz(dataset, videos_list, users_list, history_window, prediction_horizon):
    """Per-video, per-horizon-step error of the no-motion baseline.

    The baseline predicts that the viewer stays at the current position.
    Only traces whose user is in `users_list` and whose video is in
    `videos_list` are evaluated.

    Returns
    -------
    dict
        errors_per_video[video][x_i] is a list of orthodromic distances
        (radians — unlike the flat-list variant, no degree conversion here)
        for horizon step x_i, pooled across users and timestamps.
    """
    errors_per_video = {}
    num_users = len(dataset.keys())
    for enum_user, user in enumerate(dataset.keys()):
        # Guard clauses instead of the original's deep nesting.
        if user not in users_list:
            continue
        for enum_video, video in enumerate(dataset[user].keys()):
            if video not in videos_list:
                continue
            # setdefault replaces the `if video not in d.keys()` membership
            # checks of the original: one lookup, same resulting structure.
            per_step_errors = errors_per_video.setdefault(video, {})
            print('computing error for trace', 'user', enum_user, '/', num_users, 'video', enum_video, '/', len(dataset[user].keys()))
            xyz_per_video = dataset[user][video]
            for t in range(history_window, len(xyz_per_video) - prediction_horizon):
                sample_t = xyz_per_video[t, 1:]
                for x_i in range(prediction_horizon):
                    sample_t_n = xyz_per_video[t + x_i + 1, 1:]
                    per_step_errors.setdefault(x_i, []).append(
                        compute_orthodromic_distance(sample_t, sample_t_n))
    return errors_per_video
예제 #5
0
def most_salient_point_baseline(dataset):
    """Evaluate the most-salient-point baseline and print mean error per step.

    For every user/video trace, frames 5..74 are used as anchors; at each
    anchor the baseline predicts the next 25 positions by snapping to the
    most salient point of each future frame, and the orthodromic distance to
    the ground truth is accumulated per horizon step. Prints one line per
    step: the step's time offset (0.2 s per frame) and its mean error.
    Returns None (reporting-only routine).
    """
    saliency_per_video = get_most_salient_points_per_video()
    errors_by_step = {}
    for user_idx, user in enumerate(dataset.keys()):
        for video_idx, video in enumerate(dataset[user].keys()):
            print('computing error for user', user_idx, '/',
                  len(dataset.keys()), 'video', video_idx, '/',
                  len(dataset[user].keys()))
            trace = dataset[user][video]
            for anchor in range(5, 75):
                # Saliency for the 25 frames following the anchor frame.
                prediction = predict_most_salient_point(
                    saliency_per_video[video][anchor + 1:anchor + 25 + 1],
                    trace[anchor, 1:])
                for step in range(25):
                    errors_by_step.setdefault(step, []).append(
                        compute_orthodromic_distance(trace[anchor + step + 1, 1:],
                                                     prediction[step]))
    for step in range(25):
        print(step * 0.2, np.mean(errors_by_step[step]))
예제 #6
0
def compute_pretrained_model_error_xyz(dataset, videos_list, model_name, history_window, prediction_horizon, model_weights_path):
    """Build the requested pretrained model, load its weights, and evaluate it.

    For every trace (user, video) in `videos_list`, feeds the model the past
    `history_window` positions (plus saliency maps for saliency-based models)
    and accumulates the orthodromic distance between predicted and ground-truth
    positions, in degrees, over the prediction horizon.

    model_name must be one of 'TRACK', 'CVPR18', 'CVPR18_orig', 'pos_only'.
    NOTE(review): any other value (including 'no_motion', which the checks
    below mention) leaves `model` unbound and raises NameError at
    model.load_weights — confirm callers never pass other names.

    Returns a flat list of angular errors (degrees).
    """
    if model_name == 'TRACK':
        model = create_TRACK_model(history_window, TRAINED_PREDICTION_HORIZON, NUM_TILES_HEIGHT, NUM_TILES_WIDTH)
    elif model_name == 'CVPR18':
        model = create_CVPR18_model(history_window, TRAINED_PREDICTION_HORIZON, NUM_TILES_HEIGHT, NUM_TILES_WIDTH)
    elif model_name == 'CVPR18_orig':
        model = create_CVPR18_orig_Model(history_window, NUM_TILES_HEIGHT_TRUE_SAL, NUM_TILES_WIDTH_TRUE_SAL)
    elif model_name == 'pos_only':
        model = create_pos_only_model(history_window, TRAINED_PREDICTION_HORIZON)

    ###
    # Load pretrained weights; if missing, tell the user how to train or
    # download them instead of failing with an opaque error later.
    if os.path.isfile(model_weights_path):
        model.load_weights(model_weights_path)
    else:
        command = 'python training_procedure.py -train -gpu_id 0 -dataset_name Xu_CVPR_18 -model_name %s -m_window 5 -h_window 5 -exp_folder sampled_dataset_replica -provided_videos' % model_name
        if model_name not in ['no_motion', 'pos_only', 'TRACK']:
            command += ' -use_true_saliency'

        raise Exception('Sorry, the folder ./Xu_CVPR_18/'+model_name+'/ doesn\'t exist or is incomplete.\nYou can:\n* Create it using the command:\n\t\"'+command+'\" or \n* Download the files from:\n\thttps://unice-my.sharepoint.com/:f:/g/personal/miguel_romero-rondon_unice_fr/EjhbHp5qgDRKrtkqODKayq0BoCqUY76cmm8bDwdbMOTqeQ?e=fGRFjo')
    ###

    saliency_folder = os.path.join(ROOT_FOLDER, 'extract_saliency/saliency')
    true_saliency_folder = os.path.join(ROOT_FOLDER, 'true_saliency')

    # Pre-load all saliency maps once per video (pos_only needs none).
    if model_name not in ['pos_only']:
        all_saliencies = {}
        for video in videos_list:
            # for model CVPR18_orig we use the true saliency:
            if model_name == 'CVPR18_orig':
                if os.path.isdir(true_saliency_folder):
                    all_saliencies[video] = load_true_saliency(true_saliency_folder, video)
                else:
                    raise Exception('Sorry, the folder ./Xu_CVPR_18/true_saliency doesn\'t exist or is incomplete.\nYou can:\n* Create it using the command:\n\t\"python ./Xu_CVPR_18/Read_Dataset.py -creat_true_sal\" or \n* Download the folder from:\n\thttps://unice-my.sharepoint.com/:f:/g/personal/miguel_romero-rondon_unice_fr/EsOFppF2mSRBtCtlmUM0TV4BGFRb1plZWgtUxSEo_E-I7w?e=pKXxCf')
            else:
                if os.path.isdir(saliency_folder):
                    all_saliencies[video] = load_saliency(saliency_folder, video)
                else:
                    raise Exception('Sorry, the folder ./Xu_CVPR_18/extract_saliency doesn\'t exist or is incomplete.\nYou can:\n* Create it using the command:\n\t\"./Xu_CVPR_18/dataset/creation_of_scaled_images.sh\n\tpython ./Extract_Saliency/panosalnet.py -dataset_name CVPR_18\" or \n* Download the folder from:\n\thttps://unice-my.sharepoint.com/:f:/g/personal/miguel_romero-rondon_unice_fr/EvRCuy0v5BpDmADTPUuA8JgBoIgaWcFbR0S7wIXlevIIGQ?e=goOz7o')

    intersection_angle_error = []
    for enum_user, user in enumerate(dataset.keys()):
        for enum_video, video in enumerate(dataset[user].keys()):
            if video in videos_list:
                print('computing error for trace', 'user', enum_user, '/', len(dataset.keys()), 'video', enum_video, '/', len(dataset[user].keys()))
                xyz_per_video = dataset[user][video]
                # Slide over every timestamp with a full history behind it and
                # a full prediction horizon ahead of it.
                for t in range(history_window, len(xyz_per_video)-prediction_horizon):
                    if model_name not in ['pos_only', 'no_motion']:
                        # Saliency maps for the history window [t-hw+1, t].
                        encoder_sal_inputs_for_sample = np.array([np.expand_dims(all_saliencies[video][t - history_window + 1:t + 1], axis=-1)])
                        # ToDo: Be careful here, we are using TRAINED_PREDICTION_HORIZON to load future saliencies
                        if model_name == 'CVPR18_orig':
                            decoder_sal_inputs_for_sample = np.zeros((1, TRAINED_PREDICTION_HORIZON, NUM_TILES_HEIGHT_TRUE_SAL, NUM_TILES_WIDTH_TRUE_SAL, 1))
                        else:
                            decoder_sal_inputs_for_sample = np.zeros((1, TRAINED_PREDICTION_HORIZON, NUM_TILES_HEIGHT, NUM_TILES_WIDTH, 1))
                        # Clamp to the end of the video: frames past the last
                        # saliency map stay zero in the pre-allocated buffer.
                        taken_saliencies = all_saliencies[video][t + 1:min(t + TRAINED_PREDICTION_HORIZON + 1, len(all_saliencies[video]))]
                        # decoder_sal_inputs_for_sample = np.array([np.expand_dims(taken_saliencies, axis=-1)])
                        decoder_sal_inputs_for_sample[0, :len(taken_saliencies), :, :, 0] = taken_saliencies
                    encoder_pos_inputs_for_sample = [xyz_per_video[t-history_window:t, 1:]]
                    decoder_pos_inputs_for_sample = [xyz_per_video[t:t+1, 1:]]

                    # Each architecture expects a different input bundle; all
                    # paths end with model_prediction as a (horizon, 3)-like
                    # sequence of cartesian points.
                    if model_name == 'TRACK':
                        model_prediction = model.predict(
                            [np.array(encoder_pos_inputs_for_sample), np.array(encoder_sal_inputs_for_sample),
                             np.array(decoder_pos_inputs_for_sample), np.array(decoder_sal_inputs_for_sample)])[0]
                    elif model_name == 'CVPR18':
                        model_prediction = model.predict(
                            [np.array(encoder_pos_inputs_for_sample), np.array(decoder_pos_inputs_for_sample),
                             np.array(decoder_sal_inputs_for_sample)])[0]
                    elif model_name == 'CVPR18_orig':
                        # CVPR18_orig predicts auto-regressively in normalized
                        # eulerian space, then converts back to cartesian.
                        initial_pos_inputs = transform_batches_cartesian_to_normalized_eulerian(encoder_pos_inputs_for_sample)
                        model_pred = auto_regressive_prediction(model, initial_pos_inputs, decoder_sal_inputs_for_sample, history_window, prediction_horizon)
                        model_prediction = transform_normalized_eulerian_to_cartesian(model_pred)
                    elif model_name == 'pos_only':
                        model_pred = model.predict(
                            [transform_batches_cartesian_to_normalized_eulerian(encoder_pos_inputs_for_sample),
                             transform_batches_cartesian_to_normalized_eulerian(decoder_pos_inputs_for_sample)])[0]
                        model_prediction = transform_normalized_eulerian_to_cartesian(model_pred)

                    for x_i in range(prediction_horizon):
                        pred_t_n = model_prediction[x_i]
                        sample_t_n = xyz_per_video[t+x_i+1, 1:]
                        int_ang_err = compute_orthodromic_distance(pred_t_n, sample_t_n)
                        intersection_angle_error.append(radian_to_degrees(int_ang_err))
    return intersection_angle_error