Example #1
def get_traces_for_train_and_test():
    # Fixing random state for reproducibility
    np.random.seed(19680801)

    videos = get_video_ids(OUTPUT_FOLDER)
    all_users = get_user_ids(OUTPUT_FOLDER)
    users = np.random.choice(all_users, NUM_USERS_EXPERIMENT, replace=False)

    # One trace per (video, user) pair; shuffle the indices, not the traces.
    traces = [{'video': video, 'user': user} for video in videos for user in users]
    traces_index = np.arange(len(traces))
    np.random.shuffle(traces_index)
    num_train_traces = int(len(traces_index) * PROPORTION_TRAIN_SET)

    train_traces_ids = traces_index[:num_train_traces]
    test_traces_ids = traces_index[num_train_traces:]

    train_traces = [traces[trace_id] for trace_id in train_traces_ids]
    test_traces = [traces[trace_id] for trace_id in test_traces_ids]
    return train_traces, test_traces
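The core of this split is shuffle-then-slice over trace indices. A minimal self-contained sketch of the same pattern on toy data (all names below are illustrative, and 0.8 stands in for PROPORTION_TRAIN_SET):

import numpy as np

np.random.seed(0)
traces = [{'video': v, 'user': u} for v in ('v0', 'v1') for u in ('u0', 'u1', 'u2')]
index = np.arange(len(traces))
np.random.shuffle(index)
num_train = int(len(index) * 0.8)  # 0.8 stands in for PROPORTION_TRAIN_SET
train = [traces[i] for i in index[:num_train]]
test = [traces[i] for i in index[num_train:]]
# Every trace lands in exactly one of the two lists.
assert len(train) + len(test) == len(traces)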
Example #2
def split_in_train_and_test():
    from SampledDataset import get_video_ids, get_user_ids, get_users_per_video
    # Fixing random state for reproducibility
    np.random.seed(19680801)

    videos = get_video_ids(OUTPUT_FOLDER)
    # Hold out 5 randomly chosen videos for training; the remaining videos form the test set.
    videos_train = np.random.choice(videos, 5, replace=False)
    videos_test = np.setdiff1d(videos, videos_train)
    print(videos_train)
    print(videos_test)
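The choice/setdiff pattern guarantees a disjoint, exhaustive split. A self-contained toy sketch (made-up video ids) verifying this:

import numpy as np

videos = np.array(['v0', 'v1', 'v2', 'v3', 'v4', 'v5', 'v6'])
videos_train = np.random.choice(videos, 5, replace=False)
videos_test = np.setdiff1d(videos, videos_train)
# The two sets are disjoint and together cover every video.
assert set(videos_train).isdisjoint(videos_test)
assert set(videos_train) | set(videos_test) == set(videos)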
Example #3
def create_and_store_true_saliency(sampled_dataset):
    if not os.path.exists(OUTPUT_TRUE_SALIENCY_FOLDER):
        os.makedirs(OUTPUT_TRUE_SALIENCY_FOLDER)

    # Returns an array of shape (NUM_TILES_HEIGHT_TRUE_SAL, NUM_TILES_WIDTH_TRUE_SAL) with values between 0 and 1, giving the probability that each tile is watched by the user.
    # This function is defined here so that the model and the ground-truth tile probabilities are computed with the same (or a similar) function.
    def from_position_to_tile_probability_cartesian(pos):
        yaw_grid, pitch_grid = np.meshgrid(
            np.linspace(0, 1, NUM_TILES_WIDTH_TRUE_SAL, endpoint=False),
            np.linspace(0, 1, NUM_TILES_HEIGHT_TRUE_SAL, endpoint=False))
        yaw_grid += 1.0 / (2.0 * NUM_TILES_WIDTH_TRUE_SAL)
        pitch_grid += 1.0 / (2.0 * NUM_TILES_HEIGHT_TRUE_SAL)
        yaw_grid = yaw_grid * 2 * np.pi
        pitch_grid = pitch_grid * np.pi
        x_grid, y_grid, z_grid = eulerian_to_cartesian(theta=yaw_grid,
                                                       phi=pitch_grid)
        great_circle_distance = np.arccos(
            np.clip(x_grid * pos[0] + y_grid * pos[1] + z_grid * pos[2],
                    -1.0, 1.0))
        gaussian_orth = np.exp(
            (-1.0 / (2.0 * np.square(0.1))) * np.square(great_circle_distance))
        return gaussian_orth

    videos = get_video_ids(OUTPUT_FOLDER)
    users_per_video = get_users_per_video(OUTPUT_FOLDER)

    for enum_video, video in enumerate(videos):
        print('creating true saliency for video', video, '-', enum_video, '/',
              len(videos))
        real_saliency_for_video = []

        max_num_samples = get_max_num_samples_for_video(
            video, sampled_dataset, users_per_video[video])

        for x_i in range(max_num_samples):
            tileprobs_for_video_cartesian = []
            for user in users_per_video[video]:
                if len(sampled_dataset[user][video]) > x_i:
                    tileprobs_cartesian = from_position_to_tile_probability_cartesian(
                        sampled_dataset[user][video][x_i, 1:])
                    tileprobs_for_video_cartesian.append(tileprobs_cartesian)
            tileprobs_for_video_cartesian = np.array(
                tileprobs_for_video_cartesian)
            real_saliency_cartesian = np.mean(tileprobs_for_video_cartesian,
                                              axis=0)
            real_saliency_for_video.append(real_saliency_cartesian)
        real_saliency_for_video = np.array(real_saliency_for_video)

        true_sal_out_file = os.path.join(OUTPUT_TRUE_SALIENCY_FOLDER, video)
        np.save(true_sal_out_file, real_saliency_for_video)
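The nested helper above maps a gaze direction to a per-tile Gaussian over great-circle distance. Since it is nested inside create_and_store_true_saliency, here is a self-contained sketch of the same computation on a small grid; eulerian_to_cartesian is reimplemented under the usual convention (theta = yaw in [0, 2π), phi = colatitude in [0, π]), which is an assumption about the project's helper:

import numpy as np

NUM_TILES_W, NUM_TILES_H = 8, 4  # small grid, chosen for illustration

def eulerian_to_cartesian(theta, phi):
    # Assumed convention: theta = yaw, phi = pitch measured as colatitude.
    return np.cos(theta) * np.sin(phi), np.sin(theta) * np.sin(phi), np.cos(phi)

# Tile-center grid, mirroring the logic of the nested helper above.
yaw_grid, pitch_grid = np.meshgrid(
    np.linspace(0, 1, NUM_TILES_W, endpoint=False) + 0.5 / NUM_TILES_W,
    np.linspace(0, 1, NUM_TILES_H, endpoint=False) + 0.5 / NUM_TILES_H)
x, y, z = eulerian_to_cartesian(yaw_grid * 2 * np.pi, pitch_grid * np.pi)

pos = np.array(eulerian_to_cartesian(np.pi, np.pi / 2))  # a gaze direction on the equator
dot = x * pos[0] + y * pos[1] + z * pos[2]
great_circle_distance = np.arccos(np.clip(dot, -1.0, 1.0))
probs = np.exp(-np.square(great_circle_distance) / (2.0 * 0.1 ** 2))
print(probs.shape)  # (4, 8): one Gaussian weight per tile, near 1.0 at the gaze tile

The per-video array stored by np.save can later be read back with np.load, yielding shape (num_samples, NUM_TILES_HEIGHT_TRUE_SAL, NUM_TILES_WIDTH_TRUE_SAL).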
Example #4
        RESULTS_FOLDER = os.path.join(root_dataset_folder, 'MM18/Results_Seq2One_2DNormalized_TrueSal' + EXP_NAME)
        MODELS_FOLDER = os.path.join(root_dataset_folder, 'MM18/Models_Seq2One_2DNormalized_TrueSal' + EXP_NAME)

PERC_VIDEOS_TRAIN = 0.8
PERC_USERS_TRAIN = 0.5

BATCH_SIZE = 128.0

TRAIN_MODEL = bool(args.train_flag)
EVALUATE_MODEL = bool(args.evaluate_flag)
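# The flags above are assumed to come from an argparse parser along these
# lines (a sketch with hypothetical option names; the project's actual
# parser may differ):
#
#   import argparse
#   parser = argparse.ArgumentParser()
#   parser.add_argument('-train', action='store_true', dest='train_flag')
#   parser.add_argument('-evaluate', action='store_true', dest='evaluate_flag')
#   parser.add_argument('-provided_videos', action='store_true', dest='provided_videos')
#   args = parser.parse_args()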

videos = get_video_ids(SAMPLED_DATASET_FOLDER)
users = get_user_ids(SAMPLED_DATASET_FOLDER)
users_per_video = get_users_per_video(SAMPLED_DATASET_FOLDER)

if args.provided_videos:
    if dataset_name == 'Xu_CVPR_18':
        videos_train, videos_test = get_videos_train_and_test_from_file(root_dataset_folder)
        partition = partition_in_train_and_test_without_video_intersection(SAMPLED_DATASET_FOLDER, INIT_WINDOW, END_WINDOW, videos_train, videos_test, users_per_video)
    elif dataset_name == 'Xu_PAMI_18':
        # From PAMI_18 paper:
        # For evaluating the performance of offline-DHP, we randomly divided all 76 panoramic sequences of our PVS-HM database into a training set (61 sequences) and a test set (15 sequences).
        # For evaluating the performance of online-DHP [...]. Since the DRL network of offline-DHP was learned over 61 training sequences and used as the initial model of online-DHP, our comparison was conducted on all 15 test sequences of our PVS-HM database.
        videos_test = ['KingKong', 'SpaceWar2', 'StarryPolar', 'Dancing', 'Guitar', 'BTSRun', 'InsideCar', 'RioOlympics', 'SpaceWar', 'CMLauncher2', 'Waterfall', 'Sunset', 'BlueWorld', 'Symphony', 'WaitingForLove']
        videos_train = ['A380', 'AcerEngine', 'AcerPredator', 'AirShow', 'BFG', 'Bicycle', 'Camping', 'CandyCarnival', 'Castle', 'Catwalks', 'CMLauncher', 'CS', 'DanceInTurn', 'DrivingInAlps', 'Egypt', 'F5Fighter', 'Flight', 'GalaxyOnFire', 'Graffiti', 'GTA', 'HondaF1', 'IRobot', 'KasabianLive', 'Lion', 'LoopUniverse', 'Manhattan', 'MC', 'MercedesBenz', 'Motorbike', 'Murder', 'NotBeAloneTonight', 'Orion', 'Parachuting', 'Parasailing', 'Pearl', 'Predator', 'ProjectSoul', 'Rally', 'RingMan', 'Roma', 'Shark', 'Skiing', 'Snowfield', 'SnowRopeway', 'Square', 'StarWars', 'StarWars2', 'Stratosphere', 'StreetFighter', 'Supercar', 'SuperMario64', 'Surfing', 'SurfingArctic', 'TalkingInCar', 'Terminator', 'TheInvisible', 'Village', 'VRBasketball', 'Waterskiing', 'WesternSichuan', 'Yacht']
        partition = partition_in_train_and_test_without_video_intersection(SAMPLED_DATASET_FOLDER, INIT_WINDOW, END_WINDOW, videos_train, videos_test, users_per_video)
    elif dataset_name == 'Fan_NOSSDAV_17':