pygame.init()
    pygame.display.set_mode((w, h), pygame.constants.RESIZABLE | pygame.constants.DOUBLEBUF | pygame.constants.RLEACCEL, 32)
    screen = pygame.display.get_surface()
    print ("Reading gaze data...")
    dw = drawgc_wrapper()
    ds.target_fps = 30
    ds.total_frame = len(truth_files)

    last_time = time.time()
    clock = pygame.time.Clock()
    
    # Optionally convert npz saliency maps to jpg first
    if convert_npz:
        print('Converting npz to jpg...')
        # Check that the target dir exists
        check_dirs.check_dir(dir_save_png_map)
        # npz => jpg Conversion
        temp_id = ds.cur_frame_id - 1       
        while temp_id < ds.total_frame:
            print('converting frame:', truth_files[temp_id-1])
            m = cm.ScalarMappable(cmap='jet')
            npz_img = np.load(dir_load_npz_saliency_map + key + 
                          truth_files[temp_id-1].strip('frame').strip('.jpg') + '.npz')[key]
            rgb_img = m.to_rgba(npz_img)[:,:,:3]
            plt.imsave(dir_save_png_map + truth_files[temp_id-1], rgb_img)   # Save jpg
            temp_id += 1
        
    # Load saliency maps and play  
    print('\nLoading ground truth and predicted saliency maps...')
    while ds.cur_frame_id-1 < ds.total_frame:
        print('frame:', truth_files[ds.cur_frame_id-1])            
Example #2
import os
import config
import shutil
import check_dirs
import scipy.io as sio
import numpy as np

import read_frameInfo

matfn = config.groundtruth_file
data = sio.loadmat(matfn)
fix_index = data['fixation_frames']
fix_index = fix_index.astype(np.int32)
subject = 1  # Can be 1,2,3

check_dirs.check_dir(config.dir_to_save_frames_in_use)
frame_sets = [
    x for x in os.listdir(config.dir_to_load_frames) if x.endswith('.jpg')
]  # Load frames
print('Number of frames:', len(frame_sets))

# Import frame idx
subFrames, _, _ = read_frameInfo.frameInfo('./frameInfo.mat', subject=subject)

for frame in frame_sets:
    index = int(frame.strip('frame').strip('.jpg'))  # Get loaded frame index
    # Range of frames in use
    if index in subFrames and fix_index[index] == 1:
        shutil.copy(config.dir_to_load_frames + frame,
                    config.dir_to_save_frames_in_use)
        try:
Example #3
"""
Created on Fri Jun 15 17:41:27 2018

@author: david
"""
'''To use ffmpeg: http://zulko.github.io/blog/2013/09/27/read-and-write-video-frames-in-python-using-ffmpeg/'''
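# The linked post reads frames by piping raw video out of ffmpeg instead of
# using OpenCV. A minimal sketch of that approach (added for illustration, not
# part of this script; assumes ffmpeg is on PATH and a 1920x1080 source):
#
#   import subprocess
#   import numpy as np
#   pipe = subprocess.Popen(
#       ['ffmpeg', '-i', 'world.mp4', '-f', 'image2pipe',
#        '-pix_fmt', 'rgb24', '-vcodec', 'rawvideo', '-'],
#       stdout=subprocess.PIPE, bufsize=10**8)
#   raw = pipe.stdout.read(1920 * 1080 * 3)   # raw bytes of one frame
#   frame = np.frombuffer(raw, dtype=np.uint8).reshape((1080, 1920, 3))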

#%%

import cv2
import check_dirs
print('OpenCV version: ', cv2.__version__)

dir_to_load_video = './'
dir_to_save_frames = './frames/'
check_dirs.check_dir(dir_to_save_frames)
pixel = {'x': 1024, 'y': 576}   # Target size

cap = cv2.VideoCapture(dir_to_load_video + 'world.mp4')   # Open the video
ret, frame = cap.read()   # Capture the first frame

count = 0
while ret:
    frame = cv2.resize(frame, (pixel['x'], pixel['y']), interpolation=cv2.INTER_AREA)
    cv2.imwrite(dir_to_save_frames + "frame%d.jpg" % count, frame)   # Save frame as JPEG
    print('Saved frame:', count)
    count += 1
    ret, frame = cap.read()   # Capture the next frame
cap.release()
    
    
Example #4
"""
Created on Thu Dec 27 22:07:04 2018

@author: dawei
"""

import os

import check_dirs
import vggish_inference_demo
import tryread

dir_to_save_csv = './csv_files/'
dir_to_save_tf = './tf_records/'
dir_to_load_wav = './wav_files/'

check_dirs.check_dir(dir_to_save_csv)
check_dirs.check_dir(dir_to_save_tf)

if __name__ == '__main__':
    # Load wav files
    wav_list = [x for x in os.listdir(dir_to_load_wav) if x.endswith('.wav')]   
    print('Number of wav files:', len(wav_list))
    # Run the VGGish model
    for wav in wav_list:
        wav = os.path.splitext(wav)[0]   # Drop the .wav extension (str.strip removes characters, not a suffix)
        try:
            vggish_object = vggish_inference_demo.vggish(dir_to_load_wav + wav, 
                                                         dir_to_save_tf + wav)
            vggish_object.set_all_flags()
            vggish_object.run()
        except:
Example #5
#%%


def reshape(training_data_fit):
    # Reshape training data as (#,128,1) for CNN
    training_data_fit = np.reshape(
        training_data_fit,
        (training_data_fit.shape[2], 128, training_data_fit.shape[0]))
    training_data_fit = training_data_fit[:, :, :, np.newaxis]
    return training_data_fit
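
#%%
# Illustrative shape check (an added example, not part of the original script):
# VGGish-style embeddings stacked as (1, 128, n_examples) come out of reshape()
# as (n_examples, 128, 1, 1), i.e. one 128x1 single-channel "image" per example.
if __name__ == "__main__":
    import numpy as np
    _dummy = np.zeros((1, 128, 10))
    print(reshape(_dummy).shape)   # expected: (10, 128, 1, 1)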


#%%
if __name__ == "__main__":
    # Create a path to save model
    check_dirs.check_dir(save_model_dir)

    # Load target classes
    classes = pd.read_csv(label_path + 'UCLA_data_codebook.csv').values[:, 0]
    classes = classes.tolist()
    class_to_select = [e for e in classes if target in e]
    print('target class:', class_to_select)

    # Load regression labels (values)
    UCLA_data = pd.read_csv(label_path + 'UCLA_data.csv')
    # Replace empty strings with NaN, then drop rows with missing values
    UCLA_data = UCLA_data.replace(' ', np.nan, inplace=False)
    UCLA_data = UCLA_data.dropna(how='any')
    # File idx, int
    csid = UCLA_data.values[:, 0].tolist()
    # Labels, float
Example #6
features_new, labels_new, groups_new = get_contextual(3, features, labels,
                                                      groups)
features_1_test, labels_1_test, groups_1_test = get_contextual(
    3, features_1, labels_1, groups_1)
features_2_test, labels_2_test, groups_2_test = get_contextual(
    3, features_2, labels_2, groups_2)
features_m_test, labels_m_test, groups_m_test = get_contextual(
    3, features_mixed, labels_mixed, groups_mixed)

#%%
RF = False
if RF:
    seed(0)
    predict = False  # True, False
    save_model_path = './models/RF/'
    check_dirs.check_dir(save_model_path)
    #kfold = StratifiedKFold(n_splits=5, shuffle=False, random_state=None)
    lppo = LeavePGroupsOut(n_groups=1)
    fold_no = 1
    f1_per_fold, acc_per_fold, pre_per_fold, rec_per_fold = [], [], [], []
    features = np.reshape(features, (features.shape[0], features.shape[1]))
    # training
    if not predict:
        #for train, test in kfold.split(features, labels):
        for train, test in lppo.split(features_new,
                                      labels_new,
                                      groups=groups_new):
            feat_train, labels_train = features_new[train], labels_new[
                train].reshape((len(labels_new[train])))
            feat_test, labels_test = features_new[test], labels_new[
                test].reshape((len(labels_new[test])))
Example #7

# -*- coding: utf-8 -*-
"""
Created on Sat Nov 17 17:54:19 2018

@author: david
"""

import config
import check_dirs
import os
import numpy as np
import cv2

# Check and create necessary dir
dir_groundtruth = config.dir_to_save_groundtruth
check_dirs.check_dir(dir_groundtruth[:-1]+'_flip/')

dir_heatmap = config.dir_to_save_heatmap
check_dirs.check_dir(dir_heatmap[:-1]+'_flip/')

dir_img = config.dir_to_save_frames_in_use
check_dirs.check_dir(dir_img[:-1]+'_flip/')

frame_sets = [x for x in os.listdir(dir_img) if x.endswith('.jpg')]   # Load frames
print('Number of frames:', len(frame_sets))

heatmap_sets = [x for x in os.listdir(dir_heatmap) if x.endswith('.npz')]   # Load heatmaps
print('Number of heatmaps:', len(heatmap_sets))

groundtruth_sets = [x for x in os.listdir(dir_groundtruth) if x.endswith('.jpg')]   # Load groundtruth
print('Number of groundtruth:', len(groundtruth_sets))
Example #8
import cv2
import pandas as pd
import scipy.io as sio
import os
import numpy as np
import config
import video
import heatmap
import check_dirs

#%%
'''Visualize groundtruth coordinates and generate groundtruth videos/heatmaps'''

if __name__ == '__main__':
    check_dirs.check_dir(config.dir_to_save_groundtruth)
    check_dirs.check_dir(config.dir_to_save_heatmap)
    check_dirs.check_dir(config.dir_write_video)
    groundtruth_log = open(config.dir_to_save_groundtruth_log + "/log2.txt",
                           'w')
    '''Import groundtruth coordinates and frame indices from the .mat file'''
    matfn = 'G:/Research2/sem2 w1/Walking_data_Subj1and2/Subj2/por.mat'
    data = sio.loadmat(matfn)
    norm_pos_x = data['porX'] * 1.0 / 1920
    norm_pos_y = data['porY'] * 1.0 / 1080
    norm_pos_x = norm_pos_x.T
    norm_pos_y = norm_pos_y.T
    heatmap_object = heatmap.heatmap(config.pixel['x'], config.pixel['y'])
    '''Generate and save groundtruth frames/heatmap/txt'''
    frame_sets = [
        x for x in os.listdir(config.dir_to_load_frames) if x.endswith('.jpg')
Example #9
"""
Created on Wed Dec 12 2018
@author: david

"""

import os
import subprocess

import check_dirs

dir_to_save = './wav_files/'
dir_to_load_mp4 = './T1_Audio_Files/'

check_dirs.check_dir(dir_to_save)
mp4_list = [x for x in os.listdir(dir_to_load_mp4)
            if x.endswith('.mp4')]  # Load mp4 files
print('Number of audio files:', len(mp4_list))

for item in mp4_list:
    # Output format: 16-bit samples; sampling rate taken from the source
    command = ("ffmpeg -i " + dir_to_load_mp4 + item + " -vn -sample_fmt s16 " +
               dir_to_save + os.path.splitext(item)[0] + ".wav")
    subprocess.call(command, shell=True)

print('Done')
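
Every example on this page guards its output directory with check_dirs.check_dir before writing, but the helper itself is not shown. A minimal sketch of what such a helper typically does (an assumption for illustration, not the project's actual implementation):

import os


def check_dir(path):
    """Create the directory (and any parents) if it does not exist yet."""
    if not os.path.isdir(path):
        os.makedirs(path)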
Example #10
import metrics
import numpy as np
import config
import os
import itti_model
import heatmap
import check_dirs

#%%
dir_to_load_heatmap = config.dir_to_save_heatmap
dir_to_load_log = config.dir_to_save_log
dir_to_load_frames = config.dir_to_load_frames
y_pos, x_pos = config.pixel['y'], config.pixel['x']

if __name__ == '__main__':
    check_dirs.check_dir(config.dir_to_save_log)
    check_dirs.check_dir(config.dir_to_save_saliency_map)
    check_dirs.check_dir(config.dir_to_save_antisaliency_map)
    check_dirs.check_dir(config.dir_to_save_chance_map)

    NSS_itti = open(config.dir_to_save_log + "/NSS_itti.txt", 'w')
    AUC_itti = open(config.dir_to_save_log + "/AUC_itti.txt", 'w')
    CC_itti = open(config.dir_to_save_log + "/CC_itti.txt", 'w')
    KL_itti = open(config.dir_to_save_log + "/KL_itti.txt", 'w')
    NSS_itti.write('index' + ' ' + 'saliency' + ' ' + 'chance' + ' ' +
                   'anti_saliency' + '\n')
    AUC_itti.write('index' + ' ' + 'saliency' + ' ' + 'chance' + ' ' +
                   'anti_saliency' + '\n')
    CC_itti.write('index' + ' ' + 'saliency' + ' ' + 'chance' + ' ' +
                  'anti_saliency' + '\n')
    KL_itti.write('index' + ' ' + 'saliency' + ' ' + 'chance' + ' ' +