Code example #1
def fix_offset_files(offset_files):
    problem_files = []
    for offset_file in offset_files:
        lines = util.readLinesFromFile(offset_file)
        assert len(lines) > 0
        if len(lines) == 1:
            try:
                int(lines[0])  # single line parses as an int; file is fine
                continue
            except ValueError:
                problem_files.append(offset_file)
        else:
            problem_files.append(offset_file)

    for problem_file in problem_files:
        print problem_file
        lines = util.readLinesFromFile(problem_file)
        print 'PROBLEM'
        print lines

        to_write = []
        # read replacement lines from stdin until the user types 'q'
        while True:
            s = raw_input()
            if s == 'q':
                break
            to_write.append(s)
        print problem_file, to_write

        util.writeFile(problem_file, to_write)
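Every example on this page leans on the same project-local util module for file I/O. A minimal sketch of those helpers, reconstructed from how they are called here (an assumption about their behavior, not the repos' actual implementation):

import os


def readLinesFromFile(file_name):
    # return the file's lines with trailing newlines stripped
    with open(file_name, 'r') as f:
        return [line.rstrip('\n') for line in f]


def writeFile(file_name, list_to_write):
    # write one list entry per line
    with open(file_name, 'w') as f:
        for line in list_to_write:
            f.write(line + '\n')


def mkdir(dir_curr):
    # create a single directory if it does not already exist
    if not os.path.exists(dir_curr):
        os.mkdir(dir_curr)


def makedirs(dir_curr):
    # create a directory along with any missing parents
    if not os.path.exists(dir_curr):
        os.makedirs(dir_curr)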
Code example #2
def sanity_check():

    dir_meta = '../data/bp4d'
    out_dir_files = os.path.join(dir_meta,
                                 'train_test_files_110_color_nodetect')
    for fold_num in range(3):
        train_file = os.path.join(out_dir_files,
                                  'train_' + str(fold_num) + '.txt')
        test_file = os.path.join(out_dir_files,
                                 'test_' + str(fold_num) + '.txt')
        train_data = util.readLinesFromFile(train_file)
        test_data = util.readLinesFromFile(test_file)
        train_folders = [
            os.path.split(line_curr.split(' ')[0])[0]
            for line_curr in train_data
        ]
        test_folders = [
            os.path.split(line_curr.split(' ')[0])[0]
            for line_curr in test_data
        ]
        train_folds = list(set(train_folders))
        test_folds = list(set(test_folders))
        # the first two counts should sum to the third: splits are disjoint
        print len(train_folds), len(test_folds), len(
            set(train_folds + test_folds)), len(train_folds) + len(test_folds)
        # every entry should be False: no subject folder in both splits
        print np.in1d(test_folds, train_folds)
Code example #3
def save_test_pair_file():
    dir_train_test_files = '../data/ucf101/train_test_files'
    num_to_pick = 20
    train_file = os.path.join(dir_train_test_files, 'train_ultra_correct.txt')
    test_file = os.path.join(dir_train_test_files, 'test_ultra_correct.txt')

    train_data = util.readLinesFromFile(train_file)
    test_data = util.readLinesFromFile(test_file)

    test_new_data = []
    for line_curr in test_data:
        # pair each test line with num_to_pick randomly sampled train lines
        rand_idxs = random.sample(xrange(len(train_data)), num_to_pick)
        for rand_idx in rand_idxs:
            test_new_data.append(line_curr)
            test_new_data.append(train_data[rand_idx])

    print len(test_new_data)
    print len(test_data)
    print len(train_data)
    print len(test_data) * num_to_pick * 2  # expected length of test_new_data
    out_file = os.path.join(
        dir_train_test_files,
        'test_pair_rand' + str(num_to_pick) + '_ultra_correct.txt')
    print out_file
    util.writeFile(out_file, test_new_data)
Code example #4
File: exploring_dbs.py Project: menorashid/eccv_18
def main():
    data_dir = '../data/ck_96/train_test_files'
    facs_anno_dir = '../data/ck_original/FACS'

    all_files = []
    fold_num = 0
    train_file = os.path.join(data_dir, 'train_' + str(fold_num) + '.txt')
    test_file = os.path.join(data_dir, 'test_' + str(fold_num) + '.txt')
    all_files = all_files + util.readLinesFromFile(
        train_file) + util.readLinesFromFile(test_file)
    assert len(all_files) == len(set(all_files))

    existence = []
    for file_curr in all_files:
        file_curr_split = file_curr.split(' ')
        anno = file_curr_split[1]
        im_name = os.path.split(file_curr_split[0])[1]
        im_name_split = im_name[:im_name.rindex('.')].split('_')
        facs_file = os.path.join(facs_anno_dir, im_name_split[0],
                                 im_name_split[1],
                                 '_'.join(im_name_split) + '_facs.txt')
        print facs_file, os.path.exists(facs_file), anno
        existence.append(os.path.exists(facs_file))

    print len(existence)
    print sum(existence)
Code example #5
def make_test_files_manual_anno():
    data_dir = '../../data/frames'

    files = [
        os.path.join(data_dir, file_name) for file_name in [
            'ch06_20161212115301_frames_class_ims.txt',
            'ch02_20161212115300_frames_class_ims.txt'
        ]
    ]

    all_files = util.readLinesFromFile(files[0]) + util.readLinesFromFile(
        files[1])
    print len(all_files)
    random.shuffle(all_files)
    train_files = all_files[:-20]
    test_files = all_files[-20:]
    print len(train_files), len(test_files), len(train_files) + len(test_files)
    # return
    out_dir = os.path.join(data_dir, 'test_frames')
    util.mkdir(out_dir)
    moved_files = []
    out_file_list = os.path.join(data_dir, 'test_frames.txt')
    for file_curr in test_files:
        file_curr = file_curr.split(' ')[0]
        out_file = os.path.join(out_dir, os.path.split(file_curr)[1])
        # move both the image and its annotation .txt to the test dir
        shutil.move(file_curr, out_file)
        file_curr = file_curr.replace('.jpg', '.txt')
        out_file = out_file.replace('.jpg', '.txt')
        shutil.move(file_curr, out_file)
        moved_files.append(out_file)
    util.writeFile(out_file_list, moved_files)
Code example #6
def create_comparative_html_fast():
    just_train = True
    # load files
    dir_train_test_files = '../data/ucf101/train_test_files'
    train_file = os.path.join(dir_train_test_files,
                              'train_just_primary_corrected.txt')
    test_file = os.path.join(dir_train_test_files,
                             'test_just_primary_corrected.txt')
    dir_gt_vecs = '../data/ucf101/gt_vecs/just_primary_corrected'

    if just_train:
        out_dir = '../scratch/i3d_dists_just_train'
        out_dir_coocs = '../scratch/i3d_dists_just_train/arr_coocs_pairwise'

        out_dir_viz = '../scratch/i3d_dists_just_train/arr_coocs_pairwise_viz'
        util.mkdir(out_dir_viz)

        lines = util.readLinesFromFile(train_file)
    else:
        out_dir = '../scratch/i3d_dists'
        lines = util.readLinesFromFile(train_file) + util.readLinesFromFile(
            test_file)

    npy_files = [line_curr.split(' ')[0] for line_curr in lines]
    gt_files = [
        os.path.join(dir_gt_vecs,
                     os.path.split(line_curr)[1]) for line_curr in npy_files
    ]

    # get vid names per class
    vid_names_per_class, class_id = get_vid_names_per_class(lines)
    # print vid_names_per_class

    out_dir_html = out_dir + '_pairwise_htmls'
    util.mkdir(out_dir_html)
    dir_server = '/disk2/maheen_data'
    # note: out_dir_viz is only defined in the just_train branch above
    out_dir_viz = out_dir_viz.replace('..', os.path.join(dir_server, 'nn_net'))

    for dir_type in ['fg', 'bg', 'mat']:
        out_dir_html_curr = os.path.join(out_dir_html, dir_type)
        util.mkdir(out_dir_html_curr)
        n_strs = ['n_10', 'n_25', 'n_50', 'n_100']
        folders = [
            os.path.join(out_dir_viz, n_str, dir_type) for n_str in n_strs
        ]
        for idx_class, vid_names_curr in enumerate(vid_names_per_class):
            class_name = class_names[idx_class]
            img_names = [class_name + '.jpg'] + [
                vid_name + '.jpg' for vid_name in vid_names_curr
            ]
            out_file_html = os.path.join(out_dir_html_curr,
                                         class_name + '.html')
            visualize.writeHTMLForDifferentFolders(out_file_html,
                                                   folders,
                                                   n_strs,
                                                   img_names,
                                                   rel_path_replace=dir_server,
                                                   height=330,
                                                   width=400)
Code example #7
def merge_emo_facs(emo_file, facs_file, out_file, list_au_keep, idx_map):

    assert os.path.exists(emo_file)
    assert os.path.exists(facs_file)

    emo_lines = util.readLinesFromFile(emo_file)
    facs_lines = util.readLinesFromFile(facs_file)

    au_bin = np.zeros((len(emo_lines), np.max(idx_map) + 1))
    print 'au_bin.shape', au_bin.shape

    emo_ims = [line.split(' ')[0] for line in emo_lines]
    facs_ims = [line.split(' ')[0] for line in facs_lines]
    for idx_facs, facs_im in enumerate(facs_ims):

        idx_emo = emo_ims.index(facs_im)
        facs = facs_lines[idx_facs]

        facs = [int(val) for val in facs.split(' ')[1:]]
        # values come in (AU number, intensity) pairs; keep the AU numbers
        facs = facs[::2]
        found = 0
        for facs_curr in facs:
            if facs_curr in list_au_keep:
                found = 1
                idx_au = list_au_keep.index(facs_curr)
                au_bin[idx_emo, idx_map[idx_au]] = 1

        if not found:
            # pause for manual inspection when no AU in the keep list matched
            print facs_lines[idx_facs]
            print emo_lines[idx_emo]
            raw_input()

    facs_bin = np.sum(au_bin, axis=1, keepdims=True)
    print facs_bin.shape, np.min(facs_bin), np.max(facs_bin), np.sum(
        facs_bin > 0)
    facs_bin[facs_bin > 0] = 1

    print np.sum(facs_bin), len(facs_lines), np.sum(facs_bin) == len(
        facs_lines)
    print np.sum(au_bin, 0)

    out_mat = np.concatenate((facs_bin, au_bin), 1)
    print out_mat.shape
    assert out_mat.shape[0] == len(emo_lines)
    assert out_mat.shape[1] == np.max(idx_map) + 2

    if os.path.split(out_file)[1].startswith('train'):
        assert np.all(np.sum(au_bin, 0) > 0)

    out_lines = []
    for idx_emo_line, emo_line in enumerate(emo_lines):
        facs_arr_str = [str(int(val)) for val in list(out_mat[idx_emo_line])]
        out_line = emo_line + ' ' + ' '.join(facs_arr_str)
        # print out_line
        # raw_input()
        out_lines.append(out_line)

    print out_file
    util.writeFile(out_file, out_lines)
Code example #8
def script_save_110_im_color():
    dir_meta = '../data/disfa'
    out_dir_train_test = os.path.join(
        dir_meta, 'train_test_8_au_all_method_256_color_align')

    im_size = [110, 110]
    out_dir_im = os.path.join(dir_meta, 'preprocess_im_110_color_align')

    num_folds = 3
    im_files_all = []
    for fold_curr in range(num_folds):
        for file_pre in ['train', 'test']:
            file_curr = os.path.join(out_dir_train_test,
                                     file_pre + '_' + str(fold_curr) + '.txt')
            print file_curr, len(util.readLinesFromFile(file_curr))

            im_files = [
                line_curr.split(' ')[0]
                for line_curr in util.readLinesFromFile(file_curr)
            ]
            im_files_all.extend(im_files)

    print len(im_files_all), len(list(set(im_files_all)))
    im_files_all = list(set(im_files_all))
    str_replace_in_files = [
        os.path.join(dir_meta, 'preprocess_im_110_color_align'),
        os.path.join(dir_meta, 'preprocess_im_256_color_align')
    ]
    im_files_all = [
        file_curr.replace(str_replace_in_files[0], str_replace_in_files[1])
        for file_curr in im_files_all
    ]

    str_replace_out_files = [str_replace_in_files[1], out_dir_im]
    args = []
    for idx_file_curr, im_file_curr in enumerate(im_files_all):
        out_file_curr = im_file_curr.replace(str_replace_out_files[0],
                                             str_replace_out_files[1])
        out_dir_curr = os.path.split(out_file_curr)[0]
        util.makedirs(out_dir_curr)
        # if os.path.exists(out_file_curr):
        #     continue
        args.append((im_file_curr, out_file_curr, im_size, idx_file_curr))

    # print len(im_files_all)
    # print len(args)
    # for arg in args:
    #     print 'ARG',arg

    #     save_resize_im(arg)
    #     raw_input()
    #     break

    pool = multiprocessing.Pool(multiprocessing.cpu_count())
    pool.map(save_resize_im, args)
Code example #9
def save_dots():
    dir_train_test_files = '../data/ucf101/train_test_files'
    train_file = os.path.join(dir_train_test_files, 'train.txt')
    test_file = os.path.join(dir_train_test_files, 'test.txt')

    lines = util.readLinesFromFile(train_file) + util.readLinesFromFile(
        test_file)
    # lines = lines[:5]
    num_files = len(lines)
    print len(lines)
    print lines[0]

    out_dir = '../scratch/i3d_dists'
    util.mkdir(out_dir)

    npy_files = [line_curr.split(' ')[0] for line_curr in lines]
    vid_lens = []
    npy_dict = {}
    for first_vid_idx in range(num_files - 1):
        t = time.time()
        print first_vid_idx

        arr_curr = []
        first_vid = npy_files[first_vid_idx]
        load_video(first_vid, npy_dict, vid_lens)

        # print vid_lens

        first_vid_data = npy_dict[first_vid]

        for second_vid_idx in range(first_vid_idx + 1, num_files):

            second_vid = npy_files[second_vid_idx]
            load_video(second_vid, npy_dict, vid_lens)
            second_vid_data = npy_dict[second_vid]

            dists = np.matmul(first_vid_data, second_vid_data.T)

            arr_curr.append(dists)

        arr_curr = np.concatenate(arr_curr, axis=1)

        out_file = os.path.join(out_dir, str(first_vid_idx) + '.npy')
        np.save(out_file, arr_curr)

        print time.time() - t
        # raw_input()  # debugging pause; blocks every iteration if left in
        # print arr_curr.shape

    vid_lens = np.array(vid_lens)

    np.save(os.path.join(out_dir, 'vid_lens.npy'), vid_lens)
Code example #10
def get_cooc_per_class_per_vid():
    dir_train_test_files = '../data/ucf101/train_test_files'
    dir_gt_vecs = '../data/ucf101/gt_vecs/just_primary_corrected'
    train_file = os.path.join(dir_train_test_files, 'train.txt')
    test_file = os.path.join(dir_train_test_files, 'test.txt')

    out_dir = '../scratch/i3d_dists_just_train'
    lines_train = util.readLinesFromFile(train_file)
    num_train = len(lines_train)
    lines = lines_train + util.readLinesFromFile(test_file)

    out_dir_cooc = '../scratch/i3d_dists_just_train/arr_coocs_per_class'

    vid_idx_arr_file = os.path.join(out_dir, 'vid_idx_arr.npy')
    vid_idx_arr = np.load(vid_idx_arr_file)

    vid_idx_arr = np.unique(vid_idx_arr)
    post_load = '_sort_idx_opv.npz'

    class_bin = [[int(val) for val in line_curr.split(' ')[1:]]
                 for line_curr in lines]
    class_bin = np.array(class_bin)
    vid_names = [
        os.path.split(line_curr.split(' ')[0])[1] for line_curr in lines
    ]
    print len(vid_names)
    print class_bin.shape

    num_files = len(vid_names)

    args = []

    for idx_vid_len in range(num_files):

        vid_name = vid_names[idx_vid_len]
        sort_idx_file = os.path.join(out_dir, str(idx_vid_len) + post_load)
        vid_name = vid_name[:vid_name.rindex('.')]
        out_file_info = (out_dir_cooc, vid_name + '.npz')
        # if idx_vid_len<num_train:
        #     is_train = True
        # else:
        #     is_train = False
        arg_curr = (idx_vid_len, class_bin, sort_idx_file, out_file_info,
                    num_train)
        args.append(arg_curr)

    # for arg in args:
    #     get_cooc_per_class_per_vid_mp(arg)

    print len(args)
    pool = multiprocessing.Pool()
    pool.map(get_cooc_per_class_per_vid_mp, args)
    pool.close()
    pool.join()
Code example #11
def save_sort_idx_just_train_opv():
    dir_train_test_files = '../data/ucf101/train_test_files'
    train_file = os.path.join(dir_train_test_files, 'train.txt')
    test_file = os.path.join(dir_train_test_files, 'test.txt')

    train_lines = util.readLinesFromFile(train_file)
    lines = train_lines + util.readLinesFromFile(test_file)
    num_train = len(train_lines)
    num_files = len(lines)
    print 'num_files, num_train', num_files, num_train

    out_dir = '../scratch/i3d_dists_just_train'
    out_dir_in = '../scratch/i3d_dists'
    util.mkdir(out_dir)

    npy_files = [line_curr.split(' ')[0] for line_curr in lines]

    vid_lens = np.load(os.path.join(out_dir_in, 'vid_lens.npy'))

    total_len = np.sum(vid_lens)
    total_len_train = np.sum(vid_lens[:num_train])
    vid_idx_arr_file = os.path.join(out_dir, 'vid_idx_arr.npy')
    vid_idx_arr = np.load(vid_idx_arr_file)
    print vid_idx_arr.shape

    arr_dots = []
    for first_vid_idx in range(num_train):
        print first_vid_idx
        file_curr = os.path.join(out_dir_in, str(first_vid_idx) + '.npy')
        arr_curr = np.load(file_curr)
        # print arr_curr.shape
        # raw_input()
        # e_curr = np.sum(vid_lens[:first_vid_idx+1])
        # arr_curr = arr_curr[:,:(total_len - e_curr)]
        arr_dots.append(arr_curr)

    args = []
    for idx_vid_len in range(len(vid_lens)):
        out_file_curr = os.path.join(out_dir,
                                     str(idx_vid_len) + '_sort_idx_opv.npz')
        if os.path.exists(out_file_curr):
            continue
        arg = (arr_dots, vid_idx_arr, idx_vid_len, vid_lens, out_file_curr,
               num_train)

        args.append(arg)
    print len(args)

    for arg in args:
        save_sort_idx_opv_mp(arg)
Code example #12
def check_class_weights():
    folds = range(3)
    dir_files = '../data/emotionet/train_test_files_toy'
    for fold in folds:
        train_file = os.path.join(dir_files, 'train_' + str(fold) + '.txt')
        test_file = os.path.join(dir_files, 'test_' + str(fold) + '.txt')
        train_weights = util.get_class_weights_au(
            util.readLinesFromFile(train_file))
        test_weights = util.get_class_weights_au(
            util.readLinesFromFile(test_file))
        diff_weights = np.abs(train_weights - test_weights)
        print fold
        print np.min(diff_weights), np.max(diff_weights), np.mean(diff_weights)
Code example #13
def main():

    script_print_f1_etc()
    return
    # everything below this return is currently unreachable (toggled off)

    out_dir_train = '../experiments/vgg_capsule_7_3_with_dropout3/bp4d_256_train_test_files_256_color_align_2_reconstruct_False_True_all_aug_marginmulti_False_wdecay_0_6_step_6_0.1_0.0001_0.001_0.001_True_0'
    log_file = os.path.join(out_dir_train, 'log.txt')
    print log_file
    print util.readLinesFromFile(log_file)
    to_print = []
    for model_test in range(6):
        test_dir = os.path.join(out_dir_train,
                                'results_model_' + str(model_test))
        labels_all, predictions = collate_files([test_dir])
        auc = get_auc(predictions, labels_all)
        str_print = 'val auc: %.4f' % (auc)
        print str_print
        to_print.append(str_print)

    util.writeFile(log_file, to_print)

    # get_ideal_train_test_file()

    # return
    # script_print_f1_etc()
    return

    dir_data_meta = '../data/bp4d'
    dir_meta = '../experiments/khorrami_capsule_7_3_gray3'
    train_test_folder = 'train_test_files_110_gray_align'
    dir_exp_pre = 'bp4d_' + train_test_folder + '_'
    dir_exp_post = '_reconstruct_True_True_flipCrop_marginmulti_False_wdecay_0_20_exp_0.96_350_1e-06_0.001_0.001_0.001_lossweights_1.0_1.0'
    model_test = 2
    fold_num = 0
    mean_file = os.path.join(dir_data_meta, train_test_folder,
                             'train_' + str(fold_num) + '_mean.png')
    std_file = os.path.join(dir_data_meta, train_test_folder,
                            'train_' + str(fold_num) + '_std.png')
    au_arr = [1, 2, 4, 6, 7, 10, 12, 14, 15, 17, 23, 24]

    test_dir = os.path.join(dir_meta,
                            dir_exp_pre + str(fold_num) + dir_exp_post,
                            'results_model_' + str(model_test))
    resize = 96
    visualizing_recon_new.make_html_recon_active_thresh(test_dir,
                                                        mean_file,
                                                        std_file,
                                                        au_arr,
                                                        resize,
                                                        thresh_active=6)
Code example #14
def script_save_256_im():
    dir_meta = '../data/disfa'
    out_dir_train_test = os.path.join(
        dir_meta, 'train_test_8_au_all_method_110_gray_align')

    im_size = [256, 256]
    out_dir_im = os.path.join(dir_meta, 'preprocess_im_256_color_align')

    num_folds = 3
    im_files_all = []
    for fold_curr in range(num_folds):
        for file_pre in ['train', 'test']:
            file_curr = os.path.join(out_dir_train_test,
                                     file_pre + '_' + str(fold_curr) + '.txt')
            print file_curr, len(util.readLinesFromFile(file_curr))

            im_files = [
                line_curr.split(' ')[0]
                for line_curr in util.readLinesFromFile(file_curr)
            ]
            im_files_all.extend(im_files)

    print len(im_files_all), len(list(set(im_files_all)))
    im_files_all = list(set(im_files_all))
    str_replace_in_files = [
        os.path.join(dir_meta, 'preprocess_im_110_gray_align'),
        os.path.join(dir_meta, 'Videos_LeftCamera_frames_200')
    ]
    im_files_all = [
        file_curr.replace(str_replace_in_files[0], str_replace_in_files[1])
        for file_curr in im_files_all
    ]

    str_replace_out_files = [str_replace_in_files[1], out_dir_im]
    args = []
    for idx_file_curr, im_file_curr in enumerate(im_files_all):
        out_file_curr = im_file_curr.replace(str_replace_out_files[0],
                                             str_replace_out_files[1])
        out_dir_curr = os.path.split(out_file_curr)[0]
        util.makedirs(out_dir_curr)
        # if os.path.exists(out_file_curr):
        #     continue
        args.append((im_file_curr, out_file_curr, im_size, idx_file_curr))

    print len(im_files_all)
    print len(args)

    # for arg in args:
    #     print 'ARG', arg
    #     raw_input()

    pool = multiprocessing.Pool(multiprocessing.cpu_count())
    pool.map(save_resize_im, args)
Code example #15
def script_make_im_gray():
    dir_meta = '../data/bp4d'
    # out_dir_im = os.path.join(dir_meta, 'preprocess_im_110_color_align')
    # out_dir_files = os.path.join(dir_meta, 'train_test_files_110_color_align')
    # out_dir_files_new = os.path.join(dir_meta, 'train_test_files_110_gray_align')
    # out_dir_im_new = os.path.join(dir_meta, 'preprocess_im_110_gray_align')

    out_dir_im = os.path.join(dir_meta, 'preprocess_im_110_color_nodetect')
    out_dir_files = os.path.join(dir_meta,
                                 'train_test_files_110_color_nodetect')
    out_dir_files_new = os.path.join(dir_meta,
                                     'train_test_files_110_gray_nodetect')
    out_dir_im_new = os.path.join(dir_meta, 'preprocess_im_110_gray_nodetect')
    util.mkdir(out_dir_files_new)

    num_folds = 3
    im_size = None
    # [96,96]
    all_im = []
    for fold_curr in range(num_folds):
        train_file = os.path.join(out_dir_files,
                                  'train_' + str(fold_curr) + '.txt')
        test_file = os.path.join(out_dir_files,
                                 'test_' + str(fold_curr) + '.txt')
        all_data = util.readLinesFromFile(train_file) + util.readLinesFromFile(
            test_file)
        all_im = all_im + [line_curr.split(' ')[0] for line_curr in all_data]

    print len(all_im), len(set(all_im))
    all_im = list(set(all_im))
    args = []
    for idx_file_curr, file_curr in enumerate(all_im):
        out_file_curr = file_curr.replace(out_dir_im, out_dir_im_new)
        dir_curr = os.path.split(out_file_curr)[0]
        util.makedirs(dir_curr)
        # print out_file_curr
        # print dir_curr
        if not os.path.exists(out_file_curr):
            args.append((file_curr, out_file_curr, im_size, idx_file_curr))

    print len(args)
    # for arg in args:
    #     print arg
    #     save_color_as_gray(arg)
    #     raw_input()

    pool = multiprocessing.Pool(multiprocessing.cpu_count())
    pool.map(save_color_as_gray, args)
Code example #16
def write_non_peak_files():
    dir_meta = '../data/ck_96'
    non_peak_file = os.path.join(dir_meta, 'anno_all_non_peek_one_third.txt')
    dir_files = os.path.join(dir_meta, 'train_test_files')
    out_dir_files = os.path.join(dir_meta,
                                 'train_test_files_non_peak_one_third')
    util.mkdir(out_dir_files)

    non_peak_files = util.readLinesFromFile(non_peak_file)

    num_folds = 10

    for fold_curr in range(num_folds):
        already_done = []
        for file_pre in ['train', 'test']:
            file_curr = os.path.join(dir_files,
                                     file_pre + '_' + str(fold_curr) + '.txt')

            out_file_curr = os.path.join(
                out_dir_files, file_pre + '_' + str(fold_curr) + '.txt')

            lines = util.readLinesFromFile(file_curr)
            out_lines = []

            for line_curr in lines:
                im_curr, label_curr = line_curr.split(' ')
                if int(label_curr) == 0:
                    out_lines.append(line_curr)
                else:
                    im_dir = os.path.split(im_curr)[0]
                    im_dir = im_dir.replace('im', 'im_non_peak')
                    assert os.path.exists(im_dir)
                    if im_dir not in already_done:

                        rel_files = [
                            val for val in non_peak_files
                            if val.startswith(im_dir)
                        ]
                        for rel_file in rel_files:
                            out_lines.append(' '.join([rel_file, label_curr]))
                        already_done.append(im_dir)

            print len(lines)
            print len(out_lines)
            print out_file_curr
            util.writeFile(out_file_curr, out_lines)

    save_mean_std_im(out_dir_files)
Code example #17
def get_correct_for_au():

    model_name = 'vgg_capsule_7_33/bp4d_256_train_test_files_256_color_align_0_reconstruct_True_True_all_aug_marginmulti_False_wdecay_0_1_exp_0.96_350_1e-06_0.0001_0.001_0.001_lossweights_1.0_0.1_True'
    model_file_name = 'model_0.pt'

    results_dir = os.path.join(
        '../../eccv_18/experiments', model_name,
        'results_' + model_file_name[:model_file_name.rindex('.')])
    assert os.path.exists(results_dir)

    out_dir = os.path.join('../experiments', model_name, 'au_info')
    util.makedirs(out_dir)

    test_file = '../data/bp4d/train_test_files_256_color_align/test_0.txt'
    test_im = [
        line_curr.split(' ')[0]
        for line_curr in util.readLinesFromFile(test_file)
    ]
    test_im = np.array(test_im)

    labels, preds = ca.collate_files([results_dir])
    preds[preds <= 0.5] = 0
    preds[preds > 0.5] = 1

    for au_num in range(labels.shape[1]):
        bin_keep = np.logical_and(labels[:, au_num] == preds[:, au_num],
                                  labels[:, au_num] == 1)
        im_keep = test_im[bin_keep]
        out_file = os.path.join(out_dir, 'correct_' + str(au_num) + '.txt')
        util.writeFile(out_file, im_keep)
        print out_file
        print au_num
        print bin_keep.shape, np.sum(bin_keep), im_keep.shape
Code example #18
File: generic_dataset.py Project: menorashid/nips_18
    def __init__(self, text_file, transform=None):
        self.files = util.readLinesFromFile(text_file)
        # default to ToTensor when no transform is supplied
        if transform is None:
            self.transform = transforms.ToTensor()
        else:
            self.transform = transform
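The snippet above is only the constructor; the class name and its __len__/__getitem__ are not shown. A hypothetical completion of the same pattern, assuming each line of the text file is '<image path> <label>' as in the train/test files used throughout these examples:

import torch.utils.data
from torchvision import transforms
from PIL import Image


class TextFileDataset(torch.utils.data.Dataset):
    # hypothetical class name and completion; only __init__ is in the source
    def __init__(self, text_file, transform=None):
        self.files = util.readLinesFromFile(text_file)
        if transform is None:
            self.transform = transforms.ToTensor()
        else:
            self.transform = transform

    def __len__(self):
        return len(self.files)

    def __getitem__(self, idx):
        im_path, label = self.files[idx].split(' ')[:2]
        return self.transform(Image.open(im_path)), int(label)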
Code example #19
def save_h5py():
    data_dir_meta = '../data/horse_51'
    im_dir = data_dir_meta
    out_dir_split = os.path.join(data_dir_meta, 'train_test_split')
    util.mkdir(out_dir_split)

    split_num = 5
    im_size = 256

    for split_curr in range(split_num):
        for file_curr in ['train', 'test']:
            text_file = os.path.join(
                out_dir_split, file_curr + '_' + str(split_curr) + '.txt')
            anno = util.readLinesFromFile(text_file)
            out_file_h5py = os.path.join(
                out_dir_split, file_curr + '_' + str(split_curr) + '.h5')
            if os.path.exists(out_file_h5py):
                continue

            print out_file_h5py

            with h5py.File(out_file_h5py, "w") as f:

                data = f.create_dataset('data',
                                        (len(anno), im_size, im_size, 3))
                labels = f.create_dataset('labels', (len(anno), ))

                for idx_anno_curr, anno_curr in enumerate(anno):
                    im_file, label = anno_curr.split(' ')
                    im = scipy.misc.imread(im_file)
                    im = scipy.misc.imresize(im, (im_size, im_size))
                    data[idx_anno_curr, :, :, :] = im
                    labels[idx_anno_curr] = int(label)
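A dependency note: scipy.misc.imread and scipy.misc.imresize, used above, were removed in later SciPy releases (1.2 and 1.3 respectively). A hypothetical drop-in for that pair of calls using Pillow:

import numpy as np
from PIL import Image


def imread_resized(im_file, im_size):
    # load an image and resize it to (im_size, im_size), mirroring the
    # scipy.misc.imread / scipy.misc.imresize pair in the loop above
    im = Image.open(im_file).convert('RGB')
    return np.array(im.resize((im_size, im_size)))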
Code example #20
def script_save_mean_std_files():

    dir_meta = '../data/disfa'
    # out_dir_files_new = os.path.join(dir_meta, 'train_test_10_6_method_110_gray_align')
    # num_folds = 10
    # jump = 1

    out_dir_files_new = os.path.join(
        dir_meta, 'train_test_8_au_all_method_110_gray_align')
    num_folds = 3
    jump = 10

    for fold_curr in range(num_folds):
        train_file = os.path.join(out_dir_files_new,
                                  'train_' + str(fold_curr) + '.txt')
        lines = util.readLinesFromFile(train_file)
        ims_rel = [line_curr.split(' ')[0] for line_curr in lines]
        ims_rel = ims_rel[::jump]
        print len(ims_rel)
        out_file_mean = os.path.join(out_dir_files_new,
                                     'train_' + str(fold_curr) + '_mean.png')
        out_file_std = os.path.join(out_dir_files_new,
                                    'train_' + str(fold_curr) + '_std.png')

        print out_file_mean
        print out_file_std
        print train_file
        preprocess_bp4d.save_mean_std_im(ims_rel, out_file_mean, out_file_std)
Code example #21
def script_change_train_test():
    dir_meta = '../data/disfa'

    out_dir_files = os.path.join(dir_meta, 'train_test_10_6_method')
    out_dir_files_new = os.path.join(dir_meta,
                                     'train_test_10_6_method_110_gray_align')
    util.mkdir(out_dir_files_new)

    out_dir_im = os.path.join(dir_meta, 'Videos_LeftCamera_frames_200')
    out_dir_im_new = os.path.join(dir_meta, 'preprocess_im_110_gray_align')

    num_folds = 10

    for fold_curr in range(num_folds):
        for file_pre in ['train', 'test']:
            file_curr = os.path.join(out_dir_files,
                                     file_pre + '_' + str(fold_curr) + '.txt')
            lines = util.readLinesFromFile(file_curr)

            out_file = os.path.join(out_dir_files_new,
                                    file_pre + '_' + str(fold_curr) + '.txt')

            out_lines = []
            for line_curr in lines:
                out_line = line_curr.replace(out_dir_im, out_dir_im_new)
                im_curr = line_curr.split(' ')[0]
                assert os.path.exists(im_curr)
                out_lines.append(out_line)

            # print out_lines[0]
            print len(out_lines)
            print out_file

            # raw_input()
            util.writeFile(out_file, out_lines)
Code example #22
def script_save_resize_faces():
    dir_meta = '../data/emotionet'
    im_size = [256, 256]
    im_file_list = os.path.join(dir_meta, 'im_list.txt')

    in_dir_meta = os.path.join(dir_meta, 'im')
    out_dir_meta = os.path.join(
        dir_meta, 'preprocess_im_' + str(im_size[0]) + '_color_nodetect')
    im_list_in = util.readLinesFromFile(im_file_list)

    savegray = False
    args = []
    for idx_im_in, im_in in enumerate(im_list_in):
        out_file = im_in.replace(in_dir_meta, out_dir_meta)
        if os.path.exists(out_file):
            continue

        out_dir_curr = os.path.split(out_file)[0]
        # print out_dir_curr
        util.makedirs(out_dir_curr)
        args.append((im_in, out_file, im_size, savegray, idx_im_in))

    print len(args)

    # for arg in args:
    #     print arg
    #     save_resized_images(arg)
    #     break

    pool = multiprocessing.Pool(multiprocessing.cpu_count())
    pool.map(save_resized_images, args)
Code example #23
def looking_at_gt_pain():
    dir_meta = '../data/pain'
    gt_pain_anno_dir = os.path.join(dir_meta, 'Sequence_Labels')
    out_dir_meta = os.path.join(gt_pain_anno_dir, 'gt_avg')
    util.mkdir(out_dir_meta)
    gt_pain_anno_dirs = [
        os.path.join(gt_pain_anno_dir, dir_curr)
        for dir_curr in ['AFF', 'VAS', 'SEN']
    ]
    min_maxs = [[1, 14], [0, 10], [1, 14]]

    out_range = [0, 10]

    sequence_names = [
        dir_curr.replace(gt_pain_anno_dirs[0] + '/', '')
        for dir_curr in glob.glob(
            os.path.join(gt_pain_anno_dirs[0], '*', '*.txt'))
    ]

    for sequence_name in sequence_names:
        pain_levels = []
        for gt_pain_anno_dir, min_max in zip(gt_pain_anno_dirs, min_maxs):
            file_curr = os.path.join(gt_pain_anno_dir, sequence_name)
            pain_val = int(float(util.readLinesFromFile(file_curr)[0]))
            # rescale from each annotation's native range to out_range
            pain_val = (pain_val - min_max[0]) / float(
                min_max[1] - min_max[0]) * (out_range[1] -
                                            out_range[0]) + out_range[0]
            pain_levels.append(pain_val)
        avg_pain = np.mean(pain_levels)
        out_file_curr = os.path.join(out_dir_meta, sequence_name)
        util.makedirs(os.path.split(out_file_curr)[0])
        util.writeFile(out_file_curr, [str(avg_pain)])
Code example #24
File: dataset.py Project: menorashid/action_graphs
    def __init__(self,
                 text_file,
                 feature_limit,
                 select_front=False,
                 num_similar=0,
                 just_one=True):
        self.anno_file = text_file
        self.files = util.readLinesFromFile(text_file)
        self.feature_limit = feature_limit
        self.select_front = select_front
        self.num_similar = num_similar

        self.annos = [[int(val) for val in file_curr.split(' ')[1:]]
                      for file_curr in self.files]
        self.annos = np.array(self.annos).astype(float)

        self.num_classes = self.annos.shape[1]
        # rows whose annotation marks exactly one class
        ind_idx = np.sum(self.annos, axis=1) == 1

        self.idx_per_class = []
        for idx_curr in range(self.num_classes):
            if just_one:
                bin_class = np.logical_and(self.annos[:, idx_curr] > 0,
                                           ind_idx)
            else:
                bin_class = self.annos[:, idx_curr] > 0
            self.idx_per_class.append(np.where(bin_class)[0])
Code example #25
def get_feats_class_idx(dir_curr, dir_gt, anno_file, per_vid=0):
    annos = util.readLinesFromFile(anno_file)
    annos = [line_curr.split(' ') for line_curr in annos]
    npy_files = [anno[0] for anno in annos]
    annos = np.array([[int(val) for val in anno[1:]] for anno in annos])
    feats_all = []
    class_idx_all = []

    for idx_anno, anno_file in enumerate(npy_files):
        class_idx = np.where(annos[idx_anno, :])[0]
        vid_name = os.path.split(anno_file)[1]

        feat_file = os.path.join(dir_curr, vid_name)
        gt_file = os.path.join(dir_gt, vid_name)

        rel_feats = get_rel_features(feat_file, gt_file)
        if per_vid > 0:
            # sample per_vid random rows (random_integers includes both
            # endpoints; it is deprecated in favor of np.random.randint)
            rel_feats_idx = np.random.random_integers(0,
                                                      rel_feats.shape[0] - 1,
                                                      per_vid)
            print rel_feats_idx
            print rel_feats.shape
            rel_feats = rel_feats[rel_feats_idx, :]
            print rel_feats.shape

        class_idx = np.ones(rel_feats.shape[0]) * class_idx
        feats_all.append(rel_feats)
        class_idx_all.append(class_idx)

    feats_all = np.concatenate(feats_all, axis=0)
    class_idx_all = np.concatenate(class_idx_all, axis=0)

    return feats_all, class_idx_all
Code example #26
def make_select_mean_files(dir_files, num_folds):

    num_per_video = 200

    for fold_num in range(num_folds):
        print fold_num

        train_file = os.path.join(dir_files, 'train_' + str(fold_num) + '.txt')
        im_files = util.readLinesFromFile(train_file)
        im_files = [line_curr.split(' ')[0] for line_curr in im_files]
        print len(im_files)
        video_dirs = [os.path.split(im_file)[0] for im_file in im_files]
        video_dirs = list(set(video_dirs))
        print len(video_dirs)

        im_rel_all = []

        for video_curr in video_dirs:
            im_rel = [
                im_file for im_file in im_files
                if im_file.startswith(video_curr)
            ]
            random.shuffle(im_rel)
            im_rel_all = im_rel_all + im_rel[:num_per_video]

        print len(im_rel_all)

        out_file_mean = os.path.join(dir_files,
                                     'train_' + str(fold_num) + '_mean.png')
        out_file_std = os.path.join(dir_files,
                                    'train_' + str(fold_num) + '_std.png')
        save_mean_std_im(im_rel_all, out_file_mean, out_file_std)
Code example #27
def save_all_patches():
    out_dir = '../experiments/figures/primary_caps_viz/im_all_patches/train'
    util.makedirs(out_dir)
    _, test_file, convnet, imsize = get_caps_compiled()
    test_im = [
        scipy.misc.imread(line_curr.split(' ')[0])
        for line_curr in util.readLinesFromFile(test_file)
    ]

    for idx_test_im_curr, im_curr in enumerate(test_im):
        for x in range(6):
            for y in range(6):

                out_file_curr = os.path.join(
                    out_dir,
                    '_'.join([str(val)
                              for val in [idx_test_im_curr, x, y]]) + '.jpg')
                print out_file_curr
                rec_field, center = receptive_field.get_receptive_field(
                    convnet, imsize,
                    len(convnet) - 1, x, y)
                center = [int(round(val)) for val in center]
                range_x = [
                    max(0, center[0] - rec_field / 2),
                    min(imsize, center[0] + rec_field / 2)
                ]
                range_y = [
                    max(0, center[1] - rec_field / 2),
                    min(imsize, center[1] + rec_field / 2)
                ]
                patch = im_curr[range_y[0]:range_y[1], range_x[0]:range_x[1]]
                scipy.misc.imsave(out_file_curr, patch)
Code example #28
def visualize_max_au_per_id():
    model_name = 'vgg_capsule_7_33/bp4d_256_train_test_files_256_color_align_0_reconstruct_True_True_all_aug_marginmulti_False_wdecay_0_1_exp_0.96_350_1e-06_0.0001_0.001_0.001_lossweights_1.0_0.1_True'
    model_file_name = 'model_0.pt'

    model_file = os.path.join('../../eccv_18/experiments', model_name,
                              model_file_name)

    out_dir = os.path.join('../experiments', model_name, 'au_info')

    out_file_html = os.path.join(out_dir, 'max_idx.html')

    ims_html = []
    captions_html = []
    for au_num, au_curr in enumerate(au_map):
        out_dir_caps = os.path.join(out_dir, 'au_' + str(au_num) + '_caps')
        test_file = os.path.join(out_dir, 'correct_' + str(au_num) + '.txt')
        out_file = os.path.join(out_dir, 'au_' + str(au_num) + '_max_idx.txt')
        ims_row = util.readLinesFromFile(out_file)
        ims_row = [
            util.getRelPath(file_curr.replace(str_replace[0], str_replace[1]),
                            dir_server) for file_curr in ims_row
        ]
        captions_row = [str(au_curr)] * len(ims_row)
        ims_html.append(ims_row)
        captions_html.append(captions_row)

    visualize.writeHTML(out_file_html, ims_html, captions_html, 224, 224)
Code example #29
def write_train_test_files():
    post_pend = '_untf.txt'
    data_dir = '../data/ucf101/train_test_files'

    new_data_path = '../data/untf/npy'

    old_data_path = '../data/i3d_features/Thumos14-I3D-JOINTFeatures_val'
    train_file = os.path.join(data_dir, 'train.txt')
    # old_data_path = '../data/i3d_features/Thumos14-I3D-JOINTFeatures_test'
    # train_file = os.path.join(data_dir, 'test.txt')

    replace_str = [old_data_path, new_data_path]

    out_file = train_file[:train_file.rindex('.')] + post_pend
    lines = util.readLinesFromFile(train_file)
    new_lines = []
    for idx_line, line in enumerate(lines):
        # print line
        line = line.replace(replace_str[0], replace_str[1])
        # print line
        # raw_input()
        npy_file = line.split(' ')[0]
        if not os.path.exists(npy_file):
            print 'skipping', idx_line, npy_file
            continue

        new_lines.append(line)

    print out_file, len(new_lines), len(lines)
    util.writeFile(out_file, new_lines)
Code example #30
File: cameras.py Project: menorashid/gross_pain
def main():

    w0 = util.readLinesFromFile('figuring_h36/w0.txt')
    w0 = w0[0]
    w0 = w0.replace('[', '').replace(']', '')
    w0 = [float(val) for val in w0.split()]
    print(len(w0))

    # 4 cameras x 11 subjects x 15 parameters per pair
    # (6 values per camera-subject pair, then 9 per-camera values)
    params = np.zeros((4, 11, 15))

    for c in range(1, 5):
        for s in range(1, 12):
            # 6 values per (camera, subject) pair at the start of w0
            start = 6 * ((c - 1) * 11 + (s - 1))
            end = start + 6
            # per-camera 9-value blocks start after the 264 pairwise values
            off_s = 264 + (c - 1) * 9
            off_e = 264 + c * 9
            params[c - 1, s - 1, 0:6] = w0[start:end]
            params[c - 1, s - 1, 6:] = w0[off_s:off_e]

    print(params[:, 1, 6:7])
    input()  # pause to inspect before continuing
    rot_params = params[:, :5, :3]
    rot_params = np.reshape(rot_params,
                            (rot_params.shape[0] * rot_params.shape[1], 3))

    for idx in range(rot_params.shape[0]):
        # rotation parameters are in radians; print them in degrees
        rot_params_curr = rot_params[idx, :] * 180 / np.pi
        theta = rot_params[idx, 0]
        phi = rot_params[idx, 1]
        psi = rot_params[idx, 2]
        print(rot_params_curr)
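The loop stops short of building a rotation matrix from theta, phi, psi. A generic sketch of that step, assuming an X-Y-Z Euler convention (the actual axis order used by these camera parameters would need to be verified):

import numpy as np


def euler_to_rot(theta, phi, psi):
    # rotation about x by theta, y by phi, z by psi, composed as Rz.Ry.Rx
    rx = np.array([[1, 0, 0],
                   [0, np.cos(theta), -np.sin(theta)],
                   [0, np.sin(theta), np.cos(theta)]])
    ry = np.array([[np.cos(phi), 0, np.sin(phi)],
                   [0, 1, 0],
                   [-np.sin(phi), 0, np.cos(phi)]])
    rz = np.array([[np.cos(psi), -np.sin(psi), 0],
                   [np.sin(psi), np.cos(psi), 0],
                   [0, 0, 1]])
    return rz.dot(ry).dot(rx)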