# Shared imports for the snippets below. Repo-local helper modules such as
# 'util', 'visualize', 'receptive_field', 'ca', 'IgniteTestNVS', 'Deep_Dream'
# and the various save_* workers are assumed to come from the surrounding
# project and are not defined here.
import os
import glob
import time
import multiprocessing
import multiprocessing.dummy

import cv2
import imageio
import numpy as np
import pandas as pd
import scipy.misc
import torch


# Example 1
def save_all_patches():
    out_dir = '../experiments/figures/primary_caps_viz/im_all_patches/train'
    util.makedirs(out_dir)
    _, test_file, convnet, imsize = get_caps_compiled()
    test_im = [
        scipy.misc.imread(line_curr.split(' ')[0])
        for line_curr in util.readLinesFromFile(test_file)
    ]

    for idx_test_im_curr, im_curr in enumerate(test_im):
        for x in range(6):
            for y in range(6):

                out_file_curr = os.path.join(
                    out_dir,
                    '_'.join([str(val)
                              for val in [idx_test_im_curr, x, y]]) + '.jpg')
                print out_file_curr
                rec_field, center = receptive_field.get_receptive_field(
                    convnet, imsize,
                    len(convnet) - 1, x, y)
                center = [int(round(val)) for val in center]
                range_x = [
                    max(0, center[0] - rec_field / 2),
                    min(imsize, center[0] + rec_field / 2)
                ]
                range_y = [
                    max(0, center[1] - rec_field / 2),
                    min(imsize, center[1] + rec_field / 2)
                ]
                patch = im_curr[range_y[0]:range_y[1], range_x[0]:range_x[1]]
                # print out_file_curr
                # raw_input()
                scipy.misc.imsave(out_file_curr, patch)
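
# receptive_field.get_receptive_field is repo-local. A minimal sketch of the
# standard computation it presumably performs, assuming 'convnet' is a list of
# (kernel, stride, pad) tuples per layer (the tuple format is an assumption):
def get_receptive_field_sketch(convnet, imsize, layer_idx, x, y):
    rf, jump, start = 1, 1, 0.5
    for kernel, stride, pad in convnet[:layer_idx + 1]:
        rf += (kernel - 1) * jump  # grow the field by this layer's kernel
        start += ((kernel - 1) / 2. - pad) * jump  # shift of first unit's center
        jump *= stride  # distance between neighboring units in input pixels
    # center of unit (x, y) of that layer, in input-image pixel coordinates
    return rf, (start + x * jump, start + y * jump)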

# Example 2
def script_save_align_im_diff_scale():

    dir_meta = '../data/bp4d'
    dir_im_org = os.path.join(dir_meta, 'BP4D','BP4D-training')
    dir_kp = os.path.join(dir_meta,'kp_256')    
    out_dir_im = os.path.join(dir_meta, 'preprocess_im_256_color_align')
    np_done = glob.glob(os.path.join(dir_kp,'*','*','*.npy'))
    
    print len(np_done)
    

    params = [192,32,56]
    avg_kp_file = os.path.join(dir_meta, 'avg_kp_256_'+'_'.join([str(val) for val in params])+'.npy')

    args = []
    for idx_kp_file, kp_in_file in enumerate(np_done):
        # kp_in_file = np_done[-1]
        im_org_file = kp_in_file.replace(dir_kp, dir_im_org).replace('.npy','.jpg')
        out_file = im_org_file.replace(dir_im_org,out_dir_im)
        out_scale = [256,256]
        if os.path.exists(out_file):
            continue
        out_dir_curr = os.path.split(out_file)[0]
        util.makedirs(out_dir_curr)
        args.append((kp_in_file,im_org_file,avg_kp_file,out_file, out_scale,idx_kp_file))

    print len(args)
    # return
    import time
    for idx_arg,arg in enumerate(args):
        # print arg
        # t= time.time()
        if idx_arg%100==0:
            print idx_arg
        save_align_im_diff_scale(arg)

# Example 3
def script_save_cropped_faces():
    dir_meta= '../data/pain'
    in_dir_im = os.path.join(dir_meta,'Images')
    bbox_dir = os.path.join(dir_meta,'im_best_bbox')
    
    size_im = [256,256]
    savegray = False
    out_dir_im = os.path.join(dir_meta,'preprocess_'+str(size_im[0])+'_color')
    util.mkdir(out_dir_im)

    im_list_in = glob.glob(os.path.join(in_dir_im,'*','*','*.png'))

    args = []
    
    for idx_file_curr,in_file_curr in enumerate(im_list_in):
        out_file_curr = in_file_curr.replace(in_dir_im,out_dir_im).replace('.png','.jpg')
        bbox_file = in_file_curr.replace(in_dir_im,bbox_dir).replace('.png','.npy')
        if os.path.exists(out_file_curr) or not os.path.exists(bbox_file):
            continue
        out_dir_curr = os.path.split(out_file_curr)[0]
        util.makedirs(out_dir_curr)
        arg_curr = (in_file_curr,out_file_curr,bbox_file,size_im,savegray,idx_file_curr)
        args.append(arg_curr)

    print len(args)
    for arg_curr in args:
        # print arg_curr
        # raw_input()
        save_cropped_face_from_bbox(arg_curr)
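
# save_cropped_face_from_bbox is defined elsewhere in the repo. A hedged
# sketch of a worker matching the argument tuple built above, assuming the
# .npy bbox file stores [x_min, y_min, x_max, y_max] (an assumption):
def save_cropped_face_from_bbox_sketch(arg):
    in_file, out_file, bbox_file, size_im, savegray, idx = arg
    im = cv2.imread(in_file)
    x_min, y_min, x_max, y_max = np.load(bbox_file).astype(int)[:4]
    crop = im[max(0, y_min):y_max, max(0, x_min):x_max]
    crop = cv2.resize(crop, (size_im[1], size_im[0]))
    if savegray:
        crop = cv2.cvtColor(crop, cv2.COLOR_BGR2GRAY)
    cv2.imwrite(out_file, crop)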

# Example 4
def save_all_im(config_dict,
                config_path,
                all_subjects,
                out_path_meta,
                view=None,
                bg=None):
    # (config_dict, config_path, all_subjects, out_path_meta, input_to_get, output_to_get, task):
    output_to_get = ['img_crop']
    input_to_get = ['img_crop']
    # task = 'simple_imsave'

    edit_config_retvals(config_dict, input_to_get, output_to_get)

    for test_subject_curr in all_subjects:
        print(test_subject_curr, all_subjects)
        out_dir_data = os.path.join(out_path_meta, test_subject_curr)
        config_dict['test_subjects'] = [test_subject_curr]

        tester = IgniteTestNVS(config_path, config_dict)
        ret_vals = tester.get_images(input_to_get, output_to_get, view, bg)

        for idx_batch, in_batch in enumerate(ret_vals[0]['img_crop']):
            out_batch = ret_vals[1]['img_crop'][idx_batch]
            for im_num in range(in_batch.shape[0]):
                out_file_pre = os.path.join(out_dir_data,
                                            '%04d_%04d') % (idx_batch, im_num)
                util.makedirs(out_dir_data)

                out_file = out_file_pre + '_in.jpg'
                imageio.imsave(out_file, in_batch[im_num])
                out_file = out_file_pre + '_out.jpg'
                imageio.imsave(out_file, out_batch[im_num])

        visualize.writeHTMLForFolder(out_dir_data, height=128, width=128)
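
# visualize.writeHTMLForFolder is a repo utility used throughout these
# snippets; a minimal sketch of the idea (an index.html that displays every
# image in a folder), not the repo's actual implementation:
def write_html_for_folder_sketch(folder, ext='.jpg', height=128, width=128):
    ims = sorted(glob.glob(os.path.join(folder, '*' + ext)))
    rows = ['<img src="%s" height="%d" width="%d">' %
            (os.path.split(im)[1], height, width) for im in ims]
    with open(os.path.join(folder, 'index.html'), 'w') as f:
        f.write('<html><body>\n' + '\n'.join(rows) + '\n</body></html>')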

# Example 5
def script_save_align_im():
    dir_meta = '../data/bp4d'
    kp_dir = os.path.join(dir_meta, 'preprocess_im_110_color_kp')
    im_dir_in = os.path.join(dir_meta, 'preprocess_im_110_color')
    im_dir_out = os.path.join(dir_meta, 'preprocess_im_110_color_align')

    # out_dir = '../scratch/check_align_bp4d'
    # util.mkdir(out_dir)

    list_of_nps = glob.glob(os.path.join(kp_dir, '*', '*', '*.npy'))
    # random.shuffle(list_of_nps)
    # np_curr = list_of_nps[0]
    avg_pts_file = os.path.join(kp_dir, 'avg_kp.npy')

    args = []
    for idx_np_curr, np_curr in enumerate(list_of_nps):
        im_curr = np_curr.replace(kp_dir, im_dir_in).replace('.npy', '.jpg')
        assert os.path.exists(im_curr)
        out_file = im_curr.replace(im_dir_in, im_dir_out)
        if os.path.exists(out_file):
            continue
        out_dir_curr = os.path.split(out_file)[0]
        util.makedirs(out_dir_curr)

        args.append((avg_pts_file, np_curr, im_curr, out_file, idx_np_curr))

    print len(args)

# Example 6
def script_save_resize_faces():
    dir_meta = '../data/bp4d'
    im_size = [110, 110]
    out_dir_meta = os.path.join(
        dir_meta, 'preprocess_im_' + str(im_size[0]) + '_color_nodetect')
    in_dir_meta = os.path.join(dir_meta, 'BP4D', 'BP4D-training')
    # in_dir_meta = os.path.join(dir_meta,'preprocess_im_'+str(256)+'_color_nodetect')

    im_list_in = glob.glob(os.path.join(in_dir_meta, '*', '*', '*.jpg'))

    savegray = False
    args = []
    for idx_im_in, im_in in enumerate(im_list_in):
        out_file = im_in.replace(in_dir_meta, out_dir_meta)
        if os.path.exists(out_file):
            continue

        out_dir_curr = os.path.split(out_file)[0]
        # print out_dir_curr
        util.makedirs(out_dir_curr)
        args.append((im_in, out_file, im_size, savegray, idx_im_in))

    print len(args)
    # args = args[:10]
    # for arg in args:
    # print arg
    # save_resized_images(arg)
    #     size = saveCroppedFace(arg)
    # raw_input()

    pool = multiprocessing.Pool(4)
    pool.map(save_resized_images, args)
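
# save_resized_images is repo-local; a plausible minimal worker matching the
# (in_file, out_file, im_size, savegray, idx) tuples built above (a sketch,
# not the repo's implementation):
def save_resized_images_sketch(arg):
    im_in, out_file, im_size, savegray, idx = arg
    im = cv2.imread(im_in)
    im = cv2.resize(im, (im_size[1], im_size[0]))
    if savegray:
        im = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
    cv2.imwrite(out_file, im)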

# Example 7
def get_correct_for_au():

    model_name = 'vgg_capsule_7_33/bp4d_256_train_test_files_256_color_align_0_reconstruct_True_True_all_aug_marginmulti_False_wdecay_0_1_exp_0.96_350_1e-06_0.0001_0.001_0.001_lossweights_1.0_0.1_True'
    model_file_name = 'model_0.pt'

    results_dir = os.path.join(
        '../../eccv_18/experiments', model_name,
        'results_' + model_file_name[:model_file_name.rindex('.')])
    assert os.path.exists(results_dir)

    out_dir = os.path.join('../experiments', model_name, 'au_info')
    util.makedirs(out_dir)

    test_file = '../data/bp4d/train_test_files_256_color_align/test_0.txt'
    test_im = [
        line_curr.split(' ')[0]
        for line_curr in util.readLinesFromFile(test_file)
    ]
    test_im = np.array(test_im)

    labels, preds = ca.collate_files([results_dir])
    preds[preds <= 0.5] = 0
    preds[preds > 0.5] = 1

    for au_num in range(labels.shape[1]):
        bin_keep = np.logical_and(labels[:, au_num] == preds[:, au_num],
                                  labels[:, au_num] == 1)
        im_keep = test_im[bin_keep]
        out_file = os.path.join(out_dir, 'correct_' + str(au_num) + '.txt')
        util.writeFile(out_file, im_keep)
        print out_file
        print au_num
        print bin_keep.shape, np.sum(bin_keep), im_keep.shape
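
# ca.collate_files is repo-local; judging from load_mats further down, it
# likely gathers saved label/prediction arrays from each results dir. A
# heavily hedged sketch (the file names are an assumption):
def collate_files_sketch(results_dirs):
    labels = np.concatenate(
        [np.load(os.path.join(d, 'labels.npy')) for d in results_dirs], 0)
    preds = np.concatenate(
        [np.load(os.path.join(d, 'preds.npy')) for d in results_dirs], 0)
    return labels, preds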

# Example 8
def script_save_resize_faces():
    dir_meta = '../data/emotionet'
    im_size = [256, 256]
    im_file_list = os.path.join(dir_meta, 'im_list.txt')

    in_dir_meta = os.path.join(dir_meta, 'im')
    out_dir_meta = os.path.join(
        dir_meta, 'preprocess_im_' + str(im_size[0]) + '_color_nodetect')
    im_list_in = util.readLinesFromFile(im_file_list)

    savegray = False
    args = []
    for idx_im_in, im_in in enumerate(im_list_in):
        out_file = im_in.replace(in_dir_meta, out_dir_meta)
        if os.path.exists(out_file):
            continue

        out_dir_curr = os.path.split(out_file)[0]
        # print out_dir_curr
        util.makedirs(out_dir_curr)
        args.append((im_in, out_file, im_size, savegray, idx_im_in))

    print len(args)

    # for arg in args:
    #     print arg
    #     save_resized_images(arg)
    # break

    pool = multiprocessing.Pool(multiprocessing.cpu_count())
    pool.map(save_resized_images, args)

# Example 9
def save_all_im(config_dict, config_path, all_subjects, out_path_meta, input_to_get, output_to_get):
    
    for test_subject_curr in all_subjects:
        print (test_subject_curr, all_subjects)
        out_dir_data = os.path.join(out_path_meta,test_subject_curr)
        config_dict['test_subjects'] = [test_subject_curr]


        tester = IgniteTestNVS(config_path, config_dict,'simple_imsave')
        ret_vals = tester.get_values(input_to_get, output_to_get)
        
        for idx_batch, batch in enumerate(ret_vals['img_path']):
            new_batch = []
            for row in batch:
                [interval_int_pre, interval_int_post, interval_ind, view, frame] = row
                interval = '%014d_%06d'%(interval_int_pre,interval_int_post)
                img_path = util.get_image_name(test_subject_curr, interval_ind, interval, view, frame, config_dict['data_dir_path'])
                assert os.path.exists(img_path)
                new_batch.append(img_path)
            new_batch = np.array(new_batch)
            ret_vals['img_path'][idx_batch] = new_batch

        for k in ret_vals.keys():
            for idx_batch, batch in enumerate(ret_vals[k]):
                out_file = os.path.join(out_dir_data,k+'_%06d.npy'%idx_batch)
                util.makedirs(os.path.split(out_file)[0])
                np.save(out_file, batch)

# Example 10
def looking_at_gt_pain():
    dir_meta = '../data/pain'
    gt_pain_anno_dir = os.path.join(dir_meta, 'Sequence_Labels')
    out_dir_meta = os.path.join(gt_pain_anno_dir,'gt_avg')
    util.mkdir(out_dir_meta)
    gt_pain_anno_dirs = [os.path.join(gt_pain_anno_dir,dir_curr) for dir_curr in ['AFF','VAS','SEN']]
    min_maxs = [[1,14],[0,10],[1,14]]
    
    out_range = [0,10]
    
    sequence_names = [dir_curr.replace(gt_pain_anno_dirs[0]+'/','') for dir_curr in glob.glob(os.path.join(gt_pain_anno_dirs[0],'*','*.txt'))]
    
    # for gt_pain_anno_dir in gt_pain_anno_dirs:
    for sequence_name in sequence_names:
        pain_levels = []
        for gt_pain_anno_dir,min_max in zip(gt_pain_anno_dirs,min_maxs):    
            # print sequence_name,gt_pain_anno_dir
            file_curr = os.path.join(gt_pain_anno_dir,sequence_name)
            # print file_curr
            pain_val = int(float(util.readLinesFromFile(file_curr)[0]))
            pain_val = (pain_val-min_max[0])/float(min_max[1]-min_max[0]) * (out_range[1]-out_range[0])+out_range[0]
            pain_levels.append(pain_val)
        # print pain_levels
        avg_pain = np.mean(pain_levels)
        out_file_curr = os.path.join(out_dir_meta,sequence_name)
        util.makedirs(os.path.split(out_file_curr)[0])
        util.writeFile(out_file_curr,[str(avg_pain)])
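
# Worked example of the rescaling above: an AFF rating of 7 on its [1, 14]
# scale maps to (7 - 1) / float(14 - 1) * (10 - 0) + 0 ~= 4.62 on the shared
# [0, 10] range, and the per-sequence label is the mean of the three
# rescaled ratings.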

# Example 11
def save_latent_view_diff(config_dict,
                          config_path,
                          all_subjects,
                          out_path_meta,
                          views=[0, 1, 2, 3]):
    # (config_dict, config_path, all_subjects, out_path_meta, input_to_get, output_to_get, task):
    output_to_get = ['latent_3d']
    input_to_get = ['img_path', 'view', 'frame']
    # task = 'simple_imsave'

    edit_config_retvals(config_dict, input_to_get, output_to_get)

    for test_subject_curr in all_subjects:
        out_dir_data = os.path.join(out_path_meta, test_subject_curr)
        config_dict['test_subjects'] = [test_subject_curr]

        tester = IgniteTestNVS(config_path, config_dict)
        ret_vals = tester.get_latent_diff(views)

        for idx_batch in range(len(ret_vals['diffs'])):
            out_file = os.path.join(out_dir_data, '%04d.npz') % (idx_batch)
            util.makedirs(out_dir_data)
            print(np.mean(ret_vals['diffs'][idx_batch]))
            inner_batch = {}
            for k in ret_vals.keys():
                inner_batch[k] = ret_vals[k][idx_batch]
            np.savez_compressed(out_file, **inner_batch)

# Example 12
def script_save_best_bbox():

    dir_meta= '../data/pain'

    dir_im = os.path.join(dir_meta,'Images')
    
    out_dir_meta = os.path.join(dir_meta,'im_best_bbox')
    util.mkdir(out_dir_meta)
    

    im_list_in = glob.glob(os.path.join(dir_im,'*','*','*.png'))
    print len(im_list_in)
    # im_list_in = im_list_in[:len(im_list_in)//2]
    # im_list_in = im_list_in[len(im_list_in)//2:]
    # print len(im_list_in)
    
    file_pairs = []
    for idx_im_in,im_in in enumerate(im_list_in):
        out_file = im_in.replace(dir_im,out_dir_meta).replace('.png','.npy')
        if os.path.exists(out_file):
            continue
        out_dir_curr = os.path.split(out_file)[0]
        util.makedirs(out_dir_curr)
        file_pairs.append((im_in,out_file))

    print len(file_pairs)

# Example 13
def script_save_kp():
    # out_file_list = os.path.join(dir_meta,'im_list.txt')
    # all_im = util.readLinesFromFile(out_file_list)

    dir_meta = '../data/emotionet'
    # im_size = [256,256]
    # out_dir_im = os.path.join(dir_meta,'preprocess_im_'+str(im_size[0])+'_color_nodetect')
    # out_dir_kp = out_dir_im.replace('_im_','_kp_')
    # str_im_rep = None
    # print out_dir_kp

    # im_file_list = out_dir_im+'_list_1.txt'
    # all_im = util.readLinesFromFile(im_file_list)

    out_dir_im = os.path.join(dir_meta, 'im')
    out_dir_kp = out_dir_im.replace('im', 'kp')
    print out_dir_kp
    str_im_rep = os.path.join(dir_meta, 'preprocess_im_256_color_nodetect')

    print out_dir_kp

    im_file_list = os.path.join(dir_meta, 'missing_256')
    all_im = util.readLinesFromFile(im_file_list)
    print len(all_im)
    if str_im_rep is not None:
        all_im = [
            im_curr.replace(str_im_rep, out_dir_im) for im_curr in all_im
        ]

    # chunk_size_im = len(all_im)//4
    # all_im_list = [all_im[x:x+chunk_size_im] for x in range(0, len(all_im), chunk_size_im)]
    # print len(all_im_list)
    # idx_to_do = 3
    # print idx_to_do
    # print len(all_im_list[idx_to_do])
    # arr_lens = [len(val) for val in all_im_list]
    # print arr_lens
    # print sum(arr_lens)
    # # raw_input()
    # # return
    # all_im = all_im_list[idx_to_do]

    args = []
    for idx_im_curr, im_curr in enumerate(all_im):
        out_file_curr = im_curr.replace(out_dir_im,
                                        out_dir_kp).replace('.jpg', '.npy')
        if os.path.exists(out_file_curr):
            print out_file_curr
            continue
        out_dir_curr = os.path.split(out_file_curr)[0]
        util.makedirs(out_dir_curr)

        args.append((im_curr, out_file_curr, idx_im_curr))
    #     # im_curr, out_file_curr, idx_arg

    print len(args)
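
# The worker mapped over these args is elided above; example #26 suggests the
# repo detects landmarks with the face_alignment package. A hedged per-image
# sketch (in practice the detector would be constructed once, not per call):
def save_kp_sketch(arg):
    im_curr, out_file_curr, idx = arg
    import face_alignment
    fa = face_alignment.FaceAlignment(face_alignment.LandmarksType._2D,
                                      flip_input=False)
    preds = fa.get_landmarks(im_curr)  # list of (68, 2) arrays, or None
    if preds is not None:
        np.save(out_file_curr, preds[0])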

# Example 14
def script_save_110_im_color():
    dir_meta = '../data/disfa'
    out_dir_train_test = os.path.join(
        dir_meta, 'train_test_8_au_all_method_256_color_align')

    im_size = [110, 110]
    out_dir_im = os.path.join(dir_meta, 'preprocess_im_110_color_align')

    num_folds = 3
    im_files_all = []
    for fold_curr in range(num_folds):
        for file_pre in ['train', 'test']:
            file_curr = os.path.join(out_dir_train_test,
                                     file_pre + '_' + str(fold_curr) + '.txt')
            print file_curr, len(util.readLinesFromFile(file_curr))

            im_files = [
                line_curr.split(' ')[0]
                for line_curr in util.readLinesFromFile(file_curr)
            ]
            im_files_all.extend(im_files)

    print len(im_files_all), len(list(set(im_files_all)))
    im_files_all = list(set(im_files_all))
    str_replace_in_files = [
        os.path.join(dir_meta, 'preprocess_im_110_color_align'),
        os.path.join(dir_meta, 'preprocess_im_256_color_align')
    ]
    im_files_all = [
        file_curr.replace(str_replace_in_files[0], str_replace_in_files[1])
        for file_curr in im_files_all
    ]

    str_replace_out_files = [str_replace_in_files[1], out_dir_im]
    args = []
    for idx_file_curr, im_file_curr in enumerate(im_files_all):
        out_file_curr = im_file_curr.replace(str_replace_out_files[0],
                                             str_replace_out_files[1])
        out_dir_curr = os.path.split(out_file_curr)[0]
        util.makedirs(out_dir_curr)
        # if os.path.exists(out_file_curr):
        #     continue
        args.append((im_file_curr, out_file_curr, im_size, idx_file_curr))

    # print len(im_files_all)
    # print len(args)
    # for arg in args:
    #     print 'ARG',arg

    #     save_resize_im(arg)
    #     raw_input()
    #     break

    pool = multiprocessing.Pool(multiprocessing.cpu_count())
    pool.map(save_resize_im, args)

# Example 15
def get_entropy_map():
    # out_dir = '../experiments/figures/ck_routing'
    # util.makedirs(out_dir)
    # out_file_table = os.path.join(out_dir,'ent_diff_table.txt')

    # str_file = []

    # num_emos = 8
    # emo_strs = ['Neutral','Anger', 'Contempt','Disgust', 'Fear', 'Happiness', 'Sadness', 'Surprise']

    out_dir = '../experiments/figures/bp4d_routing'
    util.makedirs(out_dir)
    out_file_table = os.path.join(out_dir, 'ent_diff_table.txt')

    str_file = []

    num_emos = 12
    aus = [1, 2, 4, 6, 7, 10, 12, 14, 15, 17, 23, 24]
    emo_strs = ['AU_' + str(num) for num in aus]

    num_routes = 2
    for route_num in range(num_routes):
        print route_num
        str_file.append(str(route_num))
        for label_curr in range(num_emos):
            label_arr = []
            for label_compare in range(num_emos):
                file_name = [label_curr, label_compare, route_num]
                ent_file = os.path.join(
                    out_dir,
                    '_'.join([str(val) for val in file_name]) + '.npy')
                ent_curr = np.load(ent_file)
                label_arr.append(ent_curr)

            # true_ent = np.mean(label_arr[label_curr],0)
            # print true_ent
            all_ents = [np.mean(label_arr[idx], 0) for idx in range(num_emos)]
            catted = np.concatenate(all_ents, 0)
            min_val = np.min(catted)
            max_val = np.max(catted)
            for idx_ent_curr, ent_curr in enumerate(all_ents):
                out_file_curr = os.path.join(
                    out_dir, '_'.join(
                        str(val) for val in
                        [label_curr, idx_ent_curr, route_num, route_num]) +
                    '.png')
                title = emo_strs[label_curr] + ' ' + emo_strs[idx_ent_curr]

                visualize.plot_colored_mats(out_file_curr,
                                            ent_curr,
                                            min_val,
                                            max_val,
                                            title=title)

    visualize.writeHTMLForFolder(out_dir, '.png')

# Example 16
def load_mats():
    dirs_rel = get_ck_16_dirs()

    out_dir = '../experiments/figures/ck_routing'
    util.makedirs(out_dir)

    mats_names = ['labels', 'preds', 'routes_0', 'routes_1']
    mat_arrs = [[] for name in mats_names]
    for dir_curr in dirs_rel:
        # print dir_curr
        for idx_mat_name, mat_name in enumerate(mats_names):
            arr_curr_file = os.path.join(dir_curr, mat_name + '.npy')
            arr_curr = np.load(arr_curr_file)
            mat_arrs[idx_mat_name].append(arr_curr)
    # mat_arrs = [np.concatenate(mat_arr,0) for mat_arr in mat_arrs]
    axis_combine = [0, 0, 1, 1]
    mat_arrs = [
        np.concatenate(mat_arr, axis_curr)
        for mat_arr, axis_curr in zip(mat_arrs, axis_combine)
    ]
    for mat_arr in mat_arrs:
        print mat_arr.shape

    # print mat_arrs[0][:10],mat_arrs[1][:10]
    accuracy = np.sum(mat_arrs[0] == mat_arrs[1]) / float(mat_arrs[0].size)
    print 'accuracy', accuracy

    # print mat_arrs
    routes_all = mat_arrs[2:]
    print len(routes_all)
    # raw_input()
    num_emos = 8

    for label_curr in range(num_emos):
        for label_compare in range(num_emos):
            for route_num, routes_0 in enumerate(routes_all):
                idx_keep = np.logical_and(mat_arrs[0] == label_curr,
                                          mat_arrs[0] == mat_arrs[1])
                # routes_0 = mat_arrs[3]
                routes_0 = routes_0[label_compare, idx_keep, :, :]
                routes_0 = np.sum(routes_0, 2)
                routes_0 = np.reshape(routes_0, (routes_0.shape[0], 32, 6, 6))
                entropy = get_entropy(routes_0)
                print entropy.shape
                file_name = [label_curr, label_compare, route_num]
                out_file = os.path.join(
                    out_dir,
                    '_'.join([str(val) for val in file_name]) + '.npy')
                print out_file
                # raw_input()
                np.save(out_file, entropy)

    np.save(os.path.join(out_dir, 'labels.npy'), mat_arrs[0])
    np.save(os.path.join(out_dir, 'preds.npy'), mat_arrs[1])
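
# get_entropy is repo-local. A hedged sketch of one plausible definition:
# treat the 32 capsule-type weights at each spatial location as a
# distribution and take its entropy, one (6, 6) map per example:
def get_entropy_sketch(routes):
    # routes: (num_im, 32, 6, 6), non-negative routing weights
    p = routes / np.sum(routes, 1, keepdims=True)
    return -np.sum(p * np.log(p + 1e-8), 1)  # (num_im, 6, 6)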

# Example 17
def script_save_align_im():
    dir_meta = '../data/emotionet'
    im_size = [256, 256]
    str_replace_im = os.path.join(
        dir_meta, 'preprocess_im_' + str(im_size[0]) + '_color_nodetect')
    out_dir_im_align = os.path.join(
        dir_meta, 'preprocess_im_' + str(im_size[0]) + '_color_align')
    im_file_list = str_replace_im + '_list_1.txt'

    out_dir_im = os.path.join(dir_meta, 'im')
    out_dir_kp = os.path.join(dir_meta, 'kp')
    # out_dir_kp = str_replace_im.replace('_im_','_kp_')

    all_im = util.readLinesFromFile(im_file_list)
    all_im = [
        im_curr.replace(str_replace_im, out_dir_im) for im_curr in all_im
    ]

    avg_pts_file = '../data/bp4d/avg_kp_256_192_32_56.npy'
    out_scale = im_size[:]
    # exists = []
    args = []
    for idx_im_curr, im_curr in enumerate(all_im):
        kp_file = im_curr.replace(out_dir_im,
                                  out_dir_kp).replace('.jpg', '.npy')

        out_file = im_curr.replace(out_dir_im, out_dir_im_align)

        # if not os.path.exists(kp_file):
        #     continue
        if os.path.exists(out_file) or not os.path.exists(kp_file):
            continue

        out_dir_curr = os.path.split(out_file)[0]
        util.makedirs(out_dir_curr)
        args.append(
            (im_curr, kp_file, avg_pts_file, out_file, out_scale, idx_im_curr))

    print len(args)
    print args[0]
    # args = args[:10]
    # # util.mkdir(out_dir_scratch)
    # for idx_arg, arg in enumerate(args):
    #     print arg
    #     raw_input()
    # save_align_im(arg)
    #     raw_input()
    # break
    pool = multiprocessing.dummy.Pool(multiprocessing.cpu_count())
    pool.map(save_align_im, args)
    pool.close()
    pool.join()

# Example 18
    def save_all_crop_im(self):
        data_path = self.data_path
        out_data_path = self.out_data_path

        for horse_name in self.horse_names:
            print(horse_name)
            im_files = self.get_horse_ims(horse_name)

            args = []
            im_files_used = []
            for idx_im_file, im_file in enumerate(im_files):
                det_file = os.path.join(
                    os.path.split(im_file)[0] + '_dets',
                    os.path.split(im_file)[1][:-4] + '.npz')

                out_im_file = im_file.replace(data_path, out_data_path)
                out_crop_info_file = os.path.join(
                    os.path.split(im_file)[0].replace(data_path, out_data_path)
                    + '_cropbox',
                    os.path.split(im_file)[1][:-4] + '.npz')

                if os.path.exists(out_im_file) and os.path.exists(
                        out_crop_info_file):
                    continue

                util.makedirs(os.path.split(out_im_file)[0])
                util.makedirs(os.path.split(out_crop_info_file)[0])

                arg_curr = (im_file, det_file, out_im_file, out_crop_info_file)
                args.append(arg_curr)
                im_files_used.append(im_file)

            print(len(args), len(im_files))
            pool = multiprocessing.Pool(multiprocessing.cpu_count())
            ret_vals = pool.map(self.save_crop_and_det, args)
            pool.close()
            pool.join()

            # ret_vals = []
            # for arg in args:
            #     ret_vals.append(self.save_crop_and_det(arg))
            #     print (ret_vals)
            #     break
            # return

            assert len(ret_vals) == len(im_files_used)
            print('0:', ret_vals.count(0), ', 1:', ret_vals.count(1), ', 2:',
                  ret_vals.count(2), ', total:', len(ret_vals))
            out_file_log = os.path.join(out_data_path,
                                        horse_name + '_im_crop_log.npz')
            np.savez(out_file_log,
                     ret_vals=np.array(ret_vals),
                     im_files_used=np.array(im_files_used))

# Example 19
def script_checking_flows():
    in_dir_meta = '../data/ucf101/rgb_ziss/jpegs_256'
    video_name = 'v_CricketShot_g04_c01'
    out_dir_u = '../scratch/check_u_gpu'
    out_dir_v = '../scratch/check_v_gpu'
    util.mkdir(out_dir_u)
    util.mkdir(out_dir_v)

    # save_flows((in_dir_meta, out_dir_u, out_dir_v, video_name, 1))

    old_dir_u = '../data/ucf101/flow_ziss/tvl1_flow/u'
    old_dir_v = '../data/ucf101/flow_ziss/tvl1_flow/v'

    out_dir_diff_u = '../scratch/check_u_diff_gpu'
    out_dir_diff_v = '../scratch/check_v_diff_gpu'

    save_flows_gpu((in_dir_meta, out_dir_u, out_dir_v, video_name, 1, 0))

    raw_input()
    dir_pair_u = [
        os.path.join(dir_curr, video_name)
        for dir_curr in [old_dir_u, out_dir_u, out_dir_diff_u]
    ]
    dir_pair_v = [
        os.path.join(dir_curr, video_name)
        for dir_curr in [old_dir_v, out_dir_v, out_dir_diff_v]
    ]

    for old_dir, new_dir, out_dir_diff in [dir_pair_u, dir_pair_v]:
        util.makedirs(out_dir_diff)
        print old_dir, new_dir
        im_files = glob.glob(os.path.join(old_dir, '*.jpg'))
        im_files.sort()

        for im_file in im_files:
            flo_old = cv2.imread(im_file, cv2.IMREAD_GRAYSCALE).astype(float)
            flo_new = cv2.imread(
                os.path.join(new_dir,
                             os.path.split(im_file)[1]),
                cv2.IMREAD_GRAYSCALE)[:, :-1].astype(float)
            print flo_old.shape, flo_new.shape

            print np.min(flo_old), np.max(flo_old)
            print np.min(flo_new), np.max(flo_new)

            diff = np.abs(flo_old - flo_new)

            print np.min(diff), np.max(diff)

            cv2.imwrite(os.path.join(out_dir_diff,
                                     os.path.split(im_file)[1]), diff)

        visualize.writeHTMLForFolder(out_dir_diff)

# Example 20
def script_save_flows_gpu():
    dir_meta = '../data/ucf101'
    fps = 10
    small_dim = 256
    sample = 4

    # in_dir_meta = os.path.join(dir_meta,'val_data','rgb_'+str(fps)+'_fps_'+str(small_dim))
    # out_dir_meta = os.path.join(dir_meta,'val_data','flo_'+str(fps/float(sample))+'_fps_'+str(small_dim))

    in_dir_meta = os.path.join(dir_meta, 'test_data',
                               'rgb_' + str(fps) + '_fps_' + str(small_dim))
    out_dir_meta = os.path.join(
        dir_meta, 'test_data',
        'flo_' + str(fps / float(sample)) + '_fps_' + str(small_dim))

    out_dir_u = os.path.join(out_dir_meta, 'u')
    out_dir_v = os.path.join(out_dir_meta, 'v')

    util.makedirs(out_dir_u)
    util.makedirs(out_dir_v)

    video_names = [
        os.path.split(dir_curr)[1]
        for dir_curr in glob.glob(os.path.join(in_dir_meta, '*'))
        if os.path.isdir(dir_curr)
    ]
    print len(video_names)
    video_names.sort()
    args = []
    to_del = []
    for idx_video_name, video_name in enumerate(video_names):
        # file_check = os.path.join(out_dir_u, video_name, 'frame000001.jpg')
        # if os.path.exists(file_check):
        # 	continue
        if not check_done(in_dir_meta, out_dir_u, out_dir_v, video_name,
                          sample, idx_video_name):
            args.append((in_dir_meta, out_dir_u, out_dir_v, video_name, sample,
                         idx_video_name))

    print len(args)

    # return
    args = args[100:]
    print len(args)
    # for arg in args:
    # 	print arg[-1]
    # save_flows_gpu(arg)
    # raw_input()
    # break

    pool = multiprocessing.Pool(4)
    # rets =
    pool.map(save_flows_gpu, args)
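
# check_done is repo-local; the commented-out lines above hint at what it
# checks. A hedged sketch: a video counts as done when its flow folders hold
# roughly one flow frame per sampled rgb frame (the exact criterion is an
# assumption):
def check_done_sketch(in_dir_meta, out_dir_u, out_dir_v, video_name, sample, idx):
    num_rgb = len(glob.glob(os.path.join(in_dir_meta, video_name, '*.jpg')))
    num_expected = max(0, num_rgb // sample - 1)
    num_u = len(glob.glob(os.path.join(out_dir_u, video_name, '*.jpg')))
    num_v = len(glob.glob(os.path.join(out_dir_v, video_name, '*.jpg')))
    return num_u >= num_expected and num_v >= num_expected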

# Example 21
def script_save_cropped_faces():
    dir_meta = '../data/bp4d'

    # in_dir_meta = os.path.join(dir_meta,'preprocess_im_'+str(256)+'_color_nodetect')
    # im_size = [110,110]
    # out_dir_meta = os.path.join(dir_meta,'preprocess_im_'+str(im_size)+'_color')
    # util.mkdir(out_dir_meta)

    in_dir_meta = os.path.join(dir_meta, 'BP4D', 'BP4D-training')
    im_size = [256, 256]
    out_dir_meta = os.path.join(dir_meta,
                                'preprocess_im_' + str(im_size[0]) + '_color')
    util.mkdir(out_dir_meta)

    im_list_in = glob.glob(os.path.join(in_dir_meta, '*', '*', '*.jpg'))
    print len(im_list_in)

    savegray = False
    file_pairs = []
    for idx_im_in, im_in in enumerate(im_list_in):
        out_file = im_in.replace(in_dir_meta, out_dir_meta)
        if os.path.exists(out_file):
            continue
        out_dir_curr = os.path.split(out_file)[0]
        util.makedirs(out_dir_curr)
        file_pairs.append((im_in, out_file))

    chunk_size = 500
    chunks = [
        file_pairs[x:x + chunk_size]
        for x in range(0, len(file_pairs), chunk_size)
    ]
    args = [(chunk_curr, im_size, savegray, idx_im_in)
            for idx_im_in, chunk_curr in enumerate(chunks)]

    print len(args)
    # args = args[:1000]
    # for arg in args:
    #     print arg
    #     size = saveCroppedFace_NEW_batch(arg)
    #     # saveCroppedFace(arg)
    #     raw_input()

    pool = multiprocessing.Pool(4)
    crop_sizes = pool.map(saveCroppedFace_NEW_batch, args)
    # record the output paths per chunk, aligned with the returned crop sizes
    out_im_all = [[pair[1] for pair in chunk_curr] for chunk_curr in chunks]
    np.savez(os.path.join(dir_meta, 'sizes_256.npz'),
             crop_sizes=np.array(crop_sizes),
             out_im_all=np.array(out_im_all))

# Example 22
    def extract_frames(self, replace = True, subjects_to_extract = None):
        
        if subjects_to_extract is None:
            subjects_to_extract = self.subjects
        
        # To help us read the data, we save a long .csv-file
        # with labels for each frame(a frame index).
        column_headers = ['interval', 'interval_ind', 'view', 'subject', 'pain', 'frame']
        
        for i, subject in enumerate(subjects_to_extract):

            out_file_index = os.path.join(self.output_dir,subject+'_'+'frame_index.csv')
            frames = pd.read_csv(out_file_index)
            rel_intervals = frames.interval.unique()
            rel_views = frames.view.unique()
            for idx_interval,interval in enumerate(rel_intervals):
                    
                for view in rel_views:
                    rel_frames = frames.loc[(frames['interval'] == interval) & (frames['view'] == view)]
                    rel_frames = rel_frames.sort_values('frame')
                    print (len(rel_frames))
                    # print (rel_frames)
                    # rel_frames = rel_frames.reindex()
                    # print (rel_frames)
                    args = []
                    for idx in range(len(rel_frames)-1):
                        first = self.get_im_path(rel_frames.iloc[idx])
                        second = self.get_im_path(rel_frames.iloc[idx+1])
                        if (first is None) or (second is None):
                            continue

                        out_file = self.get_flow_path_from_rgb(second)
                        if not replace and os.path.exists(out_file):
                            continue
                        util.makedirs(os.path.split(out_file)[0])
                        args.append((out_file, first, second))
                            # , self.mag_max, self.min, self.max))

                    t = time.time()
                    print ('doing interval number {} out of {}, view {}, num frames {}'.format(idx_interval, len(rel_intervals), view, len(args)))
                    # args = args[:10]
                    # for arg in args:
                    #     self.get_opt_flow(arg)
                    pool = multiprocessing.Pool(self.num_processes)
                    pool.map(self.get_opt_flow,args)
                    pool.close()
                    pool.join()

                    print ('done with interval number {} out of {}, view {}, num frames {}, time taken {}'.format(idx_interval, len(rel_intervals), view, len(args),time.time()-t ))
                    if args:
                        # 'out_dir' was undefined in the original; write the
                        # HTML index into the folder the flows were saved to
                        out_dir = os.path.split(args[-1][0])[0]
                        visualize.writeHTMLForFolder(out_dir, ext='.png')

# Example 23
def script_save_256_im():
    dir_meta = '../data/disfa'
    out_dir_train_test = os.path.join(
        dir_meta, 'train_test_8_au_all_method_110_gray_align')

    im_size = [256, 256]
    out_dir_im = os.path.join(dir_meta, 'preprocess_im_256_color_align')

    num_folds = 3
    im_files_all = []
    for fold_curr in range(num_folds):
        for file_pre in ['train', 'test']:
            file_curr = os.path.join(out_dir_train_test,
                                     file_pre + '_' + str(fold_curr) + '.txt')
            print file_curr, len(util.readLinesFromFile(file_curr))

            im_files = [
                line_curr.split(' ')[0]
                for line_curr in util.readLinesFromFile(file_curr)
            ]
            im_files_all.extend(im_files)

    print len(im_files_all), len(list(set(im_files_all)))
    im_files_all = list(set(im_files_all))
    str_replace_in_files = [
        os.path.join(dir_meta, 'preprocess_im_110_gray_align'),
        os.path.join(dir_meta, 'Videos_LeftCamera_frames_200')
    ]
    im_files_all = [
        file_curr.replace(str_replace_in_files[0], str_replace_in_files[1])
        for file_curr in im_files_all
    ]

    str_replace_out_files = [str_replace_in_files[1], out_dir_im]
    args = []
    for idx_file_curr, im_file_curr in enumerate(im_files_all):
        out_file_curr = im_file_curr.replace(str_replace_out_files[0],
                                             str_replace_out_files[1])
        out_dir_curr = os.path.split(out_file_curr)[0]
        util.makedirs(out_dir_curr)
        # if os.path.exists(out_file_curr):
        #     continue
        args.append((im_file_curr, out_file_curr, im_size, idx_file_curr))

    print len(im_files_all)
    print len(args)
    for arg in args:
        print 'ARG', arg
        raw_input()

# Example 24
def save_resize_kp():
    dir_meta = '../data/emotionet'
    im_size = [256, 256]
    out_dir_im = os.path.join(
        dir_meta, 'preprocess_im_' + str(im_size[0]) + '_color_nodetect')
    out_dir_kp = out_dir_im.replace('_im_', '_kp_')
    im_file_list = out_dir_im + '_list_1.txt'

    out_dir_im_org = os.path.join(dir_meta, 'im')
    out_dir_kp_org = os.path.join(dir_meta, 'kp')
    all_im = util.readLinesFromFile(im_file_list)
    print len(all_im)
    out_file_exists = os.path.join(dir_meta, 'exists.txt')

    # exists = []
    args = []
    for idx_im_curr, im_curr in enumerate(all_im):
        im_org_file = im_curr.replace(out_dir_im, out_dir_im_org)
        kp_in_file = im_curr.replace(out_dir_im,
                                     out_dir_kp).replace('.jpg', '.npy')
        kp_out_file = kp_in_file.replace(out_dir_kp, out_dir_kp_org)

        util.makedirs(os.path.split(kp_out_file)[0])

        if os.path.exists(kp_out_file) or not os.path.exists(kp_in_file):
            # print kp_out_file
            # exists.append(im_org_file)
            continue

        args.append(
            (im_org_file, im_size, kp_in_file, kp_out_file, idx_im_curr))

    print len(args)
    # print len(exists)
    # util.writeFile(out_file_exists,exists)
    # print args[0]
    # # out_dir_scratch = os.path.join('../scratch','emotionet_kp_rs')
    # # util.mkdir(out_dir_scratch)
    # for idx_arg, arg in enumerate(args):
    #     print idx_arg,arg
    # raw_input()
    # save_resize_kp_mp(arg)
    # raw_input()
    # out_file_curr = os.path.join('../scratch',str(idx_arg)+'.jpg')
    # save_im_kp(cv2.imread(arg[0]),np.load(arg[-2]),out_file_curr)
    # print out_file_curr
    # break
    pool = multiprocessing.Pool(multiprocessing.cpu_count())
    pool.map(save_resize_kp_mp, args)
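
# save_resize_kp_mp is repo-local. The args pair keypoints detected on the
# 256px preprocessed images with the original images, so a plausible worker
# rescales the keypoints to original-image coordinates (a sketch; the axis
# order of the saved keypoints is an assumption):
def save_resize_kp_mp_sketch(arg):
    im_org_file, im_size, kp_in_file, kp_out_file, idx = arg
    im_org = cv2.imread(im_org_file)
    kp = np.load(kp_in_file).astype(float)
    kp[:, 0] *= im_org.shape[1] / float(im_size[1])  # x scales with width
    kp[:, 1] *= im_org.shape[0] / float(im_size[0])  # y scales with height
    np.save(kp_out_file, kp)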

# Example 25
def script_make_im_gray():
    dir_meta = '../data/bp4d'
    # out_dir_im = os.path.join(dir_meta, 'preprocess_im_110_color_align')
    # out_dir_files = os.path.join(dir_meta, 'train_test_files_110_color_align')
    # out_dir_files_new = os.path.join(dir_meta, 'train_test_files_110_gray_align')
    # out_dir_im_new = os.path.join(dir_meta, 'preprocess_im_110_gray_align')

    out_dir_im = os.path.join(dir_meta, 'preprocess_im_110_color_nodetect')
    out_dir_files = os.path.join(dir_meta,
                                 'train_test_files_110_color_nodetect')
    out_dir_files_new = os.path.join(dir_meta,
                                     'train_test_files_110_gray_nodetect')
    out_dir_im_new = os.path.join(dir_meta, 'preprocess_im_110_gray_nodetect')
    util.mkdir(out_dir_files_new)

    num_folds = 3
    im_size = None
    # [96,96]
    all_im = []
    for fold_curr in range(num_folds):
        train_file = os.path.join(out_dir_files,
                                  'train_' + str(fold_curr) + '.txt')
        test_file = os.path.join(out_dir_files,
                                 'test_' + str(fold_curr) + '.txt')
        all_data = util.readLinesFromFile(train_file) + util.readLinesFromFile(
            test_file)
        all_im = all_im + [line_curr.split(' ')[0] for line_curr in all_data]

    print len(all_im), len(set(all_im))
    all_im = list(set(all_im))
    args = []
    for idx_file_curr, file_curr in enumerate(all_im):
        out_file_curr = file_curr.replace(out_dir_im, out_dir_im_new)
        dir_curr = os.path.split(out_file_curr)[0]
        util.makedirs(dir_curr)
        # print out_file_curr
        # print dir_curr
        if not os.path.exists(out_file_curr):
            args.append((file_curr, out_file_curr, im_size, idx_file_curr))

    print len(args)
    # for arg in args:
    #     print arg
    #     save_color_as_gray(arg)
    #     raw_input()

    pool = multiprocessing.Pool(multiprocessing.cpu_count())
    pool.map(save_color_as_gray, args)

# Example 26
def save_kp_for_alignment():
    dir_meta = '../data/bp4d'
    out_dir_im = os.path.join(dir_meta, 'preprocess_im_110_color')
    out_dir_files = os.path.join(dir_meta, 'train_test_files_110_color')
    out_dir_kp = os.path.join(dir_meta, 'preprocess_im_110_color_kp')
    util.mkdir(out_dir_kp)

    all_im = []
    for fold_num in range(3):
        for file_pre in ['train', 'test']:
            file_curr = os.path.join(out_dir_files,
                                     file_pre + '_' + str(fold_num) + '.txt')
            im_list_curr = [
                line.split(' ')[0]
                for line in util.readLinesFromFile(file_curr)
            ]
            all_im.extend(im_list_curr)
    all_im = list(set(all_im))
    print len(all_im)

    batch_size = 128
    batches = [
        all_im[x:x + batch_size] for x in range(0, len(all_im), batch_size)
    ]
    total = 0

    for b in batches:
        total += len(b)

    assert total == len(all_im)

    fa = face_alignment.FaceAlignment(face_alignment.LandmarksType._2D,
                                      enable_cuda=True,
                                      flip_input=False)
    for idx_im_list, im_list in enumerate(batches):
        print 'batch', idx_im_list, 'of', len(batches)
        preds = fa.get_landmarks_simple(im_list)
        # print preds.shape,len(im_list)
        for idx_im_curr, im_curr in enumerate(im_list):
            pred_curr = preds[idx_im_curr]
            # print pred_curr.shape
            out_file_pred = im_curr.replace(out_dir_im, out_dir_kp).replace(
                '.jpg', '.npy')
            # print out_file_pred,im_curr
            dir_curr = os.path.split(out_file_pred)[0]
            util.makedirs(dir_curr)
            # raw_input()
            np.save(out_file_pred, pred_curr)

# Example 27
def run_for_caps_exp():
    in_file = os.path.join('../scratch','rand_im_96.jpg')
    # save_rand_im((224,224,3),in_file)


    # return 

    model_name = 'khorrami_capsule_7_33/ck_96_4_reconstruct_True_True_all_aug_margin_False_wdecay_0_600_step_600_0.1_0.001_0.001_0.001'
    model_file_name =  'model_599.pt'

    out_dir = os.path.join('../experiments/visualizing',model_name)
    util.makedirs(out_dir)
    

    model_file = os.path.join('../../eccv_18/experiments', model_name, model_file_name)

    type_data = 'train_test_files'
    n_classes = 8
    train_pre = os.path.join('../data/ck_96',type_data)
    test_pre =  os.path.join('../data/ck_96',type_data)
    
    out_file = os.path.join(out_dir,'blur_less.jpg')

    split_num = 4
    train_file = os.path.join(train_pre,'train_'+str(split_num)+'.txt')
    test_file = os.path.join(test_pre,'test_'+str(split_num)+'.txt')
    mean_file = os.path.join(train_pre,'train_'+str(split_num)+'_mean.png')
    std_file = os.path.join(train_pre,'train_'+str(split_num)+'_std.png')

    test_im = [line.split(' ')[0] for line in util.readLinesFromFile(test_file)]
    # in_file = test_im[0]

    # bl_khorrami_ck_96/split_0_100_100_0.01_0.01/model_99.pt';
    model = torch.load(model_file)
    print model

    dreamer = Deep_Dream(mean_file,std_file)
    for control in range(1):
        out_file = os.path.join(out_dir, str(control)+'_blur_less.jpg')
        out_im = dreamer.dream_fc_caps(model, in_file, octave_n=2,
                                       control=control, learning_rate=5e-2,
                                       num_iterations=80, sigma=[0.1, 0.1])
        # print 'in_file', in_file
        # print out_im.shape
        scipy.misc.imsave(out_file, out_im)
    visualize.writeHTMLForFolder(out_dir)

# Example 28
def script_create_sess_facs_anno():
    dir_meta = '../data/pain'
    frame_dir = os.path.join(dir_meta, 'Frame_Labels','FACS')
    dirs = glob.glob(os.path.join(frame_dir,'*','*'))
    facs_to_keep = [4,6,7,9,10,43]
    
    anno_dir = os.path.join(dir_meta,'anno_au')
    util.mkdir(anno_dir)

    str_replace = [frame_dir,anno_dir]

    
    for idx_dir_curr,dir_curr in enumerate(dirs):
        out_file_curr = dir_curr.replace(str_replace[0],str_replace[1])+'.txt'
        util.makedirs(os.path.split(out_file_curr)[0])
        print out_file_curr, idx_dir_curr,'of',len(dirs)
        create_sess_facs_anno(out_file_curr, dir_curr, facs_to_keep)

# Example 29
def script_create_sess_pspi_anno():
    dir_meta = '../data/pain'
    frame_dir = os.path.join(dir_meta, 'Frame_Labels','PSPI')
    dirs = glob.glob(os.path.join(frame_dir,'*','*'))
    
    anno_dir = os.path.join(dir_meta,'anno_pspi')
    util.mkdir(anno_dir)

    str_replace = [frame_dir,anno_dir]

    num_pains = 0
    for idx_dir_curr,dir_curr in enumerate(dirs):
        out_file_curr = dir_curr.replace(str_replace[0],str_replace[1])+'.txt'
        util.makedirs(os.path.split(out_file_curr)[0])
        print out_file_curr, idx_dir_curr,'of',len(dirs)
        count_pain = create_sess_pspi_anno(out_file_curr, dir_curr)
        num_pains += count_pain > 0
    print num_pains, len(dirs)

# Example 30
def script_save_registered_faces():
    mat_dir_meta = '../data/disfa/Landmark_Points'
    frame_dir_meta = '../data/disfa/Videos_LeftCamera_frames'
    out_dir_meta = '../data/disfa/Videos_LeftCamera_frames_200'
    util.mkdir(out_dir_meta)

    avg_pts_file = '../data/disfa/avg_kp_200_75_20_5.npy'
    video_names = [
        dir_curr for dir_curr in os.listdir(mat_dir_meta)
        if os.path.isdir(os.path.join(mat_dir_meta, dir_curr))
    ]

    args = []

    for video_name in video_names:
        mat_dir = os.path.join(mat_dir_meta, video_name, 'tmp_frame_lm')
        mat_files = glob.glob(os.path.join(mat_dir, '*.mat'))
        im_files = glob.glob(
            os.path.join(frame_dir_meta, 'LeftVideo' + video_name + '_comp',
                         '*.jpg'))
        im_files.sort()
        mat_files.sort()
        if len(mat_files) > len(im_files):
            mat_files = mat_files[1:]
        assert len(mat_files) == len(im_files)

        for idx_mat_file, (mat_file,
                           im_file) in enumerate(zip(mat_files, im_files)):
            mat_num = int(mat_file[-11:-7])
            im_num = int(im_file[-8:-4])
            if im_num != mat_num:
                assert im_num - mat_num == 1

            out_file = im_file.replace(frame_dir_meta, out_dir_meta)
            if not os.path.exists(out_file):
                util.makedirs(os.path.dirname(out_file))
                args.append(
                    (avg_pts_file, mat_file, im_file, out_file, len(args)))

    print len(args)

    pool = multiprocessing.Pool(multiprocessing.cpu_count())
    pool.map(save_registered_face, args)
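
# save_registered_face is repo-local. A hedged sketch of the registration
# step: estimate a similarity transform from the frame's landmarks to the
# average landmarks and warp the image. The .mat key 'pts' and the 200px
# output size (suggested by the '_frames_200' naming) are assumptions:
def save_registered_face_sketch(arg):
    import scipy.io
    avg_pts_file, mat_file, im_file, out_file, idx = arg
    avg_pts = np.load(avg_pts_file).astype(np.float32)
    pts = scipy.io.loadmat(mat_file)['pts'].astype(np.float32)
    M, _ = cv2.estimateAffinePartial2D(pts, avg_pts)
    im = cv2.imread(im_file)
    cv2.imwrite(out_file, cv2.warpAffine(im, M, (200, 200)))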