def main():
    vid_arr = [
        'ch01_20181209092600', 'ch03_20181209092844', 'ch05_20181208230458',
        'ch06_20181209093714'
    ]

    data_dir = '../data/tester_kit/naughty_but_nice'
    out_dir = os.path.join('../scratch', 'test_first_frame')
    util.mkdir(out_dir)

    model_path = '../data/deeplab_xception_coco_voctrainval.tar.gz'
    model = DeepLabModel(model_path)

    frames = [(val * 20) + 1 for val in range(5)]
    frames = ['frame_' + '%09d' % val + '.jpg' for val in frames]

    for vid_curr in vid_arr:

        for frame_curr in frames:
            frame_curr = os.path.join(data_dir, vid_curr, frame_curr)
            out_dir_anno = os.path.join(data_dir, vid_curr + '_anno')
            util.mkdir(out_dir_anno)
            out_file = os.path.join(
                out_dir_anno,
                os.path.split(frame_curr)[1].replace('.jpg', '.png'))
            run_visualization(model, frame_curr, out_file)
            print out_file

    visualize.writeHTMLForFolder(out_dir)
Example #2
def main(video_file, to_run, fps, smooth):
    print video_file
    print 'to_run', to_run
    print 'sec', fps

    # video_file = '../data/Surveillance/ch02_20161212115300.mp4'
    out_dir = video_file[:video_file.rindex('.')] + '_result_files'
    data_dir = video_file[:video_file.rindex('.')] + '_frames'
    util.mkdir(data_dir)
    util.mkdir(out_dir)

    # fps = 5 #extract one frame every n seconds
    size_output = [416, 416]

    if to_run == 'all' or to_run == 'extract':
        print 'EXTRACTING FRAMES'
        subprocess.call('rm ' + os.path.join(data_dir, '*.jpg'), shell=True)
        extract_frames(video_file, data_dir, fps, size_output)
        visualize.writeHTMLForFolder(data_dir)
        print 'DONE EXTRACTING FRAMES'

    if to_run == 'all' or to_run == 'test':
        print 'TESTING FRAMES'
        test_frames(out_dir, data_dir)
        print 'DONE TESTING'

    if to_run == 'all' or to_run == 'graph':
        print 'PLOTTING DETECTIONS OVER TIME'
        plot_detections_over_time(data_dir, out_dir, fps, smooth)

    if to_run == 'plot':
        plot_detections(data_dir, out_dir)
def viz_hists(out_dir, class_names, counts, threshes):
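    # threshes holds the histogram bin edges per class; differences of consecutive edges give the bar widths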
    widths = threshes[1:, :] - threshes[:-1, :]
    for class_idx in range(len(class_names)):
        out_file = os.path.join(
            out_dir, class_names[class_idx].replace(' ', '_') + '.jpg')
        y_vals = counts[:, class_idx]
        x_vals = threshes[:-1, class_idx]

        w = widths[:, class_idx]
        visualize.plotBars(out_file,
                           x_vals,
                           w,
                           y_vals,
                           'r',
                           xlabel='Val',
                           ylabel='Count',
                           title=class_names[class_idx])
        print out_file

    out_file = os.path.join(out_dir, 'total.jpg')
    y_vals = np.sum(counts, axis=1)
    x_vals = threshes[:-1, 0]
    w = widths[:, 0]
    print y_vals.shape, x_vals.shape, widths
    visualize.plotBars(out_file,
                       x_vals,
                       w,
                       y_vals,
                       'r',
                       xlabel='Val',
                       ylabel='Count',
                       title='Total')
    print out_file

    visualize.writeHTMLForFolder(out_dir)
Example #4
def extract_frames():

    folder = '../data/intervals_for_debugging_128_128_2fps_manual/sir_holger/20181104162109_165940_5/1'
    visualize.writeHTMLForFolder(folder)
    return
    data_path = '../data/lps_data/surveillance_camera'
    data_selection_path = '../metadata/intervals_for_debugging.csv'
    out_dir_offsets = '../metadata/fixing_offsets_with_cam_on'
    out_file_final = os.path.join(out_dir_offsets, 'video_offsets_final.csv')
    width = 128
    height = 128
    frame_rate = 2
    out_file_str = [os.path.split(data_selection_path)[1][:-4]]
    out_file_str.extend([str(val) for val in [width, height]])
    out_file_str.extend([str(frame_rate) + 'fps'])
    out_dir_testing = os.path.join('../data', '_'.join(out_file_str))
    util.mkdir(out_dir_testing)
    mve = MultiViewFrameExtractor(data_path=data_path,
                                  width=width,
                                  height=height,
                                  frame_rate=frame_rate,
                                  output_dir=out_dir_testing,
                                  views=[1],
                                  data_selection_path=data_selection_path,
                                  num_processes=multiprocessing.cpu_count(),
                                  offset_file=out_file_final)

    # video_paths = util.readLinesFromFile(out_file)
    # print (len(video_paths))
    # mve.get_videos_containing_intervals()
    # print ('now checking')
    # for video_path in video_paths:
    #     mve._get_video_start_time(video_path)
    mve.extract_frames(replace=False)
Example #5
def rough_work():

    # input = io.imread('../data/bp4d/BP4D/BP4D-training/F001/T1/2440.jpg')
    dir_im = '../data/bp4d/preprocess_im_110_color/F001/T1'
    im_list = glob.glob(os.path.join(dir_im,'*.jpg'))
    # print im_list
    im_list = im_list[:10]

    preds = fa.get_landmarks_simple(im_list)

    out_dir_check = os.path.join('../scratch/check_kp_fan')
    util.mkdir(out_dir_check)

    for idx_im_curr, im_curr in enumerate(im_list):
        im_curr = scipy.misc.imread(im_curr)
        pts_rel = preds[idx_im_curr]
        for pt_curr in pts_rel:
            cv2.circle(im_curr, (int(pt_curr[0]), int(pt_curr[1])), 2, (255, 255, 255), -1)
        out_file_curr = os.path.join(out_dir_check, str(idx_im_curr) + '.jpg')
        scipy.misc.imsave(out_file_curr, im_curr)

    visualize.writeHTMLForFolder(out_dir_check)
Example #6
def save_all_im(config_dict,
                config_path,
                all_subjects,
                out_path_meta,
                view=None,
                bg=None):
    # (config_dict, config_path, all_subjects, out_path_meta, input_to_get, output_to_get, task):
    output_to_get = ['img_crop']
    input_to_get = ['img_crop']
    # task = 'simple_imsave'

    edit_config_retvals(config_dict, input_to_get, output_to_get)

    for test_subject_curr in all_subjects:
        print(test_subject_curr, all_subjects)
        out_dir_data = os.path.join(out_path_meta, test_subject_curr)
        config_dict['test_subjects'] = [test_subject_curr]

        tester = IgniteTestNVS(config_path, config_dict)
        ret_vals = tester.get_images(input_to_get, output_to_get, view, bg)

        for idx_batch, in_batch in enumerate(ret_vals[0]['img_crop']):
            out_batch = ret_vals[1]['img_crop'][idx_batch]
            for im_num in range(in_batch.shape[0]):
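                # save each input/output image pair, named by batch index and position within the batch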
                out_file_pre = os.path.join(out_dir_data, '%04d_%04d' % (idx_batch, im_num))
                util.makedirs(out_dir_data)

                out_file = out_file_pre + '_in.jpg'
                imageio.imsave(out_file, in_batch[im_num])
                out_file = out_file_pre + '_out.jpg'
                imageio.imsave(out_file, out_batch[im_num])

        visualize.writeHTMLForFolder(out_dir_data, height=128, width=128)
Example #7
def getting_edge_weights(file_curr, out_dir_labels, out_dir, k,
                         set_k=set_k_mul, normalize_k=normalize_k_mul):
    npy_files, anno_all = readTrainTestFile(file_curr)
    k_count = np.zeros((len(class_names),k,k))
    k_count_big = np.zeros((k,k))

    for npy_file,anno_curr in zip(npy_files,anno_all):
        label_file = os.path.join(out_dir_labels, os.path.split(npy_file)[1])
        labels = np.load(label_file)
        
        k_count_big = set_k(k_count_big,labels)

        for gt_idx in np.where(anno_curr)[0]:
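            # also accumulate counts separately for every class that is positive in this sample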
            k_count[gt_idx] = set_k(k_count[gt_idx],labels)

        
    k_count_big = normalize_k(k_count_big)
    print k_count_big.shape
    
    out_file = os.path.join(out_dir,'all_classes_mul.npy')
    np.save(out_file, k_count_big )

    out_file = os.path.join(out_dir,'all_classes_mul.jpg')
    visualize.saveMatAsImage(k_count_big, out_file)

    for class_idx in range(len(class_names)):
        k_count[class_idx] = normalize_k(k_count[class_idx])
        class_name = class_names[class_idx]
        
        out_file = os.path.join(out_dir,class_name+'.npy')
        np.save(out_file, k_count[class_idx])

        out_file = os.path.join(out_dir,class_name+'.jpg')
        visualize.saveMatAsImage(k_count[class_idx], out_file)
    visualize.writeHTMLForFolder(out_dir)
def script_run_deeplab_on_motion_frames():
    out_dir_meta = os.path.join('../scratch', 'frames_at_motion')
    util.mkdir(out_dir_meta)

    vid_names = [
        'ch01_20181209092600', 'ch03_20181209092844', 'ch05_20181208230458',
        'ch06_20181209093714'
    ]
    vid_dirs = [os.path.join(out_dir_meta, vid_name) for vid_name in vid_names]

    model_path = '../data/deeplab_xception_coco_voctrainval.tar.gz'
    model = dl.DeepLabModel(model_path)

    for vid_dir in vid_dirs:
        im_files = glob.glob(os.path.join(vid_dir, '*.jpg'))
        im_files.sort()
        out_dir = os.path.join(vid_dir, 'seg_maps')
        util.mkdir(out_dir)
        for im_file in im_files:
            out_file = os.path.join(
                out_dir,
                os.path.split(im_file)[1].replace('.jpg', '.png'))
            dl.run_visualization(model, im_file, out_file)
        visualize.writeHTMLForFolder(out_dir,
                                     ext='.png',
                                     height=256,
                                     width=448)
Example #9
def make_htmls(out_dir):
    # for class_name in class_names:
    dirs = [
        dir_curr for dir_curr in glob.glob(os.path.join(out_dir, '*'))
        if os.path.isdir(dir_curr)
    ]
    for out_dir_curr in dirs:
        # out_dir_curr = os.path.join(out_dir, class_name)
        visualize.writeHTMLForFolder(out_dir_curr)
Example #10
def under_bed():
    split_num = 0
    train_file = '../data/ck_96/train_test_files/train_' + str(
        split_num) + '.txt'
    test_file = '../data/ck_96/train_test_files/test_' + str(
        split_num) + '.txt'
    mean_file = '../data/ck_96/train_test_files/train_' + str(
        split_num) + '_mean.png'
    std_file = '../data/ck_96/train_test_files/train_' + str(
        split_num) + '_std.png'

    train_file_curr = util.readLinesFromFile(train_file)[0]
    mean = scipy.misc.imread(mean_file).astype(np.float32)
    std = scipy.misc.imread(std_file).astype(np.float32)
    std[std == 0] = 1.
    train_file_curr, label = train_file_curr.split(' ')
    label = int(label)
    image = scipy.misc.imread(train_file_curr).astype(np.float32)
    # image = (image-mean)/std
    # image = image[:,:,np.newaxis]

    out_dir = '../scratch/check_ck_aug'
    util.mkdir(out_dir)

    out_file_bef = os.path.join(out_dir, 'im.jpg')
    scipy.misc.imsave(out_file_bef, image)
    list_of_to_dos = ['pixel_augment']
    out_file_aft = os.path.join(out_dir,
                                'im_' + '_'.join(list_of_to_dos) + '.jpg')

    import torch
    data_transforms = {}
    data_transforms['train'] = transforms.Compose([
        # transforms.ToPILImage(),
        # transforms.ColorJitter(brightness=0.9, contrast=0.9, saturation=0.9, hue=0.5),
        lambda x: augment_image(x, list_of_to_dos, mean_im=mean, std_im=std),
        transforms.ToTensor(),
    ])

    train_data = dataset.CK_96_Dataset(train_file, mean_file, std_file,
                                       data_transforms['train'])
    train_dataloader = torch.utils.data.DataLoader(train_data,
                                                   batch_size=1,
                                                   shuffle=False,
                                                   num_workers=0)

    for batch in train_dataloader:
        print batch.keys()
        print torch.min(batch['image']), torch.max(batch['image'])
        print batch['label'].shape
        image = batch['image'][0].numpy()
        print image.shape
        break

    scipy.misc.imsave(out_file_aft, image[0])
    visualize.writeHTMLForFolder(out_dir)
Example #11
def main():
	# save_frames()

	meta_dir = '../scratch/matching_im'
	in_dir = os.path.join(meta_dir, 'sir_holger')
	im_format = 'si_%d_20181102111500.jpg'
	for views in [[0,1],[1,2],[2,3],[3,0]]:
		ims = [os.path.join(in_dir, im_format%view) for view in views]
		out_file = os.path.join(in_dir, 'correspondences_%d_%d.jpg'%(views[0],views[1]))


		img_left = rescale(io.imread(ims[0]),scale = 0.25).squeeze()
		img_right = rescale(io.imread(ims[1]),scale = 0.25).squeeze()
		
		# Find sparse feature correspondences between left and right image.

		descriptor_extractor = ORB()

		descriptor_extractor.detect_and_extract(img_left)
		keypoints_left = descriptor_extractor.keypoints
		descriptors_left = descriptor_extractor.descriptors

		descriptor_extractor.detect_and_extract(img_right)
		keypoints_right = descriptor_extractor.keypoints
		descriptors_right = descriptor_extractor.descriptors

		matches = match_descriptors(descriptors_left, descriptors_right,
		                            cross_check=True)

		# Estimate the epipolar geometry between the left and right image.

		model, inliers = ransac((keypoints_left[matches[:, 0]],
		                         keypoints_right[matches[:, 1]]),
		                        FundamentalMatrixTransform, min_samples=8,
		                        residual_threshold=1, max_trials=5000)

		inlier_keypoints_left = keypoints_left[matches[inliers, 0]]
		inlier_keypoints_right = keypoints_right[matches[inliers, 1]]

		print(f"Number of matches: {matches.shape[0]}")
		print(f"Number of inliers: {inliers.sum()}")
		
		plt.figure()
		
		plt.gray()

		plot_matches(plt.gca(), img_left, img_right, keypoints_left, keypoints_right,
		             matches[inliers], only_matches=True)
		plt.gca().axis("off")
		plt.gca().set_title("Inlier correspondences")

		plt.savefig(out_file)
		plt.close()
		print (out_file)

	visualize.writeHTMLForFolder(in_dir)
Example #12
def get_entropy_map():
    # out_dir = '../experiments/figures/ck_routing'
    # util.makedirs(out_dir)
    # out_file_table = os.path.join(out_dir,'ent_diff_table.txt')

    # str_file = []

    # num_emos = 8
    # emo_strs = ['Neutral','Anger', 'Contempt','Disgust', 'Fear', 'Happiness', 'Sadness', 'Surprise']

    out_dir = '../experiments/figures/bp4d_routing'
    util.makedirs(out_dir)
    out_file_table = os.path.join(out_dir, 'ent_diff_table.txt')

    str_file = []

    num_emos = 12
    aus = [1, 2, 4, 6, 7, 10, 12, 14, 15, 17, 23, 24]
    emo_strs = ['AU_' + str(num) for num in aus]

    num_routes = 2
    for route_num in range(num_routes):
        print route_num
        str_file.append(str(route_num))
        for label_curr in range(num_emos):
            label_arr = []
            for label_compare in range(num_emos):
                file_name = [label_curr, label_compare, route_num]
                ent_file = os.path.join(
                    out_dir,
                    '_'.join([str(val) for val in file_name]) + '.npy')
                ent_curr = np.load(ent_file)
                label_arr.append(ent_curr)

            # true_ent = np.mean(label_arr[label_curr],0)
            # print true_ent
            all_ents = [np.mean(label_arr[idx], 0) for idx in range(num_emos)]
            catted = np.concatenate(all_ents, 0)
            min_val = np.min(catted)
            max_val = np.max(catted)
            for idx_ent_curr, ent_curr in enumerate(all_ents):
                out_file_curr = os.path.join(
                    out_dir, '_'.join(
                        str(val) for val in
                        [label_curr, idx_ent_curr, route_num, route_num]) +
                    '.png')
                title = emo_strs[label_curr] + ' ' + emo_strs[idx_ent_curr]

                visualize.plot_colored_mats(out_file_curr,
                                            ent_curr,
                                            min_val,
                                            max_val,
                                            title=title)

    visualize.writeHTMLForFolder(out_dir, '.png')
def main():
    data_path = '../data/pain_no_pain_x2h_intervals_for_extraction_672_380_0.2fps'
    out_folder = '../data/bg_per_month_672_380_0.2fps'

    # data_path = '../data/pain_no_pain_x5h_intervals_for_extraction_672_380_0.01_fps'
    # out_folder = '../data/bg_per_month_x5h_672_380_0.01fps'

    util.mkdir(out_folder)
    interval_paths = glob.glob(os.path.join(data_path, '*', '*'))
    interval_paths = [
        dir_curr for dir_curr in interval_paths if os.path.isdir(dir_curr)
    ]

    days = [os.path.split(dir_curr)[1][:6] for dir_curr in interval_paths]
    days = list(set(days))
    print(days)

    for day in days:
        if '201903' not in day:
            continue

        path_list = []
        for view in range(4):
            path_list += glob.glob(
                os.path.join(data_path, '*', day + '*', str(view), '*.jpg'))

        print(day, len(path_list))
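        # keep only every 10th frame path to make the per-camera median below manageable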
        path_list = path_list[::10]
        per_cam_lists = [[] for i in range(NUM_CAMERAS)]

        for idx, path in enumerate(path_list):
            camera = int(get_camera_from_path(path))
            cam_idx = camera - 1
            per_cam_lists[cam_idx].append(path)

        for i in range(NUM_CAMERAS):
            print('cam', i, len(per_cam_lists[i]))
            cam_list = per_cam_lists[i]
            with tqdm(total=len(cam_list)) as pbar:
                ims = []
                for path in cam_list:
                    pbar.update(1)
                    # print (path)
                    ims.append(imread(path))
                # if len(per_cam_lists[i])==0:
                #     continue
                ar = np.asarray(ims)
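                # pixel-wise median over the sampled frames gives a rough static background for this camera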
                med = np.median(ar, axis=0)
                out_file = os.path.join(
                    out_folder, day + '_0.01fps_camera_{}.jpg'.format(i))
                imsave(out_file, med.astype('uint8'))
            print('saved', out_file)

    visualize.writeHTMLForFolder(out_folder)
Example #14
def script_checking_flows():
    in_dir_meta = '../data/ucf101/rgb_ziss/jpegs_256'
    video_name = 'v_CricketShot_g04_c01'
    out_dir_u = '../scratch/check_u_gpu'
    out_dir_v = '../scratch/check_v_gpu'
    util.mkdir(out_dir_u)
    util.mkdir(out_dir_v)

    # save_flows((in_dir_meta, out_dir_u, out_dir_v, video_name, 1))

    old_dir_u = '../data/ucf101/flow_ziss/tvl1_flow/u'
    old_dir_v = '../data/ucf101/flow_ziss/tvl1_flow/v'

    out_dir_diff_u = '../scratch/check_u_diff_gpu'
    out_dir_diff_v = '../scratch/check_v_diff_gpu'

    save_flows_gpu((in_dir_meta, out_dir_u, out_dir_v, video_name, 1, 0))

    raw_input()
    dir_pair_u = [
        os.path.join(dir_curr, video_name)
        for dir_curr in [old_dir_u, out_dir_u, out_dir_diff_u]
    ]
    dir_pair_v = [
        os.path.join(dir_curr, video_name)
        for dir_curr in [old_dir_v, out_dir_v, out_dir_diff_v]
    ]

    for old_dir, new_dir, out_dir_diff in [dir_pair_u, dir_pair_v]:
        util.makedirs(out_dir_diff)
        print old_dir, new_dir
        im_files = glob.glob(os.path.join(old_dir, '*.jpg'))
        im_files.sort()

        for im_file in im_files:
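            # compare the old flow image with the newly computed one (the new image has one extra column, dropped here)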
            flo_old = cv2.imread(im_file, cv2.IMREAD_GRAYSCALE).astype(float)
            flo_new = cv2.imread(
                os.path.join(new_dir,
                             os.path.split(im_file)[1]),
                cv2.IMREAD_GRAYSCALE)[:, :-1].astype(float)
            print flo_old.shape, flo_new.shape

            print np.min(flo_old), np.max(flo_old)
            print np.min(flo_new), np.max(flo_new)

            diff = np.abs(flo_old - flo_new)

            print np.min(diff), np.max(diff)

            cv2.imwrite(os.path.join(out_dir_diff,
                                     os.path.split(im_file)[1]), diff)

        visualize.writeHTMLForFolder(out_dir_diff)
Example #15
def save_frames():
	meta_dir = '../scratch/matching_im'
	util.mkdir(meta_dir)
	data_path = '../data/lps_data/surveillance_camera'
	mfe = MultiViewFrameExtractor(data_path = data_path)

	subject = 'sir_holger'
	time_str = '20181102111500'
	views = [0,1,2,3]
	out_dir = os.path.join(meta_dir, subject)
	util.mkdir(out_dir)

	out_files = mfe.extract_single_time( subject, time_str, views, out_dir)
	visualize.writeHTMLForFolder(out_dir)
Example #16
def main():

    dir_data = '../data/tester_kit/naughty_but_nice'
    out_dir_meta = '../scratch/polyline_test'
    util.mkdir(out_dir_meta)

    vid_names = [
        'ch01_20181209092600', 'ch03_20181209092844', 'ch05_20181208230458',
        'ch06_20181209093714'
    ]

    vid_name = vid_names[0]
    contour_len = []
    for vid_name in vid_names:
        out_dir_curr = os.path.join(out_dir_meta, vid_name)
        util.mkdir(out_dir_curr)

        dir_anno = os.path.join(dir_data, vid_name + '_anno')
        mask_files = glob.glob(os.path.join(dir_anno, '*.png'))
        mask_files.sort()

        # mask_file = mask_files[0]
        for mask_file in mask_files:
            img = cv2.imread(mask_file, cv2.IMREAD_COLOR)
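            # binarize the mask (type 0 = cv2.THRESH_BINARY) and trace outer contours (method 2 = cv2.CHAIN_APPROX_SIMPLE);
            # the two-value unpacking assumes a cv2.findContours that returns (contours, hierarchy)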
            ret, thresh = cv2.threshold(img, 127, 255, 0)
            contours, hierarchy = cv2.findContours(thresh[:, :, 0],
                                                   cv2.RETR_EXTERNAL, 2)
            img = cv2.drawContours(img, contours, -1, (0, 255, 0), 3)
            out_file_contours = os.path.join(out_dir_curr,
                                             os.path.split(mask_file)[1])
            cv2.imwrite(out_file_contours, img)
            if len(contours) > 0:
                contour_len.append(len(contours[0]))
            # cnt = contours[0]
            # print len
            # print out_file_contours
            # , len(cnt)

        visualize.writeHTMLForFolder(out_dir_curr,
                                     height=256,
                                     width=448,
                                     ext='.png')
        print np.min(contour_len), np.max(contour_len), np.mean(contour_len)

    # perimeter = cv2.arcLength(cnt,True)
    # epsilon = 0.1*cv2.arcLength(cnt,True)
    # approx = cv2.approxPolyDP(cnt,epsilon,True)

    print('hello')
Example #17
    def extract_frames(self, replace = True, subjects_to_extract = None):
        
        if subjects_to_extract is None:
            subjects_to_extract = self.subjects
        
        # To help us read the data, we save a long .csv-file
        # with labels for each frame(a frame index).
        column_headers = ['interval', 'interval_ind', 'view', 'subject', 'pain', 'frame']
        
        for i, subject in enumerate(subjects_to_extract):

            out_file_index = os.path.join(self.output_dir,subject+'_'+'frame_index.csv')
            frames = pd.read_csv(out_file_index)
            rel_intervals = frames.interval.unique()
            rel_views = frames.view.unique()
            for idx_interval,interval in enumerate(rel_intervals):
                    
                for view in rel_views:
                    rel_frames = frames.loc[(frames['interval'] == interval) & (frames['view'] == view)]
                    rel_frames = rel_frames.sort_values('frame')
                    print (len(rel_frames))
                    # print (rel_frames)
                    # rel_frames = rel_frames.reindex()
                    # print (rel_frames)
                    args = []
                    for idx in range(len(rel_frames)-1):
                        first = self.get_im_path(rel_frames.iloc[idx])
                        second = self.get_im_path(rel_frames.iloc[idx+1])
                        if (first is None) or (second is None):
                            continue

                        out_file = self.get_flow_path_from_rgb(second)
                        if not replace and os.path.exists(out_file):
                            continue
                        util.makedirs(os.path.split(out_file)[0])
                        args.append((out_file, first, second))
                            # , self.mag_max, self.min, self.max))

                    t = time.time()
                    print ('doing interval number {} out of {}, view {}, num frames {}'.format(idx_interval, len(rel_intervals), view, len(args)))
                    # args = args[:10]
                    # for arg in args:
                    #     self.get_opt_flow(arg)
                    pool = multiprocessing.Pool(self.num_processes)
                    pool.map(self.get_opt_flow,args)
                    pool.close()
                    pool.join()

                    print ('done with interval number {} out of {}, view {}, num frames {}, time taken {}'.format(idx_interval, len(rel_intervals), view, len(args),time.time()-t ))
                    # write an HTML index for the folder this interval/view's flow frames were saved to
                    if args:
                        visualize.writeHTMLForFolder(os.path.split(args[-1][0])[0], ext='.png')
Example #18
def save_graphs_to_look_at(model_file, graph_nums):
    out_dir_meta = model_file[:model_file.rindex('.')]
    out_dir_meta_meta = out_dir_meta + '_graph_etc'
    out_dir_viz = out_dir_meta_meta + '_viz'
    util.mkdir(out_dir_viz)
    for graph_num in graph_nums:
        out_dir_meta = out_dir_meta_meta + '_' + str(graph_num)
        assert os.path.exists(out_dir_meta)
        vid_files = glob.glob(os.path.join(out_dir_meta, '*test*.npz'))

        for vid_file in vid_files:

            npz_data = np.load(vid_file)
            vid_file = os.path.split(vid_file)[1]
            affinity = npz_data['affinity']

            gt_vecs = npz_data['gt_vecs']
            gt_classes = npz_data['gt_classes']
            x_all = npz_data['x_all']

            plotter = []
            legend_entries = []
            for gt_idx, gt_class in enumerate(gt_classes):
                gt_vec = gt_vecs[gt_idx]
                val_rel = x_all[0, :, gt_class]
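                # rescale the ground-truth vector to the prediction's peak value so both fit on one plot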
                gt_vec = gt_vec / np.max(gt_vec)
                gt_vec = gt_vec * np.max(val_rel)
                # (gt_idx+1)
                x_axis = range(gt_vec.size)
                plotter.append((x_axis, gt_vec))
                plotter.append((x_axis, val_rel))
                legend_entries.append(class_names[gt_class])
                legend_entries.append(class_names[gt_class] + ' pred')

            out_file = os.path.join(
                out_dir_viz, vid_file[:vid_file.rindex('.')] + '_gt.jpg')
            visualize.plotSimple(plotter,
                                 out_file=out_file,
                                 xlabel='time',
                                 ylabel='',
                                 legend_entries=legend_entries,
                                 outside=True)

            out_file = os.path.join(
                out_dir_viz, vid_file[:vid_file.rindex('.')] + '_' +
                str(graph_num) + '.jpg')
            visualize.saveMatAsImage(affinity, out_file)

            visualize.writeHTMLForFolder(out_dir_viz)
Example #19
def main():
    out_dir = '../scratch/deep_dream_on_nb'
    # if not os.path.exists(out_dir):
    mkdir(out_dir)

    normalise = transforms.Normalize(
        mean=[0.485, 0.456, 0.406],
        std=[0.229, 0.224, 0.225]
        )

    preprocess = transforms.Compose([
        transforms.Resize((224,224)),
        transforms.ToTensor(),
        normalise
        ])

    model_urls['vgg16'] = model_urls['vgg16'].replace('https://', 'http://')
    vgg = torchvision.models.vgg16(pretrained=True)

    # vgg = models.vgg16(pretrained=True)
    vgg = vgg.cuda()
    print(vgg)
    modulelist = list(vgg.features.modules())

    out_dir = '../scratch/rand'
    # visualize.writeHTMLForFolder(out_dir)


    # in_file = os.path.join(out_dir,'sky_eye.jpg')
    in_file = os.path.join(out_dir,'rand.jpg')

    # rand_noise_im = np.uint8(np.random.randint(256,size = (224,224,3)))
    # scipy.misc.imsave(in_file, rand_noise_im)

    # sky-dd.jpeg'
    sky = load_image(in_file)
    # print sky.shape
    num_iter = 10
    lr = 0.05
    cat = 281

    # deep_dream_vgg(image, layer, iterations, lr, octave_scale, num_octaves,out_file)
    for num_iter in range(25,50,5):
        out_file = os.path.join(out_dir,'_'.join([str(val) for val in [num_iter, lr, cat]])+'.jpg')
        deep_dream_vgg(sky, 28, num_iter, 0.3, 2, 20, out_file)
        print 'done with ',out_file

    visualize.writeHTMLForFolder(out_dir)
Example #20
def save_neg_cooc_graphs(out_dir):
    all_file = os.path.join(out_dir,'all_classes_mul.npy')
    all_cooc = np.load(all_file)
    for class_name in class_names:
        in_file = os.path.join(out_dir,class_name+'.npy')
        curr_cooc = np.load(in_file)
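        # subtract the all-class co-occurrence from the class-specific one and put ones back on the diagonal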
        out_cooc = curr_cooc - all_cooc
        out_cooc = out_cooc + np.eye(out_cooc.shape[0])
        out_file = os.path.join(out_dir,class_name+'neg.jpg')
        visualize.saveMatAsImage(out_cooc, out_file)
        # print 'curr_cooc',curr_cooc.shape,np.min(curr_cooc),np.max(curr_cooc)
        # print 'out_cooc',out_cooc.shape,np.min(out_cooc),np.max(out_cooc)
        # print 'all_cooc',all_cooc.shape,np.min(all_cooc),np.max(all_cooc)
        # print out_file
        out_file = os.path.join(out_dir,class_name+'neg.npy')
        np.save(out_file, out_cooc)
    visualize.writeHTMLForFolder(out_dir)
def run_for_caps_exp():
    in_file = os.path.join('../scratch','rand_im_96.jpg')
    # save_rand_im((224,224,3),in_file)


    # return 
    print 'hello'

    model_name = 'khorrami_capsule_7_33/ck_96_4_reconstruct_True_True_all_aug_margin_False_wdecay_0_600_step_600_0.1_0.001_0.001_0.001'
    model_file_name =  'model_599.pt'

    out_dir = os.path.join('../experiments/visualizing',model_name)
    util.makedirs(out_dir)
    

    model_file = os.path.join('../../eccv_18/experiments', model_name, model_file_name)

    type_data = 'train_test_files'; n_classes = 8;
    train_pre = os.path.join('../data/ck_96',type_data)
    test_pre =  os.path.join('../data/ck_96',type_data)
    
    out_file = os.path.join(out_dir,'blur_less.jpg')

    split_num = 4
    train_file = os.path.join(train_pre,'train_'+str(split_num)+'.txt')
    test_file = os.path.join(test_pre,'test_'+str(split_num)+'.txt')
    mean_file = os.path.join(train_pre,'train_'+str(split_num)+'_mean.png')
    std_file = os.path.join(train_pre,'train_'+str(split_num)+'_std.png')

    test_im = [line.split(' ')[0] for line in util.readLinesFromFile(test_file)]
    # in_file = test_im[0]

    # bl_khorrami_ck_96/split_0_100_100_0.01_0.01/model_99.pt';
    model = torch.load(model_file)
    print model

    dreamer = Deep_Dream(mean_file,std_file)
    for control in range(1):
        out_file = os.path.join(out_dir, str(control)+'_blur_less.jpg')
        out_im = dreamer.dream_fc_caps(model, in_file, octave_n=2, control=control,
                                       learning_rate=5e-2, num_iterations=80, sigma=[0.1, 0.1])

    # print 'in_file',in_file
    #     # in_file) 
    # print out_im.shape
        scipy.misc.imsave(out_file, out_im)
    visualize.writeHTMLForFolder(out_dir)
Example #22
def script_extract_motion_frames():
    in_dir = '../data/tester_kit/naughty_but_nice'
    motion_files = glob.glob(os.path.join(in_dir, '*.txt'))
    motion_files.sort()

    # print (motion_files)
    # input()

    out_dir_meta = os.path.join('../scratch', 'frames_at_motion')
    util.mkdir(out_dir_meta)

    for motion_file in motion_files:
        print(motion_file)
        video_file = motion_file.replace('.txt', '.mp4')
        vid_name = os.path.split(motion_file)[1]
        vid_name = vid_name[:vid_name.rindex('.')]

        out_dir = os.path.join(out_dir_meta, vid_name)
        util.mkdir(out_dir)

        cam, vid_start_time = pms.get_vid_info(video_file)

        df = pms.read_motion_file(motion_file)
        motion_str = 'Motion Detection Started'
        rows = df.loc[(df['Minor Type'] == motion_str), ['Date Time']]

        motion_times = rows.iloc[:, 0].values
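        # convert absolute motion-event timestamps into offsets from the start of the video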
        vid_times = motion_times - vid_start_time
        vid_times = vid_times.astype(np.datetime64)

        commands = [
            extract_frame_at_time(video_file, out_dir, time_curr)
            for time_curr in vid_times
        ]
        commands = set(commands)
        commands_file = os.path.join(out_dir, 'commands.txt')
        # print (commands[0])
        # print (commands_file)
        util.writeFile(commands_file, commands)
        # break
        for command in commands:
            subprocess.call(command, shell=True)

        visualize.writeHTMLForFolder(out_dir, height=256, width=448)
def script_viz_k_means():
    out_dir_htmls = '../experiments/figures/primary_caps_viz_pca'.replace(
        str_replace[0], str_replace[1])
    util.mkdir(out_dir_htmls)
    out_dir_im = os.path.join(out_dir_htmls, 'im')
    util.mkdir(out_dir_im)

    caps, test_file, convnet, imsize = get_caps_compiled()
    num_clusters = 32

    # arr_vals = [(x,y,filter_num) for x in range(6) for y in range(6) for filter_num in range(32)]
    arr_vals = [(x, y, filter_num) for x in [3] for y in [5]
                for filter_num in [3]]

    test_im = [
        scipy.misc.imread(line_curr.split(' ')[0])
        for line_curr in util.readLinesFromFile(test_file)
    ]
    print len(test_im)
    print test_im[0].shape

    for x, y, filter_num in arr_vals:
        out_dir_curr = os.path.join(
            out_dir_im,
            str(x) + '_' + str(y) + '_' + str(filter_num))
        util.mkdir(out_dir_curr)
        out_file_html = os.path.join(
            out_dir_htmls,
            str(x) + '_' + str(y) + '_' + str(filter_num) + '.html')
        # if os.path.exists(out_file_html):
        #     continue
        pca(caps,
            num_clusters,
            filter_num,
            x,
            y,
            test_im,
            out_dir_curr,
            out_file_html,
            convnet,
            imsize,
            rewrite=False)
        # break
    visualize.writeHTMLForFolder(out_dir_im)
Example #24
def extract_first_frames(vid_files, out_dir):
    # extract first frame
    # print (len(vid_files))
    # counter = 0
    for vid_file in vid_files:
        out_file = os.path.join(
            out_dir,
            os.path.split(vid_file)[1].replace('.mp4', '.jpg'))
        if not os.path.exists(out_file):
            # counter+=1
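            # ffmpeg: dump just the first frame (-vframes 1) of the video as a jpg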
            command = [
                'ffmpeg', '-i', vid_file, '-y', '-vframes', '1', '-f',
                'image2', out_file
            ]
            subprocess.call(command)
    # print ('counter',counter)

    # view first frames
    visualize.writeHTMLForFolder(out_dir, height=506, width=896)
Example #25
def main():
    overfitting_do()
    # overfitting()
    return

    # ck_stuff
    meta_us = '../experiments/khorrami_capsule_7_3_bigclass3'
    us_pre = 'ck_96_train_test_files_non_peak_one_third_'
    us_post = '_reconstruct_True_True_all_aug_margin_False_wdecay_0_300_exp_0.96_350_1e-06_0.001_0.001_0.001'
    out_dir = '../experiments/figures/ck_intensity_exp'
    util.mkdir(out_dir)

    meta_them = '../experiments/khorrami_ck_96_caps_bl'
    them_pre = 'ck_'
    them_post = '_train_test_files_non_peak_one_third_khorrami_ck_96_300_exp_0.96_350_1e-06_0.001_0.001'

    folds = range(10)
    model_range = range(0, 300, 50) + [299]
    post_res_range = ['', '_easy']

    legend_entries = ['Ours Hard', 'Ours Easy', 'BL Hard', 'BL Easy']

    # for fold_curr in folds:
    # 	accus = [[] for i in range(4)]

    # 	for idx_post_res, post_res in enumerate(post_res_range):
    # 		for model_curr in model_range:
    # 			dir_res_us = os.path.join(meta_us, us_pre+str(fold_curr)+us_post,'results_model_'+str(model_curr)+post_res)
    # 			_,_, accu_us = collate_labels(dir_res_us,num_it=True)

    # 			dir_res_them = os.path.join(meta_them, them_pre+str(fold_curr)+them_post,'results_model_'+str(model_curr)+post_res)
    # 			_,_, accu_them = collate_labels(dir_res_them,num_it=False)

    # 			accus[idx_post_res].append(accu_us)
    # 			accus[idx_post_res+2].append(accu_them)

    # 	out_file_curr = os.path.join(out_dir,'fold_'+str(fold_curr)+'.png')
    # 	xAndYs = [(model_range,arr_curr) for arr_curr in accus]
    # 	print out_file_curr
    # 	visualize.plotSimple(xAndYs, out_file = out_file_curr,ylabel = 'Accuracy',xlabel='Epoch',legend_entries = legend_entries, title='Fold '+str(fold_curr),outside=True)

    visualize.writeHTMLForFolder(out_dir, '.png')
Example #26
def save_neg_exp_cooc_graphs(out_dir):
    for class_name in class_names:
        in_file = os.path.join(out_dir,class_name+'neg.npy')
        curr_cooc = np.load(in_file)
        print np.min(curr_cooc),np.max(curr_cooc)
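        # squash the signed differences with exp(x - 1), so the result is positive and at most 1 for x <= 1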
        out_cooc = np.exp(curr_cooc-1)
        print np.min(out_cooc),np.max(out_cooc)

        
        out_file = os.path.join(out_dir,class_name+'negexp.jpg')
        visualize.saveMatAsImage(out_cooc, out_file)
        # print out_file
        # print 'curr_cooc',curr_cooc.shape,np.min(curr_cooc),np.max(curr_cooc)
        # print 'out_cooc',out_cooc.shape,np.min(out_cooc),np.max(out_cooc)
        # print 'all_cooc',all_cooc.shape,np.min(all_cooc),np.max(all_cooc)
        
        out_file = os.path.join(out_dir,class_name+'negexp.npy')
        print out_file
        np.save(out_file, out_cooc)
        # raw_input()
    visualize.writeHTMLForFolder(out_dir)
Example #27
def plot_detections(data_dir, out_dir):
    frame_files = glob.glob(os.path.join(data_dir, '*.jpg'))
    frame_files.sort()
    result_files = [
        file.replace(data_dir, out_dir).replace('.jpg', '.txt')
        for file in frame_files
    ]

    colors = [(255, 0, 255), (0, 255, 0)]
    for frame_file, result_file in zip(frame_files, result_files):
        out_file = result_file[:result_file.rindex('.')] + '.jpg'
        im = cv2.imread(frame_file)
        dets = util.readLinesFromFile(result_file)
        for det in dets:
            det = det.split(' ')
            class_curr = int(det[0])
            box = [int(val) for val in det[2:]]
            color = colors[class_curr]
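            # the box values are read as [x_min, x_max, y_min, y_max], so the corners are (box[0], box[2]) and (box[1], box[3])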
            im = cv2.rectangle(im, (box[0], box[2]), (box[1], box[3]), color,
                               5)
        cv2.imwrite(out_file, im)
    visualize.writeHTMLForFolder(out_dir)
Example #28
def save_im_rot(config_dict, config_path, all_subjects, out_path_meta):
    output_to_get = ['img_crop']
    input_to_get = ['img_crop']
    edit_config_retvals(config_dict, input_to_get, output_to_get)
    config_dict['batch_size_test'] = 4

    test_subject_curr = all_subjects[0]

    out_dir_data = os.path.join(out_path_meta, test_subject_curr)
    config_dict['test_subjects'] = [test_subject_curr]

    tester = IgniteTestNVS(config_path, config_dict)
    ret_vals = tester.rotate_one_image()

    for im_num, im in enumerate(ret_vals):
        out_file_pre = os.path.join(out_dir_data, '%04d' % im_num)
        util.makedirs(out_dir_data)

        out_file = out_file_pre + '.jpg'
        imageio.imsave(out_file, im)

    visualize.writeHTMLForFolder(out_dir_data, height=128, width=128)
def test_resize_kp():
    dir_meta = '../data/emotionet'
    im_size = [256, 256]
    out_dir_im = os.path.join(
        dir_meta, 'preprocess_im_' + str(im_size[0]) + '_color_nodetect')
    out_dir_kp = out_dir_im.replace('_im_', '_kp_')

    out_dir_im_org = os.path.join(dir_meta, 'im')

    im_file_list = out_dir_im + '_list_1.txt'
    all_im = util.readLinesFromFile(im_file_list)

    out_dir_scratch = '../scratch/emotionet_kp'
    util.mkdir(out_dir_scratch)

    for idx_im_curr, im_curr in enumerate(all_im[:100]):
        im_org_file = im_curr.replace(out_dir_im, out_dir_im_org)
        kp_in_file = im_curr.replace(out_dir_im,
                                     out_dir_kp).replace('.jpg', '.npy')
        if not os.path.exists(kp_in_file):
            print 'CONTINUING', kp_in_file
            continue
        im_org = scipy.misc.imread(im_org_file)
        im = scipy.misc.imread(im_curr)

        kp = np.load(kp_in_file)
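        # keypoints were saved in the resized image's coordinates; map them back to the original image's size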

        kp_org = kp / float(im_size[0])
        kp_org[:, 0] = kp_org[:, 0] * im_org.shape[1]
        kp_org[:, 1] = kp_org[:, 1] * im_org.shape[0]

        out_file = os.path.join(out_dir_scratch, str(idx_im_curr) + '.jpg')
        save_im_kp(im, kp, out_file)

        out_file_org = os.path.join(out_dir_scratch,
                                    str(idx_im_curr) + '_org.jpg')
        save_im_kp(im_org, kp_org, out_file_org)

    visualize.writeHTMLForFolder(out_dir_scratch)
def main():

    run_for_caps_exp()
    # script_explore_lr_steps()

    return
    in_file = os.path.join('../scratch','rand_im_224.jpg')
        # 'deep_dream_new_code','rand_im_224.jpg')
    # scratch/deep_dream_new_code/rand_im_224.jpg

#     background_color = np.float32([200.0, 200.0, 200.0])
# # generate initial random image
#     input_img = np.random.normal(background_color, 8, (224, 224, 3))
#     scipy.misc.imsave(in_file,input_img)


#     return
    model_name = 'vgg_capsule_7_33/bp4d_256_train_test_files_256_color_align_0_reconstruct_True_True_all_aug_marginmulti_False_wdecay_0_1_exp_0.96_350_1e-06_0.0001_0.001_0.001_lossweights_1.0_0.1_True'
    model_file_name = 'model_0.pt'


    out_dir = os.path.join('../experiments/visualizing',model_name)
    util.makedirs(out_dir)
    

    model_file = os.path.join('../../eccv_18/experiments', model_name, model_file_name)

    type_data = 'train_test_files_256_color_align'; n_classes = 12;
    train_pre = os.path.join('../data/bp4d',type_data)
    test_pre =  os.path.join('../data/bp4d',type_data)
    

    split_num = 0
    train_file = os.path.join(train_pre,'train_'+str(split_num)+'.txt')
    test_file = os.path.join(test_pre,'test_'+str(split_num)+'.txt')
    assert os.path.exists(train_file)
    assert os.path.exists(test_file)



    mean_file = 'vgg'
    std_file = 'vgg'

    test_im = [line.split(' ')[0] for line in util.readLinesFromFile(test_file)]
    # in_file = test_im[0]

    # bl_khorrami_ck_96/split_0_100_100_0.01_0.01/model_99.pt';
    model = torch.load(model_file)
    print model

    dreamer = Deep_Dream(mean_file,std_file)

    au_list = [1,2,4,6,7,10,12,14,15,17,23,24]
    
    out_dir_im = os.path.join(out_dir,'au_color_gauss_5e-1_200')
    util.mkdir(out_dir_im)

    for control in range(n_classes):
        au = au_list[control]
        out_file = os.path.join(out_dir_im, str(au))

        out_im = dreamer.dream_fc_caps(model, in_file, octave_n=2, control=control, color=True,
                                       num_iterations=200, learning_rate=5e-1)[:, :, ::-1]
        scipy.misc.imsave(out_file+'.jpg', out_im)

    visualize.writeHTMLForFolder(out_dir_im)