Example #1
import argparse
import os

import cv2

import utils  # project-local helpers: load_video, tracker_init, track, write_to_file


def main():
    # parse command-line arguments
    ap = argparse.ArgumentParser()
    ap.add_argument('-i', '--input', type=str, required=True, help='path to input video file')
    ap.add_argument('-o', '--output', type=str, default='./', help='path to output directory')
    args = vars(ap.parse_args())
    in_vid = args['input']
    out_dir = args['output']
    video_name = os.path.splitext(os.path.basename(in_vid))[0]

    # initialization (Model, View, Controller and the globals winname,
    # trackers, tracking, files are defined elsewhere in the source file)
    model = Model(winname)
    view = View(winname)
    controller = Controller(winname, view, model)
    cap = utils.load_video(in_vid)
    frame_id = 0

    cv2.namedWindow(winname)

    # grab an initial frame for selecting the ROI
    success, orig = cap.read()
    if not success:
        return

    # create the output directory for the ROI files
    if not os.path.exists(out_dir):
        os.makedirs(out_dir)

    model.curr_frame = orig.copy()
    view.freeze_frame(controller, model)
    utils.tracker_init(model, trackers, tracking, files, out_dir, video_name, frame_id)

    # play video
    while cap.isOpened():
        success, orig = cap.read()
        if not success:
            break
        model.curr_frame = orig.copy()
        frame_id += 1

        rois = utils.track(winname, trackers, model)
        view.show(model)

        # macOS key codes: SPACE (32), ENTER (13), DELETE (8), ESC (27)
        key = cv2.waitKey(1)
        if key == 32:
            view.freeze_frame(controller, model)
            cv2.setMouseCallback(winname, controller.empty_click_event)
            utils.tracker_init(model, trackers, tracking, files, out_dir, video_name, frame_id)
        elif key == ord('q'):
            break

        # write the ROIs for this frame to the txt files
        utils.write_to_file(files, frame_id, rois)

    # clean up
    cv2.destroyAllWindows()
    for f in files:
        f.close()
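
A standard entry-point guard (not shown in the excerpt) and a hypothetical invocation, assuming the script is saved as annotate.py:

if __name__ == '__main__':
    # hypothetical usage:  python annotate.py -i clips/demo.mp4 -o rois/demo/
    # SPACE pauses on the current frame to (re)select ROIs; 'q' quits.
    main()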
Example #2
    def __getitem__(self, index):
        # load the video and convert it to float32
        vid_path = self.file_list[index]
        vid = load_video(vid_path, 'float')
        vid = vid.astype(np.float32)
        vid_transformed = self.transform(vid)

        return vid_transformed, vid_path
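
For context, a minimal sketch of the Dataset class such a __getitem__ typically lives in, and how a PyTorch DataLoader consumes it; the VideoDataset name, its constructor, and the default transform are assumptions, not the project's actual code:

import numpy as np
import torch
from torch.utils.data import DataLoader, Dataset

class VideoDataset(Dataset):  # hypothetical minimal wrapper
    def __init__(self, file_list, transform=torch.from_numpy):
        self.file_list = file_list  # paths to video files
        self.transform = transform

    def __len__(self):
        return len(self.file_list)

    def __getitem__(self, index):
        # assumes the repository's load_video is importable in scope
        vid = load_video(self.file_list[index], 'float').astype(np.float32)
        return self.transform(vid), self.file_list[index]

loader = DataLoader(VideoDataset(['a.mp4', 'b.mp4']), batch_size=1)
for vid_batch, paths in loader:
    print(paths[0], tuple(vid_batch.shape))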
Example #3

import os
from multiprocessing import Pool

import numpy as np
from tqdm import tqdm


def feature_extraction_videos(model, cores, batch_sz, video_list, output_path):
    """
      Function that extracts the intermediate CNN features
      of each video in a provided video list.

      Args:
        model: CNN network
        cores: CPU cores for the parallel video loading
        batch_sz: batch size fed to the CNN network
        video_list: list of video to extract features
        output_path: path to store video features
    """
    with open(video_list) as f:
        videos = {i: line.strip() for i, line in enumerate(f)}
    print('\nNumber of videos: ', len(videos))
    print('Storage directory: ', output_path)
    print('CPU cores: ', cores)
    print('Batch size: ', batch_sz)

    print('\nFeature Extraction Process')
    print('==========================')
    pool = Pool(cores)
    future_videos = dict()
    output_list = []
    pbar = tqdm(range(max(videos) + 1), mininterval=1.0, unit='videos')
    for video in pbar:
        if os.path.exists(videos[video]):
            video_name = os.path.splitext(os.path.basename(videos[video]))[0]
            if video not in future_videos:
                video_tensor = load_video(videos[video], model.desired_size)
            else:
                video_tensor = future_videos[video].get()
                del future_videos[video]

            # load videos in parallel
            for _ in range(cores - len(future_videos)):
                next_video = max(future_videos) + 1 \
                    if future_videos else video + 1

                if next_video in videos and \
                    next_video not in future_videos and \
                        os.path.exists(videos[next_video]):
                    future_videos[next_video] = pool.apply_async(load_video,
                                                                 args=[videos[next_video], model.desired_size])

            # extract features
            features = model.extract(video_tensor, batch_sz)

            path = os.path.join(output_path, '{}_{}'.format(video_name, model.net_name))
            output_list += ['{}\t{}'.format(video_name, path)]
            pbar.set_postfix(video=video_name)

            # save features
            np.save(path, features)
    pool.close()
    pool.join()
    np.savetxt('{}/video_feature_list.txt'.format(output_path), output_list, fmt='%s')
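
A hypothetical driver call; SomeCNNWrapper is a placeholder for any model object exposing the .desired_size, .net_name and .extract(video_tensor, batch_sz) members used above:

model = SomeCNNWrapper()  # hypothetical; assumed interface only
feature_extraction_videos(model, cores=4, batch_sz=32,
                          video_list='videos.txt', output_path='features/')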
Example #4
import os

import cv2

import utils  # project-local helpers: load_video, get_bbox, draw_box


def main():
    # out_dir, in_vid, in_txt and is_print are module-level globals
    # in the source file
    if not os.path.exists(out_dir):
        os.makedirs(out_dir)
    # open the input video
    cap = utils.load_video(in_vid)

    frame_id = 0

    # open the file containing the ROI locations
    f = open(in_txt, 'r')

    # skip the header line
    _ = f.readline()

    while cap.isOpened():
        # read a frame and the corresponding bounding box
        success, img = cap.read()
        line = f.readline()
        if not success or not line:
            break
        bbox = utils.get_bbox(line)
        utils.draw_box(img, bbox, (0, 0, 255))

        # cv2.imshow('frame', img[bbox[1]+1:bbox[1]+bbox[3], bbox[0]+1:bbox[0]+bbox[2]])
        cv2.imshow('frame', img)
        if cv2.waitKey(1) & 0xff == ord('q'):
            break

        # optionally write the cropped ROI to disk, then advance frame_id
        if is_print:
            print('{}{}.jpg'.format(out_dir, frame_id))
            cv2.imwrite(
                '{}{}.jpg'.format(out_dir, frame_id),
                img[bbox[1] + 1:bbox[1] + bbox[3],
                    bbox[0] + 1:bbox[0] + bbox[2]])
        frame_id += 1
        # print('{}, {:1.0f}, {:1.0f}, {:1.0f}, {:1.0f}'
        #       .format(frame_id, bbox[0], bbox[1], bbox[0] + bbox[2], bbox[1] + bbox[3]))
    cv2.waitKey(5)
    cap.release()
    cv2.destroyAllWindows()

    f.close()
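
The layout of the ROI text file is not shown in this excerpt; a plausible get_bbox, assuming one comma-separated 'frame_id, x, y, w, h' record per line (the field order is a guess, not the project's documented format):

def get_bbox(line):
    # hypothetical parser: assumes 'frame_id, x, y, w, h' per line
    parts = [int(float(p)) for p in line.strip().split(',')]
    return parts[1], parts[2], parts[3], parts[4]  # (x, y, w, h)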
Example #5
File: c2b.py  Project: mrsalehi/C2B
    
        return c2b_frames


if __name__ == '__main__':
    DEBUG = 1
    
    # W = torch.FloatTensor([[[1, 1, 0], [1, 1, 1]], [[0, 0, 0], [1, 0, 1]]]).cuda()
    
    if DEBUG:
        # subframes = torch.FloatTensor([[[1, 1, 1], [1, 0, 0]], [[0, 0, 0], [0, 0, 1]]]).cuda()
        sf = torch.FloatTensor([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16]])
        subframes = torch.stack([sf, -sf, sf - 100, sf - 200, sf - 300, sf - 400, sf - 500, sf - 600], dim=0).cuda()
    else:
        path = '/scratch/ondemand23/mrsalehi/original_high_fps_videos/720p_240fps_1.mov'
        subframes = load_video(path)
        print('Video loaded!')
        subframes = torch.FloatTensor(subframes).cuda()
        subframes = subframes[:S]  # keep the first S subframes (S is defined elsewhere in the file)

    height, width = subframes.shape[1], subframes.shape[2]

    # nbhds = [[(2*i, 2*j), (2*i, 2*j+1), (2*i+1, 2*j), (2*i+1, 2*j+1)] \
    # for i in range(int(height / 2)) for j in range(int(width / 2))]

#    nbhds_rows = torch.LongTensor([[el[0] for el in nbhd] for nbhd in nbhds]).cuda()
#    nbhds_cols = torch.LongTensor([[el[1] for el in nbhd] for nbhd in nbhds]).cuda()

    start = time()
    # c2b_frame_bucket0, c2b_frame_bucket1 = multiplex_v3(subframes, W)
    c2b_frame_bucket0, c2b_frame_bucket1 = multiplex_v5(subframes, (2, 2))
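
multiplex_v5 itself is not included in the excerpt; a minimal sketch of the coded-two-bucket idea it appears to implement, where a binary code tiled over 2x2 neighborhoods routes each subframe pixel to one of two buckets (the random code and the function name are assumptions):

import torch

def multiplex_two_bucket(subframes, tile=(2, 2)):
    # subframes: (S, H, W); assumes H and W are divisible by the tile size
    S, H, W = subframes.shape
    code = torch.randint(0, 2, (S, tile[0], tile[1]), device=subframes.device)
    mask = code.to(subframes.dtype).repeat(1, H // tile[0], W // tile[1])  # (S, H, W)
    bucket0 = (subframes * mask).sum(dim=0)        # coded exposure
    bucket1 = (subframes * (1 - mask)).sum(dim=0)  # complementary exposure
    return bucket0, bucket1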
Example #6
    def __getitem__(self, index):
        return load_video(self.videos[index][1]), self.videos[index][0]
import os
import sys

sys.path.append('./stimeval')
from stimeval import VideoEvaluator
from utils import load_video

# set up paths
video_path = './test_data/vid'
true_vid_path = os.path.join(video_path, 'true')
recon_vid_path = os.path.join(video_path, 'recon')

true_vid_file_names = os.listdir(true_vid_path)
recon_vid_file_names = os.listdir(recon_vid_path)

# load the videos as numpy arrays
true_vid_list = [
    load_video(os.path.join(true_vid_path, true_vid), 'float', 224, 224)[:11]
    for true_vid in true_vid_file_names
]
recon_vid_list = [
    load_video(os.path.join(recon_vid_path, recon_vid), 'float', 224, 224)
    for recon_vid in recon_vid_file_names
]

true_vid_list_min = [
    load_video(os.path.join(true_vid_path, true_vid), 'float', 16, 16)[:11]
    for true_vid in true_vid_file_names
]
recon_vid_list_min = [
    load_video(os.path.join(recon_vid_path, recon_vid), 'float', 16, 16)
    for recon_vid in recon_vid_file_names
]
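
Every example in this listing depends on a project-specific load_video; for reference, here is a minimal OpenCV-based sketch compatible with the four-argument form used in this example. The argument order (width before height) and the dtype-string convention are assumptions, not any project's actual implementation:

import cv2
import numpy as np

def load_video(path, dtype='float', width=224, height=224):
    # decode every frame, resize, and stack into a (T, H, W, C) array
    cap = cv2.VideoCapture(path)
    frames = []
    while True:
        ok, frame = cap.read()
        if not ok:
            break
        frames.append(cv2.resize(frame, (width, height)))
    cap.release()
    vid = np.stack(frames, axis=0)
    return vid.astype(np.float32) if dtype == 'float' else vid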