def create_sintel_submission(model, iters=32, warm_start=False, output_path='sintel_submission'):
    """ Create submission for the Sintel leaderboard """
    model.eval()
    for dstype in ['clean', 'final']:
        test_dataset = datasets.MpiSintel(split='test', aug_params=None, dstype=dstype)
        
        flow_prev, sequence_prev = None, None
        for test_id in range(len(test_dataset)):
            image1, image2, (sequence, frame) = test_dataset[test_id]
            if sequence != sequence_prev:
                flow_prev = None
            
            padder = InputPadder(image1.shape)
            image1, image2 = padder.pad(image1[None].cuda(), image2[None].cuda())

            flow_low, flow_pr = model(image1, image2, iters=iters, flow_init=flow_prev, test_mode=True)
            flow = padder.unpad(flow_pr[0]).permute(1, 2, 0).cpu().numpy()

            if warm_start:
                flow_prev = forward_interpolate(flow_low[0])[None].cuda()
            
            output_dir = os.path.join(output_path, dstype, sequence)
            output_file = os.path.join(output_dir, 'frame%04d.flo' % (frame+1))

            if not os.path.exists(output_dir):
                os.makedirs(output_dir)

            frame_utils.writeFlow(output_file, flow)
            sequence_prev = sequence
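# Every snippet in this file builds an InputPadder from image.shape, pads both
# frames, and later unpads the predicted flow. The helper itself is not shown;
# the sketch below is an assumption reconstructed from that usage, padding the
# last two dimensions up to a multiple of 8 (RAFT predicts flow at 1/8
# resolution, so the divisor and the replicate padding mode are assumptions).
import torch.nn.functional as F

class InputPadderSketch:
    """Pad image tensors so that height and width are divisible by `divisor`."""

    def __init__(self, dims, divisor=8):
        self.ht, self.wd = dims[-2:]
        pad_ht = (divisor - self.ht % divisor) % divisor
        pad_wd = (divisor - self.wd % divisor) % divisor
        # F.pad ordering for the last two dims: (left, right, top, bottom)
        self._pad = [pad_wd // 2, pad_wd - pad_wd // 2,
                     pad_ht // 2, pad_ht - pad_ht // 2]

    def pad(self, *inputs):
        return [F.pad(x, self._pad, mode='replicate') for x in inputs]

    def unpad(self, x):
        ht, wd = x.shape[-2:]
        return x[..., self._pad[2]:ht - self._pad[3],
                      self._pad[0]:wd - self._pad[1]]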
def demo(args):
    model = torch.nn.DataParallel(RAFT(args))
    model.load_state_dict(torch.load(args.model))

    model = model.module
    model.to(DEVICE)
    model.eval()

    with torch.no_grad():
        images = glob.glob(os.path.join(args.path, '*.png')) + \
                 glob.glob(os.path.join(args.path, '*.jpg'))

        images = natsorted(images)
        for imfile1, imfile2 in tqdm(zip(images[:-1], images[1:]), total=len(images) - 1):
            try:
                image1 = load_image(imfile1)
                image2 = load_image(imfile2)

                padder = InputPadder(image1.shape)
                image1, image2 = padder.pad(image1, image2)

                flow_low, flow_up = model(image1, image2, iters=20, test_mode=True)  # flow_up is the upsampled prediction

                if args.save:
                    path = Path(args.path_save)
                    path.mkdir(parents=True, exist_ok=True)
                    flow = padder.unpad(flow_up[0]).permute(1, 2, 0).cpu().numpy()
                    # Mirror the input path under args.path_save and swap the image
                    # extension (.png or .jpg) for .flo
                    out_file = os.path.splitext(imfile1.replace(args.path, args.path_save))[0] + '.flo'
                    frame_utils.writeFlow(out_file, flow)
                else:
                    viz(image1, flow_up)

            except Exception as e:
                print(f'Error with {imfile1}: {e}')
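# demo() above relies on load_image and viz helpers that are not part of these
# snippets. The sketches below are assumptions reconstructed from how they are
# called: load_image must return a 1 x 3 x H x W float tensor on DEVICE, and
# viz must render the flow next to the frame (flow_viz.flow_to_image is the
# same helper evaluate_davis uses below). torch, DEVICE and flow_viz come from
# the surrounding code.
import cv2
import numpy as np
from PIL import Image

def load_image_sketch(imfile):
    # Read an RGB frame and convert it to the tensor layout the model expects.
    img = np.array(Image.open(imfile)).astype(np.uint8)
    img = torch.from_numpy(img).permute(2, 0, 1).float()
    return img[None].to(DEVICE)

def viz_sketch(img, flo):
    # Drop the batch dimension, colour-code the flow, and show image + flow
    # stacked vertically (channels reordered to BGR for OpenCV).
    img = img[0].permute(1, 2, 0).cpu().numpy()
    flo = flow_viz.flow_to_image(flo[0].permute(1, 2, 0).cpu().numpy())
    img_flo = np.concatenate([img, flo], axis=0)
    cv2.imshow('flow', img_flo[:, :, [2, 1, 0]] / 255.0)
    cv2.waitKey(1)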
def evaluate_davis(model, iters=32):
    """ Peform validation using the Sintel (train) split """
    model.eval()
    val_dataset = datasets.DAVISDataset(split='train')

    for val_id in tqdm(range(len(val_dataset))):
        image1, image2, image_paths = val_dataset[val_id]
        image1 = image1[None].cuda()
        image2 = image2[None].cuda()

        padder = InputPadder(image1.shape)
        image1, image2 = padder.pad(image1, image2)

        _, flow_pr = model(image1, image2,
            iters=iters, test_mode=True)
        forward_flow = padder.unpad(flow_pr[0]).permute(1, 2, 0).cpu().numpy()
        _, flow_pr = model(image2, image1,
            iters=iters, test_mode=True)
        backward_flow = padder.unpad(flow_pr[0]).permute(1, 2, 0).cpu().numpy()

        # Work out where to store the results, mirroring the JPEGImages layout
        folder_path, fname = os.path.split(image_paths[0])
        name = os.path.splitext(fname)[0]
        flow_folder = folder_path.replace("JPEGImages", "Flows")
        flowviz_folder = folder_path.replace("JPEGImages", "FlowVizs")
        flow_path = os.path.join(flow_folder, f"forward_{name}.flo")
        flowviz_path = os.path.join(flowviz_folder, f"forward_{name}.png")
        if not os.path.exists(flow_folder):
            os.makedirs(flow_folder)
        if not os.path.exists(flowviz_folder):
            os.makedirs(flowviz_folder)

        frame_utils.writeFlow(flow_path, forward_flow)
        Image.fromarray(flow_viz.flow_to_image(forward_flow)).save(flowviz_path, format="PNG")
        flow_path = os.path.join(flow_folder, f"backward_{name}.flo")
        flowviz_path = os.path.join(flowviz_folder, f"backward_{name}.png")
        frame_utils.writeFlow(flow_path, backward_flow)
        Image.fromarray(flow_viz.flow_to_image(backward_flow)).save(flowviz_path, format="PNG")
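# The functions above persist flow as Middlebury .flo files via
# frame_utils.writeFlow. A minimal reader sketch for spot-checking those
# outputs, assuming the standard .flo layout (float32 magic 202021.25,
# int32 width, int32 height, then H*W*2 float32 values interleaved as u, v):
import numpy as np

def read_flo_sketch(path):
    with open(path, 'rb') as f:
        magic = np.fromfile(f, np.float32, count=1)[0]
        assert magic == 202021.25, f'{path} is not a .flo file'
        w = int(np.fromfile(f, np.int32, count=1)[0])
        h = int(np.fromfile(f, np.int32, count=1)[0])
        data = np.fromfile(f, np.float32, count=2 * h * w)
    return data.reshape(h, w, 2)  # (H, W, 2) array of (u, v) displacements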
def compute_flow_dir(model, dirpath, dirpathsave, resize=None):
    images = glob.glob(os.path.join(dirpath, '*.png')) + \
             glob.glob(os.path.join(dirpath, '*.jpg'))

    images = natsorted(images)
    for imfile1, imfile2 in tqdm(zip(images[:-1], images[1:]), total=len(images) - 1):
        image1 = load_image(imfile1)
        image2 = load_image(imfile2)

        padder = InputPadder(image1.shape)
        image1, image2 = padder.pad(image1, image2)

        flow_low, flow_up = model(image1, image2, iters=20, test_mode=True)  # flow_up is the upsampled prediction

        # Remove the padding before any optional resize so the crop offsets stay valid.
        flow_up = padder.unpad(flow_up[0])[None]
        if resize is not None:
            # Note: only the spatial size changes; the flow vectors are not rescaled.
            flow_up = nn.functional.interpolate(flow_up, size=resize, mode='bilinear', align_corners=False)

        path = Path(dirpathsave)
        path.mkdir(parents=True, exist_ok=True)
        flow = flow_up[0].permute(1, 2, 0).cpu().numpy()
        # Mirror the input path under dirpathsave and swap the image extension for .flo
        out_file = os.path.splitext(imfile1.replace(dirpath, dirpathsave))[0] + '.flo'
        frame_utils.writeFlow(out_file, flow)
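# compute_flow_dir assumes the model is already on the right device, in eval
# mode, and wrapped in a no_grad context by its caller. A hedged caller sketch
# following the same loading pattern as demo() above (the args fields are
# placeholders, not part of the original):
def run_compute_flow_dir_sketch(args):
    model = torch.nn.DataParallel(RAFT(args))
    model.load_state_dict(torch.load(args.model))
    model = model.module
    model.to(DEVICE)
    model.eval()
    with torch.no_grad():
        compute_flow_dir(model, args.path, args.path_save)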
def opt_flow_estimation(args):
    """
    args.path: path to the directory of the dataset that contains the images.
        - base_dir/
            - ArgoVerse/
                - video1/
                    - frame1.jpg
                    - frame2.jpg
                - video2/
            - BDD/
            - Charades/
            - LaSOT/
            - YFCC100M/
    """
    model = torch.nn.DataParallel(RAFT(args))
    model.load_state_dict(torch.load(args.model))

    model = model.module
    model.to(DEVICE)
    model.eval()

    with torch.no_grad():
        base_dir = args.path
        data_srcs = [
            fn.split('/')[-1]
            for fn in sorted(glob.glob(os.path.join(base_dir, '*')))
        ]
        if args.datasrc:
            data_srcs = [args.datasrc]

        for data_src in data_srcs:
            print("Processing", data_src)
            videos = [
                fn.split('/')[-1] for fn in sorted(
                    glob.glob(os.path.join(base_dir, data_src, '*')))
            ]
            for idx, video in enumerate(tqdm.tqdm(videos)):
                fpath = os.path.join(base_dir, data_src, video)

                images = glob.glob(os.path.join(fpath, '*.png')) + \
                         glob.glob(os.path.join(fpath, '*.jpg'))

                images = sorted(images)
                for imfile1, imfile2 in zip(images[:-1], images[1:]):
                    image1 = load_image(imfile1)
                    image2 = load_image(imfile2)

                    padder = InputPadder(image1.shape)
                    image1, image2 = padder.pad(image1, image2)

                    flow_low, flow_up = model(image1,
                                              image2,
                                              iters=20,
                                              test_mode=True)

                    # Store the flow vector
                    flow_fname = os.path.splitext(os.path.basename(imfile1))[0] + ".flo"  # handles both .jpg and .png frames
                    flow_up_fname = flow_fname.split(".")
                    flow_up_fname[0] = flow_up_fname[0] + "_up"
                    flow_up_fname = ".".join(flow_up_fname)
                    flow_low_fname = flow_fname.split(".")
                    flow_low_fname[0] = flow_low_fname[0] + "_low"
                    flow_low_fname = ".".join(flow_low_fname)

                    up_fname = os.path.join(args.outdir, data_src, video,
                                            flow_up_fname)
                    # low_fname = os.path.join(args.outdir, data_src, video, flow_low_fname)
                    os.makedirs(os.path.join(args.outdir, data_src, video), exist_ok=True)

                    flow_up = flow_up[0].permute(1, 2, 0).cpu().numpy()
                    # flow_low = flow_low[0].permute(1, 2, 0).cpu().numpy()

                    writeFlow(up_fname, flow_up)
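# A hedged command-line entry point for opt_flow_estimation. The flag names are
# inferred from the attributes the function reads (args.model, args.path,
# args.outdir, args.datasrc); any extra flags needed to construct RAFT(args)
# (e.g. --small, --mixed_precision in common RAFT code) are assumptions.
if __name__ == '__main__':
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('--model', required=True, help='RAFT checkpoint to load')
    parser.add_argument('--path', required=True, help='base_dir containing the per-source video folders')
    parser.add_argument('--outdir', required=True, help='where the .flo files are written')
    parser.add_argument('--datasrc', default='', help='process only this data source, e.g. ArgoVerse')
    parser.add_argument('--small', action='store_true', help='assumed RAFT flag: use the small model')
    parser.add_argument('--mixed_precision', action='store_true', help='assumed RAFT flag')
    args = parser.parse_args()
    opt_flow_estimation(args)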