os.mkdir(output_dir)

# this directory will contain the frames to build the video
frame_dir = os.path.join(opt.checkpoints_dir, opt.name, 'frames')
if os.path.isdir(frame_dir):
    shutil.rmtree(frame_dir)
os.mkdir(frame_dir)

FRAMES_PER_EPOCH = 48

frame_index = 1
for data in dataset:
    t = data['left_frame']
    video_utils.save_tensor(
        t,
        frame_dir + "/frame-%s.jpg" % str(frame_index).zfill(5),
        text="original video",
    )
    frame_index += 1
current_frame = t

frame_count = (opt.pstop - opt.pstart + 1) * FRAMES_PER_EPOCH
pbar = tqdm(total=frame_count)

duration_s = frame_count / opt.fps
video_id = "progress_epoch-%s-%s_%s_%.1f-s_%.1f-fps%s" % (
    str(opt.pstart), str(opt.pstop), opt.name, duration_s, opt.fps,
    "_with-%d-zoom" % opt.zoom_lvl if opt.zoom_lvl != 0 else "")

for epoch_index in range(opt.pstart, opt.pstop + 1):
# Example #2
        ############## Forward Pass - frame t -> frame t+1 ######################

        # with probability ss_recursion_prob, recurse on the model's own
        # previous output instead of the ground-truth frame (scheduled sampling)
        if opt.scheduled_sampling and (
                latest_generated_frame is not None
        ) and np.random.rand(1) < opt.ss_recursion_prob:
            left_frame = latest_generated_frame.detach()
            recursion += 1
        else:
            left_frame = Variable(data['left_frame'])
            recursion = 0

        right_frame = Variable(data['right_frame'])

        if opt.debug:
            video_utils.save_tensor(
                left_frame,
                debug_dir + "/step-%d-left-r%d.jpg" % (total_steps, recursion))
            video_utils.save_tensor(
                right_frame, debug_dir + "/step-%d-right.jpg" % total_steps)

        losses, latest_generated_frame = model(left_frame,
                                               None,
                                               right_frame,
                                               None,
                                               infer=opt.scheduled_sampling)

        # average per-device losses (DataParallel returns one value per GPU)
        losses = [
            torch.mean(x) if not isinstance(x, int) else x for x in losses
        ]
        loss_dict = dict(zip(model.module.loss_names, losses))
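        # A minimal sketch (not part of the original script) of how loss_dict
        # might be combined and optimized, assuming pix2pixHD-style loss names
        # (G_GAN, G_GAN_Feat, G_VGG, D_real, D_fake) and that the wrapped model
        # exposes optimizer_G / optimizer_D; check model.module.loss_names for
        # the actual keys before relying on this.
        loss_D = (loss_dict['D_fake'] + loss_dict['D_real']) * 0.5
        loss_G = (loss_dict['G_GAN']
                  + loss_dict.get('G_GAN_Feat', 0)
                  + loss_dict.get('G_VGG', 0))
        model.module.optimizer_G.zero_grad()
        loss_G.backward()
        model.module.optimizer_G.step()
        model.module.optimizer_D.zero_grad()
        loss_D.backward()
        model.module.optimizer_D.step()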
# Example #3
if os.path.isdir(frame_dir):
    shutil.rmtree(frame_dir)
os.mkdir(frame_dir)

frame_index = 0

if opt.start_from == "noise":
    # careful, default value is 1024x512
    t = torch.rand(1, 3, opt.fineSize, opt.loadSize)

elif opt.start_from == "video":
    # use initial frames from the dataset
    for data in dataset:
        t = data['left_frame']  # change here: specify only the first raw frames to use as the seed
        video_utils.save_tensor(
            t,
            frame_dir + "/frame-%s.png" % str(frame_index).zfill(5),
            text="",
        )
        frame_index += 1
else:
    # use specified image
    filepath = opt.start_from
    if os.path.isfile(filepath):
        t = video_utils.im2tensor(Image.open(filepath))
        for i in range(50):
            video_utils.save_tensor(
                t,
                frame_dir + "/frame-%s.png" % str(frame_index).zfill(5),
            )
            frame_index += 1
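# A minimal sketch (not part of the original script) of a generation loop that
# could follow the seeding above: recursively predict each next frame from the
# seed tensor t, assuming video_utils.next_frame_prediction(model, frame) as
# used in the next example; n_frames and the model setup are placeholders.
n_frames = 100  # hypothetical frame budget
for _ in range(n_frames):
    t = video_utils.next_frame_prediction(model, t)
    video_utils.save_tensor(
        t,
        frame_dir + "/frame-%s.png" % str(frame_index).zfill(5),
    )
    frame_index += 1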
# Example #4
    for i, data in enumerate(tqdm(positive_ds)):
        cur_frame = Image.open(data['left_path'])
        next_frame = Image.open(data['right_path'])

        cur_frame = video_utils.im2tensor(cur_frame)
        next_frame = video_utils.im2tensor(next_frame)

        if opt.gpu:
            cur_frame = cur_frame.to('cuda')
            next_frame = next_frame.to('cuda')

        generated_next_frame = video_utils.next_frame_prediction(
            model, cur_frame)

        video_utils.save_tensor(
            generated_next_frame,
            frame_dir + "/frame-%s.png" % str(frame_index).zfill(5),
        )
        frame_index += 1

print('Finished generating images')
duration_s = frame_index / opt.fps
video_id = "epoch-%s_%s_%.1f-s_%.1f-fps" % (
    str(opt.which_epoch), opt.name, duration_s, opt.fps)

print(f'created video id {video_id}')
video_path = output_dir + "/" + video_id + ".mp4"
while os.path.isfile(video_path):
    video_path = video_path[:-4] + "-.mp4"
print(f'modified video path {video_path}')
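# e.g. if "<output_dir>/<video_id>.mp4" already exists, the loop above falls
# back to "<video_id>-.mp4", then "<video_id>--.mp4", and so on until the name is free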
video_utils.video_from_frame_directory(frame_dir,
                                       video_path,