def play(self, env, directory, mode):
    """ Returns the total reward for an episode of the game."""
    steps = []
    state = env.reset()
    done = False
    tot_reward = 0
    actions = [0] * self.actions
    while not done:
      if mode != "Train":
        s = env.render("rgb_array")
        steps.append(s)

      action = self.predict_action(state)
      actions[action] += 1
      state, reward, done, _ = env.step(action)
      tot_reward += reward
    self.cur_reward = tot_reward
    if mode != "Train" and tot_reward > self.max_reward:
      print("New high reward: ", tot_reward)
      clip = ImageSequenceClip(steps, fps=30)
      clip.write_gif("~/breakout.gif", fps=30)
      self.max_reward = tot_reward

    print("ACTIONS TAKEN", actions)
    return tot_reward
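# A self-contained sketch (not from the original source) of the frame-capture
# pattern play() uses above: collect env.render("rgb_array") frames in a list,
# then hand that list to ImageSequenceClip. The env id and output path are
# illustrative assumptions; the old 4-tuple gym step API is assumed, matching
# the snippet above.
import os
import gym
from moviepy.editor import ImageSequenceClip

env = gym.make("CartPole-v1")
env.reset()
frames, done = [], False
while not done:
    frames.append(env.render("rgb_array"))               # capture current frame
    _, _, done, _ = env.step(env.action_space.sample())  # random policy
clip = ImageSequenceClip(frames, fps=30)
clip.write_gif(os.path.expanduser("~/episode.gif"), fps=30)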
def main():
    parser = argparse.ArgumentParser(description='Create driving video.')
    parser.add_argument(
        'image_folder',
        type=str,
        default='',
        help='Path to image folder. The video will be created from these images.'
    )
    parser.add_argument(
        '--fps',
        type=int,
        default=60,
        help='FPS (Frames per second) setting for the video.')
    args = parser.parse_args()

    #convert file folder into a list filtered for image file types
    image_list = sorted([os.path.join(args.image_folder, image_file)
                        for image_file in os.listdir(args.image_folder)])
    
    image_list = [image_file for image_file in image_list if os.path.splitext(image_file)[1][1:].lower() in IMAGE_EXT]

    #two methods of naming the output video to handle varying environments
    video_file_1 = args.image_folder + '.mp4'
    video_file_2 = args.image_folder + 'output_video.mp4'

    print("Creating video {}, FPS={}".format(args.image_folder, args.fps))
    clip = ImageSequenceClip(image_list, fps=args.fps)
    
    try:
        clip.write_videofile(video_file_1)
    except Exception:
        clip.write_videofile(video_file_2)
Example #3
def main():
    parser = argparse.ArgumentParser(description='Create driving video.')
    parser.add_argument(
        'image_folder',
        type=str,
        default='',
        help='Path to image folder. The video will be created from these images.'
    )
    parser.add_argument(
        '--fps',
        type=int,
        default=60,
        help='FPS (Frames per second) setting for the video.')
    args = parser.parse_args()
	
    setNum = args.image_folder.split('dataset')[1]
    setNum = setNum.split('/')[0]
	
    print (setNum)
	
    imageArray = []
    with open('dataset%s/data.csv' % setNum,'rt') as file:
        reader = csv.reader(file) #Create a reader to read the csv file line by line
        for line in reader: #For each line in the csv file
            if line[0] != 'center': #Skip the header line
                imagePath = "dataset{}/png/".format(setNum) + line[0].split('IMG\\')[1] #Get the local path to the image
                image = pyplot.imread(imagePath) #Read the image
                imageArray.append(image)
	
    video_file = args.image_folder + '.mp4'
    print("Creating video {}, FPS={}".format(video_file, args.fps))
    clip = ImageSequenceClip(imageArray, fps=args.fps)
    clip.write_videofile(video_file)
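Example #4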
def main(argv):
	if os.path.exists(OUTPUT_DIR):
		shutil.rmtree(OUTPUT_DIR)
	os.makedirs(OUTPUT_DIR)

	clip = ImageSequenceClip(data.images, fps=60)
	new_clip = clip.fl_image(process_image)
	new_clip.write_videofile(VIDEO_OUTPUT, audio=False)
Example #5
    def saturate_clip(self, clip, fps=None):
        # extract frames from the clip and saturate them
        new_frames = []
        total = int(clip.duration * fps) + 1
        for frame in tqdm(clip.iter_frames(fps), total=total):
            frame = img_as_ubyte(self.saturate_frame(frame))
            new_frames.append(frame)
        # create and return a new clip from the saturated frames
        new_clip = ImageSequenceClip(new_frames, fps=fps)
        new_clip = new_clip.set_audio(clip.audio)
        return new_clip
Example #6
def plotme(Elats, Elons, plotEclipse, Clats, Clons, plotConflict):
    count = 0

    d = os.path.dirname("globeframes/")
    if not os.path.exists(d):
        os.mkdir(d)

    # Rotate the earth
    for l in range((DEGPERTURN * count - 180), 180, DEGPERTURN):
        fig = plt.figure(figsize=(10, 10))
        map = Basemap(projection="ortho", lat_0=23.4, lon_0=l, resolution="l")

        # Make the globe more realistic
        map.bluemarble(scale=0.2)

        if plotEclipse:
            # compute the native map projection coordinates for the eclipse points.
            x, y = map(Elons, Elats)

            # plot filled circles at the eclipse locations.
            map.plot(x, y, "yo", ms=15, picker=5, mew=2)

        if plotConflict:

            x, y = map(Clons, Clats)
            map.plot(x, y, "rx", ms=10, picker=5, mew=4)

            # plot the conflict positions again with smaller markers
            map.plot(x, y, "rx", ms=6)

        plt.savefig("globeframes/frame{0}".format((str(count).rjust(3, "0"))), facecolor="k")
        count += 1
        plt.clf()
        plt.close(fig)
        print("Percent completed: {} %".format((count * DEGPERTURN / 360) * 100))

    frames = []

    # Put all the frame names in a list
    for i in range(count):
        frames.append("./globeframes/frame{0}.png".format((str(i).rjust(3, "0"))))

    # Create a video file from the frames
    clip = ImageSequenceClip(frames, fps=FPS)
    clip.write_videofile("SpinningGlobe.mp4", fps=FPS)
def main():
    parser = argparse.ArgumentParser(description='Create driving video.')
    parser.add_argument(
        'image_folder',
        type=str,
        default='',
        help='Path to image folder. The video will be created from these images.'
    )
    parser.add_argument(
        '--fps',
        type=int,
        default=60,
        help='FPS (Frames per second) setting for the video.')
    args = parser.parse_args()

    video_file = args.image_folder + '.mp4'
    print("Creating video {}, FPS={}".format(video_file, args.fps))
    clip = ImageSequenceClip(args.image_folder, fps=args.fps)
    clip.write_videofile(video_file)
Example #8
    def run(self):
        """

        :return:
        """
        #   check if folder exists and if not create it
        folder = self._name
        if not os.path.exists(folder):
            os.makedirs(folder)

        #   absolute path to store animation images to use them to create video file
        animation_folder_abs_path = os.path.join(os.getcwd(), folder)

        #   loop through solution data and create a figure with the opengl function grabFrameBuffer() every i-th step
        for _step in xrange(0, len(self._parent.step), int(self._parent._delta_step)):
            filename = "step_%06d.png" % _step

            #   assign step and repaint GL widget
            self._parent._step = int(_step)
            self._parent._update_GL()

            #   get image of current opengl widget scene
            image = self._parent.OpenGLWidget.grabFrameBuffer()

            #   abs path to image object of current simulation time step
            file_abs_path = os.path.join(animation_folder_abs_path, filename)
            #   save image to file
            image.save(file_abs_path)


        #   create video object
        video = ImageSequenceClip(animation_folder_abs_path, fps=24)
        #   write data to video object
        __animation_filename = self._name+".avi"

        #   check filename
        __animation_filename = check_filename(__animation_filename)

        video.write_videofile(__animation_filename,codec='mpeg4')

        #   delete animation folder with animation figures
        shutil.rmtree(animation_folder_abs_path)
Example #9
def make_gif():
    files = []
    for (dirpath, dirnames, filenames) in walk("input"):
        files.extend(["input/" + x for x in filenames if ".jpg" in x])
    
    image_names = ["output/frame_{}.jpg".format(x) for x in range(len(files))]
    for i in range(len(files)):
        im = Image.open(files[i])
        im.thumbnail(pic_size, Image.ANTIALIAS)
        print image_names[i]
        im.save(image_names[i], quality=100)

    newName = "result.gif"

    clip = ImageSequenceClip(image_names, fps=4)
    clip.write_gif(newName, fuzz=False)

    for f in image_names:
        remove(f)
    print ""
Example #10
def arr2aviMPY(fileName, M, fps):
    """Convert gray-scale Numpy 3D image array to AVI file (use moviepy).
    
    Parameters
    ----------
    fileName : str
        Path for output AVI file.
        
    M : np.ndarray(uint8)
        F x H x W 3D array, representing a sequence of F images, each H x W.
        
    fps : int
        frame rate for the output file.
        
    """
    import numpy as np
    from moviepy.editor import ImageSequenceClip
    
    D = [np.dstack([m] * 3) for m in M]
    clip = ImageSequenceClip(D, fps=fps)    
    clip.write_videofile(fileName, codec='mpeg4', ffmpeg_params=['-vb','1M'])
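# Usage sketch for arr2aviMPY (not from the original source): write a
# synthetic F x H x W uint8 stack to disk at 10 fps. The filename and the
# array shape are illustrative.
import numpy as np

M = (np.random.rand(40, 120, 160) * 255).astype(np.uint8)  # 40 frames, 120x160
arr2aviMPY("random_noise.avi", M, fps=10)
Example #11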
    video_save_path = join(data_path, "retina-simulation",
                           "horse-riding.gif")
    parvo_save_path = join(data_path, "retina-simulation",
                           "horse-riding-parvo.gif")
    magno_save_path = join(data_path, "retina-simulation",
                           "horse-riding-magno.gif")

    parvo_frames = []
    magno_frames = []
    origin_frames = []
    for frame in frames:
        retina.run(frame)

        origin_frames.append(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
        parvo_frame = retina.getParvo()
        parvo_frame = cv2.cvtColor(parvo_frame, cv2.COLOR_BGR2RGB)
        parvo_frames.append(parvo_frame)
        magno_frames.append(retina.getMagno())

    clip = ImageSequenceClip(parvo_frames, fps=30)
    clip.write_gif(parvo_save_path, fps=30)
    print "[MESSAGE] Parvo frames is saved at: %s" % (parvo_save_path)

    clip = ImageSequenceClip(magno_frames, fps=30)
    clip.write_gif(magno_save_path, fps=30)
    print "[MESSAGE] Magno frames is saved at: %s" % (magno_save_path)

    clip = ImageSequenceClip(origin_frames, fps=30)
    clip.write_gif(video_save_path, fps=30)
    print "[MESSAGE] Original frames is saved at: %s" % (video_save_path)
Example #12
###
import numpy as np
import os
import sys
import glob
from moviepy.editor import ImageSequenceClip

file_list = glob.glob(os.path.join('./image_02/', '*.png'))
file_list.sort()

clip = ImageSequenceClip(file_list, fps=15)
name = '15.gif'
#clip.write_gif(name, fps=15)
clip.write_videofile("15.mp4", fps=15)
Example #13
    def _make_gif(self, frames):
        clip = ImageSequenceClip(frames, fps=10)
        # clip = ImageSequenceClip('./tempdir/', fps=10, with_mask=True)
        clip.write_gif(filename + '.gif')  # "filename" is defined elsewhere in the source
Example #14
from moviepy.editor import VideoFileClip
from moviepy.editor import ImageSequenceClip
import glob

fps = 20

image_paths = glob.glob(
    '../input/lisa_traffic_light_dataset/lisa-traffic-light-dataset/daySequence1/daySequence1/frames/*.jpg'
)
image_paths.sort()
print(image_paths[:5])
clip = ImageSequenceClip(image_paths, fps=fps)
clip.write_videofile(
    '../input/lisa_traffic_light_dataset/input/test_data/day_seq1.mp4',
    fps=fps)
print('DONE')
Example #15
def process_image(img):
    # Create a blank canvas: camera image on top, worldmap below
    output_image = np.zeros(
        (img.shape[0] + data.worldmap.shape[0], img.shape[1] * 2, 3))
    output_image[:img.shape[0], :img.shape[1]] = img

    # Let's create more images to add to the mosaic, first a warped image
    warped = perspect_transform(img, source, destination)
    output_image[:img.shape[0], img.shape[1]:] = warped

    # Overlay worldmap with ground truth map
    map_add = cv2.addWeighted(data.worldmap, 1, data.ground_truth, 0.5, 0)
    output_image[img.shape[0]:, :data.worldmap.shape[1]] = np.flipud(map_add)

    # Then putting some text over the image
    cv2.putText(output_image,
                "Populate this image with your analyses to make a video!",
                (20, 20), cv2.FONT_HERSHEY_COMPLEX, 0.4, (255, 255, 255), 1)
    if data.count < len(data.images) - 1:
        data.count += 1  # Keep track of the index in the Databucket()

    return output_image


# Define pathname to save the output video
output = 'test_mapping.mp4'
data = Databucket(
)  # Re-initialize data in case you're running this cell multiple times
# Note: output video will be sped up because
# recording rate in simulator is fps=25
clip = ImageSequenceClip(data.images, fps=60)
# NOTE: this function expects color images!!
new_clip = clip.fl_image(process_image)
new_clip.write_videofile(output, audio=False)
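Example #16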
    model = model.to(device)

    if args["video"]:
        video_clip = VideoFileClip(VIDEO_IN, audio=False)
        audio_clip = AudioFileClip(VIDEO_IN)
        current_directory = os.getcwd()
        tmp_directory = os.path.join(current_directory, TMP_DIR)
        if not os.path.exists(tmp_directory):
            os.makedirs(tmp_directory)

        for i, frame in enumerate(video_clip.iter_frames()):
            frame = get_numpy_transform(frame).unsqueeze(0)
            pred = model(frame.to(device)).cpu().detach().numpy()[0]
            save_image(tmp_directory + "/" + str(i).zfill(5) + ".png", pred)

        video = ImageSequenceClip(sequence=tmp_directory + "/",
                                  fps=video_clip.fps)
        video = video.set_audio(audio_clip)
        video.write_videofile(VIDEO_OUT, audio=True)
        shutil.rmtree(tmp_directory + "/")

    elif args["webcam"]:
        # webcam mode, process frames in webcam stream
        cv2.startWindowThread()
        cv2.namedWindow("frame")
        while True:
            # Read an image.
            cap = cv2.VideoCapture(0)
            ret, frame = cap.read()
            cap.release()
            image = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
            image = load_image(None, Image.fromarray(image))
Example #17
    fileName = input('Please enter the folder for saving the file: ')
    # create the folder
    if not os.path.isdir(fileName):
        os.mkdir(fileName)

    # controls whether the program should stop
    recording = True
    # list of captured images
    imageList = []
    # elapsed recording time
    recordTime = 0

    # create the capture thread
    t = threading.Thread(target=get_pictures, daemon=True)
    t.start()
    print('Recording screen to %s.mp4, press Ctrl-C to stop' % fileName)

    try:
        while 1:
            input()
    except Exception as e:
        recording = False
        # ImageSequenceClip stitches the images together; fps is the number of
        # frames per second -- the higher the value, the smoother the video
        print("imageList:%s----fps=%d" %
              (imageList, int(len(imageList) / recordTime)))
        clip = ImageSequenceClip(imageList,
                                 fps=int(len(imageList) / recordTime))
        # save the video
        clip.write_videofile('%s.mp4' % fileName)
    fname = 'tmp_images/tmp_image_' + str(idx).zfill(4) + '.jpg'
    img = cv2.cvtColor(test_image, cv2.COLOR_RGB2BGR)
    cv2.imwrite(fname, img)

    # create a set of images in a temporary folder for the box scan
    for box in boxes:
        while box.out_of_bounds == 0:
            idx += 1
            fname = 'tmp_images/tmp_image_' + str(idx).zfill(4) + '.jpg'
            img = draw_box(test_image, box, boxlist)
            if box.out_of_bounds == 0:
                img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
                cv2.imwrite(fname, img)

        box.reset()

    # write video file
    clip = ImageSequenceClip("tmp_images", fps=40)
    clip.write_videofile("boxscan.mp4")

#-----------------------------#
### Make Final Output Video ###
#-----------------------------#
make_output_video = True
if make_output_video is True:
    heatmap = HeatMap(image=test_image)
    video_output = 'project_video_out.mp4'
    clip1 = VideoFileClip("project_video.mp4")
    video_clip = clip1.fl_image(make_output_image)
    video_clip.write_videofile(video_output, audio=False)
def write_video(images, fps, out_filename):
    print("Creating video {}, FPS={}".format(out_filename, fps))
    clip = ImageSequenceClip(images, fps=fps)
    
    clip.write_videofile(out_filename)
def split_video():

  movie_title = os.path.split(args.source_path)[-1]
  offset_csv = os.path.join(args.target_folder, 'offsets.csv')
  offsets = []
  video = VideoFileClip(args.source_path, audio=False)
  framerate = video.fps
  width = (np.size(video.get_frame(0), 1) - args.middle_gap_pixel_size) / 2
  left_video = moviepy.video.fx.all.crop(video, x1=0, width=width)
  right_video = moviepy.video.fx.all.crop(video, x1=width + args.middle_gap_pixel_size, width=width)
  right_frame_iterator = right_video.iter_frames()
  output_ind = args.output_starting_ind

  for ind, left_frame in enumerate(left_video.iter_frames()):
    left_frame = rgb2gray(left_frame)
    right_frame = rgb2gray(right_frame_iterator.next())
    if (ind % 20 == 0): # INITIALIZE
      left_frames = []
      right_frames = []
      offset_frames = []
      first_start = ind
      offset = randint(1,10)
      second_start = first_start + offset
      offset_left = randint(0, 1) == 1
    if (ind >= first_start and ind < first_start + 10): # ADD FRAMES
      right_frames.append(right_frame)
      left_frames.append(left_frame)
    if (ind >= second_start and ind < second_start + 10): # ADD OFFSET FRAMES
      if (offset_left):
        offset_frames.append(left_frame)
      else:
        offset_frames.append(right_frame)
    if (ind % 20 == 19): # SAVE SEGMENT FRAMES TO JPEG
      if args.output_images:
        assert len(left_frames) == 10, 'Only added ' + str(len(left_frames)) + ' left frames on segment ' + str(output_ind) + '. Should have 10.'
        assert len(right_frames) == 10, 'Only added ' + str(len(right_frames)) + ' right frames on segment ' + str(output_ind) + '. Should have 10.'
        assert len(offset_frames) == 10, 'Only added ' + str(len(offset_frames)) + ' offset frames on segment ' + str(output_ind) + '. Should have 10.'
        for frame_ind, left_frame in enumerate(left_frames):
          misc.toimage(left_frame, cmin=np.min(left_frame), cmax=np.max(left_frame)).save(os.path.join(args.target_folder, ('seg-{:06d}-frame-{:02d}-left.jpeg').format(output_ind, frame_ind)))
        for frame_ind, right_frame in enumerate(right_frames):
          misc.toimage(right_frame, cmin=np.min(right_frame), cmax=np.max(right_frame)).save(os.path.join(args.target_folder, ('seg-{:06d}-frame-{:02d}-right.jpeg').format(output_ind, frame_ind)))
      else:
        left_video_out = ImageSequenceClip(left_frames, fps=framerate)
        left_video_out.write_videofile(os.path.join(args.target_folder, 'seg-{:06d}-left.mp4'.format(output_ind)), codec='libx264', audio=False)
        right_video_out = ImageSequenceClip(right_frames, fps=framerate)
        right_video_out.write_videofile(os.path.join(args.target_folder, 'seg-{:06d}-right.mp4'.format(output_ind)), codec='libx264', audio=False)
      offsets.append({ 'id': '%06d' % output_ind, 'offset_frames': 0 })
      output_ind += 1
      if (offset_left):
        if args.output_images:
          for frame_ind, offset_frame in enumerate(offset_frames):
            misc.toimage(offset_frame, cmin=np.min(left_frame), cmax=np.max(left_frame)).save(os.path.join(args.target_folder, ('seg-{:06d}-frame-{:02d}-left.jpeg').format(output_ind, frame_ind)))
          for frame_ind, right_frame in enumerate(right_frames):
            misc.toimage(right_frame, cmin=np.min(right_frame), cmax=np.max(right_frame)).save(os.path.join(args.target_folder, ('seg-{:06d}-frame-{:02d}-right.jpeg').format(output_ind, frame_ind)))
        else:
          left_video_out = ImageSequenceClip(offset_frames, fps=framerate)
          left_video_out.write_videofile(os.path.join(args.target_folder, 'seg-{:06d}-left.mp4'.format(output_ind)), codec='libx264', audio=False)
          right_video_out = ImageSequenceClip(right_frames, fps=framerate)
          right_video_out.write_videofile(os.path.join(args.target_folder, 'seg-{:06d}-right.mp4'.format(output_ind)), codec='libx264', audio=False)
      else:
        if args.output_images:
          for frame_ind, left_frame in enumerate(left_frames):
            misc.toimage(left_frame, cmin=np.min(left_frame), cmax=np.max(left_frame)).save(os.path.join(args.target_folder, ('seg-{:06d}-frame-{:02d}-left.jpeg').format(output_ind, frame_ind)))
          for frame_ind, offset_frame in enumerate(offset_frames):
            misc.toimage(offset_frame, cmin=np.min(right_frame), cmax=np.max(right_frame)).save(os.path.join(args.target_folder, ('seg-{:06d}-frame-{:02d}-right.jpeg').format(output_ind, frame_ind)))
        else:
          left_video_out = ImageSequenceClip(left_frames, fps=framerate)
          left_video_out.write_videofile(os.path.join(args.target_folder, 'seg-{:06d}-left.mp4'.format(output_ind)), codec='libx264', audio=False)
          right_video_out = ImageSequenceClip(offset_frames, fps=framerate)
          right_video_out.write_videofile(os.path.join(args.target_folder, 'seg-{:06d}-right.mp4'.format(output_ind)), codec='libx264', audio=False)
      offsets.append({ 'id': '{:06d}'.format(output_ind), 'offset_frames': offset })
      output_ind += 1
    if (ind % 1000 == 0):
      print('Finished processing {:d} datapoints.'.format(output_ind))
  if os.path.exists(offset_csv):
    os.remove(offset_csv)
  with open(offset_csv, 'w') as offset_csv_file:
    w = csv.DictWriter(offset_csv_file, fieldnames=['id', 'offset_frames'])
    w.writeheader()
    w.writerows(offsets)
  return True
Example #21
import glob
from moviepy.editor import ImageSequenceClip

images_dir = './results/baby30/train_latest/videos/7/'
## Change v to select the target image sequence by name
v = 'g05_c02'
images_list = glob.glob(images_dir + "v_BabyCrawling_{}**.png".format(v))
images_list.sort()
output_file = './{}.mp4'.format(v)
fps = 1



if __name__ == '__main__':
    clip = ImageSequenceClip(images_list, fps=fps)

    clip.write_videofile(output_file, fps=fps, audio=False)
Example #22
def get_output(video_path,
               out_filename,
               label,
               fps=30,
               font_scale=0.5,
               font_color='white',
               target_resolution=None,
               resize_algorithm='bicubic',
               use_frames=False):
    """Get demo output using ``moviepy``.

    This function will generate video file or gif file from raw video or
    frames, by using ``moviepy``. For more information of some parameters,
    you can refer to: https://github.com/Zulko/moviepy.

    Args:
        video_path (str): The video file path or the rawframes directory path.
            If ``use_frames`` is set to True, it should be rawframes directory
            path. Otherwise, it should be video file path.
        out_filename (str): Output filename for the generated file.
        label (str): Predicted label of the generated file.
        fps (int): Number of picture frames to read per second. Default: 30.
        font_scale (float): Font scale of the label. Default: 0.5.
        font_color (str): Font color of the label. Default: 'white'.
        target_resolution (None | tuple[int | None]): Set to
            (desired_width, desired_height) to have resized frames. If either
            dimension is None, the frames are resized by keeping the existing
            aspect ratio. Default: None.
        resize_algorithm (str): Support "bicubic", "bilinear", "neighbor",
            "lanczos", etc. Default: 'bicubic'. For more information,
            see https://ffmpeg.org/ffmpeg-scaler.html
        use_frames (bool): Whether to use rawframes as input. Default: False.
    """

    if video_path.startswith(('http://', 'https://')):
        raise NotImplementedError

    try:
        from moviepy.editor import ImageSequenceClip
    except ImportError:
        raise ImportError('Please install moviepy to enable output file.')

    # Channel Order is BGR
    if use_frames:
        frame_list = sorted(
            [osp.join(video_path, x) for x in os.listdir(video_path)])
        frames = [cv2.imread(x) for x in frame_list]
    else:
        video = decord.VideoReader(video_path)
        frames = [x.asnumpy()[..., ::-1] for x in video]

    if target_resolution:
        w, h = target_resolution
        frame_h, frame_w, _ = frames[0].shape
        if w == -1:
            w = int(h / frame_h * frame_w)
        if h == -1:
            h = int(w / frame_w * frame_h)
        frames = [cv2.resize(f, (w, h)) for f in frames]

    textsize = cv2.getTextSize(label, cv2.FONT_HERSHEY_DUPLEX, font_scale,
                               1)[0]
    textheight = textsize[1]
    padding = 10
    location = (padding, padding + textheight)

    if isinstance(font_color, str):
        font_color = webcolors.name_to_rgb(font_color)[::-1]

    frames = [np.array(frame) for frame in frames]
    for frame in frames:
        cv2.putText(frame, label, location, cv2.FONT_HERSHEY_DUPLEX,
                    font_scale, font_color, 1)

    # RGB order
    frames = [x[..., ::-1] for x in frames]
    video_clips = ImageSequenceClip(frames, fps=fps)

    out_type = osp.splitext(out_filename)[1][1:]
    if out_type == 'gif':
        video_clips.write_gif(out_filename)
    else:
        video_clips.write_videofile(out_filename, remove_temp=True)
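# Usage sketches for get_output above (the file paths, label text, and
# resolution values are illustrative assumptions, not from the original source):
get_output('demo.mp4', 'demo_out.gif', label='playing guitar', fps=24)

# The same call driven from a rawframes directory, resized to a width of 320
# while keeping the aspect ratio (h == -1 is converted per the code above):
get_output('rawframes/demo/', 'demo_out.mp4', label='playing guitar',
           use_frames=True, target_resolution=(320, -1))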
Example #23
import numpy as np

from moviepy.editor import ImageSequenceClip

from rlbench.action_modes.action_mode import MoveArmThenGripper
from rlbench.action_modes.arm_action_modes import JointPosition
from rlbench.action_modes.gripper_action_modes import Discrete
from rlbench.environment import Environment
from rlbench.observation_config import ObservationConfig
from rlbench.tasks import CloseBox
from rlbench.demo import Demo

from mohou.types import RGBImage

if __name__ == '__main__':
    obs_config = ObservationConfig()
    obs_config.set_all(True)

    env = Environment(action_mode=MoveArmThenGripper(
        arm_action_mode=JointPosition(), gripper_action_mode=Discrete()),
                      obs_config=obs_config,
                      headless=True)
    env.launch()

    task = env.get_task(CloseBox)
    task.reset()
    rgb_seq = []
    for i in range(30):
        gripper = 0.0
        action = np.ones(7) * 0.01 * i
        obs, _, _ = task.step(np.array(action.tolist() + [0]))
        rgb = RGBImage(obs.overhead_rgb)
        rgb_seq.append(rgb)

    clip = ImageSequenceClip([img.numpy() for img in rgb_seq], fps=50)
    clip.write_gif("tmp.gif", fps=50, loop=1)
Example #24
images_list = []

for i in range(len(jpeg_files)):
    img = cv2.imread(
        'C:/Users/DBM/Desktop/DAVIS-JPEGImages/JPEGImages/night-race/' +
        jpeg_files[i])
    seg = cv2.imread(
        'C:/Users/DBM/Desktop/DAVIS-JPEGImages/Annotations/night-race/' +
        jpeg_files[i].split('.')[0] + '.png', cv2.IMREAD_GRAYSCALE)

    height, width = img.shape[:2]
    seg = (seg == 38)
    image_of_the_guy = np.zeros((height, width, 3), np.uint8)  #create template
    image_without_the_guy = np.zeros((height, width, 3),
                                     np.uint8)  #create template

    image_of_the_guy[:, :, 1] = seg * img[:, :, 1]  #only the green channel is kept

    image_without_the_guy[:, :, 0] = np.logical_xor(seg, True) * img[:, :, 0]
    image_without_the_guy[:, :, 1] = np.logical_xor(seg, True) * img[:, :, 1]
    image_without_the_guy[:, :, 2] = np.logical_xor(seg, True) * img[:, :, 2]

    image = image_of_the_guy + image_without_the_guy
    image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    #os.chdir(r'C:\Users\DBM\Desktop\results')
    #cv2.imwrite(jpeg_files[i].split('.')[0]+'.png', image)
    images_list.append(image)

clip = ImageSequenceClip(images_list, fps=25)
clip.write_videofile('part1_video3.mp4', codec='mpeg4')
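Example #25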
 def startVideo(self):
     print("Starting Video...")
     self.run_video = True
     while self.run_video:
         ret,frame = self.camera.read()
         if ret == True:
             # get frames from CNT class
             print("Capturing Frame: ", self.counter)
             # resize and save the frame as a jpeg image locally
             frame2resize = cv2.resize(frame,(0,0),fx=self.frame,fy=self.frame)
             cv2.imwrite(self.localDir + "/frames/frame.jpg", frame2resize)
             # # # Based on the chosen transmission method the images are either sent over LAN or via HACKRF
             if(self.counter >= self.skipValue):  
                 # # # if we are transmitting over LAN
                 if self.transMeth == 0:
                     try:
                         print("sending image: ", self.counter , " over LAN")
                         self.sendFile(self.localDir + "/frames/frame.jpg")
                         # finally reset the counter
                         self.counter = 0
                     except Exception as e:
                         print("Socket error.\nException:" + str(e))
                         self.counter = 0  
                         self.run_video = False
                         break
                 # # # if we are transmitting over HACKRF with IQ modulation
                 elif self.transMeth == 1:   
                     try:
                         print("Encoding frame: ", self.counter)
                         # frame can be flipped depending on receiving waterfall
                         if self.flipFrame == True:
                             frame_toSend = cv2.flip(frame2resize, 0)
                         else:
                             frame_toSend = frame2resize
                         cv2.imwrite(self.localDir + "/frames/frameSmallG.jpg", frame_toSend)
                         # use the IQstream converter to convert the image into a hackrf transmission file
                         tran = Image2IQFile(self.transSamp,self.lineTime,self.outputFile,self.sourceFile)
                         tran.convert()
                         print("Transmiting frame: ")
                         # transmit the saved image using the hackRF
                         os.system("hackrf_transfer -t " + self.localDir +  "/frames/frameSmallG.iqhackrf -f " + str(self.transFreq) + " -b " + str(self.transBand) + " -s " + str(self.transSamp) + " -x 20 -a 1")
                         # finally reset the counter
                         self.counter = 0 
                     except Exception as e:
                         print("HACKRF IQ Tx error.\nException:" + str(e))
                         self.counter = 0  
                         self.run_video = False
                         errorBox("HACKRF IQ Tx error.\nException:" + str(e))
                         break
                 
                 # # # if we are transmitting over HACKRF with PAL modulation
                 elif self.transMeth == 2:
                     try:
                         print("Encoding Video with ", self.counter, " images.")
                         clip = ImageSequenceClip("recording", 10)
                         print("Writing file...")
                         clip.write_videofile(self.localDir + "/toSend.mp4",codec = "libx264")
                         os.system("hacktv -f "+str(self.transFreq)+" -m i -g 47 "+self.localDir+"/toSend.mp4")
                         # finally reset the counter
                         self.counter = 0 
                     except Exception as e:
                         print("HACKRF Pal Tx error.\nException:" + str(e))
                         self.counter = 0  
                         self.run_video = False
                         errorBox("HACKRF Pal Tx error.\nException:" + str(e)) 
                         break                         
                         
             # if we are using PAL transmission save a number of images
             if self.transMeth == 2:
                 cv2.imwrite(self.localDir + "/recording/" + str(self.counter) + ".jpg", frame2resize)
             else:
                 print("skipping Frame: ", self.counter)
             self.counter = self.counter + 1
             # do operations to make cv2 video compatible with PyQt5
             color_swapped_image = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)             
             height, width, _ = color_swapped_image.shape                
             qt_image = QImage(color_swapped_image.data,
                                     width,
                                     height,
                                     color_swapped_image.strides[0],
                                     QImage.Format_RGB888)
             self.video_signal.emit(qt_image)# emit the QImage
     # set the default image and transmit it
     self.emitted_signal = self.video_signal.emit(self.pause_image)
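Example #26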
def main(data_json_path, mode="dense_optical_flow", video_generation="no"):

    json_file = open(model_path, 'r')
    loaded_model_val = json_file.read()
    json_file.close()
    model_val = model_from_json(loaded_model_val)
    print("Loaded the model")

    # load the trained weights
    if mode == "dense_optical_flow":
        model_val.load_weights(model_weights_path + 'DenseOptflow_weights.h5')
    else:
        model_val.load_weights(model_weights_path + 'rgb_weights.h5')
    print("trained weights loaded")

    # compile the model
    model_val.compile(loss='mse', optimizer='adam')
    print("compiled the model")

    # load the json data, with image names and the speeds
    with open(data_json_path) as data_file:
        data = json.load(data_file)

    print("loading the test data...")
    if mode == "dense_optical_flow":
        x = load_data.load_XDenseOptFlowInput(data)
        y_actual = load_data.load_yDenseOptFlowLabels(data)
    else:
        x = load_data.load_xInput(data)
        y_actual = load_data.load_yLabels(data)

    # evaluate the weights
    print('evaluating....')
    score = model_val.evaluate(x, y_actual, 64, verbose=1)
    print('Evaluation loss: %f' % score)

    print('predicting.....')
    y = model_val.predict(x)
    y = y.flatten()
    y_predicted = y

    # smoothing the predicted data, can be removed
    if mode == "dense_optical_flow":
        # smooth using Savitzky-Golay filter
        y_predicted = scipy.signal.savgol_filter(
            y, 101, 3)  # 101 window length and fit using 3 order polynomial
    else:
        # smooth using Savitzky-Golay filter
        y_predicted = scipy.signal.savgol_filter(
            y, 51, 3)  # 51 window length and fit using 3 order polynomial

    # Plotting speed actual vs predicted
    plt.figure(0)
    #plt.plot(y, label = 'Training Prediction')
    plt.plot(y_predicted, label='Training Prediction smoothed')
    plt.plot(y_actual, label='Actual Dataset')
    plt.title('speed: Actual vs Predicted')
    plt.xlabel('Number of images')
    plt.ylabel('speed')
    plt.legend(loc='upper left')
    if mode == "dense_optical_flow":
        plt.savefig('speed predicted optical flow')
    else:
        plt.savefig('speed predicted rgb')
    print("Saved speed plot to disk")
    plt.close()

    # annotate the images and save
    if video_generation == "yes":
        print('generating video.....')

        for i in range(0, len(y_predicted)):
            if mode == "dense_optical_flow":
                cur_im = cv2.imread(images_extracted_path +
                                    "%f.jpg" % data[i][0])
                nxt_im = cv2.imread(images_extracted_path +
                                    "%f.jpg" % data[i + 1][0])

                xt = nxt_im.copy()
                cv2.putText(xt, 'Act Speed = ' + str(y_actual[i]), (10, 40),
                            cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 0, 0), 2)
                cv2.putText(xt, 'Pred Speed = ' + str(y_predicted[i]),
                            (10, 80), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 0, 0),
                            2)
                error = y_actual[i] - y_predicted[i]
                cv2.putText(xt, 'Error = ' + str(error), (10, 120),
                            cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2)

                # get the visualisations for the flow
                vis_flow, vis_hsv, vis_rgb_hsv = drawDenseOptFlow(
                    cur_im, nxt_im)

                # merge all the vis
                merged1 = np.hstack((xt, vis_flow))
                merged2 = np.hstack((vis_hsv, vis_rgb_hsv))
                merged = np.vstack((merged1, merged2))

                # save the images
                cv2.imwrite(data_output_path + "%i.jpg" % i, merged)
            else:
                xt = cv2.imread(images_extracted_path + "%f.jpg" % data[i][0])
                cv2.putText(xt, 'Act Speed = ' + str(y_actual[i]), (10, 40),
                            cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 0, 0), 2)
                cv2.putText(xt, 'Pred Speed = ' + str(y_predicted[i]),
                            (10, 80), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 0, 0),
                            2)
                error = y_actual[i] - y_predicted[i]
                cv2.putText(xt, 'Error = ' + str(error), (10, 120),
                            cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2)

                # save the images
                cv2.imwrite(data_output_path + "%i.jpg" % i, xt)

            if i % 1000 == 0:
                print("%d frames processed" % i)

        # generate the video, from the predicted annotated images
        vimages = [
            data_output_path + "%d.jpg" % i
            for i in range(0, len(y_predicted))
        ]
        clip = ImageSequenceClip(vimages, fps=25)
        if mode == "dense_optical_flow":
            clip.write_videofile('speed_predicted_optDense_flow.mp4', fps=25)
        else:
            clip.write_videofile('speed_predicted_rgb.mp4', fps=25)
Example #27
def make_video(img_folder, out="video.mp4"):
    # sort so that frames are not joined in arbitrary os.listdir() order
    imgs = sorted(os.path.join(img_folder, img) for img in os.listdir(img_folder))

    clip = ImageSequenceClip(imgs * 5, fps=10)  # loop the sequence five times
    clip.write_videofile(out)
Example #28
File: plot.py Project: yooerzf/fish
def nparray_to_video(
    fname,
    data,
    clim="auto",
    cmap="gray",
    codec="h264",
    fps=24,
    ffmpeg_params=["-pix_fmt", "yuv420p"],
):
    """
    Save 3D (t, y, x) numpy array to disk as movie. Uses matplotlib colormaps for rescaling / coloring data,
    and uses moviepy.editor.ImageSequenceClip for movie creation.

    Warning : this function duplicates the input data in memory.

    fname : string
        Filename with extension (.avi, .mp4, etc).

    data : 3D numpy array
        Each 2D array along the first axis of data will be a frame in the movie.

    clim : length-2 list, tuple, or ndarray, or string
        Upper and lower intensity limits to display from data. Defaults to 'auto'
        If clim='auto', the min and max of data will be used as the clim.
        Before applying the colormap, data will be clipped from clim[0] to clim[1].

    cmap : string denoting a matplotlib colormap
        Colormap used for displaying frames from data. Defaults to 'gray'.

    codec : string
        Which video codec to use. Defaults to 'h264'. See moviepy.editor.ImageSequenceClip.write_videofile.

    fps : int or float
        Frames per second of the movie. Defaults to 24.

    ffmpeg_params : list of strings
        Arguments sent to ffmpeg during movie creation. Defaults to ['-pix_fmt', 'yuv420p'], which is necessary for
        creating movies that OSX understands.


    """
    from numpy import pad
    from moviepy.editor import ImageSequenceClip

    # ffmpeg errors if the dimensions of each frame are not divisible by 2
    if data.shape[1] % 2 == 1:
        data = pad(data, ((0, 0), (0, 1), (0, 0)), mode="minimum")

    if data.shape[2] % 2 == 1:
        data = pad(data, ((0, 0), (0, 0), (0, 1)), mode="minimum")

    data_rgba = apply_cmap(data, cmap=cmap, clim=clim)
    clip = ImageSequenceClip([d for d in data_rgba], fps=fps)
    clip.write_videofile(
        fname,
        audio=False,
        codec=codec,
        fps=fps,
        ffmpeg_params=ffmpeg_params,
        bitrate="50000k",
    )
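# Usage sketch for nparray_to_video (not from the original source): a
# synthetic (t, y, x) float stack written out with an explicit intensity
# window. The filename is illustrative; apply_cmap comes from the same
# project as the function above.
import numpy as np

movie = np.random.rand(48, 128, 128).astype("float32")
nparray_to_video("noise.mp4", movie, clim=(0.0, 1.0), cmap="gray", fps=24)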
Example #29
frames = []
radial_intensities = []

for i in sim_pos:
    a = scene(i).render(
        width=pov_width,
        height=pov_height,
        antialiasing=pov_antialiasing,
        remove_temp=False,
    )
    b = np.sum(a, axis=2)
    frames.append(a)
    print(i)

clip = ImageSequenceClip(frames, fps=5)
clip.write_gif("no_soller.gif")

##### render with soller

frames = []
radial_intensities = []

for i in sim_pos:
    a = scene(i, useSoller=True).render(
        width=pov_width,
        height=pov_height,
        antialiasing=pov_antialiasing,
        remove_temp=False,
    )
    b = np.sum(a, axis=2)
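Example #30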
# Use the [moviepy](https://zulko.github.io/moviepy/) library to process images and create a video.
#

# In[ ]:


# Import everything needed to edit/save/watch video clips
from moviepy.editor import VideoFileClip
from moviepy.editor import ImageSequenceClip


# Define pathname to save the output video
output = './output/test_mapping.mp4'
data = Databucket()  # Re-initialize data in case you're running this cell multiple times
# Note: output video will be sped up because
# recording rate in simulator is fps=25
clip = ImageSequenceClip(data.images, fps=60)
# NOTE: this function expects color images!!
new_clip = clip.fl_image(process_image)
get_ipython().magic('time new_clip.write_videofile(output, audio=False)')


# ### This next cell should function as an inline video player
# If this fails to render the video, try running the following cell (alternative video rendering method).  You can also simply have a look at the saved mp4 in your `/output` folder

# In[ ]:


output = './output/test_mapping.mp4'
from IPython.display import HTML
HTML("""
Example #31
def getImageClips(pics, speed):
    return ImageSequenceClip(pics, fps=speed)
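Example #32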
def run(outdir, train_mode):

  # Build network.
  initializer = tf.keras.initializers.VarianceScaling()
  X = tf.placeholder(tf.float32, shape=[None, n_inputs])
  hidden = tf.layers.dense(
      X, N_HIDDEN, activation=tf.nn.elu, kernel_initializer=initializer)
  logits = tf.layers.dense(hidden, n_outputs)
  outputs = tf.nn.sigmoid(logits)  # probability of action 0 (left)
  p_left_and_right = tf.concat(axis=1, values=[outputs, 1 - outputs])
  action = tf.multinomial(tf.log(p_left_and_right), num_samples=1)

  # Optimizer, gradients.
  y = 1. - tf.to_float(action)
  cross_entropy = tf.nn.sigmoid_cross_entropy_with_logits(
      labels=y, logits=logits)
  optimizer = tf.train.AdamOptimizer(LEARNING_RATE)
  grads_and_vars = optimizer.compute_gradients(cross_entropy)
  gradients = [grad for grad, variable in grads_and_vars]
  gradient_placeholders = []
  grads_and_vars_feed = []
  for grad, variable in grads_and_vars:
    gradient_placeholder = tf.placeholder(tf.float32, shape=grad.get_shape())
    gradient_placeholders.append(gradient_placeholder)
    grads_and_vars_feed.append((gradient_placeholder, variable))
  training_op = optimizer.apply_gradients(grads_and_vars_feed)

  # For TensorBoard.
  episode_reward = tf.placeholder(dtype=tf.float32, shape=[])
  tf.summary.scalar('reward', episode_reward)

  init = tf.global_variables_initializer()
  saver = tf.train.Saver()

  if train_mode:
    hp_save_dir = hp_directory(outdir)
    with tf.Session() as sess:
      init.run()
      # For TensorBoard.
      print('hp_save_dir:', hp_save_dir)
      train_writer = tf.summary.FileWriter(hp_save_dir, sess.graph)
      for iteration in range(n_iterations):
        all_rewards = []
        all_gradients = []
        for game in range(N_GAMES_PER_UPDATE):
          current_rewards = []
          current_gradients = []
          obs = env.reset()
          for step in range(n_max_steps):
            action_val, gradients_val = sess.run(
                [action, gradients], feed_dict={X: obs.reshape(1, n_inputs)})
            obs, reward, done, info = env.step(action_val[0][0])
            current_rewards.append(reward)
            current_gradients.append(gradients_val)
            if done:
              break
          all_rewards.append(current_rewards)
          all_gradients.append(current_gradients)
        avg_reward = np.mean(([np.sum(r) for r in all_rewards]))

        print('\rIteration: {}, Reward: {}'.format(iteration, avg_reward),
              end='')
        all_rewards = discount_and_normalize_rewards(
            all_rewards, discount_rate=DISCOUNT_RATE)
        feed_dict = {}
        for var_index, gradient_placeholder in enumerate(gradient_placeholders):
          mean_gradients = np.mean([
              reward * all_gradients[game_index][step][var_index]
              for game_index, rewards in enumerate(all_rewards)
              for step, reward in enumerate(rewards)
          ],
                                   axis=0)
          feed_dict[gradient_placeholder] = mean_gradients
        sess.run(training_op, feed_dict=feed_dict)
        if iteration % save_iterations == 0:
          print('Saving model to ', hp_save_dir)
          model_file = '{}/my_policy_net_pg.ckpt'.format(hp_save_dir)
          saver.save(sess, model_file)
          # Also save event files for TB.
          merge = tf.summary.merge_all()
          summary = sess.run(merge, feed_dict={episode_reward: avg_reward})
          train_writer.add_summary(summary, iteration)
      obs = env.reset()
      steps = []
      done = False
  else:  # Make a gif.
    from moviepy.editor import ImageSequenceClip
    model_file = '{}/my_policy_net_pg.ckpt'.format(outdir)
    with tf.Session() as sess:
      sess.run(tf.global_variables_initializer())
      saver.restore(sess, save_path=model_file)
      # Run model.
      obs = env.reset()
      done = False
      steps = []
      rewards = []
      while not done:
        s = env.render('rgb_array')
        steps.append(s)
        action_val = sess.run(action, feed_dict={X: obs.reshape(1, n_inputs)})
        obs, reward, done, info = env.step(action_val[0][0])
        rewards.append(reward)
      print('Final reward :', np.mean(rewards))
    clip = ImageSequenceClip(steps, fps=30)
    clip.write_gif('cartpole.gif', fps=30)
Example #33
# pip3 install moviepy
from moviepy.editor import ImageSequenceClip

frames = []

for i in range(180):
    frames.append("./globeframes/frame{0}.png".format((str(i).rjust(3, "0"))))


clip = ImageSequenceClip(frames, fps=20)


clip.write_videofile("SpinningGlobe.mp4", fps=20)  # export as video
# clip.speedx(0.5).write_gif("SpinningGlobe.gif", fps=20) # export as GIF (slow)
Example #34
	pub.publish(twist)
	
	#Send a second message to stop the wheel turning
	twist.angular.z = 0
	pub.publish(twist)
		
	key = '' #Reset the key to empty
		
	degChosen.append(deg) #Add the chosen angle to the appropriate array
	
	print round((time.time() - startTime) * 1000) #Print the total time it took for that frame to be processed
	tempAngle += deg #Update the angle
	
	actualDeg.append(tempAngle)
	
turnOff()
expertInput.join() #Have the expert input thread, which should be done, join back to the main thread
	
with open("../datasets/dataset_dagger/data.csv", 'wb') as file: #Open the csv file
	writer = csv.writer(file, delimiter=',', quotechar='|', quoting=csv.QUOTE_MINIMAL) #Create a writer
	writer.writerow(['frame', 'machine angle', 'expert angle', 'chosen angle', 'angle']) #Write the first row that labels the columns
	for i in range (len(degChosen)): #For each frame
		writer.writerow([i, degMArray[i], degHArray[i], degChosen[i], actualDeg[i]]) #Write the angles to the csv file
		
#Remove the last frame in the case that there aren't enough angles recorded
if not len(frameArray) == len(actualDeg):
	frameArray.pop()

#Create a video using the frames collected
clip = ImageSequenceClip(frameArray, fps=15)
clip.write_videofile('../datasets/dataset_dagger/dagger.mp4')
Example #35
def create_video_clip_from_frames(frame_list, fps):
    """ Function takes a list of video frames and puts them together in a sequence"""
    visual_clip = ImageSequenceClip(
        frame_list, fps=fps)  #put frames together using moviepy
    return visual_clip  #return the ImageSequenceClip
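# Usage sketch (not from the original source): build a short clip from a list
# of synthetic RGB frames and write it out; the filename is illustrative.
import numpy as np

frames = [np.full((64, 64, 3), v, dtype=np.uint8) for v in range(0, 255, 5)]
clip = create_video_clip_from_frames(frames, fps=24)  # fade-to-white sequence
clip.write_videofile("fade.mp4")
Example #36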
        """
        self.frame_ct += 1

        self.current_heatmap = self.car_search_retinanet(img)
        self.update_history()  # update lane parameters
        # save the annotated images
        heatmap = apply_threshold(self.current_heatmap, self.heatmap_thresh)
        labels = label(heatmap)
        imcopy = np.copy(img)
        draw_labeled_bboxes(imcopy, labels)
        self.antn_img.append(imcopy)

    def update_history(self):
        if self.detected == False:
            self.recent_heatmap.append(np.copy(self.current_heatmap))
            self.detected = True
        else:
            self.current_heatmap = self.decay_factor * self.current_heatmap + \
                (1 - self.decay_factor) * self.recent_heatmap[-1]
            self.recent_heatmap.append(np.copy(self.current_heatmap))


if __name__ == "__main__":
    clip = VideoFileClip('project_video.mp4')
    clip = [frame for frame in clip.iter_frames()]
    ct = car_tracker()
    for img in tqdm(clip):
        ct.detect_car_from_img(img)
    gif_clip = ImageSequenceClip(ct.antn_img, fps=25)
    gif_clip.write_videofile('video_output.mp4')
Example #37
class DetectShots():
    def __init__(self, file_path):
        self.frame_diff_interval, self.total_pixels = 10, 320 * 240
        self.shot_similarity_threshold, self.abrupt_trans_cnt = 3, 8
        self.upper_bound, self.lower_bound = 0.45, 0.25

        self.hist, self.fd, self.shots, = [], [], []
        self.total_shots, self.total_frames = 0, 0
        self.key_frames, self.out, self.D = [], [], []
        self.shot_scene, self.scenes = [], []
        self.avg_shot_length, self.shot_cut_freq = [], []

        self.file_path = file_path
        self.file = cv2.VideoCapture(file_path)
        self.filename = file_path[file_path.rfind('/') + 1:]
        self.fps = self.file.get(cv2.CAP_PROP_FPS)
        self.T = int(self.fps * 150)  #2.5min

    # def multiprocessed_fd_calc(self):

    # 	self.total_frames = int(self.file.get(cv2.CAP_PROP_FRAME_COUNT))
    # 	with mp.Pool(4) as p:
    # 		a = p.apply_async(
    # 			calc_frame_diff_v2,
    # 			args=(1, self.total_frames//4, self.file_path, self.frame_diff_interval, 1, self.total_pixels)
    # 		)

    # 		b = p.apply_async(
    # 			calc_frame_diff_v2,
    # 			args=(self.total_frames//4, self.total_frames//2, self.file_path, self.frame_diff_interval, 2, self.total_pixels)
    # 		)

    # 		c = p.apply_async(
    # 			calc_frame_diff_v2,
    # 			args=(self.total_frames//2, 3*self.total_frames//4, self.file_path, self.frame_diff_interval, 3, self.total_pixels)
    # 		)

    # 		d = p.apply_async(
    # 			calc_frame_diff_v2,
    # 			args=(3*self.total_frames//4, self.total_frames, self.file_path, self.frame_diff_interval, 4, self.total_pixels)
    # 		)

    # 		p.close()
    # 		p.join()

    # 	print("its done")
    # 	res1, res2, res3, res4 = a.get(), b.get(), c.get(), d.get()
    # 	self.fd = res1["fd"] + res2["fd"] + res3["fd"] + res4["fd"]
    # 	self.hist = res1["hist"] + res2["hist"] + res3["hist"] + res4["hist"]

    def multithreaded_fd_calc(self):
        """
		Creates threads and file pointers for multithreading frame
		difference calculation

		"""

        self.total_frames = int(self.file.get(cv2.CAP_PROP_FRAME_COUNT))
        self.hist = [0] * self.total_frames
        self.fd = [0] * self.total_frames

        # need different file pointers for multithreading
        tmp_file1 = cv2.VideoCapture(self.file_path)
        tmp_file2 = cv2.VideoCapture(self.file_path)

        # each function processes a third of the file
        f1 = lambda: self.calc_frame_diff(1, self.total_frames // 3, self.file)
        f2 = lambda: self.calc_frame_diff(self.total_frames // 3, 2 * self.
                                          total_frames // 3, tmp_file1)
        f3 = lambda: self.calc_frame_diff(2 * self.total_frames // 3, self.
                                          total_frames, tmp_file2)

        a = HelperThread("Frame Difference 1", f1)
        b = HelperThread("Frame Difference 2", f2)
        c = HelperThread("Frame Difference 3", f3)
        a.start(), b.start(), c.start()
        a.join(), b.join(), c.join()

        tmp_file2.release(), tmp_file1.release()

    def calc_frame_diff(self, st, end, file):
        """
		Calculates the histogram difference between frames 
		self.frame_diff_interval apart. Helps detect shot boundaries.

		"""

        # set the opencv file pointer to read frame no st next
        file.set(1, st - 1)

        # housekeeping
        counter = max(0, st - 1)

        # iterate for the entire range of frames (st, end)
        while file.isOpened() and counter < end:
            suc, fr = file.read()
            if not suc: break

            fr = cv2.resize(fr, (320, 240))
            gray_fr = cv2.cvtColor(fr, cv2.COLOR_BGR2GRAY)

            # the frame's histogram
            hist_fr = cv2.calcHist([gray_fr], [0], None, [256], [0, 256])

            # save the histogram/frame diff -
            # calculating it for frames self.frame_diff_interval apart
            # helps detect both gradual and abrupt transitions
            if counter >= self.frame_diff_interval:
                tmp = np.abs(hist_fr -
                             self.hist[counter - self.frame_diff_interval])
                fd = np.sum(tmp) / (2 * self.total_pixels)
                self.fd[counter - self.frame_diff_interval] = fd

            # save the histogram
            self.hist[counter] = hist_fr
            counter += 1

    def find_shots(self):
        """
		Using the frame difference calculated before, it finds shot 
		boundaries and saves the end frame no. of each shot in
		self.shots.

		"""

        counter = 0

        # a local maxima in FD indicates a potential gradual transition
        diff_fd = np.diff(self.fd)

        # iterate!
        for i in range(2, self.total_frames - 2):

            # naively checking for local maxima
            # in diff_fd, x corresponds to x-1
            local_max = diff_fd[i + 1] < 0 and diff_fd[i] > 0

            # if this FD is above a threshold and a local max, it
            # corresponds to a gradual transition boundary
            if self.fd[i] > self.upper_bound and local_max:

                # add only if shots are 8s apart
                if not search(self.shots, int(i - self.fps * 8),
                              int(i + self.fps * 8)):
                    self.shots.append(i - self.frame_diff_interval)

            # check for abrupt transition boundary
            else:

                # if a series of FD are above a certain threshold, it
                # is an abrupt transition
                if self.fd[i] > self.lower_bound and self.fd[
                        i - 1] > self.lower_bound:
                    counter += 1
                else:
                    counter = 0

                if counter >= self.abrupt_trans_cnt:
                    counter = 0
                    if not search(self.shots, i - self.fps * 8,
                                  i + self.fps * 8):
                        self.shots.append(i - self.abrupt_trans_cnt -
                                          self.frame_diff_interval)

        self.total_shots = len(self.shots)

    def find_key_frames(self):

        self.key_frames = []

        with mp.Pool(4) as p:

            args = [self.shots, self.hist, self.total_pixels]

            a = p.apply_async(find,
                              args=(
                                  *args,
                                  0,
                                  self.total_shots // 2,
                                  1,
                                  self.fps,
                              ))

            b = p.apply_async(find,
                              args=(
                                  *args,
                                  self.total_shots // 2,
                                  self.total_shots,
                                  2,
                                  self.fps,
                              ))

            p.close()
            p.join()

        self.key_frames = a.get() + b.get()

    def group_into_scenes(self):
        D = []

        for i in range(0, self.total_shots):
            lim, j = self.key_frames[i][0] + self.T, i + 1
            while j < self.total_shots and self.key_frames[j][0] <= lim:
                tmp = np.array([[0] * 256]).reshape((256, 1))
                for i_key_frame in self.key_frames[i]:
                    for j_key_frame in self.key_frames[j]:
                        x = np.abs(self.hist[i_key_frame] -
                                   self.hist[j_key_frame])
                        tmp = tmp + x

                tmp = np.sum(tmp) / (2 * self.total_pixels)
                D.append({'f1': i, 'f2': j, 'fd': tmp})
                j += 1

        self.shot_scene = [i for i in range(0, self.total_shots)]
        D.sort(key=lambda o: o["fd"])
        for ob in D:
            if ob['fd'] > self.shot_similarity_threshold:
                break
            self.shot_scene[ob['f1']] = ob['f1']
            self.shot_scene[ob['f2']] = ob['f1']

        def path_compress(index, arr):
            if arr[index] == index:
                return
            path_compress(arr[index], arr)
            arr[index] = arr[arr[index]]

        for i in range(0, self.total_shots):
            path_compress(i, self.shot_scene)

    def process(self):
        """
		Runs the full pipeline: frame-difference calculation, shot
		detection, key-frame extraction and scene grouping.

		"""

        self.multithreaded_fd_calc()
        self.find_shots()
        self.find_key_frames()
        self.group_into_scenes()

    def get_shots(self):
        # convert = lambda a : a//60 + (a/60 - a//60)*0.6
        shots = [i / self.fps for i in self.shots]
        return {"timestamps": shots, "frames": self.shots}

    def get_key_frames(self):
        frame_time_stamps = []
        for shot in self.key_frames:
            frame_time_stamps.append([])
            for key_frame in shot:
                frame_time_stamps[-1].append(key_frame / self.fps)

        return {"timestamps": frame_time_stamps, "frames": self.key_frames}

    def get_scenes(self):
        self.scenes = [[] for i in range(0, self.total_shots)]

        for i in range(self.total_shots):
            # the ith shot belongs to the scene rooted at shot self.shot_scene[i]
            self.scenes[self.shot_scene[i]].append(i)

        # removing empty entries (shots that weren't a scene root) leaves the scenes
        while self.scenes.count([]) > 0:
            self.scenes.remove([])

        return self.scenes

    def get_average_shot_length(self):
        self.avg_shot_length = []
        tot_scene = len(self.scenes)
        for i in range(0, tot_scene):
            total, cnt = 0, 0
            for ob in self.scenes[i]:
                # a shot's length is the gap between consecutive end frames
                pre = 0 if ob == 0 else self.shots[ob - 1]
                total += (self.shots[ob] - pre)
                cnt += 1

            self.avg_shot_length.append(total / cnt)

        return np.array(self.avg_shot_length)

    def get_shot_cut_freq(self):
        self.shot_cut_freq = []
        for i in range(0, len(self.scenes)):
            self.shot_cut_freq.append(1 / len(self.scenes[i]))

        return self.shot_cut_freq

    def save_key_frames(self):
        frame_list = []
        for shot in self.key_frames:
            for fr in shot:
                # 1 == cv2.CAP_PROP_POS_FRAMES: seek to the key frame
                self.file.set(cv2.CAP_PROP_POS_FRAMES, fr - 1)
                fr = np.array(self.file.read()[1])
                fr = cv2.cvtColor(fr, cv2.COLOR_BGR2RGB)
                frame_list.append(fr)

        self.out = ImageSequenceClip(frame_list, fps=1)
        self.out.write_videofile('[FabBits] ' + self.filename, codec='libx264')

    def save(self):
        self.save_key_frames()
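
The gradual-transition test in find_shots above hinges on a sign change in np.diff of the frame-difference signal. A minimal, self-contained sketch with a synthetic signal and an illustrative threshold (both made up here) shows the same check in isolation:

import numpy as np

# synthetic frame-difference (FD) signal: a gradual transition shows up
# as a smooth hump rather than a single sharp spike
fd = np.array([0.1, 0.1, 0.2, 0.5, 0.9, 0.6, 0.2, 0.1])
diff_fd = np.diff(fd)  # diff_fd[x] = fd[x + 1] - fd[x]
upper_bound = 0.4      # illustrative threshold

for i in range(1, len(fd) - 2):
    # rising then falling first difference marks a local maximum
    local_max = diff_fd[i + 1] < 0 and diff_fd[i] > 0
    if fd[i] > upper_bound and local_max:
        print("potential gradual-transition boundary near frame", i)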
Example #38
0
from moviepy.editor import ImageSequenceClip

img_names = [str(i) + '.png' for i in range(10)]
clip = ImageSequenceClip(img_names, fps=2)
clip.write_gif('herd_immune.gif')
Example #39
0
    # Accumulator to smooth detections across frames; note that auxBox
    # aliases boxes, so the slice deletion below trims the shared list
    auxBox = boxes
    if not math.fmod(count, 10):
        auxBox[0:int(len(boxes) * 0.35)] = []
        releaseBox = auxBox

    # Detecting Heat labels
    heat = add_heat(heat, releaseBox)
    heat = apply_threshold(heat, 3)
    heatmap = np.clip(heat, 0, 255)
    labels = label(heatmap)
    draw_labeled, Labeled_bboxList = draw_labeled_bboxes(
        np.copy(C_frames), labels)

    bb = np.array(Labeled_bboxList)
    bb_NMS = non_max_suppression_fast(bb, 0.3)

    # Save frame by frame (debug purposes)
    draw_bboxes(C_frames_B, bb_NMS, (0, 0, 220))

    path = './temp/' + '{:05d}'.format(count) + 'frames' + '.png'
    cv2.imwrite(path, C_frames_B)

    count += 1

# glob order is arbitrary; sort so the zero-padded frames play in sequence
temp = sorted(glob.glob('temp/*.png'))
clip = ImageSequenceClip(temp, fps=24)
clip.write_videofile('finalisimo000.mp4')

#video.write_videofile('video.mp4', audio=False)
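
The non_max_suppression_fast helper is not defined in this snippet. A sketch in the spirit of the widely circulated Malisiewicz-style fast NMS, assuming boxes arrive as an (N, 4) array of [x1, y1, x2, y2] as the call above suggests:

import numpy as np

def non_max_suppression_fast(boxes, overlap_thresh):
    """Greedy non-maximum suppression over [x1, y1, x2, y2] boxes (sketch)."""
    if len(boxes) == 0:
        return boxes
    boxes = boxes.astype(float)
    x1, y1, x2, y2 = boxes[:, 0], boxes[:, 1], boxes[:, 2], boxes[:, 3]
    area = (x2 - x1 + 1) * (y2 - y1 + 1)
    idxs = np.argsort(y2)
    keep = []
    while len(idxs) > 0:
        last = idxs[-1]
        keep.append(last)
        # intersection of the picked box with every remaining candidate
        xx1 = np.maximum(x1[last], x1[idxs[:-1]])
        yy1 = np.maximum(y1[last], y1[idxs[:-1]])
        xx2 = np.minimum(x2[last], x2[idxs[:-1]])
        yy2 = np.minimum(y2[last], y2[idxs[:-1]])
        w = np.maximum(0, xx2 - xx1 + 1)
        h = np.maximum(0, yy2 - yy1 + 1)
        overlap = (w * h) / area[idxs[:-1]]
        # drop the picked box and everything overlapping it too much
        idxs = idxs[:-1][overlap <= overlap_thresh]
    return boxes[keep].astype(int)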
Example #40
0
            display=DISPLAY_FRAMES,
            pov_thread=THREAD_PER_AGENT,
        )
        with Pool(NUM_AGENT) as p:
            for message in p.imap_unordered(func, batch):
                # (TODO) POVray error within child process could be an issue
                pbar.update()
    else:
        for filename in batch:
            render(
                filename,
                width=WIDTH,
                height=HEIGHT,
                display=DISPLAY_FRAMES,
                pov_thread=multiprocessing.cpu_count(),
            )
            pbar.update()

    # Create Video using moviepy
    for view_name in stage_scripts.keys():
        imageset_path = os.path.join(OUTPUT_IMAGES_DIR, view_name)
        imageset = [
            os.path.join(imageset_path, path)
            for path in os.listdir(imageset_path)
            if path.endswith(".png")
        ]
        imageset.sort()
        filename = OUTPUT_FILENAME + "_" + view_name + ".mp4"
        clip = ImageSequenceClip(imageset, fps=FPS)
        clip.write_videofile(filename, fps=FPS)
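
imageset.sort() above is a plain lexicographic sort, which only yields frame order when names are zero-padded (as with the '{:05d}' pattern in Example #39). A small sketch of the pitfall and a numeric-key fix, using hypothetical file names:

import re

names = ["frame_2.png", "frame_10.png", "frame_1.png"]  # hypothetical names
print(sorted(names))
# -> ['frame_1.png', 'frame_10.png', 'frame_2.png']  (lexicographic, wrong order)

numeric = sorted(names, key=lambda n: int(re.search(r"\d+", n).group()))
print(numeric)
# -> ['frame_1.png', 'frame_2.png', 'frame_10.png']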
Example #41
0
                    cv2.FONT_HERSHEY_SIMPLEX, 1.5, (200, 200, 200), 3)
        # Put the sub image in final image
        y_start = int(n * sub_img_size[1])
        inpaint[1080:1656, y_start:y_start + sub_img_size[1], :] = cv2.resize(
            sub_imgs[n], (sub_img_size[1], sub_img_size[0]))

    # Put Border for 16:9 scaling
    inpaint = cv2.resize(
        cv2.copyMakeBorder(inpaint, 0, 0, 512, 512, cv2.BORDER_CONSTANT),
        (1920, 1080))

    cv2.imshow('Lane Detection', inpaint)
    cv2.waitKey(1)

    # Save this Frame
    if not os.path.exists(output_video_frames):
        os.makedirs(output_video_frames)

    cv2.imwrite(
        os.path.join(output_video_frames,
                     'frame_' + str(frame_id).zfill(4) + '.png'), inpaint)

clip.release()

# Create output video
print('Creating Video.')

video_file = output_video_file
clip = ImageSequenceClip(output_video_frames, fps=25)
clip.write_videofile(video_file)
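
This example sidesteps colour-channel issues by routing frames through PNG files on disk. When OpenCV frames are handed to ImageSequenceClip as arrays instead, they must be converted from BGR to RGB first, as the save_key_frames method earlier does. A minimal sketch, assuming two hypothetical frame files:

import cv2
from moviepy.editor import ImageSequenceClip

frames_rgb = []
for path in ["frame_0001.png", "frame_0002.png"]:  # hypothetical paths
    bgr = cv2.imread(path)                          # OpenCV loads as BGR
    frames_rgb.append(cv2.cvtColor(bgr, cv2.COLOR_BGR2RGB))

clip = ImageSequenceClip(frames_rgb, fps=25)
clip.write_videofile("out.mp4")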
Example #42
0
def make_gif(files, name):
    from moviepy.editor import ImageSequenceClip
    clip = ImageSequenceClip(files, fps=5)
    clip.write_gif(name, fps=5)
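
A possible invocation, assuming a handful of hypothetical frame files on disk:

files = ["step_%02d.png" % i for i in range(10)]  # hypothetical frames
make_gif(files, name="progress.gif")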
Example #43
0
        in_file = os.path.join(input_dir, file_name)
        out_file = os.path.join(out_dir, file_name)
        print(out_file)
        iresult = np.zeros(img.shape)
        iimg = mpimg.imread(in_file)
        try:
            iresult = pipeline_final(iimg, mtx, dist, M, M_inv)
        except Exception:
            print("Processing failed for frame: " + str(i))
        mpimg.imsave(out_file, iresult)
        isorted.append(out_file)
print("Done storing images")

import os
from moviepy.editor import ImageSequenceClip
img_dir = "project_video29"
images = os.listdir(img_dir)
print(len(images))
image_list = []
count = len(images)
for i in range(1, count):
    file_name = str(i) + ".jpg"
    out_file = os.path.join(img_dir, file_name)
    image_list.append(out_file)
video_file = img_dir + "_video.mp4"
fps = 20
print("Creating video {}, FPS={}".format(video_file, fps))
clip = ImageSequenceClip(image_list, fps=fps)
clip.write_videofile(video_file)
print("Done creating video")
Example #44
0
def main():
    if not os.path.exists("bot-fights"):
        os.makedirs("bot-fights")

    h = html2text.HTML2Text()

    mastodon = Mastodon(client_id='pokefight.secret',
                        access_token='user_pokefight.secret',
                        api_base_url='https://social.wxcafe.net')

    if os.path.exists(".since_id") and open(".since_id").read().isdigit():
        since_id = int(open(".since_id").read())
    else:
        since_id = 0

    while 42:
        for i in reversed(mastodon.notifications(since_id=since_id)):
            # we don't care about the rest
            if i["type"] != "mention":
                continue

            status = i["status"]
            status_id = status["id"]
            visibility = status["visibility"]

            since_id = max(since_id, i["id"])

            message = h.handle(status["content"]).strip()

            # here html2text will remove the @domain.com from [email protected]
            # because it's hidden in the html, so let's put it back
            for mention in status["mentions"]:
                link_re = r'\[[^)]+\]\(' + re.escape(mention["url"]) + r'\)'
                message = re.sub(link_re, mention["acct"], message)

            message = re.sub(r"\s+", " ", message.lower())

            print()
            # repr to avoid writing over several lines
            print("[%s] %s" % (i["id"], repr(message)))

            if not message.startswith(
                ("@pokefight", "pokefight", "[@pokefight")):
                # we ignore statuses where we aren't directly mentioned
                continue

            match = REGEX.search(message)

            if not match:
                # answer that you've failed
                print "message '%s' didn't matched regex" % message
                print mastodon.status_post(
                    "@%s sorry, I couldn't understand your command :(\n\nPlease send me a message in this form:\n\n    @pokefight [email protected] used some power on [email protected]\n\nOr:\n\n    @pokefight [email protected] used some power on [email protected], not effective\n\nIf you'd like the power to be not effective)"
                    % i["account"]["acct"],
                    in_reply_to_id=status_id,
                    visibility="direct",
                )["uri"]

                continue

            _, attacker, power, _, defender, effectiveness = match.groups()

            if attacker.startswith("["):
                print(attacker, "->", attacker.split("(")[1][:-1])
                attacker = attacker.split("(")[1][:-1]

            if defender.startswith("["):
                print(defender, "->", defender.split("(")[1][:-1])
                defender = defender.split("(")[1][:-1]

            effective = effectiveness in ("effective", None)

            try:
                attacker, defender = fill_users(mastodon, attacker, defender)

                action, result = generate_images(
                    attacker,
                    defender,
                    power,
                    text=("It's super-", "effective!") if effective else
                    ("It's not very", "effective..."))
            except Exception as e:
                import traceback
                traceback.print_exc()
                print(e)
                continue

            action_filename = "bot-fights/" + str(status_id) + "_action.png"
            result_filename = "bot-fights/" + str(status_id) + "_result.png"
            mp4_filename = "bot-fights/" + str(status_id) + ".mp4"

            action.save(action_filename)
            result.save(result_filename)
            print "-> %s %s" % (action_filename, result_filename)

            # mastodon's compression/conversion algorithm seems to ignore the
            # last frame of a gif/mp4 (or at least reduce its duration by
            # something like 0.1 or 0.01 seconds for some reason),
            # therefore a 2-frame gif looks like a 1-frame gif, which sucks.
            # to fix this, we repeat the image sequence several times and
            # remove the last frame; this seems to fix it (yes, that's ugly)
            CompositeVideoClip([
                ImageSequenceClip(
                    ([action_filename, result_filename] * 20)[:-1],
                    fps=(1. / 2.5))
            ]).write_videofile(mp4_filename, fps=(1 / 2.5))

            mp4_media_post = mastodon.media_post(mp4_filename)

            if i["account"]["acct"] not in (attacker.acct, defender.acct):
                text = ".@%s used %s on @%s! (from @%s)" % (
                    attacker.acct, power, defender.acct, i["account"]["acct"]),
            else:
                text = ".@%s used %s on @%s!" % (attacker.acct, power,
                                                 defender.acct),

            print(mastodon.status_post(
                text,
                # ".%s used %s on %s! (from @%s)" % (attacker.acct, power, defender.acct, i["account"]["acct"]),
                in_reply_to_id=status_id,
                media_ids=[mp4_media_post],
                # don't spam the global timeline
                visibility=visibility
                if visibility != "public" else "unlisted",
            )["uri"])

        open(".since_id", "w").write(str(since_id))
        time.sleep(5)
Example #45
0
for root, dirs, files in os.walk(thumbnail_per_frame_dir):
    for fname in files:
        if fname.endswith(".jpg"):
            filepath = os.path.join(root, fname)
            try:
                key = int(fname.replace('.jpg', ''))
            except Exception as e:
                print(e)
                key = None

            if key is not None:
                directory[key] = filepath

new_paths = []
for k in sorted(directory.keys()):
    new_filepath = directory[k]
    new_paths.append(new_filepath)

clip1 = ImageSequenceClip(new_paths, fps=30)
clip1.write_videofile(output_video1)  # without audio

# can also convert each filepath into its own frame
my_clips = []
for path in new_paths:
    frame = ImageClip(path)
    # frame.img is the frame's underlying numpy array
    my_clips.append(frame.img)

clip2 = ImageSequenceClip(my_clips, fps=30)
clip2.write_videofile(output_video2)  # without audio
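
The two clips above show that ImageSequenceClip accepts more than one input form; per the moviepy docs it takes a folder name, a list of image paths, or a list of numpy arrays. A compact sketch with hypothetical inputs:

import numpy as np
from moviepy.editor import ImageSequenceClip

# 1) a folder of images (hypothetical directory, read in alphanumeric order)
c1 = ImageSequenceClip("frames_dir", fps=30)

# 2) an explicit, ordered list of file paths
c2 = ImageSequenceClip(["f0.png", "f1.png"], fps=30)

# 3) frames already in memory as numpy arrays
frames = [np.zeros((64, 64, 3), dtype=np.uint8) for _ in range(10)]
c3 = ImageSequenceClip(frames, fps=30)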
Example #46
0
        print(y_pos.shape)
        print(pol.shape)

        (timestamps, x_pos,
         y_pos, pol) = dvsproc.clean_up_events(timestamps, x_pos,
                                               y_pos, pol, window=1000)

        frames, fs, _ = dvsproc.gen_dvs_frames(timestamps, x_pos, y_pos,
                                               pol, num_frames, fs=3)
        print "Length of produced frames: ", len(frames)
        new_frames = []
        for frame in frames:
            tmp_frame = (((frame+fs)/float(2*fs))*255).astype(np.uint8)
            new_frames.append(tmp_frame)

        clip = ImageSequenceClip(new_frames, fps=20)
        clip.write_gif(seq_save_path, fps=30)

        print "Sequence %s is saved at %s" % (img_name, seq_save_path)
elif option == "caltech256-ps":
    caltech_fn = "INI_Caltech256_10fps_20160424.hdf5"
    caltech_path = os.path.join(data_path, caltech_fn)
    caltech_db = h5py.File(caltech_path, mode="r")
    caltech_stats_path = os.path.join(stats_path, "caltech256_stats.pkl")
    caltech_save_path = os.path.join(data_path, "caltech256_ps.eps")
    img_num = 60

    # file() is Python 2 only, and pickle wants a binary file object
    with open(caltech_stats_path, mode="rb") as f:
        caltech_stats = pickle.load(f)