Example #1
    def make(self):
        videos = []
        for notes in self.sheet.lines:
            clips = []
            for note in notes:
                if note[0] == 'rest':
                    img = ColorClip((720, 404), (0, 0, 0),
                                    duration=note[1] / 1000)
                    clips.append(img)
                    continue
                target_video = self.get_video(note[0], note[1], note[2])
                clips.append(target_video)

            video = concatenate_videoclips(clips).margin(10)
            videos.append(video)
        num_of_lines = len(self.sheet.lines)
        if num_of_lines == 1:
            result = videos[0]
        elif num_of_lines == 2:
            result = clips_array([[videos[0]], [videos[1]]])
        elif num_of_lines == 4:
            result = clips_array([[videos[0], videos[1]],
                                  [videos[2], videos[3]]])
        else:
            print("Num of lines should be 1, 2, or 4")
            exit(1)

        self.result = './result/' + self.name + '.mp4'
        result.write_videofile(self.result)
Example #2
def video_collage(vid_names, ordering, probs, correct_class_prob, labels):
    sel_clips = []
    for i in range(6):
        vname = vid_names[ordering[i]]
        # Load all the metadata for this video
        with open(vname.replace(
                'images/', 'scenes/').replace('.avi', '.json'), 'r') as fin:
            metadata = json.load(fin)
        gt = np.argmax(labels[ordering[i]])
        pred = np.argmax(probs[ordering[i]])
        pred_map = location_predictions_to_map(probs[ordering[i]], metadata)
        print(vname, gt, pred, correct_class_prob[ordering[i]])
        main_video = VideoFileClip(vname).margin(3)
        sel_clips.append(CompositeVideoClip([
            clips_array([[
                main_video,
                ImageSequenceClip(pred_map, fps=main_video.fps),
            ]]),
            TextClip(
                "GT {} Pred {}".format(gt, pred),
                font=MOVIEPY_FONT)
            .set_pos((10, 10))
            .set_duration(main_video.duration),
        ]))
    return clips_array(chunkify(sel_clips, 3))
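chunkify is not defined in these snippets; a minimal sketch consistent with how it is used here and in Example #11 (splitting a flat clip list into rows of n for clips_array) might look like this:
def chunkify(lst, n):
    # Hypothetical helper: split lst into consecutive rows of n items each.
    return [lst[i:i + n] for i in range(0, len(lst), n)]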
Example #3
def write_video_summary(cm,
                        file_name,
                        p_frame,
                        p_save,
                        global_step=None,
                        fps=12):
    check_and_create_dir(p_save)
    for u in cm:
        for v in cm[u]:
            tag = "true_%d_prediction_%d" % (u, v)
            if global_step is not None:
                tag += "_step_%d" % global_step
            grid_x = []
            grid_y = []
            items = cm[u][v]
            for idx in items:
                frames = np.load(p_frame + file_name[idx] + ".npy")
                shape = frames.shape
                if shape[3] == 2:  # this means that the file contains optical flow frames (x and y)
                    tmp = np.zeros((shape[0], shape[1], shape[2], 3),
                                   dtype=np.float64)
                    for i in range(shape[0]):
                        # To visualize the flow, we need to first convert flow x and y to hsv
                        flow_x = frames[i, :, :, 0]
                        flow_y = frames[i, :, :, 1]
                        magnitude, angle = cv.cartToPolar(flow_x / 255,
                                                          flow_y / 255,
                                                          angleInDegrees=True)
                        tmp[i, :, :, 0] = angle  # channel 0 represents direction
                        tmp[i, :, :, 1] = 1  # channel 1 represents saturation
                        tmp[i, :, :, 2] = magnitude  # channel 2 represents magnitude
                        # Convert the hsv to rgb
                        tmp[i, :, :, :] = cv.cvtColor(
                            tmp[i, :, :, :].astype(np.float32),
                            cv.COLOR_HSV2RGB)
                    frames = tmp
                else:  # this means that the file contains rgb frames
                    frames = frames / 255  # tensorboard needs the range between 0 and 1
                if frames.dtype != np.uint8:
                    frames = (frames * 255).astype(np.uint8)
                frames = ImageSequenceClip(list(frames), fps=fps)
                grid_x.append(frames)
                if len(grid_x) == 8:
                    grid_y.append(grid_x)
                    grid_x = []
            if len(grid_x) != 0:
                grid_y.append(grid_x)
            if len(grid_y) > 1 and len(grid_y[-1]) != len(grid_y[-2]):
                grid_y = grid_y[:-1]
            try:
                clips_array(grid_y).write_videofile(p_save + tag + ".mp4")
            except Exception as ex:
                for a in grid_y:
                    print(len(a))
                print(ex)
Example #4
def side_by_side_videos(test_video_file_path, ref_video_file_path,
                        target_file_path):
    # aiming for 800 x 1200 pixels
    w0 = 800
    h0 = 1200
    desired_ratio = w0 / h0

    clip1 = VideoFileClip(test_video_file_path)
    clip2 = VideoFileClip(ref_video_file_path)

    w1 = clip1.w
    h1 = clip1.h
    if w1 / h1 > desired_ratio:
        # scale width
        clip1 = clip1.resize(w0 / w1)
    else:
        # scale height
        clip1 = clip1.resize(h0 / h1)

    w2 = clip2.w
    h2 = clip2.h
    if w2 / h2 > desired_ratio:
        # scale width
        clip2 = clip2.resize(w0 / w2)
    else:
        # scale height
        clip2 = clip2.resize(h0 / h2)
    video = clips_array([[clip1, clip2]])
    video.write_videofile(target_file_path)
Example #5
def collage(output_video, *input_videos):
    input_clips = []
    for path in input_videos:
        video_clip = VideoFileClip(path)
        _, _, amp = os.path.basename(path).partition("@")
        amp, _, _ = amp.partition('.')
        text_clip = (TextClip(
            txt='Amplified {}'.format(amp) if amp else 'Input',
            color='white',
            method='label',
            fontsize=32,
            font='Helvetica-Bold').set_duration(
                video_clip.duration).set_position(('center', 0.05),
                                                  relative=True))
        clip = CompositeVideoClip((video_clip, text_clip), use_bgclip=True)
        input_clips.append(clip)
    if len(input_clips) < 4:
        num_columns = 1
    elif len(input_clips) < 5:
        num_columns = 2
    else:
        num_columns = 3
    final_clip = clips_array([
        input_clips[i:i + num_columns]
        for i in range(0, len(input_clips), num_columns)
    ])
    final_clip.write_videofile(output_video, audio=False)
    return output_video
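A hedged usage sketch (the file names are placeholders): the text between "@" and the extension becomes the "Amplified ..." label, and files without "@" are labelled "Input".
collage("collage.mp4", "baby.mp4", "baby@20.mp4", "baby@50.mp4")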
Example #6
def make_video_grid_from_filepaths(num_rows,
                                   num_cols,
                                   video_list,
                                   trgt_name,
                                   margin_color=(255, 255, 255),
                                   margin_width=0,
                                   column_wise=True):

    clip_array = [[] for _ in range(num_rows)]
    for col in range(num_cols):
        for row in range(num_rows):
            if column_wise:
                idx = col * num_rows + row
            else:
                idx = row * num_cols + col

            # apply the margin once, and only when one was requested
            video_clip = VideoFileClip(video_list[idx])
            if margin_width > 0:
                video_clip = video_clip.margin(margin_width,
                                               color=margin_color)

            clip_array[row].append(video_clip)

    final_clip = clips_array(clip_array)
    save_video(final_clip, trgt_name)
Example #7
def stage_mix_compare(args):
    audio_dir = set_audio_dir(args.audio_dir)
    out_fps = float(args.out_fps)
    out_dir = args.out_dir
    vid_h = args.vid_h
    vid_w = args.vid_w
    margin = args.margin

    files = sorted(listdir(out_dir))
    for vid_name in files:
        if "Fake" in vid_name:
            vid_name = join(out_dir, vid_name)
            Fake_vid = VideoFileClip(vid_name).margin(margin)
        elif "GT" in vid_name:
            vid_name = join(out_dir, vid_name)
            GT_vid = VideoFileClip(vid_name).margin(margin)

    # mixing video
    mixing_clip = clips_array([[GT_vid, Fake_vid]])
    # setting audio
    audioclip = VideoFileClip(audio_dir[0]).subclip(frame_to_time(audio_dir[1], out_fps),
                                                    frame_to_time(audio_dir[2], out_fps))
    audioclip = audioclip.audio
    mixing_clip.audio = audioclip
    out_vid_name = join(out_dir, args.vid_folder + '_.mp4')
    mixing_clip.resize(width=vid_w).write_videofile(out_vid_name, fps=out_fps)
    mixing_clip.close()
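frame_to_time is not defined in this snippet; a minimal sketch of what it presumably does (converting a frame index to seconds at the given fps) is:
def frame_to_time(frame_idx, fps):
    # Hypothetical helper: frame index -> timestamp in seconds.
    return frame_idx / fps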
Example #8
    def array_clips(self, video_1: str, video_2: str,
                    destination: str) -> None:
        clip_1 = VideoFileClip(video_1)
        clip_2 = VideoFileClip(video_2)
        joined = clips_array([[clip_1, clip_2]])
        final = concatenate_videoclips([joined])
        final.write_videofile(destination)
Example #9
def save_video(frames_dict, fps=13):
    frame_keys = sorted(six.iterkeys(frames_dict))

    grid_dim = 2  # int(math.ceil(math.sqrt(len(frame_keys)) / 2.0))
    clips = []
    for frame_key in frame_keys:
        h, w, _ = frames_dict[frame_key][0].shape
        h += 40
        w += 40
        # title = TextClip(str(frame_key), color='black', fontsize=25, method='label')
        # title.set_position(("center", "top"))
        animation = ImageSequenceClip(frames_dict[frame_key], fps=fps)
        animation = animation.on_color(size=(w, h), color=(255, 255, 255))
        # animation.set_position(("center", "bottom"))
        # video = CompositeVideoClip([animation, title], size=(h, w))
        # clips.append(clips_array([[title], [animation]]))  # clips_array([[title], [animation]]))
        clips.append(animation)  # clips_array([[title], [animation]]))

    arranged_clips = []
    start = 0
    i = 0
    while start < len(clips):
        arranged_clips.append(clips[i * grid_dim:(i + 1) * grid_dim])
        i += 1
        start = i * grid_dim

    under = len(arranged_clips[0]) - len(arranged_clips[-1])
    if under > 0:
        for i in range(under):
            arranged_clips[-1].append(arranged_clips[-1][-1])
    final_clip = clips_array(arranged_clips)
    #final_clip.write_videofile("data_3_14_results_output.mp4", codec='mpeg4')
    final_clip.write_videofile("results_output.mp4", codec='libx264')
Example #10
def test_clips_array_duration():
    # NOTE: anyone knows what behaviour this sets ? If yes please replace
    # this comment.
    red = ColorClip((256, 200), color=(255, 0, 0))
    green = ColorClip((256, 200), color=(0, 255, 0))
    blue = ColorClip((256, 200), color=(0, 0, 255))

    video = clips_array([[red, green, blue]]).set_duration(5)
    with pytest.raises(AttributeError) as exc_info:
        video.write_videofile(join(TMP_DIR, "test_clips_array.mp4"))
    assert "No 'fps'" in str(exc_info.value)

    # this one should work correctly
    red.fps = green.fps = blue.fps = 30
    video = clips_array([[red, green, blue]]).set_duration(5)
    video.write_videofile(join(TMP_DIR, "test_clips_array.mp4"))
    close_all_clips(locals())
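As an alternative sketch, write_videofile also accepts fps directly, so the source clips do not need an fps attribute:
video = clips_array([[red, green, blue]]).set_duration(5)
video.write_videofile(join(TMP_DIR, "test_clips_array.mp4"), fps=30)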
Example #11
def random_videos_collage(lbls_file, slide_size):
    with open(lbls_file, 'r') as fin:
        all_files = [el.split()[0] for el in fin.readlines()]
    # Take the first 12 for now
    res = clips_array(chunkify(
        [VideoFileClip(el).margin(3) for el in all_files[:12]], 3)).resize(
            slide_size)
    return res
Example #12
def video_array(clip1, clip2, audio):
    """generates new video clip which is an clips.array of the two video clips
    passed to the function. accepts audio parameter as well"""

    clip = mpe.clips_array([[clip1, clip2]])
    clip = clip.set_audio(audio)

    return clip
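A hypothetical usage sketch, assuming moviepy.editor is imported as mpe and the file names are placeholders:
import moviepy.editor as mpe

left = mpe.VideoFileClip("left.mp4")
right = mpe.VideoFileClip("right.mp4")
# Reuse the left clip's soundtrack for the combined video.
video_array(left, right, left.audio).write_videofile("side_by_side.mp4")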
Example #13
def part1(video_path, output_path):
    print("part1 stitching---------")
    clip1 = movie_upside(video_path)
    clip2 = movie_reverse(video_path)  # mirrored along the x-axis
    final_clip = clips_array([
        [clip1, clip2],
    ])
    final_clip.write_videofile(output_path)
    return output_path
Example #14
def process_by_script(main_clip):
    video = main_clip.fx(vfx.rotate, 90).fx(vfx.mirror_y)
    main_clip = video.margin(10)
    clip2 = main_clip.fx(vfx.mirror_x)
    clip3 = main_clip.fx(vfx.mirror_y)
    clip4 = main_clip.resize(0.60)
    final_clip = clips_array([[main_clip, clip2], [clip3, clip4]])
    # resize returns a new clip, so the result must be reassigned
    final_clip = final_clip.resize(width=480)
    return final_clip
Example #15
def addedVideos():
    clip1 = VideoFileClip("text.mp4").margin(15)
    clip2 = clip1.fx(vfx.mirror_x)  # mirror along the X axis
    clip3 = clip1.fx(vfx.mirror_y)  # mirror along the Y axis
    clip4 = clip1.resize(0.6)  # scale down uniformly to 0.6

    final_clip = clips_array([
        [clip1], [clip3]
    ])
    final_clip.write_videofile("my_stack.mp4")
Example #16
def merge_cams(videos0, videos1, merged_folder):
    os.makedirs(merged_folder, exist_ok=True)
    for v0, v1 in zip(videos0, videos1):
        vidname = tools.videoname_from_regex(base.CAM_REGEX, v0)
        pn = os.path.join(merged_folder, vidname + '.mp4')
        if not os.path.exists(pn):
            clip1 = VideoFileClip(v0)
            clip2 = VideoFileClip(v1)
            final_clip = clips_array([[clip1, clip2]])
            final_clip.write_videofile(pn, codec='libx264')
Example #17
def process_package(source: str):
    log.info(''.join(["SOURCE: ", source]))
    main_clip = VideoFileClip(source).margin(10)
    clip2 = main_clip.fx(vfx.mirror_x)
    clip3 = main_clip.fx(vfx.mirror_y)
    clip4 = main_clip.resize(0.60)  # downsize 60%
    final_clip = clips_array([[main_clip, clip2],
                              [clip3, clip4]])
    # resize returns a new clip, so the result must be reassigned
    final_clip = final_clip.resize(width=480)
    return final_clip
Example #18
def _clips_array(request):
    files = request.FILES
    tr = files.get('top_right')
    tl = files.get('top_left')
    br = files.get('bottom_right')
    bl = files.get('bottom_left')

    default = DEFAULT_MEDIA_ROOT + '/color.mp4'
    with VideoFileClip(tr.temporary_file_path() if tr else default) as tr, \
         VideoFileClip(tl.temporary_file_path() if tl else default) as tl, \
         VideoFileClip(br.temporary_file_path() if br else default) as br, \
         VideoFileClip(bl.temporary_file_path() if bl else default) as bl:
        vd = clips_array([[tr, tl], [br, bl]])  # play 4 videos in parallel in a grid
        vd.write_videofile('media/res.mp4')

    return JsonResponse({'video':'http://127.0.0.1:8000/media/res.mp4'})
Example #19
def save_video(frames, name, fps=13):
    for rotation_deg in frames['intensity'].keys():
        intensity_animation = ImageSequenceClip(frames['intensity'][rotation_deg], fps=fps)
        distance_animation = ImageSequenceClip(frames['distance'][rotation_deg], fps=fps)
        h, w, d = frames['distance'][rotation_deg][0].shape
        blank = np.zeros([h // 2, w, d], dtype=np.uint8)
        blank[:, :] = [255, 255, 255]
        blank = ImageSequenceClip([blank] * (len(frames['intensity'][rotation_deg])), fps=fps)

        final_clip = clips_array([[intensity_animation], [blank], [distance_animation]])
        filename = "%s_rot%s.webm" % (name, rotation_deg)
        final_clip.write_videofile(filename, codec='libvpx')
Example #20
def test_clips_array():
    red = ColorClip((1024, 800), color=(255, 0, 0))
    green = ColorClip((1024, 800), color=(0, 255, 0))
    blue = ColorClip((1024, 800), color=(0, 0, 255))

    video = clips_array([[red, green, blue]])

    with pytest.raises(ValueError) as exc_info:
        video.resize(width=480).write_videofile(join(TMP_DIR, "test_clips_array.mp4"))
    assert str(exc_info.value) == "Attribute 'duration' not set"

    close_all_clips(locals())
Example #21
def makevideoclip(videofile, event, data, pre=10, length=30, savefile='output.mp4'):
    vidclip = mv.VideoFileClip(videofile).subclip(event - pre, event - pre + length)
    
    fig, ax = plt.subplots()
    fig.patch.set_facecolor('black')
    ax.set_facecolor('black')

    animation = mv.VideoClip(make_frame(ax), duration=length)

    combinedclip = mv.clips_array([[vidclip, animation]])
    combinedclip.write_videofile(savefile, fps=10)

    return combinedclip
Example #22
def main():
    if not os.path.isfile(args.video1) or not os.path.isfile(args.video2):
        print("Video files does not exists")
        return
    clip1 = VideoFileClip(args.video1)
    clip2 = VideoFileClip(args.video2)
    final_clip = clips_array([[clip1, clip2]])
    if clip1.duration > clip2.duration:
        final_clip.audio = clip1.audio
    else:
        final_clip.audio = clip2.audio
    print("Creating your video...")
    final_clip.write_videofile(args.target, verbose=False, logger=None)
Example #23
def text_to_clip(vname, sentence_description):

    video_path = os.path.join(VIDEO_PATH, vname)
    vname = video_to_frame(video_path)
    duration = VideoFileClip(video_path).duration
    global LONG_THRESHOLD
    global IS_LONG
    if duration <= LONG_THRESHOLD:
        print("Processing with a short video!")
        IS_LONG = False
    else:
        print("Processing with a long video!")
        IS_LONG = True
    frame_to_fts(vname)
    video_fts_path = os.path.join(FTS_PATH, vname + ".npy")

    if IS_LONG:
        pred_clip, pred_score = ExCL_Locate(video_fts_path,
                                            sentence_description, duration)
    else:
        pred_clip, pred_score = SCDM_Locate(video_fts_path,
                                            sentence_description, duration)

    if WEB_MODE:
        clips = []
        for i in range(3):
            clip_dict = {}
            clip_dict['left'] = round(pred_clip[i][0], 2)
            clip_dict['right'] = round(pred_clip[i][1], 2)
            clip_dict['name'] = vname + "_" + sentence_description.replace(
                ' ', '_') + "_clip%d.mp4" % (i + 1)
            clip_dict['score'] = round(pred_score[i], 2)
            clip = VideoFileClip(video_path).subclip(pred_clip[i][0],
                                                     pred_clip[i][1])
            path = os.path.join(CLIP_PATH, clip_dict['name'])
            if not os.path.exists(path):
                clip.write_videofile(path)
            clips.append(clip_dict)
        return clips

    else:
        pygame.display.set_caption('predicted clip')
        clip1 = VideoFileClip(video_path).subclip(pred_clip[0][0],
                                                  pred_clip[0][1])
        clip2 = VideoFileClip(video_path).subclip(pred_clip[1][0],
                                                  pred_clip[1][1])
        clip3 = VideoFileClip(video_path).subclip(pred_clip[2][0],
                                                  pred_clip[2][1])
        clip = clips_array([[clip1, clip2, clip3]]).resize(width=1000)
        clip.preview(fps=16, audio=False)
        pygame.quit()
Example #24
    def render(self, instrument_path, output_path):

        channel_clips = [None] * len(self.channels)
        ins = Instrument(folder=instrument_path, octaves=(1, 8))

        offset = [-1, -2, -2]  # pitch offset (multiplied by 12 semitones) for each channel
        for cn, channel in enumerate(self.channels):
            clips = []
            time = 0
            for i, note in enumerate(self.channels[channel]):
                if note['start'] > time:
                    clips.append(TextClip(" ",  # it's an empty black clip for the pause (when no note is playing)
                                          size=(640, 480),
                                          color='white',
                                          bg_color='black').\
                                 set_duration(note['start'] - time).\
                                 set_fps(30).\
                                 set_start(time))
                    time = note['start']
                clips.append(ins.get_clip(note['note'] + 12 * offset[cn]).\
                             subclip(0, note['end'] - note['start']).\
                             set_start(time))
                time = note['end']

            channel_clips[channel] = CompositeVideoClip(clips).\
                volumex(0.3).\
                resize((640, 480))

        title = " The musical offering \n Canon perpetuus super thema regium \n B.W.V 1079 - J. S. Bach "
        title_clip = TextClip(title,
                              size = (640, 480),
                              color="white",
                              bg_color="black").\
                              set_duration(max([c.duration for c in channel_clips])).\
                              set_fps(30)
        clips_array([[title_clip, channel_clips[1]],
                     [channel_clips[0], channel_clips[2]]]).\
                     write_videofile(output_path)
Example #25
    def convert(self):
        '''Create a stacked side-by-side video file with the Pulfrich effect'''

        left_viewport = VideoFileClip(self.file_name, audio=True)
        right_viewport = VideoFileClip(self.file_name, audio=False)

        # Delay the right eye by one frame to produce the Pulfrich depth effect
        first_frame_marker = (1 / left_viewport.fps)
        right_viewport = right_viewport.subclip(first_frame_marker)

        stacked = clips_array([[left_viewport, right_viewport]])
        stacked.write_videofile(self.file_name + '_sbs.mp4', codec='libx264',
                                audio_codec='aac',
                                temp_audiofile='temp-audio.m4a',
                                remove_temp=True)

        left_viewport.close()
        right_viewport.close()
Example #26
def merge_3Dgenotype_fitnesshistogram_movie():
    from algorithm.parameters import params
    import moviepy.editor as mpy

    fps = 1
    __filename1 = params['FILE_PATH'] + str(
        params['TIME_STAMP']) + '/3Dgenotypes.mp4'
    __filename2 = params['FILE_PATH'] + str(
        params['TIME_STAMP']) + '/fitnessdistribution.mp4'
    __outputfilename = params['FILE_PATH'] + str(
        params['TIME_STAMP']) + '/merge_3Dgenotype_fitnesshistogram.mp4'
    clip_mayavi = mpy.VideoFileClip(__filename1)
    clip_mpl = mpy.VideoFileClip(__filename2)
    animation = mpy.clips_array([[clip_mpl, clip_mayavi]])
    animation.write_videofile(__outputfilename, fps=fps)
Example #27
def stack(file1, file2, nameout, audio=False):
    """
    Stack two videos on top of each other.
    """
    clip1 = VideoFileClip(file1, audio=audio)
    clip2 = VideoFileClip(file2, audio=audio)
    final_clip = clips_array([
        [
            clip1
        ],  # First row (add to this list if you want more than one video in row)
        [
            clip2
        ]  # Second row (add to this list if you want more than one video in row)
    ])
    final_clip.write_videofile(nameout)
Example #28
    def create_clip_array(self):
        subclips_amount = len(self.__source_path)

        if subclips_amount != 3:
            print("Only works with exactly 3 clips!\n")
        else:
            subclips = []

            for cnt in range(0, 3):
                current_clip = VideoFileClip(self.__source_path[cnt])

                # Appends clips to the subclips vector
                subclips.append(current_clip)

            final_clip_array = clips_array(
                [[subclips[0], subclips[1], subclips[2]]])
            final_clip_array.write_videofile("arrayTest.mp4")
            os.startfile("arrayTest.mp4")
Example #29
def display_animation(filename,
                      keypoints,
                      clip_height=768,
                      display_height=240,
                      temp_file='__temp__.avi',
                      include_source_video=True):
    """
    Display a side-by-side animation comprising the skeleton and (optionally) the source
    video.

    Parameters
    ----------
    filename : str
        The video file to read
    keypoints : numpy array
        Array of keypoints in the form [frame,landmark,coords]
    clip_height : int
        Desired clip height (applies to both source video and skeleton, the former is upscaled)
    display_height : int
        Desired display height
    temp_file : str
        Temporary file for transcoding
    include_source_video: bool
        Whether to include the source video
    """
    clip_original = VideoFileClip(filename)
    rescaling_factor = clip_height / clip_original.h
    clip_original = clip_original.resize(height=clip_height)
    keypoints = np.copy(keypoints) * rescaling_factor

    skeleton = Skeleton(target_width=clip_original.w,
                        target_height=clip_original.h)
    skeleton.animate(keypoints,
                     temp_file,
                     fps=len(keypoints) / clip_original.duration)
    clip_skeleton = VideoFileClip(temp_file)
    if include_source_video:
        clip = clips_array([[clip_original, clip_skeleton]])
    else:
        clip = clip_skeleton

    return clip.ipython_display(height=display_height,
                                rd_kwargs=dict(logger=None))
Example #30
    def createClipArray(self):  # Creates an array of input clips (only for exactly 3 clips)
        subclipsAmount = len(self.__sourcePath)

        if (subclipsAmount != 3):
            print("Only works with exactly 3 clips!\n")
        else:
            subclips = []

            for cnt in range(0, 3):
                currentClip = VideoFileClip(self.__sourcePath[cnt])
                subclips.append(
                    currentClip)  # Appends clips to the subclips vector

            finalClipArray = clips_array(
                [[subclips[0], subclips[1], subclips[2]]])
            finalClipArray.write_videofile("arrayTest.mp4")
            os.startfile("arrayTest.mp4")
Example #31
def main(args):
    print(args)
    # materialize the clips; clips_array needs a concrete row, not a lazy map
    clips = [VideoFileClip(f) for f in args.file]
    final_clip = clips_array([clips])
    final_clip.write_videofile(args.output, fps=15)
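args is not defined in this snippet; a plausible argparse setup (the flag names are assumptions) would be:
import argparse

parser = argparse.ArgumentParser(description="Tile videos side by side")
parser.add_argument("file", nargs="+", help="input video files")
parser.add_argument("-o", "--output", default="out.mp4", help="output file")
args = parser.parse_args()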
Example #32
axB.axis('off')

duration = 2
delta_ang = 360/duration
elevation = 20  # Elevation of view
X, Y, Z = axes3d.get_test_data()
ax.plot_surface(X, Y, Z, cmap=cm.jet)

for i in np.arange(0, 360, 90):
    start_ang = i
    animation = mpy.VideoClip(make_frame_mpl, duration=duration)
    fname = "sinc_mpl_"+str(int(i))+".gif"
    animation.write_gif(fname, fps=20)

blackanim = mpy.VideoClip(make_black_frame, duration=duration)
blackanim.write_gif("sinc_mpl_black.gif", fps=20)

xanim = mpy.VideoClip(make_x_frame, duration=duration)
xanim.write_gif("sinc_mpl_x.gif", fps=20)
clip0 = mpy.VideoFileClip("sinc_mpl_0.gif")
clip1 = mpy.VideoFileClip("sinc_mpl_90.gif").rotate(90)
clip2 = mpy.VideoFileClip("sinc_mpl_180.gif").rotate(180)
clip3 = mpy.VideoFileClip("sinc_mpl_270.gif").rotate(-90)
clipB = mpy.VideoFileClip("sinc_mpl_black.gif")
clipX = mpy.VideoFileClip("sinc_mpl_x.gif")
hologram = mpy.clips_array([[clipB, clip2, clipB],
                            [clip3, clipX, clip1],
                            [clipB, clip0, clipB]])

hologram.write_gif("hologram_test_black.gif", fps=20)
Example #33
videofile = sys.argv[2]

print ("Videofile %s" % str(videofile))

### START MIRROR EFFECT

if sys.argv[1] == '-m':

 from moviepy.editor import VideoFileClip, clips_array, vfx
 clip1 = VideoFileClip(sdir + videofile).margin(10)  # add 10px contour
 clip2 = clip1.fx(vfx.mirror_x)
 clip3 = clip1.fx(vfx.mirror_y)
 clip4 = clip1.fx(vfx.mirror_y)
 clip4 = clip4.fx(vfx.mirror_x)
 #clip4 = clip1.resize(0.60) # downsize 60%
 final_clip = clips_array([[clip1, clip2],
                           [clip3, clip4]])
 final_clip.resize(width=1280).write_videofile(ddir + "mirror.mp4")


### END MIRROR EFFECT


### START EDIT MOVIE EDITS ALL IN ONE DIR CURRENTLY

if sys.argv[1] == '-e':

 os.listdir(sdir)

 from os import listdir
 from os.path import isfile, join
 onlyfiles = [ f for f in listdir(sdir) if isfile(join(sdir,f)) ]