Example 1
def show_padded_cuts(video, cuts, n_repeat=1, pad_sec=1.0):
    dt = 1.0 / video.fps
    black = mpe.ColorClip(video.size, (0, 0, 0))
    accept = [None for _ in cuts]
    i = 0
    while i < len(cuts):
        t = cuts[i]
        t0, t1 = t - pad_sec, t + pad_sec
        c = video.subclip(t0, t1)

        for _ in range(n_repeat):
            c.preview(fps=video.fps)

        u = get_user_decision(video.size)
        if u == 'y':
            print("accepting cut")
            accept[i] = True
            i += 1
        elif u == 'n':
            print("rejecting cut")
            accept[i] = False
            i += 1
        elif u == 'm':
            print("marking cut for manual attention")
            # TODO: do it
            i += 1
        elif u == 'r':
            print("replaying")
        else:
            assert False
    return accept
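The helper get_user_decision is not defined in this snippet. A minimal sketch of what it might look like, assuming MoviePy's pygame preview has already initialized the display (the key-to-letter mapping is hypothetical):

import pygame

def get_user_decision(size):
    # hypothetical helper: block until the user presses y, n, m, or r
    # (size is accepted for parity with the call above but unused here)
    keymap = {pygame.K_y: 'y', pygame.K_n: 'n',
              pygame.K_m: 'm', pygame.K_r: 'r'}
    while True:
        ev = pygame.event.wait()
        if ev.type == pygame.KEYDOWN and ev.key in keymap:
            return keymap[ev.key]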
Example 2
def blueInfo(message,
             wsize,
             start=0,
             cenpos=None,
             duration=9,
             bcol=None,
             align='center'):
    sx, sy = wsize

    if cenpos is None:
        cenpos = (sx / 2, sy / 2)
    px, py = cenpos
    dx = min(px, sx - px)
    res = mped.TextClip(message,
                        font='Courier-bold',
                        fontsize=dx * 2.5 / longLine(message),
                        color="blue",
                        align=align).set_duration(duration)
    rsx, rsy = res.size

    if bcol is None:
        return res.set_position((px - rsx / 2, py - rsy / 2)).set_start(start)

    # add Background Square
    colClip = mped.ColorClip(res.size, bcol, duration=duration)

    return mped.CompositeVideoClip([colClip, res], size=res.size).set_position(
        (px - rsx / 2, py - rsy / 2)).set_start(start)
Example 3
def _MakeActivationVideoOneLayer(m, clip_dict, layer_no):
    labels = ["conv1", "conv2", "conv3", "fc", "output"]
    scales = [1.5, 2.0, 2.0, 0.5, 1.5]

    #get game frames
    clip1 = clip_dict['frames']

    #get activations from one layer
    layer_name = m.layers[layer_no]['name']
    clip2 = clip_dict[layer_name]
    clip2_scale = scales[layer_no]
    clip2 = clip2.resize(clip2_scale)

    #calculate size of background canvas
    total_size_x = clip1.size[0] + clip2.size[0]
    total_size_y = max(clip1.size[1], clip2.size[1])

    #create background canvas
    bg_clip = mpy.ColorClip(size=(total_size_x, total_size_y),
                            color=(255, 255, 255))

    duration = clip2.duration

    #align clips on canvas
    clip1 = clip1.set_position(pos=(0, "center"))
    clip2 = clip2.set_position(pos=(total_size_x - clip2.size[0], "center"))

    clip_list = [bg_clip, clip1, clip2]

    #composite together
    cc = mpy.CompositeVideoClip(clip_list,
                                (total_size_x, total_size_y)).subclip(
                                    0, duration)
    #cc.ipython_display()
    return cc
Example 4
 def make_frame(self, t):
     i, r = divmod(t, 1)
     i = int(i)
     tree = self.interpolate(i, i + 1, r)
     fg = self.tree_to_clip(tree)
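     # note: MoviePy expects RGB components in the 0-255 range, so (.5, .5, .5)
     # renders almost black; (128, 128, 128) would give a true mid-gray background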
     bg = mpy.ColorClip((2560, 1440), (.5, .5, .5), duration=1)
     return mpy.CompositeVideoClip([bg, fg]).make_frame(0)
Example 5
    def _create_background(self):
        """
        Get clip showing the background color over the whole duration of the video.
        """

        clip = mpy.ColorClip(
            size=gui_conv.splitXY(self._Settings['format']),
            color=self._BgColor,
            duration=self._WpInst.getDuration()
        )
        return clip
Example 6
 def add_spacer(self, text: str, duration_s: float) -> None:
     """Add a text spacer to the start of the clip."""
     assert self.has_video
     # create a black screen, of duration_s seconds.
     color = med.ColorClip(size=(self.width, self.height), color=(0, 0, 0), duration=duration_s)
     color = color.set_fps(Moviepy.DEFAULT_FPS)  # pylint: disable=assignment-from-no-return
     spacer = Moviepy.draw_text_in_box_on_video(
         color, text, duration_s, self.width, self.height, box_height=self.get_box_height(),
         move=True, top=False, on_box=False
     )
     self.clip = med.concatenate_videoclips([spacer, self.clip])
Example 7
def codeBox(message, rect, fontsize, duration=9):
    x, y, w, h = rect
    bak = mped.ColorClip((w, h), (255, 255, 255), duration=duration)

    t = mped.TextClip(message,
                      font='Courier-bold',
                      fontsize=fontsize,
                      color="blue",
                      method="caption",
                      align="west").set_duration(duration)

    return mped.CompositeVideoClip([bak, t], size=(w, h)).set_pos((x, y))
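A hypothetical call, assuming mped is moviepy.editor: render a 640x360 white box with blue Courier text whose top-left corner sits at (40, 40):

box = codeBox("print('hello')", rect=(40, 40, 640, 360), fontsize=24, duration=5)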
Example 8
def _MakeActivationVideo(m, clip_dict):
    composite_size = (550, 1000)

    clip_list = []
    clip_list.append(mpy.ColorClip(size=composite_size, color=(255, 255, 255)))

    labels = ["obs", "conv1", "conv2", "conv3", "fc", "output"]
    scales = [1.0, 1.5, 2.0, 2.0, 0.5, 1.5]

    x_pos = 350
    y_pos = 25
    padding = 50
    label_fontsize = 20

    layers = m.layers.copy()
    layers.insert(0, {'name': 'observations'})

    for layer_idx in range(len(labels)):
        layer_name = layers[layer_idx]['name']

        #get clip and resize it
        clip = clip_dict[layer_name]
        clip = clip.resize(scales[layer_idx])

        #calculate where to place it
        _x_pos = x_pos - 0.5 * clip.size[0]
        _y_pos = y_pos
        clip = clip.set_position((_x_pos, _y_pos))

        txtClip = mpy.TextClip(labels[layer_idx],
                               color='black',
                               fontsize=label_fontsize)
        txtPos = (x_pos - 0.5 * txtClip.size[0], y_pos - txtClip.size[1])
        clip_list.append(txtClip.set_position(txtPos))

        #offset coordinates
        y_pos += clip.size[1]
        y_pos += padding
        clip_list.append(clip)

    duration = clip.duration

    clip_list.append(clip_dict['frames'].set_position((50, 580)))
    #clip_list.append(clip_dict['observations'].set_position((0,50)))

    cc = mpy.CompositeVideoClip(clip_list, composite_size).subclip(0, duration)
    #cc.ipython_display()
    return cc
Example 9
def apply(self, clip, startTime, asset):
        image_clip = moviepy.video.VideoClip.ImageClip(asset.url,
                                                       duration=self.duration)

        mask = mpe.ColorClip(image_clip.size,
                             color=1,
                             ismask=True,
                             duration=image_clip.duration)
        mask = moviepy.video.fx.all.fadein(
            mask, (self.duration - self.solid_duration) / 2, initial_color=0)
        mask = moviepy.video.fx.all.fadeout(
            mask, (self.duration - self.solid_duration) / 2, final_color=0)

        image_clip = image_clip.set_mask(mask)

        image_clip = image_clip.set_pos(('center', 'center'))
        image_clip = image_clip.set_start(startTime)
        return editor.CompositeVideoClip([clip, image_clip])
Example 10
def approve_clips(video, clips):
    black = mpe.ColorClip(video.size, (0, 0, 0))
    accept = [KEEP for _ in clips]
    for i, c in enumerate(clips):
        clip = video.subclip(*c)

        def event_handler(ev):
            if ev.type == pygame.KEYDOWN:
                if ev.key in [pygame.K_DELETE, pygame.K_BACKSPACE]:
                    print("rejecting video")
                    accept[i] = REJECT
                if ev.key == pygame.K_m:
                    print("marking video for editing")
                    accept[i] = EDIT

        clip.preview(fps=video.fps, event_handler=event_handler)
        black.show()
        time.sleep(1)
    return accept
Example 11
def generate_blank(clip_data, start=None, end=None, compressed=False):

    if start is None:
        start = clip_data.get('startTime')

    if end is None:
        end = clip_data.get('endTime')

    dur = end - start

    vid_size = (852, 480) if compressed else (1920, 1080)

    blank_clip = myp.ColorClip(size=vid_size, color=(0, 0, 0), duration=dur)
    audio = myp.AudioFileClip(os.path.join(resource_path, music_list[0]))

    blank_clip = blank_clip.set_audio(audio.set_duration(dur))
    blank_clip.fps = 24

    return blank_clip
Example 12
def movie_grid(clip_dict, x_labels, y_labels, grid_sz_x, grid_sz_y,
               label_padding=50, padding=5, label_fontsize=20):
    key = list(clip_dict.keys())[0]
    exemplar = clip_dict[key]
    size_x, size_y = exemplar.size
    duration = exemplar.duration

    x_step = size_x + padding
    y_step = size_y + padding

    composite_size = (label_padding + x_step * grid_sz_x,
                      label_padding + y_step * grid_sz_y)

    # position all the movie clips on the grid
    for _x in range(grid_sz_x):
        for _y in range(grid_sz_y):
            pos = (label_padding + _x * x_step, label_padding + _y * y_step)
            clip_dict[(_x, _y)] = clip_dict[(_x, _y)].set_position(pos)
            # clip.write_gif(o_video_fn)

    clip_list = []
    # add background clip
    clip_list.append(mpy.ColorClip(size=composite_size, color=(255, 255, 255)))

    # now add x and y labels
    if y_labels is not None:
        for l_idx, label in enumerate(y_labels):
            txtClip = mpy.TextClip(label, color='black',
                                   fontsize=label_fontsize).set_position(
                                       (0, label_padding + y_step * l_idx + y_step / 2))
            clip_list.append(txtClip)

    if x_labels is not None:
        for l_idx, label in enumerate(x_labels):
            txtClip = mpy.TextClip(label, color='black',
                                   fontsize=label_fontsize).set_position(
                                       (label_padding + x_step * l_idx, label_padding / 2))
            clip_list.append(txtClip)

    for key in clip_dict:
        clip_list.append(clip_dict[key])

    cc = mpy.CompositeVideoClip(clip_list, composite_size)
    return cc
Example 13
def side_by_side_clips(clip1, clip2):
    #calculate size of background canvas
    total_size_x = clip1.size[0] + clip2.size[0]
    total_size_y = max(clip1.size[1], clip2.size[1])

    #create background canvas
    bg_clip = mpy.ColorClip(size=(total_size_x, total_size_y),
                            color=(255, 255, 255))

    duration = clip2.duration

    #align clips on canvas
    clip1 = clip1.set_position(pos=(0, "center"))
    clip2 = clip2.set_position(pos=(total_size_x - clip2.size[0], "center"))

    clip_list = [bg_clip, clip1, clip2]

    #composite together
    cc = mpy.CompositeVideoClip(clip_list,
                                (total_size_x, total_size_y)).subclip(
                                    0, duration)
    return cc
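A usage sketch, assuming mpy is moviepy.editor (the file names are placeholders); note that the composite is trimmed to clip2's duration:

left = mpy.VideoFileClip("left.mp4")
right = mpy.VideoFileClip("right.mp4")
side_by_side_clips(left, right).write_videofile("side_by_side.mp4", fps=24)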
Example 14
def createvid(description, image_temp_list, fps=24, duration=0.1):
    blackbg = me.ColorClip((720, 720), (0, 0, 0))

    clips = [
        me.ImageClip(m.name + ".png", duration=duration)
        for m in image_temp_list
    ]
    for img in image_temp_list:
        img.close()
    concat_clip = me.concatenate_videoclips(clips,
                                            method="compose").set_position(
                                                ('center', 'center'))
    if description == "start song":
        description = " "
    if len(description) > 35:
        description = fill(description, 35)

    txtClip = me.TextClip(description,
                          color='white',
                          fontsize=30,
                          font='Amiri-regular').set_position('center')
    txt_col = txtClip.on_color(size=(blackbg.w, txtClip.h + 10),
                               color=(0, 0, 0),
                               pos=('center', 'center'),
                               col_opacity=0.8)

    txt_mov = txt_col.set_position((0, blackbg.h - 20 - txtClip.h))
    comp_list = [blackbg, concat_clip, txt_mov]
    final = me.CompositeVideoClip(comp_list).set_duration(concat_clip.duration)

    with tempfile.NamedTemporaryFile() as video_tempfile:
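        # note: the rendered video is written to video_tempfile.name + ".mp4",
        # not into the NamedTemporaryFile itself; the tempfile mainly provides a
        # unique name and is removed when this block exits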
        final.write_videofile(video_tempfile.name + ".mp4", fps=fps)
        video_tempfile.seek(0)

        for clip in clips:
            clip.close()
        for clip in comp_list:
            clip.close()
        return video_tempfile
Example 15
def convert(data, outfile='composition.mp4'):
    '''Converts popcorn track list into an mp4'''

    clips = []

    # generate background color clip
    # convert hex color from popcorn to an RGB tuple
    bg_color = data.get('backgroundColor', '#000000').lstrip('#')
    bg_color = tuple(int(bg_color[i:i + 2], 16) for i in (0, 2, 4))
    total_duration = float(data['duration'])
    background_clip = mpy.ColorClip(size=(WIDTH, HEIGHT),
                                    color=bg_color).set_duration(total_duration)
    clips.append(background_clip)

    # generate moviepy clips from popcorn options
    tracks = sorted(data['tracks'], key=lambda k: k['order'], reverse=True)
    for track in tracks:
        for event in track['trackEvents']:
            clip = create_clip(event['type'], event['popcornOptions'])
            clips.append(clip)

    # ignore track events that create_clip can't handle
    clips = [c for c in clips if c is not None]

    # composite the video
    video = mpy.CompositeVideoClip(clips, size=(WIDTH, HEIGHT))

    tmpoutfile = outfile + '.temp.mp4'

    video.write_videofile(tmpoutfile,
                          fps=24,
                          codec='libx264',
                          audio_codec='aac',
                          temp_audiofile='temp-audio.m4a',
                          remove_temp=True)

    shutil.move(tmpoutfile, outfile)

    return outfile
Example 16
def create_video(header, declarations, effects, output, music="music_1.mp3"):
    # header = ((800, 600), 10.0, 30)  # (size, duration, fps)
    # declaration = ("pizdec", asset.Image("img/02.jpg"))
    # effect = (0.5, 1, "pizdec", asset_effects.Fade(0.5, False))

    clip = mpe.ColorClip(header[0], color=(0, 0, 0), duration=header[1])
    clip.fps = header[2]

    all_assets = dict()
    for decl in declarations:
        if decl[0] in all_assets:
            raise Exception("Asset IDs must be unique!")
        else:
            all_assets[decl[0]] = decl[1]

    effects.sort(key=operator.itemgetter(1, 0))

    for effect in effects:
        clip = effect[3].apply(clip, effect[0], all_assets[effect[2]])

    audio = mpe.AudioFileClip(music)
    audio = audio.set_duration(clip.duration + 0.5)
    clip = clip.set_audio(audio)
    clip.write_videofile(output)
Example 17
 def create_color_clip(size: Tuple[int, int], color: Tuple[int, int, int], duration: float) -> med.ColorClip:
     """Create a new color clip with a valid FPS."""
     clip = med.ColorClip(size=size, color=color, duration=duration)
     # Need to select something as the fps (colorclip has no inherent framerate)
     clip = clip.set_fps(Moviepy.DEFAULT_FPS)  # pylint: disable=assignment-from-no-return
     return clip
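Assuming create_color_clip is a static helper on the surrounding Moviepy class, a call might look like:

black = Moviepy.create_color_clip(size=(1920, 1080), color=(0, 0, 0), duration=2.0)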
Example 18
def generate_blank_video(ar, rgb, dur):
    return mp.ColorClip(size=ar, color=rgb, duration=dur)
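A quick usage sketch: ColorClip has no inherent frame rate, so an fps must be supplied when writing the file (the output name is a placeholder):

blank = generate_blank_video((1280, 720), (0, 0, 0), 5.0)
blank.write_videofile("blank.mp4", fps=24)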
Example 19
def rollout_grid(env, algos, run_ids, tag='final', clip_resize=0.5,
                 label_fontsize=20, out_fn="composite.mp4", video_cache=".",
                 length=None):

    clip_dict = {}
    key = None
    for algo in algos:
        for run_id in run_ids:
            key = (algo, run_id)
            clip_dict[key] = load_clip_from_cache(
                algo, env, run_id, tag, video_cache).resize(clip_resize)

    exemplar = clip_dict[key]
    size_x, size_y = exemplar.size
    duration = exemplar.duration

    # labels for grid
    y_labels = [("R%d" % r) for r in run_ids]
    x_labels = algos

    label_padding = 50
    padding = 5

    num_runs = len(run_ids)

    x_step = size_x + padding
    y_step = size_y + padding

    composite_size = (label_padding + x_step * len(algos),
                      label_padding + y_step * num_runs)

    # position all the movie clips on the grid
    for algo_idx, algo in enumerate(algos):
        for run_id in run_ids:
            pos = (label_padding + algo_idx * x_step,
                   label_padding + (run_id - 1) * y_step)
            clip_dict[(algo, run_id)] = clip_dict[(algo, run_id)].set_position(pos)
            # clip.write_gif(o_video_fn)
            print(env, algo, run_id)

    clip_list = []
    # add background clip
    clip_list.append(mpy.ColorClip(size=composite_size, color=(255, 255, 255)))

    # now add x and y labels
    for l_idx, label in enumerate(y_labels):
        txtClip = mpy.TextClip(label, color='black',
                               fontsize=label_fontsize).set_position(
                                   (0, label_padding + y_step * l_idx + y_step / 2))
        clip_list.append(txtClip)

    for l_idx, label in enumerate(x_labels):
        txtClip = mpy.TextClip(label, color='black',
                               fontsize=label_fontsize).set_position(
                                   (label_padding + x_step * l_idx, label_padding / 2))
        clip_list.append(txtClip)

    for key in clip_dict:
        clip_list.append(clip_dict[key])

    cc = mpy.CompositeVideoClip(clip_list, composite_size)

    if length is not None:
        duration = length

    cc = cc.subclip(0, duration)

    if out_fn is not None:
        cc.write_videofile(out_fn)

    return cc, clip_dict
Example 20
def make_video(config_file, *, framerate=None, sync=False):
    with open(config_file) as config_fp:
        configuration = json.load(config_fp)
    try:
        race_data = RaceData(configuration['source_telemetry'])
        result_data = RaceData(configuration['source_telemetry'])
    except KeyError:
        sys.exit("Configuration Error: Source Telemetry not found.")

    try:
        output_prefix = os.path.splitext(configuration['output_video'])[0]
    except KeyError:
        sys.exit("Configuration Error: Output Video not found.")

    try:
        champion = configuration['show_champion']
    except KeyError:
        champion = False

    if os.environ.get('HEADINGFONTOVERRIDE') is not None:
        configuration['heading_font'] = \
            os.environ['HEADINGFONTOVERRIDE']
    if os.environ.get('DISPLAYFONTOVERRIDE') is not None:
        configuration['font'] = os.environ['DISPLAYFONTOVERRIDE']

    try:
        video_skipstart = configuration['video_skipstart']
    except KeyError:
        video_skipstart = 0

    try:
        video_skipend = configuration['video_skipend']
    except KeyError:
        video_skipend = None

    if 'source_video' in configuration \
            and configuration['source_video'] is not None:
        source_video = mpy.VideoFileClip(
            configuration['source_video']
        ).subclip(
            video_skipstart,
            video_skipend)
        if framerate is None:
            framerate = source_video.fps
    else:
        time_data = RaceData(configuration['source_telemetry'])
        with tqdm(desc="Detecting Telemetry Duration") as progress:
            while True:
                try:
                    _ = time_data.get_data()
                    progress.update()
                except StopIteration:
                    break
        source_video = mpy.ColorClip((1280, 1024), color=(0, 0, 0)).set_duration(
            time_data.elapsed_time)

        if framerate is None:
            framerate = 30

    pcre_standings = GTStandings(
        race_data,
        ups=framerate,
        **configuration)
    standings_clip_mask = mpy.VideoClip(
        make_frame=pcre_standings.make_mask_frame,
        ismask=True)
    standings_clip = mpy.VideoClip(
        make_frame=pcre_standings.make_frame
    ).set_mask(standings_clip_mask)

    if sync:
        def timecode_frame(time):
            """
            Custom make frame for timecode.
            """
            timecode_image = Image.new('RGB', (100, 40))
            draw = ImageDraw.Draw(timecode_image)
            draw.text((10, 10), "%.02f" % time)
            return PIL_to_npimage(timecode_image)

        timecode_clip = mpy.VideoClip(
            timecode_frame,
            duration=source_video.duration
        ).set_position(('center', 'top'))

        first_lap_data = RaceData(configuration['source_telemetry'])
        with tqdm(desc="Detecting Video Start") as progress:
            while not any(
                    [x.laps_complete > 0
                     for x in first_lap_data.drivers.values()]):
                _ = first_lap_data.get_data()
                progress.update()

        start_time = first_lap_data.elapsed_time - 10
        end_time = None

        with tqdm(desc="Detecting Video End") as progress:
            while not all(
                    [x.laps_complete > 0
                     for x in first_lap_data.drivers.values()]):
                try:
                    _ = first_lap_data.get_data()
                    progress.update()
                except StopIteration:
                    end_time = start_time + 60
                    break

        if end_time is None:
            end_time = first_lap_data.elapsed_time + 10

        main_event = mpy.CompositeVideoClip(
            [source_video, standings_clip, timecode_clip]
        ).set_duration(
            source_video.duration
        ).subclip(start_time, end_time)

    else:
        main_event = mpy.CompositeVideoClip(
            [source_video, standings_clip]
        ).set_duration(source_video.duration)

    pcre_starting_grid = StartingGrid(
        sorted(race_data.starting_grid, key=lambda x: x.position),
        size=source_video.size,
        **configuration)
    Image.fromarray(pcre_starting_grid.to_frame()).save(
        output_prefix + "_starting_grid.png")
    starting_grid = mpy.ImageClip(
        pcre_starting_grid.to_frame()).set_duration(5)

    while True:
        try:
            result_data.get_data()
        except StopIteration:
            break

    end_titles = list()

    if 'car_classes' in configuration and len(configuration['car_classes']):
        no_points_config = copy(configuration)
        try:
            del no_points_config['point_structure']
        except KeyError:
            pass

        pcre_results = RaceResultsWithChange(
            sorted(result_data.all_driver_classification,
                   key=lambda x: x.position),
            result_data.starting_grid,
            size=source_video.size,
            **no_points_config)
        Image.fromarray(pcre_results.to_frame()).save(
            output_prefix + '_results.png')
        results = mpy.ImageClip(pcre_results.to_frame()).set_duration(20)

        end_titles.append(results)

        for car_class_filter in [
                car_class for car_class in configuration['car_classes']
                if car_class != ""]:
            class_cars = [
                car for car_class, car_class_data
                in configuration['car_classes'].items()
                if car_class == car_class_filter
                for car in car_class_data['cars']]
            class_drivers = [
                driver for driver, data
                in configuration['participant_config'].items()
                if data['car'] in class_cars]

            class_classification = [
                classification for classification
                in result_data.all_driver_classification
                if classification.driver_name in class_drivers]
            for position, classification in enumerate(sorted(
                    class_classification,
                    key=lambda x: x.position), 1):
                classification.position = position

            class_starting_grid = [
                grid for grid
                in result_data.starting_grid
                if grid.driver_name in class_drivers]
            for position, grid in enumerate(sorted(
                    class_starting_grid,
                    key=lambda x: x.position), 1):
                grid.position = position

            pcre_results = RaceResultsWithChange(
                sorted(class_classification, key=lambda x: x.position),
                class_starting_grid,
                size=source_video.size,
                **configuration)
            Image.fromarray(pcre_results.to_frame()).save(
                output_prefix + '_' + car_class_filter + '_results.png')
            results = mpy.ImageClip(pcre_results.to_frame()).set_duration(20)
            end_titles.append(results)
    else:
        pcre_results = RaceResultsWithChange(
            sorted(result_data.all_driver_classification,
                   key=lambda x: x.position),
            result_data.starting_grid,
            size=source_video.size,
            **configuration)
        Image.fromarray(pcre_results.to_frame()).save(
            output_prefix + '_results.png')
        results = mpy.ImageClip(pcre_results.to_frame()).set_duration(20)
        end_titles.append(results)

    try:
        if any(configuration['point_structure']):
            if not any([
                    x['points'] for x
                    in configuration['participant_config'].values()]):
                if 'car_classes' in configuration \
                        and len(configuration['car_classes']):
                    for car_class_filter in [car_class for car_class in
                                             configuration['car_classes'] if
                                             car_class != ""]:
                        class_cars = [
                            car for car_class, car_class_data
                            in configuration['car_classes'].items()
                            if car_class == car_class_filter
                            for car in car_class_data['cars']]
                        class_drivers = [
                            driver for driver, data
                            in configuration['participant_config'].items()
                            if data['car'] in class_cars]

                        class_classification = [
                            classification for classification
                            in result_data.all_driver_classification
                            if classification.driver_name in class_drivers]
                        for position, classification in enumerate(sorted(
                                class_classification,
                                key=lambda x: x.position), 1):
                            classification.position = position

                        pcre_series_standings = SeriesStandings(
                            class_classification,
                            size=source_video.size,
                            **configuration)
                        Image.fromarray(pcre_series_standings.to_frame()).save(
                            output_prefix
                            + '_'
                            + car_class_filter
                            + '_series_standings.png')
                        series_standings = mpy.ImageClip(
                            pcre_series_standings.to_frame()).set_duration(20)
                        end_titles.append(series_standings)
                else:
                    pcre_series_standings = SeriesStandings(
                        result_data.all_driver_classification,
                        size=source_video.size,
                        **configuration)
                    Image.fromarray(pcre_series_standings.to_frame()).save(
                        output_prefix + '_series_standings.png')
                    series_standings = mpy.ImageClip(
                        pcre_series_standings.to_frame()).set_duration(20)
                    end_titles.append(series_standings)
            else:
                if 'car_classes' in configuration \
                        and len(configuration['car_classes']):
                    for car_class_filter in [car_class for car_class in
                                             configuration['car_classes'] if
                                             car_class != ""]:
                        class_cars = [
                            car for car_class, car_class_data
                            in configuration['car_classes'].items()
                            if car_class == car_class_filter
                            for car in car_class_data['cars']]
                        class_drivers = [
                            driver for driver, data
                            in configuration['participant_config'].items()
                            if data['car'] in class_cars]

                        class_classification = [
                            classification for classification
                            in result_data.all_driver_classification
                            if classification.driver_name in class_drivers]
                        for position, classification in enumerate(sorted(
                                class_classification,
                                key=lambda x: x.position), 1):
                            classification.position = position

                        pcre_series_standings = SeriesStandingsWithChange(
                            class_classification,
                            size=source_video.size,
                            **configuration)
                        Image.fromarray(pcre_series_standings.to_frame()).save(
                            output_prefix
                            + '_'
                            + car_class_filter
                            + '_series_standings.png')
                        series_standings = mpy.ImageClip(
                            pcre_series_standings.to_frame()).set_duration(20)
                        end_titles.append(series_standings)

                else:
                    pcre_series_standings = SeriesStandingsWithChange(
                        result_data.all_driver_classification,
                        size=source_video.size,
                        **configuration)
                    Image.fromarray(pcre_series_standings.to_frame()).save(
                        output_prefix + '_series_standings.png')
                    series_standings = mpy.ImageClip(
                        pcre_series_standings.to_frame()).set_duration(20)
                    end_titles.append(series_standings)
    except KeyError:
        try:
            _ = configuration['point_structure']
            pcre_series_standings = SeriesStandings(
                result_data.all_driver_classification,
                size=source_video.size,
                **configuration)

            Image.fromarray(pcre_series_standings.to_frame()).save(
                output_prefix + '_series_standings.png')
            series_standings = mpy.ImageClip(
                pcre_series_standings.to_frame()).set_duration(20)

            end_titles.append(series_standings)
        except KeyError:
            pass

    if champion:
        if 'car_classes' in configuration and len(configuration['car_classes']):
            for car_class_filter in [car_class for car_class in
                                     configuration['car_classes'] if
                                     car_class != ""]:
                class_cars = [
                    car for car_class, car_class_data
                    in configuration['car_classes'].items()
                    if car_class == car_class_filter
                    for car in car_class_data['cars']]
                class_drivers = [
                    driver for driver, data
                    in configuration['participant_config'].items()
                    if data['car'] in class_cars]

                class_classification = [
                    classification for classification
                    in result_data.all_driver_classification
                    if classification.driver_name in class_drivers]
                for position, classification in enumerate(sorted(
                        class_classification,
                        key=lambda x: x.position), 1):
                    classification.position = position

                pcre_series_standings = SeriesChampion(
                    class_classification,
                    size=source_video.size,
                    **configuration)
                Image.fromarray(pcre_series_standings.to_frame()).save(
                    output_prefix
                    + '_'
                    + car_class_filter
                    + '_series_champion.png')
                series_standings = mpy.ImageClip(
                    pcre_series_standings.to_frame()).set_duration(20)
                end_titles.append(series_standings)
        else:
            pcre_series_champion = SeriesChampion(
                result_data.all_driver_classification,
                size=source_video.size,
                **configuration)
            Image.fromarray(pcre_series_champion.to_frame()).save(
                output_prefix + '_series_champion.png')
            series_champion = mpy.ImageClip(
                pcre_series_champion.to_frame()).set_duration(20)
            end_titles.append(series_champion)

    output = mpy.concatenate_videoclips(
        [starting_grid.fadeout(1), main_event]
        + [clip.fadein(1).fadeout(1) for clip in end_titles[:-1]]
        + [end_titles[-1].fadein(1)], method="compose")

    output.write_videofile(configuration['output_video'], fps=float(framerate))
Example 21
def make_thumbnail(text, path, max_steps=30):
    print('Preparing thumbnail image.')
    asset_path = r'assets\thumbnail_images/'
    offset = 55
    text_clips = []
    blue_text_color = '#009dff'

    wrap_width = 40
    font_size = 70
    correct = False

    files = os.listdir(r'assets\thumbnail_images/')
    background_files = os.listdir(r'assets\thumbnail_backgrounds/')
    background = mpy.ColorClip((1280, 720), color=(26, 26, 27)).set_duration(3)
    background_image = vfx.mask_color(mpy.ImageClip(
        r'assets\thumbnail_backgrounds/' +
        background_files[np.random.randint(0, len(background_files))]),
                                      color=[26, 26, 27])

    image = vfx.mask_color(
        mpy.ImageClip(asset_path +
                      files[np.random.randint(0, len(files))]).set_position(
                          ('right',
                           'center')).set_duration(background.duration),
        color=[26, 26, 27])

    title_text = mpy.TextClip('r/Askreddit asks:',
                              fontsize=120,
                              color=blue_text_color,
                              font='Verdana-bold',
                              stroke_color='black',
                              stroke_width=100 * 0.06).set_position((50, 20))
    colors = ['#CF6363', '#71DEC6', 'yellow', 'orange']
    # max_steps = max_steps
    step = 0
    if text[-1] != '?':
        text += "?"

    while not correct:
        selected_color = colors[np.random.randint(0, len(colors))]
        text_clips = []
        max_xpos = 0
        xpos = offset
        ypos = title_text.h + offset
        wrapped_text = ' <NEWLINE> '.join(textwrap.wrap(text,
                                                        width=wrap_width))
        word_split_text = wrapped_text.split(' ')
        quote = False

        for i in range(len(word_split_text)):
            # print(word_split_text[i])
            if '<NEWLINE>' in word_split_text[i]:
                # print('newline detected')
                xpos = offset
                ypos += text_clips[-1].h
            else:
                # TODO: add a special color for anything in quotation marks

                if len(word_split_text[i]) > 5:
                    color = selected_color
                else:
                    color = 'white'
                text_clips.append(
                    mpy.TextClip((word_split_text[i] + ' '),
                                 font='Verdana-bold',
                                 fontsize=font_size,
                                 color=color,
                                 align='west',
                                 stroke_color='black',
                                 stroke_width=font_size * 0.06).set_duration(
                                     background.duration).set_pos(
                                         (xpos, ypos)))

                xpos += text_clips[-1].w
                if xpos > max_xpos:
                    max_xpos = xpos

        step += 1
        # print('step: ', step, '   X and Y: ', max_xpos, ypos)
        max_ypos = 600
        if step >= max_steps:
            print('Max steps reached')
            break
        elif max_xpos > 1000:
            wrap_width -= 3
        elif ypos > max_ypos:
            font_size -= 3
            # print('font size = ', font_size)
        elif ypos < max_ypos - 100:
            font_size += 2
        elif xpos < 1000 - 100:
            wrap_width += 1
        else:
            correct = True

    clip_list = [background, title_text, background_image, image] + text_clips
    out = mpy.CompositeVideoClip(clip_list)
    out.save_frame(path + 'thumbnail.png', t=2)
Example 22

# comment_list = remove_greater_depth_comments(submission.comments.list())
post_data = get_top_comments_and_replies(
    remove_greater_depth_comments(submission.comments.list()))
post_data = post_data[0:number_of_comments]

iv, ia, start_time, intro_image, auth_text = prepare_intro_clips(
    YouBOT_session_data['post_data'])

v, a, up_img, down_img, bottom_bar_img, auth, corner_logo, trans_clips, ups_text, t = prepare_all_comment_clips(
    post_data['comments'].to_list(), post_data['replies'].to_list(),
    start_time)

ov, oa, ot, outro_image = prepare_outro_clips(t)
background = mpy.ColorClip(
    (width, height), color=(26, 26, 27)).set_duration(t + outro_image.duration)

combined = mpy.CompositeVideoClip([background] + corner_logo + v + [iv] +
                                  [ov] + [auth_text] + [intro_image] +
                                  [outro_image] + up_img + down_img +
                                  bottom_bar_img + auth + ups_text +
                                  trans_clips).set_audio(
                                      mpy.CompositeAudioClip(a + [ia] + [oa]))

background_music = prepare_background_music(combined.duration)

final_audio = mpy.CompositeAudioClip([combined.audio, background_music])
combined = combined.set_audio(final_audio)

# title = subreddit_name + ', asks: ' + YouBOT_session_data['post_data'].title
Example 23
    def draw_text_in_box_on_video(video: med.VideoFileClip, video_text: str,
                                  length: float, width: int, height: int, box_height: int, move: bool,
                                  top: bool, on_box: bool = True, center: bool = False,
                                  vpos: Optional[float] = None,
                                  interval: Optional[Tuple[float, float]] = None,
                                  fontsize=30) -> med.CompositeVideoClip:
        """Draws a semi-transparent box either at the top or bottom and writes text in it, optionally scrolling by"""
        clips = []

        y_location = 0 if top else height - box_height
        y_location = height // 2 if center else y_location

        if vpos is not None:
            y_location = int(height * vpos)

        video_w, _ = video.size

        if on_box:
            color_clip = med.ColorClip(size=(video_w, box_height), color=(0, 0, 0))
            color_clip = color_clip.set_fps(Moviepy.DEFAULT_FPS)  # pylint: disable=assignment-from-no-return

            color_clip = color_clip.set_opacity(0.5)  # pylint: disable=assignment-from-no-return
            color_clip = color_clip.set_position(pos=(0, y_location))
            clips.append(color_clip)

        stroke_color = 'black' if not on_box else None
        txt = med.TextClip(video_text, font='Bauhaus-93', color='white', stroke_color=stroke_color, fontsize=fontsize)

        txt_y_location = (box_height - txt.h) // 2 + y_location

        if center:
            txt_left_margin = (width - txt.w) // 2
        else:
            txt_left_margin = 20

        # pylint: disable=assignment-from-no-return
        if move:
            txt_mov = txt.set_position(lambda t: (max(txt_left_margin,
                                                      round(video_w - video_w * t / float(length))), txt_y_location))
        else:
            txt_mov = txt.set_position((txt_left_margin, txt_y_location))
        # pylint: enable=assignment-from-no-return
        txt_mov = txt_mov.set_fps(Moviepy.DEFAULT_FPS)  # pylint: disable=assignment-from-no-return

        if interval:
            # Fade text in and out
            fade_duration = 1  # second
            txt_mov = txt_mov.set_duration(interval[1] - interval[0] + fade_duration * 2)
            txt_mov = txt_mov.set_start(max(0, interval[0] - fade_duration))

            txt_mov = txt_mov.fx(vfx.fadein, fade_duration).\
                fx(vfx.fadeout, fade_duration)

        clips.append(txt_mov)

        duration = video.duration
        # Add the input video as the first in the list
        clips = [video] + clips

        # Build a new composition out of the original clip and the text overlay.
        # video = med.CompositeVideoClip(clips, use_bgclip=interval is not None)
        video = med.CompositeVideoClip(clips)
        video.duration = duration
        return video
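A hypothetical call, assuming med is moviepy.editor and the method is reached through the same Moviepy helper class as in the other examples (the input file name is a placeholder):

base = med.VideoFileClip("input.mp4")
titled = Moviepy.draw_text_in_box_on_video(
    base, "Hello, world", length=base.duration, width=base.w, height=base.h,
    box_height=80, move=False, top=False)
titled.write_videofile("titled.mp4")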
Example 24
    def export_mv(self, w=1280, h=720, bgm_src='default'):
        crs_info = self.read_crs_info()

        clip_02_src = os.path.join(self.pic_dir, self.crs_name,
                                   self.crs_name + '_clip_02.mp4')
        _clip_02_src = mpy.VideoFileClip(clip_02_src,
                                         target_resolution=(h, w)).set_start(0)
        target_sec = 56 - int(_clip_02_src.duration)

        if _clip_02_src.duration > 44:
            print('The second clip is longer than 44 seconds; please trim it first.')
            sys.exit(0)

        building_ani_src = os.path.join(
            'i:\\乐高\\图纸', self.crs_name,
            self.crs_name + '_building_animation_only.mp4')
        if os.path.exists(building_ani_src):
            print('A build animation already exists in the directory; it will be used when compositing the final video.')
            clip_01_src = os.path.join(
                self.pic_dir, self.crs_name,
                self.crs_name + '_building_animation_only.mp4')
            _clip_01 = mpy.VideoFileClip(clip_01_src,
                                         target_resolution=(h, w)).set_start(0)
            acc_clip_01 = _clip_01.fl_time(
                lambda t: _clip_01.duration / target_sec * t,
                apply_to=['mask', 'audio'])
            clip_01 = acc_clip_01.set_duration(target_sec)
        else:
            print('No build animation found in the directory; generating the build animation sequence...')
            building_ani = BuildAnimation(crs_name=self.crs_name, save_yn='no')
            # _clip_01=building_ani.exp_building_movie(exptype='part',total_sec_for_part=target_sec)
            clip_01 = building_ani.exp_building_movie(
                exptype='part', total_sec_for_part=target_sec)

        # target_sec=10

        # acc_clip_01 = _clip_01.fl_time(lambda t:  _clip_01.duration/target_sec*t, apply_to=['mask', 'audio'])
        # clip_01=acc_clip_01.set_duration(target_sec)

        clip_02 = mpy.VideoFileClip(
            clip_02_src, target_resolution=(h, w)).set_start(clip_01.duration)

        # bg_time=int(clip_01.duration+clip_02.duration)-2*clip_01.duration/_clip_01.duration
        bg_time = int(clip_01.duration + clip_02.duration) - 2
        bg = mpy.ColorClip((430, 720),
                           color=(0, 0, 0),
                           ismask=False,
                           duration=bg_time).set_opacity(0.5).set_position(
                               (850, 0)).set_start(2)

        bg_left = mpy.ColorClip((300, 56),
                                color=(51, 149, 255),
                                ismask=False,
                                duration=bg_time).set_position(
                                    (275, 15)).set_start(2)

        txt_left = '科学机器人课'  # on-screen text: "Science Robotics Course"
        txt_title = self.crs_name[4:]
        txt_tool = '教具:' + crs_info['教具'].values.tolist()[0]  # "Teaching aids: ..."
        txt_big_klg = '课程知识点'  # "Course knowledge points"
        txt_klg = crs_info['知识点'].values.tolist()[0].split('\n')  # knowledge-point lines

        clip_left = mpy.TextClip(txt_left,
                                 fontsize=40,
                                 font='j:/fonts/hongMengHei.ttf',
                                 color='#ffffff').set_position(
                                     (310,
                                      18)).set_duration(bg_time).set_start(2)
        clip_title = mpy.TextClip(
            txt_title,
            fontsize=54,
            font='j:/fonts/yousheTitleHei.ttf',
            color='#ffff00').set_position(
                (int(430 / 2) - int(len(txt_title) * 54 / 2) + 860,
                 22)).set_duration(bg_time).set_start(2)
        clip_tool = mpy.TextClip(
            txt_tool,
            fontsize=26,
            font='j:/fonts/yousheTitleHei.ttf',
            color='#ffff00').set_position(
                (int(430 / 2) - int(len(txt_tool) * 26 / 2) + 880,
                 110)).set_duration(bg_time).set_start(2)
        clip_big_klg = mpy.TextClip(
            txt_big_klg,
            fontsize=46,
            font='j:/fonts/HYXinHaiXingKaiW.ttf',
            color='#ffffff').set_position(
                (int(430 / 2) - int(len(txt_big_klg) * 46 / 2) + 850,
                 350)).set_duration(bg_time).set_start(2)
        clip_logo = mpy.ImageClip(self.logo_src).set_fps(25).set_position(
            (20, 650)).set_duration(56).resize(
                (110, int(110 * 253 / 425))).set_start(0)

        clips = [
            clip_01, clip_02, bg, clip_logo, clip_title, clip_tool,
            clip_big_klg
        ]

        for n, text in enumerate(txt_klg):
            clip_klg = mpy.TextClip(
                text,
                fontsize=30,
                font='j:/fonts/HYXinHaiXingKaiW.ttf',
                color='#ffffff',
                align='West').set_position(
                    (890, 430 + n * 48)).set_duration(bg_time).set_start(2)
            clips.append(clip_klg)

        clip_end = mpy.VideoFileClip(
            self.end_clip_src, target_resolution=(h, w)).set_start(bg_time + 2)

        clips_rear = [bg_left, clip_left, clip_end]
        clips.extend(clips_rear)
        finalclip = mpy.CompositeVideoClip(clips)

        if bgm_src == 'default':
            bgm = mpy.AudioFileClip(
                self.bgm_src).set_duration(finalclip.duration -
                                           clip_end.duration).fx(
                                               afx.audio_fadeout, 0.8)
        else:
            bgm = mpy.AudioFileClip(bgm_src).set_duration(
                finalclip.duration - clip_end.duration).fx(
                    afx.audio_fadeout, 0.8)
        final_audio = mpy.CompositeAudioClip([bgm, finalclip.audio])
        mix = finalclip.set_audio(final_audio)

        out_mv = os.path.join(self.pic_dir, self.crs_name,
                              self.crs_name + '_视频号.mp4')  # '视频号' = WeChat Channels
        mix.write_videofile(out_mv)
Example 25
view = canvas.central_widget.add_view()

view.camera = 'turntable'
view.camera.fov = 40
view.camera.distance = 10

xx, yy = np.arange(-1, 1, .02), np.arange(-1, 1, .02)
X, Y = np.meshgrid(xx, yy)
R = np.sqrt(X**2 + Y**2)
Z = lambda t: 0.1 * np.sin(10 * R - 2 * np.pi * t)
surface = scene.visuals.SurfacePlot(x=xx - 0.1,
                                    y=yy + 0.2,
                                    z=Z(0),
                                    shading='smooth',
                                    color=(0.5, 0.5, 1, 1))
view.add(surface)
canvas.show()


def make_frame(t):
    surface.set_data(z=Z(t))  # Update the mathematical surface
    canvas.on_draw(None)  # Update the image on Vispy's canvas
    return _screenshot((0, 0, size_pixels[0], size_pixels[1]))


vispy_clip = jmpy.RGBAVideoClip(make_frame, duration=duration)
bg_clip = mpy.ColorClip(size_pixels, color=(255, 0, 0), duration=duration)

composite = mpy.CompositeVideoClip([bg_clip, vispy_clip])
composite.preview()
Example 26
def background():
    return mpy.ColorClip(size=SIZE,
                         color=BACKGROUND).set_duration(DURATION_TOTAL)
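To close, a minimal self-contained sketch of the pattern these examples share: a ColorClip as a background canvas with other clips composited on top (all names are local to this sketch; TextClip additionally needs ImageMagick installed):

import moviepy.editor as mpy

bg = mpy.ColorClip(size=(640, 360), color=(255, 255, 255), duration=3)
label = (mpy.TextClip("hello", color='black', fontsize=40)
         .set_position('center').set_duration(3))
mpy.CompositeVideoClip([bg, label]).write_videofile("hello.mp4", fps=24)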