def create_timelapse_video(input_image, image_id, path):

    print("Matched with one of the frame of video with id: " + image_id)

    images = load_frames(path + image_id + ".mp4")
    total_frame = len(images)

    image_files = []
    count = 0

    printProgressBar(0,
                     total_frame,
                     prefix='Progress:',
                     suffix='Complete',
                     length=50)
    for image in images:
        count += 1
        printProgressBar(count,
                         total_frame,
                         prefix='Progress:',
                         suffix='Complete',
                         length=50)
        if image is not None:
            styled_image = color_transfer(image, input_image)
            name = "styled_image" + str(count) + ".jpg"
            cv2.imwrite(name, styled_image)
            image_files.append(name)

    clip = ImageSequenceClip(image_files, fps=10)
    clip.write_videofile('output.mp4')
    cv2.destroyAllWindows()
    for k in image_files:
        os.remove(k)
    def run(self):
        clip = ImageSequenceClip(self.list, fps=self.fps)
        clip.write_videofile(self.path)  # to video
        # clip.write_gif(self.path.replace("mp4", "gif"))  # to gif

        # """生成gif"""
        # imageio.mimsave('what.gif',self.gif_list)
        self.trigger.emit()
        shutil.rmtree('temp')
Example #3
    def test(self):
        total_reward = 0
        for i in range(self.config.conf['test-num']):
            quat = self.ref_motion.euler_to_quat(0,0,0)
            _ = self.env._reset(Kp=self.config.conf['Kp'], Kd=self.config.conf['Kd'], base_pos_nom=[0,0,1.575], base_orn_nom=quat, fixed_base=True)
            # state = self.env._reset(Kp=self.config.conf['Kp'], Kd=self.config.conf['Kd'], base_pos_nom=[0, 0, 1.175], fixed_base=False)
            q_nom = self.ref_motion.ref_motion_dict()
            base_orn_nom = self.ref_motion.get_base_orn()

            # state = self.env._reset(Kp=self.config.conf['Kp'], Kd=self.config.conf['Kd'], q_nom=q_nom, base_orn_nom=base_orn_nom, base_pos_nom=[0, 0, 1.175], fixed_base=False)
            # self.env._setupCamera()
            self.env.startRendering()
            self.env._startLoggingVideo()

            for step in range(self.max_step_per_episode):
                if step>=2*self.network_freq and step<4*self.network_freq:
                    action = [0,0,0,0,0,0,0,0,0.1,0,0]
                else:
                    action = [0,0,0,0,0,0,0,0,0,0,0]
                # action = np.clip(action, self.config.conf['actor-output-bounds'][0],
                #                  self.config.conf['actor-output-bounds'][1])
                action = np.array([action]) if len(np.shape(action)) == 0 else np.array(action)

                rgb=self.env._render(roll=0,pitch=0,yaw=90)
                print(rgb.shape)
                self.image_list.append(rgb)
                for i in range(self.sampling_skip):
                    # action = self.control.rescale(ref_action, self.config.conf['action-bounds'],
                    #                               self.config.conf['actor-output-bounds'])
                    _,_,_,_ = self.env._step(action)

                    self.logging.add_run('action', action)

                    joint_angle = self.control.get_joint_angle()
                    self.logging.add_run('joint_angle', joint_angle)
                    readings = self.env.getExtendedReading()
                    ob = self.env.getObservation()
                    for l in range(len(ob)):
                        self.logging.add_run('observation' + str(l), ob[l])
                    # for key, value in readings.items():
                    #     self.logging.add_run(key, value)

            self.env._stopLoggingVideo()
            self.env.stopRendering()

        ave_reward = total_reward/self.config.conf['test-num']
        print(ave_reward)
        self.logging.save_run()


        clip = ImageSequenceClip(self.image_list, fps=25)
        clip.write_gif(self.dir_path+'/test.gif')
        clip.write_videofile(self.dir_path+'/test.mp4', fps=25, audio=False)
def make_movie(movie_name,
               input_folder,
               output_folder,
               file_format,
               fps,
               output_format='mp4',
               reverse=False):
    """
    function that makes the movie of the images data

    :param movie_name: name of the movie
    :type movie_name: string
    :param input_folder: folder where the image series is located
    :type input_folder: string
    :param output_folder: folder where the movie will be saved
    :type output_folder: string
    :param file_format: sets the format of the files to import
    :type file_format: string
    :param fps: frames per second
    :type fps: int
    :param output_format: sets the format for the output file;
                          supported types are .mp4 and .gif
                          (animated gifs create large files)
    :type output_format: string (, optional)
    :param reverse: sets whether the movie plays one way or there and back
    :type reverse: bool (, optional)

    """

    # searches the folder and finds the files
    file_list = glob.glob('./' + input_folder + '/*.' + file_format)

    # Sorts the files and makes a second, reversed list to go forward and back
    file_list.sort()
    file_list_rev = glob.glob('./' + input_folder + '/*.' + file_format)
    file_list_rev.sort(reverse=True)

    # combines the file list if including the reverse
    if reverse:
        new_list = file_list + file_list_rev
    else:
        new_list = file_list

    if output_format == 'gif':
        # makes an animated gif from the images
        clip = ImageSequenceClip(new_list, fps=fps)
        clip.write_gif(output_folder + '/{}.gif'.format(movie_name), fps=fps)
    else:
        # makes an mp4 from the images
        clip = ImageSequenceClip(new_list, fps=fps)
        clip.write_videofile(output_folder + '/{}.mp4'.format(movie_name),
                             fps=fps)
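
A minimal usage sketch for make_movie (hedged: the folder names, file format,
and fps below are illustrative assumptions, not taken from the code above):

make_movie(movie_name='timelapse',
           input_folder='frames',
           output_folder='movies',
           file_format='png',
           fps=10,
           output_format='mp4',
           reverse=True)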
Example #5
def test_1():
    images = []
    durations = []

    for i in range(5):
        durations.append(i)
        images.append("media/python_logo.png")
        durations.append(i)
        images.append("media/python_logo_upside_down.png")

    clip = ImageSequenceClip(images, durations=durations)
    assert clip.duration == sum(durations)
    clip.write_videofile("/tmp/ImageSequenceClip1.mp4", fps=30)
Example #7
def do_actual_lapse(lapse_instance_id,
                    fps,
                    output_size,
                    image_path_list=[],
                    image_id_list=[]):
    image_path_list = [str(i) for i in image_path_list]  # forcing str, see moviepy/issues/293
    # print image_path_list
    try:
        clip = ImageSequenceClip(image_path_list, fps=fps)
    except ValueError as exc:
        [generate_thumbs.delay(i) for i in image_id_list]
        do_actual_lapse.retry(kwargs={
            "lapse_instance_id": lapse_instance_id,
            "fps": fps,
            "output_size": output_size,
            "image_path_list": image_path_list,
            "image_id_list": image_id_list
        },
                              exc=exc,
                              countdown=15)
    lapse_instance = AutoLapseInstance.objects.get(pk=lapse_instance_id)
    uuid = shortuuid.uuid()

    alfile = AutoLapseInstanceFile.objects.create(instance=lapse_instance,
                                                  output_size=output_size,
                                                  uuid=uuid)
    path_prefix = target_path_generator(alfile, prefix=settings.MEDIA_ROOT)
    if not os.path.exists(path_prefix):
        os.makedirs(path_prefix)
    # print path_prefix
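    # H.264/yuv420p output needs even frame dimensions, so trim one pixel if necessary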
    if clip.h % 2 != 0:
        clip.size = (clip.w, clip.h - 1)
    if clip.w % 2 != 0:
        clip.size = (clip.w - 1, clip.h)

    clip.write_videofile(
        video_mp4_name_generator(alfile, prefix=settings.MEDIA_ROOT))
    clip.write_videofile(
        video_webm_name_generator(alfile, prefix=settings.MEDIA_ROOT))
    clip.write_gif(gif_name_generator(alfile, prefix=settings.MEDIA_ROOT))

    alfile.file_video_mp4 = video_mp4_name_generator(alfile)
    alfile.file_video_webm = video_webm_name_generator(alfile)
    alfile.file_video_gif = gif_name_generator(alfile)

    alfile.save()

    lapse_instance.status = LapseInstanceStatus.COMPLETED
    lapse_instance.save()
def gen_video(data_dir, output_data_dir, sess, image_shape, input_image, keep_prob, logits):
    if os.path.exists(output_data_dir):
        shutil.rmtree(output_data_dir)
    os.makedirs(output_data_dir, exist_ok=True)

    for name, image in gen_test_output(sess, logits, keep_prob, input_image, data_dir, image_shape):
        file_name = os.path.join(output_data_dir, name)
        scipy.misc.imsave(file_name, image)

    vid_clip = ImageSequenceClip(output_data_dir, fps=10)
    result_video = os.path.join(output_data_dir, "result.mp4")
    vid_clip.write_videofile(result_video)

    return result_video
Example #9
def job(item):
    fn = item
    outpath = os.path.join(fn, 'flow.mp4')
    if not os.path.exists(outpath):
        flows = torch.stack([
            torch.from_numpy(read_flow(_))
            for _ in glob(os.path.join(fn, '*.flo'))
        ])
        flows = list(normalize_flows(flows))
        rgb_flows = [make_uint8(flow2rgb(_.numpy())) for _ in flows]
        vid = ImageSequenceClip(rgb_flows, fps=8)
        vid.write_videofile(outpath, fps=8, verbose=False, logger=None)
        vid.close()
Example #10
def make_video(curdir, nbjobs=1):
    print("looking up files in ", curdir)
    rep_out = re.match(r".*rep(?P<num>\d+).*", curdir)
    if rep_out:
        rep = int(rep_out.group('num'))
    else:
        rep = 0

    allfiles = glob(j(curdir, 'screenshot_custom_*.png'))
    if rep < replim:  # Only deal with the first reps
        filesbygen = defaultdict(list)
        for fname in allfiles:
            name = basename(fname)
            out = re.match(r"""screenshot_custom_.+_
                           gen_(?P<gen>\d+)
                           (?:_ind_(?P<ind>\d+))?
                           .*\.png""", name, re.VERBOSE)
            if out:
                gen = out.group("gen")
                if out.group("ind"):
                    gen += "i" + out.group("ind")
                filesbygen[gen].append(fname)
        for gen, files in filesbygen.items():
            print("making movie for {}".format(gen))
            outname = j(curdir, '../mov_{}.mp4'.format(gen))
            try:
                newmov = ImageSequenceClip(sorted(files), fps=60)
            except:
                print(files)
                raise
            if exists(outname):
                print("{} already found, concatenating.".format(basename(outname)))
                prev = VideoFileClip(outname)
                newmov = concatenate_videoclips([prev, newmov])
            verbose = sys.stdout.isatty()
            newmov.write_videofile(outname, fps=60, verbose=verbose, progress_bar=verbose, threads=nbjobs)
            print("{} created".format(basename(outname)))
            for pngfile in files:
                os.remove(pngfile)
    else:
        print("Do not make movie for this rep {}, already have others".format(rep))
        for pngfile in allfiles:
            os.remove(pngfile)
Example #11
def save_video(frames, path, fps=15):
    from moviepy.video.io.ImageSequenceClip import ImageSequenceClip

    temp_dir = tempfile.TemporaryDirectory()
    logger.info("saving video",
                num_frames=len(frames),
                fps=fps,
                path=path,
                temp_dir=temp_dir.name)
    try:
        for i, frame in enumerate(tqdm(frames)):
            if torch.is_tensor(frame):
                frame = frame.permute(1, 2, 0).detach().cpu().numpy()
            frame_path = Path(temp_dir.name, f'{i:08d}.jpg')
            imageio.imsave(frame_path, (frame * 255).astype(np.uint8))

        video = ImageSequenceClip(temp_dir.name, fps=fps)
        video.write_videofile(str(path), preset='ultrafast', fps=fps)
    finally:
        temp_dir.cleanup()
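
A minimal usage sketch for save_video (hedged: per the code above, frames are
assumed to be float arrays in [0, 1], HWC for numpy or CHW for torch tensors):

import numpy as np

frames = [np.random.rand(64, 64, 3) for _ in range(30)]
save_video(frames, 'random.mp4', fps=15)
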
def merge_images_and_audio(images: DataList, audio: np.ndarray,
                           video_duration: float, sound_hz: int,
                           video_name: str):
    """
    Creates video with sound from image list and music.

    Args:
        images: List of images represented by a h x w x 3 numpy array.
        audio: A Numpy array representing the sound, of size Nx1 for mono, Nx2 for stereo.
        video_duration: Duration of the video in seconds (should be the same as the audio file).
        sound_hz: The hz of the audio file.
        video_name: The name of the resulting video file
    """
    # todo: there is still a problem with the audio here;
    # the audio should always contain two channels,
    # and the sample rate should then also work for both mono and stereo
    clip = ImageSequenceClip(images,
                             durations=[video_duration / len(images)] * len(images))
    s = audio.reshape((len(audio), 2))  # transform it from (N) to (N, 2)
    audio = AudioArrayClip(s, sound_hz)
    clip = clip.set_audio(audio)
    clip.write_videofile(video_name, fps=len(images) / video_duration)
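
A minimal usage sketch for merge_images_and_audio (hedged: the frame size,
duration, and tone below are illustrative assumptions, not from the code above):

import numpy as np

duration_s = 2.0
sound_hz = 44100
# ten gray 64 x 64 x 3 frames
frames = [np.full((64, 64, 3), 128, dtype=np.uint8) for _ in range(10)]
# a 440 Hz sine tone duplicated into two channels (stereo), values in [-1, 1]
t = np.linspace(0, duration_s, int(duration_s * sound_hz), endpoint=False)
tone = 0.5 * np.sin(2 * np.pi * 440 * t)
stereo = np.stack([tone, tone], axis=1)
merge_images_and_audio(frames, stereo, duration_s, sound_hz, 'tone_video.mp4')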
Example #14
img2_triangles = img2_triangles[:, [1, 0, 3, 2, 5, 4]]

Transforms = np.zeros((len(img1_triangles), 3, 3), dtype=float)

for i in range(len(img1_triangles)):
    source = img1_triangles[i]
    target = img2_triangles[i]
    Transforms[i] = calc_transform(source, target)

# (A) We have to determine the transformation between source and target matrices,
# in order to apply the transform at every step.

morphs = []

# (B)
# The t value is chosen so that a convex combination can be made.
# Inside the loop the source image blends into the target image;
# the function generates a smooth transition from source to target image.

for t in np.arange(0, 1.0001, 0.02):
    print("processing:\t", t * 100, "%")
    morphs.append(
        image_morph(image1, image2, img1_triangles, img2_triangles, Transforms,
                    t)[:, :, ::-1])

frames = list(morphs)
clip = ImageSequenceClip(frames, fps=12.0)
clip.write_videofile('video3.mp4', codec='mpeg4')
    y = int(current_hand_points[int(current_hand_points.shape[0] / 2)][0][1])

    vecX = result_vec1[0][y][x] * 5
    vecY = result_vec1[1][y][x] * 5

    #print("-----------------------------")

    current_hand_area = cv2.arrowedLine(
        current_hand_area, (x, y), (int((avX * 10) + x), int((avY * 10) + y)),
        (0, 0, 0), 1)

    image_for_temp = current_hand_area.copy()
    image_for_temp = cv2.cvtColor(image_for_temp, cv2.COLOR_GRAY2BGR)
    frames_for_video.append(image_for_temp)

    #cv2.imshow("asdasd", current_hand_area)
    #cv2.waitKey()
#endregion

#region VIDEO RECORDING

clip = ImageSequenceClip(frames_for_video, fps=5)  # for slow mo

try:
    clip.write_videofile('outputs/part1.mp4', codec="mpeg4")
except:
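    # the outputs folder probably does not exist yet; create it and retry the write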
    os.mkdir('outputs')
    clip.write_videofile('outputs/part1.mp4', codec="mpeg4")

#endregion
Example #16
from moviepy import *
from moviepy.editor import *
from moviepy.video.VideoClip import TextClip
from moviepy.video.compositing.CompositeVideoClip import CompositeVideoClip
from moviepy.video.io.ImageSequenceClip import ImageSequenceClip
import os

from moviepy.video.io.VideoFileClip import VideoFileClip

basic_directory = 'img'
basic_files = os.listdir(basic_directory)
clip = ImageSequenceClip([
    '{0}/{1}'.format(basic_directory, basic_files[0]),
    '{0}/{1}'.format(basic_directory, basic_files[1]),
    '{0}/{1}'.format(basic_directory, basic_files[2]),
    '{0}/{1}'.format(basic_directory, basic_files[3]),
],
                         fps=0.5)
clip.write_videofile("myHolidays_edited.mp4", audio='music/sunny.mp3')
clip = VideoFileClip("myHolidays_edited.mp4").subclip(0, 15)
clip.write_videofile("myHolidays_edited.mp4", codec='mpeg4')
Example #17
                        aTa[0, 0] += IxIx
                        aTa[0, 1] += IxIy
                        aTa[1, 0] += IxIy
                        aTa[1, 1] += IyIy

                        aTb[0, 0] += IxIt
                        aTb[1, 0] += IyIt

                # solve the normal equations aTa . uv = aTb for the flow vector uv
                aTa_inv = np.linalg.pinv(aTa)
                uv = -1 * np.dot(aTa_inv, aTb)

                uv_vector = (uv[0, 0], uv[1, 0])

                dist = np.linalg.norm(uv_vector)
                results.append(uv)

        #find average motion vector
        results = np.array(results)
        avg = (np.average(results, axis=0) * 10).astype(int)

        # draw that vector onto the original image
        bipedframe_original = cv2.arrowedLine(
            bipedframe_original, hand_pos,
            (hand_pos[0] - math.floor(avg[1, 0]),
             hand_pos[1] - math.floor(avg[0, 0])), (255, 0, 0), 4)
        result_images.append(bipedframe_original)
    #make_video(result_images)
    clip = ImageSequenceClip(result_images, fps=10)
    clip.write_videofile("part1.mp4")
Example #18
    def test(self):
        total_reward = 0
        for i in range(self.config.conf['test-num']):  #
            # _ = self.env._reset(Kp=self.config.conf['Kp'], Kd=self.config.conf['Kd'], base_pos_nom=[0,0,1.5], fixed_base=True)
            state = self.env._reset(Kp=self.config.conf['Kp'],
                                    Kd=self.config.conf['Kd'],
                                    base_pos_nom=[0, 0, 1.175],
                                    fixed_base=False)
            q_nom = self.ref_motion.ref_motion_dict()
            base_orn_nom = self.ref_motion.get_base_orn()

            # state = self.env._reset(Kp=self.config.conf['Kp'], Kd=self.config.conf['Kd'], q_nom=q_nom, base_orn_nom=base_orn_nom, base_pos_nom=[0, 0, 1.175], fixed_base=False)
            # self.env._setupCamera()
            self.env.startRendering()
            self.env._startLoggingVideo()
            self.ref_motion.reset(index=0)
            # self.ref_motion.random_count()

            self.control.reset(
                w_imitation=self.config.conf['imitation-weight'],
                w_task=self.config.conf['task-weight'])

            for step in range(self.max_step_per_episode):
                # self.env._setupCamera()
                t = time.time()
                gait_phase = self.ref_motion.count / self.ref_motion.dsr_length
                ref_angle = self.ref_motion.ref_joint_angle()
                ref_vel = self.ref_motion.ref_joint_vel()

                self.env.checkSelfContact()

                state = self.env.getExtendedObservation()
                state = np.squeeze(state)
                state = np.append(state, [
                    np.sin(np.pi * 2 * gait_phase),
                    np.cos(np.pi * 2 * gait_phase)
                ])
                # state = np.append(state,[0,0])

                action, actor_info = self.agent.agent.actor.get_action(state)
                mean = actor_info['mean']
                logstd = actor_info['logstd']
                action = mean
                # action = np.clip(action, self.config.conf['actor-output-bounds'][0],
                #                  self.config.conf['actor-output-bounds'][1])
                action = np.array([action]) if len(
                    np.shape(action)) == 0 else np.array(action)

                f = self.env.rejectableForce_xy(1.0 / self.network_freq)
                rgb = self.env._render()
                print(rgb.shape)
                self.image_list.append(rgb)

                # action = self.control.rescale(ref_action, self.config.conf['action-bounds'],
                #                               self.config.conf['actor-output-bounds'])
                self.control.update_ref(ref_angle, ref_vel, [])
                next_state, reward, terminal, info = self.control.control_step(
                    action, self.force, gait_phase)
                self.ref_motion.index_count()

                total_reward += reward

                ob = self.env.getObservation()
                ob_filtered = self.env.getFilteredObservation()
                # for l in range(len(ob)):
                #     self.logging.add_run('observation' + str(l), ob[l])
                #     self.logging.add_run('filtered_observation' + str(l), ob_filtered[l])
                self.logging.add_run('action', action)
                self.logging.add_run('ref_action', ref_angle)
                joint_angle = self.control.get_joint_angle()
                self.logging.add_run('joint_angle', joint_angle)
                readings = self.env.getExtendedReading()
                # for key, value in readings.items():
                #     self.logging.add_run(key, value)
                self.logging.add_run('task_reward', info['task_reward'])
                self.logging.add_run('imitation_reward',
                                     info['imitation_reward'])
                self.logging.add_run('total_reward', info['total_reward'])
                #
                # while 1:
                #     if(time.time()-t)>1.0/self.network_freq:
                #         break

                if terminal:
                    break
            self.env._stopLoggingVideo()
            self.env.stopRendering()

        clip = ImageSequenceClip(self.image_list, fps=25)
        clip.write_gif(self.dir_path + '/test.gif')
        clip.write_videofile(self.dir_path + '/test.mp4', fps=25, audio=False)
        ave_reward = total_reward / self.config.conf['test-num']

        print(ave_reward)
        self.logging.save_run()
def main(
    input_file: str,
    base_file: str,
    place_name: str,
    x0: int,
    x1: int,
    y0: int,
    y1: int,
    scaling_factor: int,
):
    print('Reading location data JSON...')
    location_data = json.loads(open(input_file).read())['locations']
    print('Data imported. Processing...')

    bins = list(range(1, 100, 1))
    minutes_step = 15
    # weight of previous frames over new one. The inverse of the decay factor
    frame_persistence_factor = 4

    all_minutes_starts = list(range(0, 24 * 60, minutes_step))
    base_map = np.mean(img.imread(base_file), axis=-1)
    base_map = np.stack([base_map, base_map, base_map], axis=-1)
    moving_average_frame = None
    quintiles = None
    filenames = []
    fig = None
    for frame_idx, selected_minute in enumerate([None] + all_minutes_starts):
        print(f'frame {frame_idx} of {len(all_minutes_starts)}')
        place_map, processed, skipped = get_locations(
            location_data,
            x0,
            x1,
            y0,
            y1,
            scaling_factor,
            minutes_since_last_midnight_filter=(
                (selected_minute, selected_minute +
                 minutes_step) if selected_minute is not None else None))

        place_map_draw = None

        if processed == skipped:
            print('no points for this map, generating an empty one')
            place_map_draw = place_map
        else:
            place_map_blurred = ndimage.filters.gaussian_filter(place_map, 1)
            flattened = place_map_blurred.flatten()
            if selected_minute is None:
                # the first iteration is over the non-time-filtered points
                # and is used to generate the bins once and for all
                quintiles = np.percentile(flattened[np.nonzero(flattened)],
                                          bins)
            place_map_draw = np.searchsorted(quintiles,
                                             place_map_blurred) / len(bins)

        if base_map.shape != place_map_draw.shape:
            base_map = resize(base_map,
                              place_map_draw.shape,
                              anti_aliasing=True)

        if moving_average_frame is None:
            moving_average_frame = place_map_draw
        else:
            moving_average_frame = (
                (moving_average_frame +
                 place_map_draw * frame_persistence_factor) /
                (1 + frame_persistence_factor))
        print('min/avg/max of original matrix:'
              f'{np.min(place_map_draw,axis=(0,1))}/'
              f'{np.average(place_map_draw,axis=(0,1))}/'
              f'{np.max(place_map_draw,axis=(0,1))}')
        my_dpi = 90
        if fig is None:
            fig = plt.figure(figsize=(place_map_draw.shape[1] / my_dpi,
                                      place_map_draw.shape[0] / my_dpi),
                             dpi=my_dpi)
        if selected_minute is not None:
            plt.title(f'Location history for zone: {place_name} and'
                      f' hour {int(selected_minute / 60)}:'
                      f'{(selected_minute % 60):02}'
                      f' + {minutes_step} minutes (UTC)')
        else:
            plt.title(f'Location history for zone: {place_name}'
                      ' at any moment of the day')

        plt.xlabel('Longitude')
        plt.ylabel('Latitude')
        # extent is used to show the coordinates in the axis
        plt.imshow(base_map,
                   extent=[v / 10000000 for v in [x0, x1, y0, y1]],
                   alpha=0.48)
        plt.imshow(moving_average_frame,
                   cmap=plt.cm.Spectral,
                   extent=[v / 10000000 for v in [x0, x1, y0, y1]],
                   origin='lower',
                   alpha=0.5)
        if selected_minute is not None:
            # note the :04 to add the trailing 0s
            # so the lexicographic order is numeric as well
            # and the subsequent command line command follows it
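            # (e.g. frame_idx 7 becomes '..._time_0007.png', which sorts before '..._time_0012.png')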
            frame_file = (f'locations_in_{place_name}_time_'
                          f'{frame_idx:04}.png')
            plt.savefig(frame_file)
            filenames.append(frame_file)
        else:
            plt.savefig(f'locations_in_{place_name}' '_all_time_weighted.png')
        if selected_minute is None:
            # for simplicity, everything is drawn on the same matrix
            # it has to be "cleared" to avoid a "flash" on the first frame
            moving_average_frame = None
            # then, also scale the quintiles so that the expected
            # distribution of values in a frame is normalized
            # for the total time; otherwise, with quintiles calculated
            # over the whole day, every frame would be dark
            quintiles = quintiles * len(all_minutes_starts)
        # clear the figure, faster than deleting and recreating a new one
        plt.clf()
    print('generating the GIF...')
    with imageio.get_writer(
            f'{place_name}.gif',
            mode='I',
            duration=0.3,
            subrectangles=True,
    ) as writer:
        for filename in filenames:
            print(f'Appending frame {filename} to the GIF')
            image = imageio.imread(filename)
            # GIF is quite space hungry
            if image.shape[0] > 500:
                factor = image.shape[0] / 500
                image = resize(
                    image, (round(image.shape[0] / factor),
                            round(image.shape[1] / factor), image.shape[2]),
                    anti_aliasing=True)
            writer.append_data(image)

    print('generating the video...')
    isc = ImageSequenceClip(filenames, fps=4)
    isc.write_videofile(f'{place_name}.webm')
Example #20
def make_video(images, output="part2.mp4"):
    clip = ImageSequenceClip(images, fps=10)
    clip.write_videofile(output)
Example #21
def test(loader, save_flag, epoch):
    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()
    sup_losses = AverageMeter()
    selfsup_acc = AverageMeter()
    selfsup_losses = AverageMeter()
    accs = AverageMeter()
    if not args.save_attn:
        model.eval()
    else:
        # ipdb.set_trace()
        model.module.fc = selfsup_model[0]
        gc = LayerGradCam(model, model.module.layer4)

    loss_fn = nn.CrossEntropyLoss(ignore_index=-1)
    if args.selfsup_loss == 'pred_middle':
        selfsup_loss_fn = nn.MSELoss()
    elif args.selfsup_loss == 'sort' or args.selfsup_loss == 'fps':
        selfsup_loss_fn = loss_fn
    elif args.selfsup_loss == 'ctc':
        selfsup_loss_fn = ctc_loss
    end = time.time()

    if save_flag:
        results = [['y', 'y_hat_vec', 'y_hat', 'viz_fn', 'fn', 't_start', 't_end']]
        if args.flow_histogram:
            results = [['x', 'y', 'y_hat_vec', 'y_hat', 'viz_fn', 'fn', 't_start', 't_end']]

    featsarr = []
    predsarr = []

    with tqdm(loader, desc="Test batch iteration", disable=args.local_rank > 0) as t:
        for batch_idx, (xs, ys, (fns, t_starts, t_ends, selfsup_info, *_)) in enumerate(t):
            data_time.update(time.time() - end)

            xs = xs.to(device)
            ys = ys.to(device)

            if args.get_features:
                _, feats = model(xs)
                for feat, fn, t_start, t_end in zip(feats.detach().cpu(), fns, t_starts, t_ends):
                    featsarr.append((feat, fn, t_start, t_end))
                continue

            if args.save_preds:
                _, feats = model(xs)
                pred_fps = selfsup_model(feats).argmax(1)
                for pred, fn, t_start, t_end in zip(pred_fps.detach().cpu(), fns, t_starts, t_ends):
                    predsarr.append((pred.item(), fn, t_start.item(), t_end.item()))
                continue

            if args.save_attn:
                with torch.no_grad():
                    y_hats = model(xs)
                    if args.local_rank <= 0: ipdb.set_trace()
                    yh_argmax = y_hats.argmax(dim=1)
                xs.requires_grad = True
                fps_ys = torch.LongTensor([args.fps_list.index(_) for _ in selfsup_info]).to(device)
                attr = gc.attribute(xs, yh_argmax)
                up_attr = LayerAttribution.interpolate(attr, (16, 112, 112), interpolate_mode='trilinear').to(torch.float)
                xs_ = torch.stack([unnormalize(x.cpu()) for x in xs])
                acts = xs_.cpu() * up_attr.cpu()
                acts = acts.cpu().detach().clamp(min=0)
                for act, fn, t_s, t_e, yh, y in zip(acts, fns, t_starts, t_ends, yh_argmax.tolist(), fps_ys.tolist()):
                    # if args.local_rank <= 0: ipdb.set_trace()
                    save_image(act.permute(1, 0, 2, 3), os.path.join(args.save_path, 'input', f'{os.path.splitext(os.path.basename(fn))[0]}_{int(1000*t_s)}_{int(1000*t_e)}_pred{yh}_gt{y}.png'),  normalize=True)
                accs.update(accuracy(y_hats, fps_ys)[0].item(), len(fps_ys))
                t.set_postfix(
                    Acc=accs.avg
                )
                continue

            if args.selfsup_loss:
                if args.selfsup_loss == 'pred_middle' or args.selfsup_loss == 'ctc':
                    _, prev_feats = model(xs[:, 0])
                    y_hats, mid_feats = model(xs[:, 1])
                    _, next_feats = model(xs[:, 2])
                    feats = torch.cat((prev_feats, next_feats), dim=1)
                    pred_mid_feats = selfsup_model(feats)
                    valid_pred_locs = (xs[:, 0].mean(dim=(1, 2, 3, 4)) > -0.999) & (
                            xs[:, 2].mean(dim=(1, 2, 3, 4)) > -0.999)
                    if args.selfsup_loss == 'pred_middle':
                        selfsup_loss = selfsup_loss_fn(pred_mid_feats[valid_pred_locs], mid_feats[valid_pred_locs])
                    elif args.selfsup_loss == 'ctc':
                        selfsup_loss = selfsup_loss_fn(pred_mid_feats[valid_pred_locs], mid_feats[valid_pred_locs],
                                                       feats[valid_pred_locs])
                    selfsup_len = valid_pred_locs.sum().item()
                elif args.selfsup_loss == 'sort':
                    sort_ys = torch.zeros_like(ys)
                    valid_pred_locs = (xs[:, 0].mean(dim=(1, 2, 3, 4)) > -0.999) & (
                            xs[:, 2].mean(dim=(1, 2, 3, 4)) > -0.999)

                    for i in range(len(xs)):
                        p = torch.randperm(3)
                        xs[i] = xs[i][p]
                        s = ''.join(map(str, p.tolist()))
                        try:
                            sort_ys[i] = sort_y_vocab.index(s)
                        except:
                            sort_ys[i] = sort_y_vocab.index(s[::-1])

                    _, prev_feats = model(xs[:, 0])
                    y_hats, mid_feats = model(xs[:, 1])  # nonsense, can't co train with sort for now
                    _, next_feats = model(xs[:, 2])
                    feats = torch.stack((prev_feats, mid_feats, next_feats), dim=1)
                    pred_perms = selfsup_model(feats)
                    sort_ys[~valid_pred_locs] = -1
                    selfsup_loss = selfsup_loss_fn(pred_perms, sort_ys)
                    selfsup_len = valid_pred_locs.sum().item()
                    selfsup_acc.update(accuracy(pred_perms[valid_pred_locs], sort_ys[valid_pred_locs])[0].item(),
                                       selfsup_len)
                elif args.selfsup_loss == 'fps':
                    fps_ys = torch.LongTensor([args.fps_list.index(_) for _ in selfsup_info]).to(device)
                    y_hats, feats = model(xs)
                    pred_fps = selfsup_model(feats)
                    selfsup_loss = selfsup_loss_fn(pred_fps, fps_ys)
                    selfsup_len = len(ys)
                    selfsup_acc.update(accuracy(pred_fps, fps_ys)[0].item(), selfsup_len)
                suploss = loss_fn(y_hats, ys)
                loss = suploss + args.selfsup_lambda * selfsup_loss
            else:
                y_hats = model(xs)
                suploss = loss_fn(y_hats, ys)
                loss = suploss
                # print(loss, y_hats, ys)

            if n_gpu > 1:
                loss = loss.mean()  # mean() to average on multi-gpu.

            losses.update(loss.item(), len(ys))
            if args.selfsup_loss:
                if (ys != -1).sum() > 0:
                    sup_losses.update(suploss.item(), (ys != -1).sum().item())
                    accs.update(accuracy(y_hats[ys != -1], ys[ys != -1])[0].item(), len(ys))
                selfsup_losses.update(selfsup_loss.item(), selfsup_len)
            else:
                accs.update(accuracy(y_hats[ys != -1], ys[ys != -1])[0].item(), len(ys))

            batch_time.update(time.time() - end)
            end = time.time()

            d = 0

            if save_flag:
                # TODO for self-supervised losses
                for x, y, y_hat, fn, t_start, t_end in zip(xs, ys,
                                                           F.softmax(y_hats, dim=1),
                                                           fns, t_starts, t_ends):
                    fn_ = fn
                    fn = '{0:02}_{1:010}.mp4'.format(
                        args.local_rank, batch_idx * args.batch_size + d)
                    other = ()
                    if args.flow_histogram:
                        other = (x.tolist(),)
                    results.append(
                        (
                            *other, y.item(), y_hat.tolist(), y_hat.argmax().item(), fn, fn_, t_start.item(),
                            t_end.item()))
                    if args.save_test_vids:
                        x = unnormalize(x.cpu()).permute(1, 2, 3, 0).numpy()
                        tt = ImageSequenceClip(list(x), fps=args.fps).fl_image(make_uint8)
                        tt.write_videofile(os.path.join(args.save_path, 'input', fn), logger=None)
                        tt.close()
                    d += 1

            postfix_kwargs = {}
            if args.selfsup_loss:
                postfix_kwargs = {'SelfsupLoss': selfsup_losses.avg, 'SupLoss': sup_losses.avg}
                if args.selfsup_loss == 'sort' or args.selfsup_loss == 'fps':
                    postfix_kwargs['SelfsupAcc'] = selfsup_acc.avg

            t.set_postfix(
                DataTime=data_time.avg,
                BatchTime=batch_time.avg,
                Loss=losses.avg,
                Acc=accs.avg,
                **postfix_kwargs
            )

    if args.get_features:
        torch.save(featsarr, os.path.join(args.save_path, 'input', 'features_and_fns.pt'))

    if args.save_preds:
        torch.save(predsarr, os.path.join(args.save_path, 'input', 'preds_and_fns.pt'))

    if save_flag:
        with open(os.path.join(args.save_path, 'results_{0:06}_{1:03}.csv'.format(args.local_rank, epoch)), 'w') as f:
            wr = csv.writer(f)
            wr.writerows(results)

    if args.selfsup_loss == 'ctc':
        return selfsup_losses.avg * -1, selfsup_losses.count

    if accs.count > 0:
        return accs.avg, accs.count
    else:
        return selfsup_acc.avg, selfsup_acc.count
Example #22
for r, (inputData, outputData, t0, t1, v0, v1, neuron_data) in enumerate(simulated):
    times = [t for t, v in list(neuron_data.values())[0]]
    nodes = list(neuron_data.keys())
    time_data = []
    for i, t in enumerate(times):
        tdata = {}
        for n in nodes:
            tdata[n] = neuron_data[n][i][1]

        tdata[0] = 0.0 if inputData[0] == 0 else -75.0
        tdata[1] = 0.0 if inputData[1] == 0 else -75.0

        time_data.append(tdata)

    for ti, (t, tdata) in enumerate(zip(times, time_data)):
        node_colors = {}
        for n, v in tdata.items():
            node_colors[n] = voltage_to_color(v)

        fn = 'spiking-{0:04d}'.format(len(filenames))
        dot = visualize.draw_net(winner, filename=fn, view=False, node_names=node_names, node_colors=node_colors,
                                 fmt='png', show_disabled=False, prune_unused=True)
        filenames.append(dot.filename + '.png')

clip = ImageSequenceClip(filenames, fps=30)
clip.write_videofile('spiking.mp4', codec="mpeg4", bitrate="2000k")

for fn in filenames:
    os.unlink(fn)
    os.unlink(fn[:-4])
Example #23
def createTimeLapse(dir, filename, fpsValue):
    # The first argument accepts several input types; here a directory path of images is used
    clip = ImageSequenceClip(dir, fps=fpsValue)

    # Export a timelapse video
    clip.write_videofile(filename)
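
For reference, a minimal sketch of the input forms ImageSequenceClip accepts for
its first argument (the paths below are illustrative assumptions):

import numpy as np
from moviepy.video.io.ImageSequenceClip import ImageSequenceClip

# 1) a folder name (all image files in it, in sorted order)
clip_from_dir = ImageSequenceClip('frames/', fps=24)
# 2) an explicit list of image file names
clip_from_files = ImageSequenceClip(['frames/0001.png', 'frames/0002.png'], fps=24)
# 3) a list of numpy arrays of shape (h, w, 3)
frames = [np.zeros((64, 64, 3), dtype=np.uint8) for _ in range(24)]
clip_from_arrays = ImageSequenceClip(frames, fps=24)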
Example #24
    def test(self):
        total_reward = 0
        for i in range(self.config.conf['test-num']):  #
            _ = self.env._reset(Kp=self.config.conf['Kp'],
                                Kd=self.config.conf['Kd'],
                                base_pos_nom=[0, 0, 1.5],
                                fixed_base=True)
            # self.env._setupCamera()
            self.env.startRendering()
            self.env._startLoggingVideo()
            self.control.reset(w_imitation=0.5, w_task=0.5)

            for step in range(self.max_step_per_episode):
                # self.env._setupCamera()
                t = time.time()
                state = self.env.getExtendedObservation()

                action, actor_info = self.agent.agent.actor.get_action(state)
                mean = actor_info['mean']
                logstd = actor_info['logstd']
                action = mean
                # action = np.clip(action, self.config.conf['actor-output-bounds'][0],
                #                  self.config.conf['actor-output-bounds'][1])
                action = np.array([action]) if len(
                    np.shape(action)) == 0 else np.array(action)

                f = self.env.rejectableForce_xy(1.0 / self.network_freq)
                rgb = self.env._render()
                print(rgb.shape)
                self.image_list.append(rgb)
                # self.image.set_data(rgb)
                # self.ax.plot([0])
                # plt.pause(0.005)

                # if step == 5 * self.network_freq:
                #     if f[1] == 0:
                #         self.force = np.array([600.0 * self.network_freq / 10.0, 0.0, 0.0])
                #     else:
                #         self.force = np.array([1.0 * f[1], 0, 0])
                #     print(self.force)
                # elif step == 11 * self.network_freq:
                #     if f[0] == 0:
                #         self.force = np.array([-600.0 * self.network_freq / 10.0, 0.0, 0.0])
                #     else:
                #         self.force = [1.0 * f[0], 0, 0]
                #     print(self.force)
                # elif step == 17 * self.network_freq:
                #     if f[2] == 0:
                #         self.force = np.array([0.0, -800.0 * self.network_freq / 10.0, 0.0])
                #     else:
                #         self.force = [0, 1.0 * f[2], 0]
                #     print(self.force)
                # elif step == 23 * self.network_freq:
                #     if f[3] == 0:
                #         self.force = np.array([0.0, 800.0 * self.network_freq / 10.0, 0.0])
                #     else:
                #         self.force = [0, 1.0 * f[3], 0]
                #     print(self.force)
                # else:
                #     self.force = [0, 0, 0]

                next_state, reward, done, info = self.control.control_step(
                    action, self.force,
                    np.zeros((len(self.config.conf['controlled-joints']), )))

                total_reward += reward

                ob = self.env.getObservation()
                ob_filtered = self.env.getFilteredObservation()
                for l in range(len(ob)):
                    self.logging.add_run('observation' + str(l), ob[l])
                    self.logging.add_run('filtered_observation' + str(l),
                                         ob_filtered[l])
                self.logging.add_run('action', action)
                readings = self.env.getExtendedReading()
                for key, value in readings.items():
                    self.logging.add_run(key, value)
                self.logging.add_run('task_reward', info['task_reward'])
                self.logging.add_run('imitation_reward',
                                     info['imitation_reward'])
                self.logging.add_run('total_reward', info['total_reward'])
                #
                # while 1:
                #     if(time.time()-t)>1.0/self.network_freq:
                #         break

                if done:
                    break
            self.env._stopLoggingVideo()
            self.env.stopRendering()

        clip = ImageSequenceClip(self.image_list, fps=25)
        clip.write_gif(self.dir_path + '/test.gif')
        clip.write_videofile(self.dir_path + '/test.mp4', fps=25, audio=False)
        ave_reward = total_reward / self.config.conf['test-num']

        print(ave_reward)
        self.logging.save_run()
Example #25
                'image': open('./image_data/frame' + str(currentframe) + '.jpg', 'rb'),
            },
            headers={'api-key': 'PUT API KEY FOR DEEPAI HERE'}
        )
        response = r.json()
        print(response)

        imagelink = requests.get(response['output_url'])

        file = open("./deep_image/" + str(currentframe) + '.jpg', "wb")
        file.write(imagelink.content)
        file.close()
        currentframe += 1
    else:
        break

cap = cv2.VideoCapture("video.mp4")

fpsa = cap.get(cv2.CAP_PROP_FPS)
print(fpsa)
im = Image.open('./deep_image/0.jpg')

print(im.size)
print(type(im.size))
w, h = im.size

clip = ImageSequenceClip("./deep_image/", fps = fpsa)

clip.write_videofile("deep_video.mp4", fps=clip.fps,
                      audio_bitrate="1000k", bitrate="4000k")
Example #26
    def test(self):
        total_reward = 0
        for i in range(self.config.conf['test-num']):  #
            self.control.reset()
            self.motion.reset(index=0)
            self.motion.count = 0
            # self.motion.random_count()
            q_nom = self.motion.ref_motion_dict()

            print(self.motion.get_base_orn())
            # print(q_nom['torsoPitch'])
            # print(self.motion.ref_motion())
            print(q_nom)
            base_orn_nom = self.motion.get_base_orn()  # [0.000, 0.078, 0.000, 0.997] # [0, 0, 0, 1]
            print(base_orn_nom)
            _ = self.env._reset(Kp=self.config.conf['Kp'],
                                Kd=self.config.conf['Kd'],
                                base_pos_nom=[0, 0, 1.5],
                                fixed_base=False,
                                q_nom=q_nom,
                                base_orn_nom=base_orn_nom)
            left_foot_link_state = p.getLinkState(
                self.env.r,
                self.env.jointIdx['leftAnkleRoll'],
                computeLinkVelocity=0)
            left_foot_link_dis = np.array(left_foot_link_state[0])
            right_foot_link_state = p.getLinkState(
                self.env.r,
                self.env.jointIdx['rightAnkleRoll'],
                computeLinkVelocity=0)
            right_foot_link_dis = np.array(right_foot_link_state[0])
            print(left_foot_link_dis - right_foot_link_dis)
            # ref_action = self.motion.ref_motion()
            # for i in range(len(self.config.conf['controlled-joints'])):
            #     q_nom.update({self.config.conf['controlled-joints'][i]:ref_action[i]})
            #
            # # _ = self.env._reset(Kp=self.config.conf['Kp'], Kd=self.config.conf['Kd'])
            # _ = self.env._reset(Kp=self.config.conf['Kp'], Kd=self.config.conf['Kd'], q_nom=q_nom, base_orn_nom=base_orn_nom)
            # self.env._setupCamera()
            self.env.startRendering()
            self.env._startLoggingVideo()

            print(self.motion.index)

            for step in range(self.max_step_per_episode):
                # self.env._setupCamera()
                t = time.time()
                state = self.env.getExtendedObservation()

                # action = self.motion.ref_motion_avg()
                # ref_angle, ref_vel = self.motion.ref_motion()
                ref_angle = self.motion.ref_joint_angle()
                ref_vel = self.motion.ref_joint_vel()
                action = self.control.rescale(
                    ref_angle, self.config.conf['action-bounds'],
                    self.config.conf['actor-output-bounds'])

                # rgb=self.env._render(pitch=0)
                # # print(rgb.shape)
                # self.image_list.append(rgb)

                next_state, reward, done, info = self.control.control_step(
                    action, self.force)
                self.motion.index_count()

                total_reward += reward
                self.logging.add_run('ref_angle', np.squeeze(ref_angle))
                self.logging.add_run('ref_vel', np.squeeze(ref_vel))
                # self.logging.add_run('measured_action', np.squeeze(self.control.get_joint_angles()))
                ob = self.env.getObservation()
                ob_filtered = self.env.getFilteredObservation()
                for l in range(len(ob)):
                    self.logging.add_run('observation' + str(l), ob[l])
                    self.logging.add_run('filtered_observation' + str(l),
                                         ob_filtered[l])
                self.logging.add_run('action', action)
                # readings = self.env.getExtendedReading()
                # for key, value in readings.items():
                #     self.logging.add_run(key, value)
                #
                # while 1:
                #     if(time.time()-t)>1.0/self.network_freq:
                #         break

                if done:
                    break
                # print(time.time()-t)
            self.env._stopLoggingVideo()
            self.env.stopRendering()

        ave_reward = total_reward / self.config.conf['test-num']

        clip = ImageSequenceClip(self.image_list, fps=25)
        clip.write_gif('test.gif')
        clip.write_videofile('test.mp4', fps=25, audio=False)

        print(ave_reward)
        self.logging.save_run()
    for i in range(len(black_coordinates)):
        blank_image_result[(black_coordinates[i][0]), (
            black_coordinates[i][1])] = histYellow[(black_coordinates[i][0]),
                                                   (black_coordinates[i][1])]

    for i in range(len(red_coordinates)):
        blank_image_result[(red_coordinates[i][0]), (
            red_coordinates[i][1])] = histRed[(red_coordinates[i][0]),
                                              (red_coordinates[i][1])]

    for i in range(len(green_coordinates)):
        blank_image_result[(green_coordinates[i][0]), (
            green_coordinates[i][1])] = histBlue[(green_coordinates[i][0]),
                                                 (green_coordinates[i][1])]

    blank_image_result = cv2.cvtColor(blank_image_result, cv2.COLOR_BGR2RGB)
    result_images.append(blank_image_result)

height, width, layers = image_list[0].shape

cv2.destroyAllWindows()
clip = ImageSequenceClip(result_images, fps=25)

try:
    clip.write_videofile('part3Results/part3_' + image_sequence_name + '.mp4',
                         codec="mpeg4")
except:
    os.mkdir('part3Results')
    clip.write_videofile('part3Results/part3_' + image_sequence_name + '.mp4',
                         codec="mpeg4")
Example #28
def test(loader, save_flag, epoch):
    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()
    accs = AverageMeter()
    model.eval()

    gc = LayerGradCam(model, model.layer4)

    loss_fn = nn.CrossEntropyLoss()

    end = time.time()

    if save_flag:
        results = [[
            'y', 'y_hat_vec', 'y_hat', 'viz_fn', 'fn', 't_start', 't_end'
        ]]

    with tqdm(loader, desc="Test batch iteration",
              disable=args.local_rank > 0) as t:
        for batch_idx, (xs, ys, (fns, t_starts, t_ends)) in enumerate(t):
            data_time.update(time.time() - end)

            xs = xs.to(device)
            ys = ys.to(device)

            y_hats = model(xs)
            loss = loss_fn(y_hats, ys)

            if n_gpu > 1:
                loss = loss.mean()  # mean() to average on multi-gpu.

            losses.update(loss.item(), len(ys))
            accs.update(accuracy(y_hats, ys)[0].item(), len(ys))

            batch_time.update(time.time() - end)
            end = time.time()

            d = 0

            if save_flag:
                for x, y, y_hat, fn, t_s, t_e in zip(xs, ys,
                                                     F.softmax(y_hats, dim=1),
                                                     fns, t_starts, t_ends):
                    x = unnormalize(x.cpu()).permute(1, 2, 3, 0).numpy()
                    fn_ = fn
                    fn = '{0:02}_{1:010}.mp4'.format(
                        args.local_rank, batch_idx * args.batch_size + d)
                    results.append(
                        (y.item(), y_hat.tolist(), y_hat.argmax().item(), fn,
                         fn_, t_s.item(), t_e.item()))
                    clip = ImageSequenceClip(list(x),
                                             fps=args.fps).fl_image(make_uint8)
                    clip.write_videofile(os.path.join(args.save_path, 'input',
                                                      fn),
                                         logger=None)
                    clip.close()
                    d += 1

            t.set_postfix(DataTime=data_time.avg,
                          BatchTime=batch_time.avg,
                          Loss=losses.avg,
                          Acc=accs.avg)

    if save_flag:
        with open(
                os.path.join(
                    args.save_path,
                    'results_{0:06}_{1:03}.csv'.format(args.local_rank,
                                                       epoch)), 'w') as f:
            wr = csv.writer(f)
            wr.writerows(results)

    return accs.avg, accs.count
    def generateClip(self, filename):
        glutHideWindow()
        self.runSimulation()
        clip = ImageSequenceClip(self.frames, fps=self.FPS)
        clip.write_videofile(filename, fps=self.FPS)