import os

import numpy as np


def gif(filename, array, fps=10):
    """Creates a gif from a stack of images using moviepy.

    Notes
    -----
    Works with the current GitHub version of moviepy (not the pip version):
    https://github.com/Zulko/moviepy/commit/d4c9c37bc88261d8ed8b5d9b7c317d13b2cdf62e

    Usage
    -----
    >>> X = np.random.randn(100, 64, 64)
    >>> gif('test.gif', X)

    Parameters
    ----------
    filename : string
        The filename of the gif to write to.
    array : array_like
        A numpy array that contains a sequence of images.
    fps : int
        Frames per second (default: 10).
    """
    from moviepy.video.io.ImageSequenceClip import ImageSequenceClip

    # ensure that the file has the .gif extension
    fname, _ = os.path.splitext(filename)
    filename = fname + '.gif'

    # copy into the color dimension if the images are black and white
    if array.ndim == 3:
        array = array[..., np.newaxis] * np.ones(3)

    # make the moviepy clip
    clip = ImageSequenceClip(list(array), fps=fps)
    clip.write_gif(filename, fps=fps)
    return clip
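A short usage sketch for the helper above, assuming only NumPy; frame values are kept in the 0-255 range that moviepy treats as 8-bit pixels:

frames = (np.random.rand(50, 64, 64) * 255).astype(np.uint8)  # 50 grayscale frames
gif('noise', frames, fps=15)  # the helper normalizes the filename to noise.gif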
Example 2
import os

import numpy as np
from moviepy.video.io.ImageSequenceClip import ImageSequenceClip


def write_gif(filepath, images, fps=24):
    """Saves a sequence of images as an animated GIF.

    Parameters
    ----------
    filepath: str
        The filepath ending with *.gif where to save the file.
    images: list(3-D array) or 4-D array
        A list of images, or a 4-D array where the first dimension
        represents the time axis.
    fps: int, optional
        The frame rate.
    """
    # to list
    if not isinstance(images, list):
        splitted = np.split(images, images.shape[0])
        images = [np.squeeze(s, axis=(0, )) for s in splitted]
    elif len(images) == 0:
        return

    # ensure directory exists
    dirpath = os.path.dirname(filepath)
    if dirpath and not os.path.exists(dirpath):
        os.makedirs(dirpath)

    # scale factor for float images (`tt` is this snippet's own image utility package)
    factor = 1
    if tt.utils.image.is_float_image(images[0]):
        factor = 255

    clip = ImageSequenceClip([img * factor for img in images], fps=fps)
    clip.write_gif(filepath, verbose=False)
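A brief usage sketch for write_gif, under the assumption that frames arrive as a 4-D float array in [0, 1], which is the case the 255 scale factor above is meant to catch:

video = np.random.rand(30, 48, 48, 3)          # 30 float frames in [0, 1], time axis first
write_gif('out/animation.gif', video, fps=12)  # the 'out' directory is created if missing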
Example 3
    def test(self):
        total_reward = 0
        for i in range(self.config.conf['test-num']):
            quat = self.ref_motion.euler_to_quat(0,0,0)
            _ = self.env._reset(Kp=self.config.conf['Kp'], Kd=self.config.conf['Kd'], base_pos_nom=[0,0,1.575], base_orn_nom=quat, fixed_base=True)
            # state = self.env._reset(Kp=self.config.conf['Kp'], Kd=self.config.conf['Kd'], base_pos_nom=[0, 0, 1.175], fixed_base=False)
            q_nom = self.ref_motion.ref_motion_dict()
            base_orn_nom = self.ref_motion.get_base_orn()

            # state = self.env._reset(Kp=self.config.conf['Kp'], Kd=self.config.conf['Kd'], q_nom=q_nom, base_orn_nom=base_orn_nom, base_pos_nom=[0, 0, 1.175], fixed_base=False)
            # self.env._setupCamera()
            self.env.startRendering()
            self.env._startLoggingVideo()

            for step in range(self.max_step_per_episode):
                if 2 * self.network_freq <= step < 4 * self.network_freq:
                    action = [0, 0, 0, 0, 0, 0, 0, 0, 0.1, 0, 0]
                else:
                    action = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
                # action = np.clip(action, self.config.conf['actor-output-bounds'][0],
                #                  self.config.conf['actor-output-bounds'][1])
                action = np.array([action]) if len(np.shape(action)) == 0 else np.array(action)

                rgb = self.env._render(roll=0, pitch=0, yaw=90)
                print(rgb.shape)
                self.image_list.append(rgb)
                for _ in range(self.sampling_skip):
                    # action = self.control.rescale(ref_action, self.config.conf['action-bounds'],
                    #                               self.config.conf['actor-output-bounds'])
                    _, reward, _, _ = self.env._step(action)
                    # accumulate the per-step reward so the average printed below is meaningful
                    total_reward += reward

                    self.logging.add_run('action', action)

                    joint_angle = self.control.get_joint_angle()
                    self.logging.add_run('joint_angle', joint_angle)
                    readings = self.env.getExtendedReading()
                    ob = self.env.getObservation()
                    for l in range(len(ob)):
                        self.logging.add_run('observation' + str(l), ob[l])
                    # for key, value in readings.items():
                    #     self.logging.add_run(key, value)

            self.env._stopLoggingVideo()
            self.env.stopRendering()

        ave_reward = total_reward/self.config.conf['test-num']
        print(ave_reward)
        self.logging.save_run()


        clip = ImageSequenceClip(self.image_list, fps=25)
        clip.write_gif(self.dir_path+'/test.gif')
        clip.write_videofile(self.dir_path+'/test.mp4', fps=25, audio=False)
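The pattern in the method above (collect rendered frames during the rollout, then encode once at the end) reduces to a few lines; a standalone sketch, assuming the frames are equally sized uint8 RGB arrays:

import numpy as np
from moviepy.video.io.ImageSequenceClip import ImageSequenceClip

image_list = [np.zeros((120, 160, 3), dtype=np.uint8) for _ in range(50)]  # placeholder frames
clip = ImageSequenceClip(image_list, fps=25)
clip.write_gif('test.gif')                              # animated GIF
clip.write_videofile('test.mp4', fps=25, audio=False)   # H.264 video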
import glob

from moviepy.video.io.ImageSequenceClip import ImageSequenceClip


def make_movie(movie_name,
               input_folder,
               output_folder,
               file_format,
               fps,
               output_format='mp4',
               reverse=False):
    """
    Function that makes a movie from the image data.

    :param movie_name: name of the movie
    :type movie_name: string
    :param input_folder: folder where the image series is located
    :type input_folder: string
    :param output_folder: folder where the movie will be saved
    :type output_folder: string
    :param file_format: sets the format of the files to import
    :type file_format: string
    :param fps: frames per second
    :type fps: int
    :param output_format: sets the format for the output file;
                          supported types are .mp4 and gif
                          (animated gifs create large files)
    :type output_format: string, optional
    :param reverse: sets whether the movie plays one way or there and back
    :type reverse: bool, optional
    """

    # searches the folder and finds the files
    file_list = glob.glob('./' + input_folder + '/*.' + file_format)

    # sorts the files and builds a second, reversed list for the return trip
    file_list.sort()
    file_list_rev = list(reversed(file_list))

    # combines the file list if including the reverse
    if reverse:
        new_list = file_list + file_list_rev
    else:
        new_list = file_list

    if output_format == 'gif':
        # makes an animated gif from the images
        clip = ImageSequenceClip(new_list, fps=fps)
        clip.write_gif(output_folder + '/{}.gif'.format(movie_name), fps=fps)
    else:
        # makes an mp4 from the images
        clip = ImageSequenceClip(new_list, fps=fps)
        clip.write_videofile(output_folder + '/{}.mp4'.format(movie_name),
                             fps=fps)
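A hypothetical call of make_movie; the folder names and format below are placeholders, and the output folder is assumed to already exist:

# Build a 10 fps mp4 named scan.mp4 in ./movies from the PNG frames in ./frames.
make_movie('scan', 'frames', 'movies', 'png', fps=10, output_format='mp4')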
Example 5
def do_actual_lapse(lapse_instance_id,
                    fps,
                    output_size,
                    image_path_list=[],
                    image_id_list=[]):
    image_path_list = [str(i) for i in image_path_list
                       ]  #forcing str moviepy/issues/293
    # print image_path_list
    try:
        clip = ImageSequenceClip(image_path_list, fps=fps)
    except ValueError as exc:
        [generate_thumbs.delay(i) for i in image_id_list]
        do_actual_lapse.retry(kwargs={
            "lapse_instance_id": lapse_instance_id,
            "fps": fps,
            "output_size": output_size,
            "image_path_list": image_path_list,
            "image_id_list": image_id_list
        },
                              exc=exc,
                              countdown=15)
    lapse_instance = AutoLapseInstance.objects.get(pk=lapse_instance_id)
    uuid = shortuuid.uuid()

    alfile = AutoLapseInstanceFile.objects.create(instance=lapse_instance,
                                                  output_size=output_size,
                                                  uuid=uuid)
    path_prefix = target_path_generator(alfile, prefix=settings.MEDIA_ROOT)
    if not os.path.exists(path_prefix):
        os.makedirs(path_prefix)
    # print path_prefix
    # libx264/yuv420p output requires even frame dimensions, so trim a pixel if needed
    if clip.h % 2 != 0:
        clip.size = (clip.w, clip.h - 1)
    if clip.w % 2 != 0:
        clip.size = (clip.w - 1, clip.h)

    clip.write_videofile(
        video_mp4_name_generator(alfile, prefix=settings.MEDIA_ROOT))
    clip.write_videofile(
        video_webm_name_generator(alfile, prefix=settings.MEDIA_ROOT))
    clip.write_gif(gif_name_generator(alfile, prefix=settings.MEDIA_ROOT))

    alfile.file_video_mp4 = video_mp4_name_generator(alfile)
    alfile.file_video_webm = video_webm_name_generator(alfile)
    alfile.file_video_gif = gif_name_generator(alfile)

    alfile.save()

    lapse_instance.status = LapseInstanceStatus.COMPLETED
    lapse_instance.save()
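The two clip.size checks above exist because H.264 output with the default yuv420p pixel format cannot encode odd widths or heights. A minimal alternative sketch, assuming the frames are NumPy arrays that can be trimmed before the clip is built:

import numpy as np

def trim_to_even(frame):
    """Drop at most one row/column so both dimensions are even (required by yuv420p H.264)."""
    h, w = frame.shape[:2]
    return frame[:h - (h % 2), :w - (w % 2)]

# `raw_frames` is a placeholder list of HxWxC uint8 arrays.
raw_frames = [np.zeros((121, 161, 3), dtype=np.uint8)] * 5
even_frames = [trim_to_even(f) for f in raw_frames]  # every frame is now 120x160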
Example 7
    def test(self):
        total_reward = 0
        for i in range(self.config.conf['test-num']):
            # _ = self.env._reset(Kp=self.config.conf['Kp'], Kd=self.config.conf['Kd'], base_pos_nom=[0,0,1.5], fixed_base=True)
            state = self.env._reset(Kp=self.config.conf['Kp'],
                                    Kd=self.config.conf['Kd'],
                                    base_pos_nom=[0, 0, 1.175],
                                    fixed_base=False)
            q_nom = self.ref_motion.ref_motion_dict()
            base_orn_nom = self.ref_motion.get_base_orn()

            # state = self.env._reset(Kp=self.config.conf['Kp'], Kd=self.config.conf['Kd'], q_nom=q_nom, base_orn_nom=base_orn_nom, base_pos_nom=[0, 0, 1.175], fixed_base=False)
            # self.env._setupCamera()
            self.env.startRendering()
            self.env._startLoggingVideo()
            self.ref_motion.reset(index=0)
            # self.ref_motion.random_count()

            self.control.reset(
                w_imitation=self.config.conf['imitation-weight'],
                w_task=self.config.conf['task-weight'])

            for step in range(self.max_step_per_episode):
                # self.env._setupCamera()
                t = time.time()
                gait_phase = self.ref_motion.count / self.ref_motion.dsr_length
                ref_angle = self.ref_motion.ref_joint_angle()
                ref_vel = self.ref_motion.ref_joint_vel()

                self.env.checkSelfContact()

                state = self.env.getExtendedObservation()
                state = np.squeeze(state)
                state = np.append(state, [
                    np.sin(np.pi * 2 * gait_phase),
                    np.cos(np.pi * 2 * gait_phase)
                ])
                # state = np.append(state,[0,0])

                action, actor_info = self.agent.agent.actor.get_action(state)
                mean = actor_info['mean']
                logstd = actor_info['logstd']
                action = mean
                # action = np.clip(action, self.config.conf['actor-output-bounds'][0],
                #                  self.config.conf['actor-output-bounds'][1])
                action = np.array([action]) if len(
                    np.shape(action)) == 0 else np.array(action)

                f = self.env.rejectableForce_xy(1.0 / self.network_freq)
                rgb = self.env._render()
                print(rgb.shape)
                self.image_list.append(rgb)

                # action = self.control.rescale(ref_action, self.config.conf['action-bounds'],
                #                               self.config.conf['actor-output-bounds'])
                self.control.update_ref(ref_angle, ref_vel, [])
                next_state, reward, terminal, info = self.control.control_step(
                    action, self.force, gait_phase)
                self.ref_motion.index_count()

                total_reward += reward

                ob = self.env.getObservation()
                ob_filtered = self.env.getFilteredObservation()
                # for l in range(len(ob)):
                #     self.logging.add_run('observation' + str(l), ob[l])
                #     self.logging.add_run('filtered_observation' + str(l), ob_filtered[l])
                self.logging.add_run('action', action)
                self.logging.add_run('ref_action', ref_angle)
                joint_angle = self.control.get_joint_angle()
                self.logging.add_run('joint_angle', joint_angle)
                readings = self.env.getExtendedReading()
                # for key, value in readings.items():
                #     self.logging.add_run(key, value)
                self.logging.add_run('task_reward', info['task_reward'])
                self.logging.add_run('imitation_reward',
                                     info['imitation_reward'])
                self.logging.add_run('total_reward', info['total_reward'])
                #
                # while 1:
                #     if(time.time()-t)>1.0/self.network_freq:
                #         break

                if terminal:
                    break
            self.env._stopLoggingVideo()
            self.env.stopRendering()

        clip = ImageSequenceClip(self.image_list, fps=25)
        clip.write_gif(self.dir_path + '/test.gif')
        clip.write_videofile(self.dir_path + '/test.mp4', fps=25, audio=False)
        ave_reward = total_reward / self.config.conf['test-num']

        print(ave_reward)
        self.logging.save_run()
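The state augmentation above encodes the cyclic gait phase as a (sin, cos) pair so the feature stays continuous when the phase wraps from 1 back to 0; a minimal standalone sketch of that encoding:

import numpy as np

def encode_phase(count, cycle_length):
    """Map a cyclic counter in [0, cycle_length) to a continuous 2-D feature."""
    phase = count / cycle_length                       # gait phase in [0, 1)
    return np.array([np.sin(2 * np.pi * phase), np.cos(2 * np.pi * phase)])

state = np.zeros(10)                                   # placeholder observation
state = np.append(state, encode_phase(3, 30))          # augmented state, length 12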
Example 8
    def test(self):
        total_reward = 0
        for i in range(self.config.conf['test-num']):
            self.control.reset()
            self.motion.reset(index=0)
            self.motion.count = 0
            # self.motion.random_count()
            q_nom = self.motion.ref_motion_dict()

            print(self.motion.get_base_orn())
            # print(q_nom['torsoPitch'])
            # print(self.motion.ref_motion())
            print(q_nom)
            base_orn_nom = self.motion.get_base_orn()  # [0.000, 0.078, 0.000, 0.997] # [0, 0, 0, 1]
            print(base_orn_nom)
            _ = self.env._reset(Kp=self.config.conf['Kp'],
                                Kd=self.config.conf['Kd'],
                                base_pos_nom=[0, 0, 1.5],
                                fixed_base=False,
                                q_nom=q_nom,
                                base_orn_nom=base_orn_nom)
            left_foot_link_state = p.getLinkState(
                self.env.r,
                self.env.jointIdx['leftAnkleRoll'],
                computeLinkVelocity=0)
            left_foot_link_dis = np.array(left_foot_link_state[0])
            right_foot_link_state = p.getLinkState(
                self.env.r,
                self.env.jointIdx['rightAnkleRoll'],
                computeLinkVelocity=0)
            right_foot_link_dis = np.array(right_foot_link_state[0])
            print(left_foot_link_dis - right_foot_link_dis)
            # ref_action = self.motion.ref_motion()
            # for i in range(len(self.config.conf['controlled-joints'])):
            #     q_nom.update({self.config.conf['controlled-joints'][i]:ref_action[i]})
            #
            # # _ = self.env._reset(Kp=self.config.conf['Kp'], Kd=self.config.conf['Kd'])
            # _ = self.env._reset(Kp=self.config.conf['Kp'], Kd=self.config.conf['Kd'], q_nom=q_nom, base_orn_nom=base_orn_nom)
            # self.env._setupCamera()
            self.env.startRendering()
            self.env._startLoggingVideo()

            print(self.motion.index)

            for step in range(self.max_step_per_episode):
                # self.env._setupCamera()
                t = time.time()
                state = self.env.getExtendedObservation()

                # action = self.motion.ref_motion_avg()
                # ref_angle, ref_vel = self.motion.ref_motion()
                ref_angle = self.motion.ref_joint_angle()
                ref_vel = self.motion.ref_joint_vel()
                action = self.control.rescale(
                    ref_angle, self.config.conf['action-bounds'],
                    self.config.conf['actor-output-bounds'])

                # rgb=self.env._render(pitch=0)
                # # print(rgb.shape)
                # self.image_list.append(rgb)

                next_state, reward, done, info = self.control.control_step(
                    action, self.force)
                self.motion.index_count()

                total_reward += reward
                self.logging.add_run('ref_angle', np.squeeze(ref_angle))
                self.logging.add_run('ref_vel', np.squeeze(ref_vel))
                # self.logging.add_run('measured_action', np.squeeze(self.control.get_joint_angles()))
                ob = self.env.getObservation()
                ob_filtered = self.env.getFilteredObservation()
                for l in range(len(ob)):
                    self.logging.add_run('observation' + str(l), ob[l])
                    self.logging.add_run('filtered_observation' + str(l),
                                         ob_filtered[l])
                self.logging.add_run('action', action)
                # readings = self.env.getExtendedReading()
                # for key, value in readings.items():
                #     self.logging.add_run(key, value)
                #
                # while 1:
                #     if(time.time()-t)>1.0/self.network_freq:
                #         break

                if done:
                    break
                # print(time.time()-t)
            self.env._stopLoggingVideo()
            self.env.stopRendering()

        ave_reward = total_reward / self.config.conf['test-num']

        # note: the rgb frame capture above is commented out, so image_list may be
        # empty or stale when these clips are written
        clip = ImageSequenceClip(self.image_list, fps=25)
        clip.write_gif('test.gif')
        clip.write_videofile('test.mp4', fps=25, audio=False)

        print(ave_reward)
        self.logging.save_run()
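The foot-separation check above reads each ankle link's world position from element 0 of pybullet's getLinkState tuple; a self-contained sketch of the same query on a stock URDF (the robot and link indices here are stand-ins for the environment's humanoid):

import numpy as np
import pybullet as p
import pybullet_data

p.connect(p.DIRECT)
p.setAdditionalSearchPath(pybullet_data.getDataPath())
robot = p.loadURDF('r2d2.urdf')
pos_a = np.array(p.getLinkState(robot, 0)[0])  # element 0 is the link's world position
pos_b = np.array(p.getLinkState(robot, 1)[0])
print(pos_a - pos_b)                           # displacement between the two links
p.disconnect()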
Example 9
    def test(self):
        total_reward = 0
        for i in range(self.config.conf['test-num']):
            _ = self.env._reset(Kp=self.config.conf['Kp'],
                                Kd=self.config.conf['Kd'],
                                base_pos_nom=[0, 0, 1.5],
                                fixed_base=True)
            # self.env._setupCamera()
            self.env.startRendering()
            self.env._startLoggingVideo()
            self.control.reset(w_imitation=0.5, w_task=0.5)

            for step in range(self.max_step_per_episode):
                # self.env._setupCamera()
                t = time.time()
                state = self.env.getExtendedObservation()

                action, actor_info = self.agent.agent.actor.get_action(state)
                mean = actor_info['mean']
                logstd = actor_info['logstd']
                action = mean
                # action = np.clip(action, self.config.conf['actor-output-bounds'][0],
                #                  self.config.conf['actor-output-bounds'][1])
                action = np.array([action]) if len(
                    np.shape(action)) == 0 else np.array(action)

                f = self.env.rejectableForce_xy(1.0 / self.network_freq)
                rgb = self.env._render()
                print(rgb.shape)
                self.image_list.append(rgb)
                # self.image.set_data(rgb)
                # self.ax.plot([0])
                # plt.pause(0.005)

                # if step == 5 * self.network_freq:
                #     if f[1] == 0:
                #         self.force = np.array([600.0 * self.network_freq / 10.0, 0.0, 0.0])
                #     else:
                #         self.force = np.array([1.0 * f[1], 0, 0])
                #     print(self.force)
                # elif step == 11 * self.network_freq:
                #     if f[0] == 0:
                #         self.force = np.array([-600.0 * self.network_freq / 10.0, 0.0, 0.0])
                #     else:
                #         self.force = [1.0 * f[0], 0, 0]
                #     print(self.force)
                # elif step == 17 * self.network_freq:
                #     if f[2] == 0:
                #         self.force = np.array([0.0, -800.0 * self.network_freq / 10.0, 0.0])
                #     else:
                #         self.force = [0, 1.0 * f[2], 0]
                #     print(self.force)
                # elif step == 23 * self.network_freq:
                #     if f[3] == 0:
                #         self.force = np.array([0.0, 800.0 * self.network_freq / 10.0, 0.0])
                #     else:
                #         self.force = [0, 1.0 * f[3], 0]
                #     print(self.force)
                # else:
                #     self.force = [0, 0, 0]

                next_state, reward, done, info = self.control.control_step(
                    action, self.force,
                    np.zeros((len(self.config.conf['controlled-joints']), )))

                total_reward += reward

                ob = self.env.getObservation()
                ob_filtered = self.env.getFilteredObservation()
                for l in range(len(ob)):
                    self.logging.add_run('observation' + str(l), ob[l])
                    self.logging.add_run('filtered_observation' + str(l),
                                         ob_filtered[l])
                self.logging.add_run('action', action)
                readings = self.env.getExtendedReading()
                for key, value in readings.items():
                    self.logging.add_run(key, value)
                self.logging.add_run('task_reward', info['task_reward'])
                self.logging.add_run('imitation_reward',
                                     info['imitation_reward'])
                self.logging.add_run('total_reward', info['total_reward'])
                #
                # while 1:
                #     if(time.time()-t)>1.0/self.network_freq:
                #         break

                if done:
                    break
            self.env._stopLoggingVideo()
            self.env.stopRendering()

        clip = ImageSequenceClip(self.image_list, fps=25)
        clip.write_gif(self.dir_path + '/test.gif')
        clip.write_videofile(self.dir_path + '/test.mp4', fps=25, audio=False)
        ave_reward = total_reward / self.config.conf['test-num']

        print(ave_reward)
        self.logging.save_run()