Example #1
 def temporal_kernel_regression(self):
     print('--- assembly ---')
     new = np.zeros_like(self.seq_trans[0])
     # smoothing param
     mu = 1.
     for x in range(new.shape[0]):
         for y in range(new.shape[1]):
             center_x = self.patch_size // 2 if x > self.patch_size // 2 else x
             center_y = self.patch_size // 2 if y > self.patch_size // 2 else y
             lower_bounds_x = x - center_x
             upper_bounds_x = x + self.patch_size // 2 - 1 if x + self.patch_size // 2 <= new.shape[0] else new.shape[0]
             lower_bounds_y = y - center_y
             upper_bounds_y = y + self.patch_size // 2 - 1 if y + self.patch_size // 2 <= new.shape[1] else new.shape[1]
             var = []
             patches = self.seq_trans[:,lower_bounds_x:upper_bounds_x,lower_bounds_y:upper_bounds_y]
             for patch in patches:
                 variance = np.sum(np.square(patch - np.mean(patch)))
                 L2 = patch.shape[0]*patch.shape[1]
                 var.append(variance*1./(L2-1))
             ref_patch = patches[np.argmax(var)]
             Us = []
             Urs = []
             for patch in patches:
                 U = np.sum(np.square(patch - ref_patch))
                 U /= L2
                 sigma_n2 =  median_absolute_deviation(patch)
                 U -= 2*sigma_n2
                 Ux = np.exp(-U/(mu**2))
                 Us.append(Ux)
                 Urs.append(Ux*patch[center_x,center_y])
             Us = np.array(Us)
             Urs = np.array(Urs)
             new[x,y] = np.sum(Urs)/np.sum(Us)
     writer = imageio.get_writer('diflim.jpeg',quality=100)
     writer.append_data(new)
     writer.close()
     writer = imageio.get_writer('diflim.tif')
     writer.append_data(new)
     writer.close()
     fig = plt.figure(figsize=(20,8), frameon=False)
     fig.subplots_adjust(hspace=0)
     fig.subplots_adjust(wspace=0)
     ax1 = fig.add_subplot(1, 3, 1)
     ax1.imshow(self.average, cmap='Greys_r',interpolation='nearest')
     ax1.set_title('average')
     ax2 = fig.add_subplot(1, 3, 2)
     ax2.imshow(new, cmap='Greys_r',interpolation='nearest')
     ax2.set_title('new')
     ax3 = fig.add_subplot(1, 3, 3)
     ax3.plot(var)
     fig.set_tight_layout(True)
     plt.show()
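The method above hands the float-valued result array straight to the JPEG and TIFF writers. A minimal standalone sketch of converting such an array to 8-bit before writing the JPEG (the helper name and the min-max scaling are assumptions for illustration, not part of the original class):

import imageio
import numpy as np

def save_float_as_jpeg(arr, path='diflim.jpeg'):
    # Rescale the float array into 0..255 and cast to uint8 so the JPEG
    # plugin receives 8-bit data rather than floats.
    arr = np.asarray(arr, dtype=np.float64)
    lo, hi = arr.min(), arr.max()
    scaled = np.zeros_like(arr) if hi == lo else (arr - lo) / (hi - lo) * 255.0
    imageio.imwrite(path, scaled.astype(np.uint8), quality=100)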
Example #2
def test_writer_pixelformat_size_verbose(tmpdir):
    # Check that video pixel format and size get written as expected.
    need_internet()
    # Make sure verbose option works and that default pixelformat is yuv420p
    tmpf = tmpdir.join('test.mp4')
    W = imageio.get_writer(str(tmpf), ffmpeg_log_level='debug')
    nframes = 4  # Number of frames in video
    for i in range(nframes):
        # Use size divisible by 16 or it gets changed.
        W.append_data(np.zeros((64, 64, 3), np.uint8))
    W.close()

    # Check that video is correct size & default output video pixel format
    # is correct
    W = imageio.get_reader(str(tmpf))
    assert len(W) == nframes
    assert "64x64" in W._stderr_catcher.header
    assert "yuv420p" in W._stderr_catcher.header

    # Now check that macroblock size gets turned off if requested
    W = imageio.get_writer(str(tmpf), macro_block_size=None,
                           ffmpeg_log_level='debug')
    for i in range(nframes):
        W.append_data(np.zeros((100, 106, 3), np.uint8))
    W.close()
    W = imageio.get_reader(str(tmpf))
    assert len(W) == nframes
    assert "106x100" in W._stderr_catcher.header
    assert "yuv420p" in W._stderr_catcher.header

    # Now double check values different than default work
    W = imageio.get_writer(str(tmpf), macro_block_size=4,
                           ffmpeg_log_level='debug')
    for i in range(nframes):
        W.append_data(np.zeros((64, 65, 3), np.uint8))
    W.close()
    W = imageio.get_reader(str(tmpf))
    assert len(W) == nframes
    assert "68x64" in W._stderr_catcher.header
    assert "yuv420p" in W._stderr_catcher.header

    # Now check that the macroblock works as expected for the default of 16
    W = imageio.get_writer(str(tmpf), ffmpeg_log_level='debug')
    for i in range(nframes):
        W.append_data(np.zeros((111, 140, 3), np.uint8))
    W.close()
    W = imageio.get_reader(str(tmpf))
    assert len(W) == nframes
    # Check for warning message with macroblock
    assert "144x112" in W._stderr_catcher.header
    assert "yuv420p" in W._stderr_catcher.header
Example #3
def test_writer_pixelformat_size_verbose(tmpdir):
    # Check that video pixel format and size get written as expected.

    # Make sure verbose option works and that default pixelformat is yuv420p
    tmpf = tmpdir.join("test.mp4")
    W = imageio.get_writer(str(tmpf), ffmpeg_log_level="warning")
    nframes = 4  # Number of frames in video
    for i in range(nframes):
        # Use size divisible by 16 or it gets changed.
        W.append_data(np.zeros((64, 64, 3), np.uint8))
    W.close()

    # Check that video is correct size & default output video pixel format
    # is correct
    W = imageio.get_reader(str(tmpf))
    assert W.count_frames() == nframes
    assert W._meta["size"] == (64, 64)
    assert "yuv420p" == W._meta["pix_fmt"]

    # Now check that macroblock size gets turned off if requested
    W = imageio.get_writer(str(tmpf), macro_block_size=1, ffmpeg_log_level="warning")
    for i in range(nframes):
        W.append_data(np.zeros((100, 106, 3), np.uint8))
    W.close()
    W = imageio.get_reader(str(tmpf))
    assert W.count_frames() == nframes
    assert W._meta["size"] == (106, 100)
    assert "yuv420p" == W._meta["pix_fmt"]

    # Now double check values different than default work
    W = imageio.get_writer(str(tmpf), macro_block_size=4, ffmpeg_log_level="warning")
    for i in range(nframes):
        W.append_data(np.zeros((64, 65, 3), np.uint8))
    W.close()
    W = imageio.get_reader(str(tmpf))
    assert W.count_frames() == nframes
    assert W._meta["size"] == (68, 64)
    assert "yuv420p" == W._meta["pix_fmt"]

    # Now check that the macroblock works as expected for the default of 16
    W = imageio.get_writer(str(tmpf), ffmpeg_log_level="debug")
    for i in range(nframes):
        W.append_data(np.zeros((111, 140, 3), np.uint8))
    W.close()
    W = imageio.get_reader(str(tmpf))
    assert W.count_frames() == nframes
    # Check for warning message with macroblock
    assert W._meta["size"] == (144, 112)
    assert "yuv420p" == W._meta["pix_fmt"]
Example #4
def writeVideo(filename, images):
    size = images[0].size
    writer = imageio.get_writer(filename, fps=30)
    for image in images:
        writer.append_data(np.asarray(image))
    writer.close()
    print("DONE")
Example #5
    def avi(self, path, filtered=True, override=False):
        """Exports filtered event images to an avi file

        Parameters
        ----------
        path: str
            Path to an .avi file. The ending .avi is added automatically.
        filtered: bool
            If set to `True`, only the filtered data (index in ds._filter)
            are used.
        override: bool
            If set to `True`, an existing file ``path`` will be overridden.
            If set to `False`, raises `OSError` if ``path`` exists.

        Notes
        -----
        Raises OSError if current dataset does not contain image data
        """
        path = pathlib.Path(path)
        ds = self.rtdc_ds
        # Make sure that path ends with .avi
        if path.suffix != ".avi":
            path = path.with_name(path.name + ".avi")
        # Check if file already exist
        if not override and path.exists():
            raise OSError("File already exists: {}\n".format(
                str(path).encode("ascii", "ignore")) +
                "Please use the `override=True` option.")
        # Start exporting
        if "image" in ds:
            # Open video for writing
            vout = imageio.get_writer(uri=path,
                                      format="FFMPEG",
                                      fps=25,
                                      codec="rawvideo",
                                      pixelformat="yuv420p",
                                      macro_block_size=None,
                                      ffmpeg_log_level="error")
            # write the filtered frames to avi file
            for evid in np.arange(len(ds)):
                # skip frames that were filtered out
                if filtered and not ds._filter[evid]:
                    continue
                try:
                    image = ds["image"][evid]
                except BaseException:
                    warnings.warn("Could not read image {}!".format(evid),
                                  NoImageWarning)
                    continue
                else:
                    if np.isnan(image[0, 0]):
                        # This is a nan-valued image
                        image = np.zeros_like(image, dtype=np.uint8)
                # Convert image to RGB
                image = image.reshape(image.shape[0], image.shape[1], 1)
                image = np.repeat(image, 3, axis=2)
                vout.append_data(image)
        else:
            msg = "No image data to export: dataset {} !".format(ds.title)
            raise OSError(msg)
Example #6
def image_gif_exporter(images, out_path, fps=30, loop=0, duration=None,
                       **kwargs):
    r"""
    Uses imageio to export the images to a GIF. Please see the imageio
    documentation for more information.

    Parameters
    ----------
    fps : `float`, optional
        The number of frames per second. If ``duration`` is not given, the
        duration for each frame is set to 1/fps.
    loop : `int`, optional
        The number of iterations. 0 means loop indefinitely
    duration : `float` or list of `float`, optional
        The duration (in seconds) of each frame. Either specify one value
        that is used for all frames, or one value for each frame.
    """
    import imageio

    writer = imageio.get_writer(str(out_path), mode='I', fps=fps,
                                loop=loop, duration=duration)

    for v in images:
        v = v.as_imageio()
        writer.append_data(v)
    writer.close()
Example #7
def test_series_unclosed():
    im1 = imageio.imread("imageio:chelsea.png")
    ims1 = [im1, im1 * 0.8, im1 * 0.5]

    fname = os.path.join(test_dir, "chelseam.bsdf")
    w = imageio.get_writer(fname)
    for im in ims1:
        w.append_data(im)
    w._close = lambda: None  # nope, leave stream open
    w.close()

    # read non-streaming, reads all frames on opening (but skips over blobs)
    r = imageio.get_reader(fname)
    assert r.get_length() == 3  # not np.inf because not streaming

    # read streaming and get all
    r = imageio.get_reader(fname, random_access=False)
    assert r.get_length() == np.inf
    #
    ims2 = [im for im in r]
    assert len(ims2) == 3 and all(np.all(ims1[i] == ims2[i]) for i in range(3))

    # read streaming and read one
    r = imageio.get_reader(fname, random_access=False)
    assert r.get_length() == np.inf
    #
    assert np.all(ims1[2] == r.get_data(2))
Example #8
    def record_animation(self, name, n_pic=10, bgcolor=None):
        """Record an animated object and save as a *.gif file.

        Note that this method:

            * Can only be used with 3D objects.
            * Requires the python package imageio

        Parameters
        ----------
        name : string
            Name of the gif file (e.g. 'myfile.gif')
        n_pic : int | 10
            Number of pictures to use to render the gif.
        bgcolor : string, tuple, list | None
            Background color.
        """
        import imageio
        writer = imageio.get_writer(name)
        canvas = self._get_parent(bgcolor, False, False)
        for k in range(n_pic):
            im = canvas.canvas.render()
            writer.append_data(im)
            self.camera.azimuth += 360. / n_pic
        writer.close()
Example #9
File: video.py  Project: vestri/menpo
def image_gif_exporter(images, out_path, fps=30, loop=0, duration=None,
                       **kwargs):
    r"""
    Uses imageio to export the images to a GIF. Please see the imageio
    documentation for more information.

    Parameters
    ----------
    images : `list` of :map:`Image`
        List of Menpo images to export as a video.
    out_path : `Path`
        Path to save the video to.
    fps : `float`, optional
        The number of frames per second. If ``duration`` is not given, the
        duration for each frame is set to 1/fps.
    loop : `int`, optional
        The number of iterations. 0 means loop indefinitely
    duration : `float` or list of `float`, optional
        The duration (in seconds) of each frame. Either specify one value
        that is used for all frames, or one value for each frame.
    """
    import imageio

    writer = imageio.get_writer(str(out_path), mode='I', fps=fps,
                                loop=loop, duration=duration)

    for v in images:
        v = v.pixels_with_channels_at_back(out_dtype=np.uint8)
        writer.append_data(v)
    writer.close()
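As the docstring notes, duration (seconds per frame) takes precedence over fps, and loop=0 repeats the GIF forever. A plain-imageio sketch of the same keywords, independent of Menpo images (the file name and frame contents are made up):

import imageio
import numpy as np

frames = [np.full((32, 32, 3), v, np.uint8) for v in (0, 128, 255)]
with imageio.get_writer('pulse.gif', mode='I', duration=0.5, loop=0) as writer:
    for frame in frames:
        writer.append_data(frame)  # each frame shown for 0.5 s, looping forever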
Example #10
	def generate_animation(self, output_suffix):
		if not _imageio_imported:
			return
		with imageio.get_writer('./visualize/'+self.file_prefix+"-"+output_suffix+".gif", mode='I', duration = 0.15) as writer:
			for image_path in self.images:
				image = imageio.imread(image_path)
				writer.append_data(image)
Example #11
def video_saving_IO(fileName, imgSequence):
    assert(fileName.split('.')[-1]=='avi')
    writer = imageio.get_writer(fileName)
    for image in imgSequence:
        writer.append_data(image)
    # Release everything if job is finished
    writer.close()
    print('[*] Finished saving {} at {}'.format(fileName, os.path.join(os.getcwd(), fileName)))
Example #12
def save_video(queue, filename, fps):
    writer = imageio.get_writer(filename, fps=fps)
    while True:
        frame = queue.get()
        if frame is None:
            break
        writer.append_data(frame)
    writer.close()
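A hypothetical producer for the loop above (the thread, frame size, and file name are illustrative, not from the original project): frames are pushed onto the queue, and a final None sentinel tells save_video to stop and close the writer.

import queue
import threading
import numpy as np

frame_queue = queue.Queue()
writer_thread = threading.Thread(target=save_video, args=(frame_queue, 'out.mp4', 30))
writer_thread.start()
for _ in range(60):
    frame_queue.put(np.zeros((64, 64, 3), np.uint8))  # dummy black frames
frame_queue.put(None)  # sentinel: save_video closes the file and returns
writer_thread.join()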
Example #13
 def __init__(self, path):
     """
     Args:
         path: The file path to save gif.
     """
     _check_imageio()
     if not path.endswith('.gif'):
         path += '.gif'
     self.writer = imageio.get_writer(path, mode='I', loop=1)
Example #14
def bounds2video(bounds_file,video_in, video_out, subsampleRate, speedup):
    """
        This function reads a .csv file with bound and creates a split video based on them.
    """

    # reading the bounds
    bounds = pd.read_csv(bounds_file, index_col = None)

    fig = plt.figure(figsize = (15,10))

    # setting up the video reader and writer
    vid_in = imageio.get_reader(video_in,'ffmpeg')
    vid_out = imageio.get_writer(video_out,fps = 30/subsampleRate*speedup)

    # create a binary indicator for where the scenes are
    frame_idx = np.arange(0,len(vid_in),subsampleRate)
    binary = np.zeros((len(frame_idx),))

    print(subsampleRate)

    for lb,ub in zip(bounds['LB'],bounds['UB']):
        binary[round(lb/subsampleRate*10):round(ub/subsampleRate*10)] = 1




    for i in range(len(frame_idx)):
    # for i in range(100):

        print(i)


        im = vid_in.get_data(frame_idx[i])
        plt.subplot(211)
        plt.imshow(im, aspect = 'equal')
        plt.axis('off')
        plt.title('Raw Video')


        plt.subplot(212)
        plt.imshow(im*int(binary[i]), aspect = 'equal')
        plt.axis('off')
        plt.title('Static Scenes')


        # convert the plot
        fig.canvas.draw()
        fig_data = np.frombuffer(fig.canvas.tostring_rgb(), dtype=np.uint8)
        fig_data = fig_data.reshape(fig.canvas.get_width_height()[::-1] + (3,))

        # vid_out.append_data(fig_data.astype(np.dtype('uint8')))
        vid_out.append_data(fig_data)
        # vid_out.append_data(im)
        plt.clf()

    vid_in.close()
    vid_out.close()
Example #15
def createGif(folder='figs', subfolder=""):
    f=folder + "/" + subfolder + "/"
    with imageio.get_writer(f+'movie.gif', mode='I') as writer:
        for file in os.listdir(f):
            if file.endswith(".png"):
                filename = f + str(file)
                print(filename)
                image = imageio.imread(filename)
                writer.append_data(image)
Example #16
 def setup_recording_params(self, save_name):
     """Initializes recording parameters for camera"""
     if self.cmr_type == FIREFLY_CAMERA:
         save_name = save_name.encode()
         self.fc_context.openAVI(save_name, 30, 1000000)
         self.fc_context.set_strobe_mode(3, True, 1, 0, 10)
     elif self.cmr_type == MINIMIC_CAMERA:
         self.mini_mic_video_writer = imageio.get_writer(save_name, mode='I', fps=30, codec='ffv1', quality=10,
                                                         pixelformat='yuv420p', macro_block_size=None,
                                                         ffmpeg_log_level='error')
Example #17
def test_writer_ffmpeg_params(tmpdir):
    need_internet()
    # Test optional ffmpeg_params with a valid option
    tmpf = tmpdir.join('test.mp4')
    W = imageio.get_writer(str(tmpf), ffmpeg_params=['-vf', 'scale=320:240'])
    for i in range(10):
        W.append_data(np.zeros((100, 100, 3), np.uint8))
    W.close()
    W = imageio.get_reader(str(tmpf))
    # Check that the optional argument scaling worked.
    assert "320x240" in W._stderr_catcher.header
Example #18
def convert_png2mp4(imgdir, filename, fps, delete_imgdir=False):
    dirname = os.path.dirname(filename)
    if not os.path.exists(dirname):
        os.makedirs(dirname)

    try:
        writer = imageio.get_writer(filename, fps=fps)
    except Exception:
        imageio.plugins.ffmpeg.download()
        writer = imageio.get_writer(filename, fps=fps)

    imgs = sorted(glob("{}/*.png".format(imgdir)))
    # print(imgs)
    for img in imgs:
        im = imageio.imread(img)
        writer.append_data(im)
    
    writer.close()
    
    if delete_imgdir: shutil.rmtree(imgdir)
Example #19
def test_writer_wmv(tmpdir):
    # WMV has different default codec, make sure it works.
    tmpf = tmpdir.join("test.wmv")
    W = imageio.get_writer(str(tmpf), ffmpeg_params=["-v", "info"])
    for i in range(10):
        W.append_data(np.zeros((100, 100, 3), np.uint8))
    W.close()

    W = imageio.get_reader(str(tmpf))
    # Check that default encoder is msmpeg4 for wmv
    assert W._meta["codec"].startswith("msmpeg4")
Example #20
 def make_mp4(ims, name="", fps=20, scale=1, extend=30):
     print("Making mp4...")
     ims += [ims[-1]] * extend
     with imageio.get_writer("{}.mp4".format(name), mode='I', fps=fps) as writer:
         for im in ims:
             if scale != 1:
                 new_shape = (int(im.shape[1] * scale), int(im.shape[0] * scale))
                 interpolation = cv2.INTER_CUBIC if scale > 1 else cv2.INTER_AREA
                 im = cv2.resize(im, new_shape, interpolation=interpolation)
             writer.append_data(im[..., ::-1])  # reverse channel order (BGR -> RGB) for imageio
     print("Done")
Example #21
def test_writer_ffmpeg_params(tmpdir):
    # Test optional ffmpeg_params with a valid option
    # Also putting in an image size that is not divisible by macroblock size
    # To check that the -vf scale overwrites what it does.
    tmpf = tmpdir.join("test.mp4")
    W = imageio.get_writer(str(tmpf), ffmpeg_params=["-vf", "scale=320:240"])
    for i in range(10):
        W.append_data(np.zeros((100, 100, 3), np.uint8))
    W.close()
    W = imageio.get_reader(str(tmpf))
    # Check that the optional argument scaling worked.
    assert W._meta["size"] == (320, 240)
Example #22
def test_writer_wmv(tmpdir):
    need_internet()
    # WMV has different default codec, make sure it works.
    tmpf = tmpdir.join('test.wmv')
    W = imageio.get_writer(str(tmpf), ffmpeg_params=['-v', 'info'])
    for i in range(10):
        W.append_data(np.zeros((100, 100, 3), np.uint8))
    W.close()

    W = imageio.get_reader(str(tmpf))
    # Check that default encoder is msmpeg4 for wmv
    assert "msmpeg4" in W._stderr_catcher.header
Example #23
File: video.py  Project: dmaccarthy/sc8pr
 def encodef(fn, dest, fps=None, progress=None):
     "Convert an s8v file to using ffmpeg"
     vid = Video(fn, start=0, end=1)
     if fps is None: fps = vid.meta.get("frameRate")
     if fps is None: fps = 30
     i = 0
     with im.get_writer(dest, fps=fps) as writer:
         for img in Video._iter(fn):
             writer.append_data(ImageIO.frameData(img))
             if progress:
                 progress(i)
                 i += 1
Example #24
File: video.py  Project: dmaccarthy/sc8pr
 def encodev(vid, dest, fps=None, progress=None):
     "Save a movie file from a Video instance"
     if isinstance(vid, Video): vid = vid.scaleFrames()
     i, n = 1, len(vid)
     if fps is None: fps = vid.meta.get("frameRate")
     if fps is None: fps = 30
     with im.get_writer(dest, fps=fps) as writer:
         for img in vid.frames():
             writer.append_data(ImageIO.frameData(img))
             if progress:
                 progress(i, n)
                 i += 1
Example #25
def test_writer_file_properly_closed(tmpdir):
    # Test to catch if file is correctly closed.
    # Otherwise it won't play in most players. This seems to occur on windows.
    tmpf = tmpdir.join("test.mp4")
    W = imageio.get_writer(str(tmpf))
    for i in range(12):
        W.append_data(np.zeros((100, 100, 3), np.uint8))
    W.close()
    W = imageio.get_reader(str(tmpf))
    # If Duration: N/A reported by ffmpeg, then the file was not
    # correctly closed.
    # This will cause the file to not be readable in many players.
    assert 1.1 < W._meta["duration"] < 1.3
Example #26
    def convert_to_gif(inputpath, target_format):
        """Reference: http://imageio.readthedocs.io/en/latest/examples.html#convert-a-movie"""
        outputpath = os.path.splitext(inputpath)[0] + target_format

        reader = imageio.get_reader(inputpath)
        fps = reader.get_meta_data()['fps']

        writer = imageio.get_writer(outputpath, fps=fps)
        for i, im in enumerate(reader):
            sys.stdout.write("\rframe {0}".format(i))
            sys.stdout.flush()
            writer.append_data(im)
        writer.close()
Example #27
def saveTimelapse(video_timelapse, filename):
    """
        saveTimelapse converts a video (list of frames) into an .mp4 video
    """
    import imageio

    imageio.plugins.ffmpeg.download()

    writer = imageio.get_writer(filename, fps=1)

    for im in video_timelapse:
        writer.append_data(im)
    writer.close()
Example #28
def test_writer_ffmpeg_params(tmpdir):
    need_internet()
    # Test optional ffmpeg_params with a valid option
    # Also putting in an image size that is not divisible by macroblock size
    # To check that the -vf scale overwrites what it does.
    tmpf = tmpdir.join('test.mp4')
    W = imageio.get_writer(str(tmpf), ffmpeg_params=['-vf', 'scale=320:240'])
    for i in range(10):
        W.append_data(np.zeros((100, 100, 3), np.uint8))
    W.close()
    W = imageio.get_reader(str(tmpf))
    # Check that the optional argument scaling worked.
    assert "320x240" in W._stderr_catcher.header
Example #29
def saveVideo_IO(vid, fileName, StartFrameID, EndFrameID, Label_ID=True):
	assert(fileName.split('.')[-1]=='avi')
	writer = imageio.get_writer(fileName)
	bar = Bar('Processing', max=(EndFrameID - StartFrameID)) 
	for i in range(StartFrameID, EndFrameID):
		img = vid.get_data(i)
		if Label_ID:
			cv2.rectangle(img,(0,0),(350,75),(0,0,0),-1)
			cv2.putText(img, 'FrameId({})'.format(str(i)), (50,50), cv2.FONT_HERSHEY_SIMPLEX, 1.5, (255, 255, 255), 4)
		writer.append_data(img) # Write out frame to video  
		bar.next()
	bar.finish()
	writer.close()
	print ('[*] Finish Saving {} at {}'.format(fileName, os.getcwd()))
Example #30
def test_reverse_read(tmpdir):
    # Ensure we can read a file in reverse without error.

    tmpf = tmpdir.join("test_vid.mp4")
    W = imageio.get_writer(str(tmpf))
    for i in range(120):
        W.append_data(np.zeros((64, 64, 3), np.uint8))
    W.close()

    W = imageio.get_reader(str(tmpf))
    for i in range(W.count_frames() - 1, 0, -1):
        print("reading", i)
        W.get_data(i)
    W.close()
Example #31
        print('Training (iteration {0})...'.format(it + 1))
        train_loss = ppo_agent.train(replay_buffer.gather_all())
        step = ppo_agent.train_step_counter.numpy()
        print('Step = {0}: Loss = {1}'.format(step, train_loss.loss))
        # Save to checkpoint
        checkpointer.save(global_step)
        if it % eval_interval == 0:
            reward = compute_total_reward(eval_env, ppo_agent.policy)
            print('Step = {0}: Average reward = {1}'.format(step, reward))
            rewards.append([reward])
            # Save policy
            policy_saver.save(os.path.relpath('ppo_policy'))
            # View a video of the robot
            video_filename = 'ppo_minitaur_{0}.mp4'.format(video_num)
            print('Creating video...')
            writer = imageio.get_writer(video_filename, fps=30)
            ts = eval_env.reset()
            writer.append_data(eval_py_env.render())
            while not ts.is_last():
                ts = eval_env.step(ppo_agent.policy.action(ts).action)
                writer.append_data(eval_py_env.render())
            writer.close()
            # Show the video
            os.startfile(video_filename)
            # Increment counter
            video_num += 1

    # View the average reward over training time
    steps = range(0, num_iter + 1, eval_interval)
    plt.plot(steps, rewards)
    plt.ylabel('Average reward')
Example #32
import os
import imageio
import glob

## Make movie
writer = imageio.get_writer('fire_sa_10.mp4', fps=12)

path = '/Users/jordanwalker/Downloads/lance-modis-sa-name/'

filelist = glob.glob(os.path.join(path, '*.jpg'))
filenames = sorted(x for x in filelist)

for im in filenames:
    writer.append_data(imageio.imread(im))
writer.close()
Example #33
def spectra_plot_diff(pair):
    pri_wl, pri_spec, pri_wl_uv, pri_spec_uv, sec_wl, sec_spec, sec_wl_uv, sec_spec_uv, pair = chooser(
        pair)
    print('sec spec uv', sec_spec_uv)
    fig, ax = plt.subplots(1, 1, figsize=(10, 10))
    i = 0
    start = pri_spec[0]
    start_uv = pri_spec_uv[0]
    while i < 1000:
        row = pri_spec[i]
        row_uv = pri_spec_uv[i]
        temp = []
        temp_uv = []
        out = []
        out_uv = []
        for num in row:
            f_num = float(num)
            temp.append(f_num)
        temp_uv = []
        for num_uv in row_uv:
            f_num_uv = float(num_uv)
            temp_uv.append(f_num_uv)
        fig, ax = plt.subplots(1, 1, figsize=(10, 10))
        zip_object = zip(start, temp)
        for a, b in zip_object:
            #           print((a-b))
            out.append(a / b)
        zip_object_uv = zip(start_uv, temp_uv)
        for a, b in zip_object_uv:
            #         print((a/b))
            out_uv.append(a / b)
        ax.plot(pri_wl, out)
        ax.plot(pri_wl_uv, out_uv)
        ax.set_xlim(0.1, 5)
        ax.set_ylim(0.8, 1.2)
        fig.savefig(
            '/gscratch/vsm/mwjl/projects/binary/scripts/scratch/wn_rowP_diff' +
            str(i) + str(pair) + '.png')
        i = i + 10

    data = []
    i = 0
    start_sec = sec_spec[0]
    start_sec_uv = sec_spec_uv[0]
    fig, ax = plt.subplots(1, 1, figsize=(10, 10))
    while i < 1000:
        row = sec_spec[i]
        row_uv = sec_spec_uv[i]
        temp = []
        temp_uv = []
        out = []
        out_uv = []
        for num in row:
            f_num = float(num)
            temp.append(f_num)
        for num in row_uv:
            f_num = float(num)
            temp_uv.append(f_num)
        fig, ax = plt.subplots(1, 1, figsize=(10, 10))
        zip_object = zip(start_sec, temp)
        for a, b in zip_object:
            #           print((a-b))
            out.append(a / b)


#        print('************* temp uv *********', np.shape(temp_uv))
        zip_object_uv = zip(start_sec_uv, temp_uv)
        for a, b in zip_object_uv:
            #           print('ab', a,b)
            out_uv.append(a / b)
        fig, ax = plt.subplots(1, 1, figsize=(10, 10))
        ax.plot(sec_wl, out)
        ax.plot(sec_wl_uv, out_uv)
        ax.set_xlim(0.1, 5)
        ax.set_ylim(0.8, 1.2)
        fig.savefig(
            '/gscratch/vsm/mwjl/projects/binary/scripts/scratch/wn_rowS_diff' +
            str(i) + str(pair) + '.png')
        i = i + 10
    nums = range(0, 1000, 10)
    inputs2 = []
    gif_path2 = '/gscratch/vsm/mwjl/projects/binary/plots/sec_star_diff' + str(
        pair) + '.gif'
    for i in nums:
        name = "/gscratch/vsm/mwjl/projects/binary/scripts/scratch/wn_rowS_diff" + str(
            i) + str(pair) + ".png"
        inputs2.append(name)
    plt.figure(figsize=(4, 4))
    with imageio.get_writer(gif_path2, mode='I') as writer:
        for i in range(len(inputs2)):
            writer.append_data(imageio.imread(inputs2[i].format(i=i)))

    inputs1 = []
    gif_path1 = '/gscratch/vsm/mwjl/projects/binary/plots/pri_star_diff' + str(
        pair) + '.gif'
    for i in nums:
        name = "/gscratch/vsm/mwjl/projects/binary/scripts/scratch/wn_rowP_diff" + str(
            i) + str(pair) + ".png"
        inputs1.append(name)
    plt.figure(figsize=(4, 4))
    with imageio.get_writer(gif_path1, mode='I') as writer:
        for i in range(len(inputs1)):
            writer.append_data(imageio.imread(inputs1[i].format(i=i)))
Example #34
matplotlib.rcParams["backend"] = "Agg"

from collections import defaultdict
from io import StringIO
from matplotlib import pyplot as plt
from PIL import Image

# This is needed since the notebook is stored in the object_detection folder.
sys.path.append("..")
from object_detection.utils import ops as utils_ops

import imageio

reader = imageio.get_reader('video/1.mp4')
fps = reader.get_meta_data()['fps']
writer = imageio.get_writer('video/output2.mp4', fps=fps)

# ## Object detection imports
# Here are the imports from the object detection module.

from utils import label_map_util

from utils import visualization_utils as vis_util

# # Model preparation

# What model to download.
MODEL_NAME = 'faster_rcnn_resnet101_coco_2018_01_28'
MODEL_FILE = MODEL_NAME + '.tar.gz'
DOWNLOAD_BASE = 'http://download.tensorflow.org/models/object_detection/'
Example #35
def run(env_name,
        agent_type,
        root_dir="result_dir_",
        trial_num=None,
        n_step=25,
        exploration_min=0.1,
        parameters=Parameters()):

    logging.set_verbosity(logging.INFO)

    tf.compat.v1.enable_v2_behavior()
    #tf.enable_eager_execution()

    ### Params ###

    result_dir = root_dir + agent_type + "_" + (
        trial_num
        if trial_num is not None else datetime.today().strftime('%Y-%m-%d'))
    summary_interval = parameters.summary_interval
    conv_layer_params = parameters.conv_layer_params
    fc_layer_params = parameters.fc_layer_params
    target_update_period = parameters.target_update_period
    exploration_min = exploration_min
    replay_buffer_capacity = parameters.replay_buffer_capacity
    target_update_tau = parameters.target_update_tau
    collect_episodes_per_iteration = parameters.collect_episodes_per_iteration
    num_parallel_environments = parameters.num_parallel_environments
    use_tf_functions = parameters.use_tf_functions
    initial_collect_episodes = parameters.initial_collect_episodes
    log_interval = parameters.log_interval
    checkpoint_interval = parameters.checkpoint_interval

    ### TensorBoard summary settings ###

    train_summary_writer = tf.compat.v2.summary.create_file_writer(
        result_dir, flush_millis=10000)
    train_summary_writer.set_as_default()

    global_step = tf.compat.v1.train.get_or_create_global_step()

    with tf.compat.v2.summary.record_if(
            lambda: tf.math.equal(global_step % summary_interval, 0)):

        ### Training Environment setup ###

        train_env = tf_py_environment.TFPyEnvironment(
            parallel_py_environment.ParallelPyEnvironment([
                lambda: suite_atari.
                load(env_name,
                     max_episode_steps=50000,
                     gym_env_wrappers=suite_atari.
                     DEFAULT_ATARI_GYM_WRAPPERS_WITH_STACKING)
            ] * num_parallel_environments))

        eval_py_env = suite_atari.load(
            env_name,
            max_episode_steps=50000,
            gym_env_wrappers=suite_atari.
            DEFAULT_ATARI_GYM_WRAPPERS_WITH_STACKING)
        eval_env = tf_py_environment.TFPyEnvironment(eval_py_env)
        environment_episode_metric = tf_metrics.NumberOfEpisodes()
        step_metrics = [
            tf_metrics.EnvironmentSteps(),
            environment_episode_metric,
        ]

        ### Agent specific setup ##

        if agent_type == 'ddqn':

            #Epsilon decay
            epsilon = tf.compat.v1.train.polynomial_decay(
                learning_rate=1.0,
                global_step=global_step,
                decay_steps=10000,  #5000 for experiment
                end_learning_rate=exploration_min)

            epsilon_metric = EpsilonMetric(epsilon=epsilon, name="Epsilon")

            agent = Ddq_Agent(convolutional_layers=conv_layer_params,
                              target_update_tau=target_update_tau,
                              target_update_period=target_update_period,
                              fully_connected_layers=fc_layer_params,
                              tf_env=train_env,
                              n_step_update=n_step,
                              global_step=global_step,
                              epsilon_greedy=epsilon)
            # Metrics for Tensorboard
            train_metrics = step_metrics + [
                tf_metrics.AverageReturnMetric(),
                tf_metrics.AverageEpisodeLengthMetric(), epsilon_metric
            ]
        elif agent_type == 'ppo':

            agent = Ppo_Agent(convolutional_layers=conv_layer_params,
                              fully_connected_layers=fc_layer_params,
                              tf_env=train_env,
                              global_step=global_step,
                              entropy_regularization=exploration_min)
            # Metrics for Tensorboard
            train_metrics = step_metrics + [
                tf_metrics.AverageReturnMetric(),
                tf_metrics.AverageEpisodeLengthMetric()
            ]
        else:
            raise ValueError('No appropriate agent found')

        eval_metrics = [
            tf_metrics.AverageReturnMetric(),
            tf_metrics.AverageEpisodeLengthMetric()
        ]

        agent.initialize()

        print("agent initialized")

        # Define policy - eval will choose optimal steps, collect is for training and has exploration
        eval_policy = agent.policy
        collect_policy = agent.collect_policy

        # Define the buffer

        replay_buffer = tf_uniform_replay_buffer.TFUniformReplayBuffer(
            agent.collect_data_spec,
            batch_size=num_parallel_environments,
            max_length=replay_buffer_capacity)

        # Create the driver (the object that uses the policy to interact
        # with the Environment and generates data to train with)

        collect_driver = dynamic_episode_driver.DynamicEpisodeDriver(
            train_env,
            collect_policy,
            observers=[replay_buffer.add_batch] + train_metrics,
            num_episodes=collect_episodes_per_iteration)
        eval_driver = dynamic_episode_driver.DynamicEpisodeDriver(
            eval_env, eval_policy, observers=eval_metrics, num_episodes=10)

        # Checkpoints for model and data saving

        train_checkpointer = common.Checkpointer(
            ckpt_dir=result_dir,
            agent=agent,
            global_step=global_step,
            metrics=metric_utils.MetricsGroup(train_metrics, 'train_metrics'))

        policy_checkpointer = common.Checkpointer(ckpt_dir=os.path.join(
            result_dir, 'policy'),
                                                  policy=eval_policy,
                                                  global_step=global_step)

        train_checkpointer.initialize_or_restore()
        policy_checkpointer.initialize_or_restore()

        if use_tf_functions:
            # To speed up collect use common.function.
            collect_driver.run = common.function(collect_driver.run)
            agent.train = common.function(agent.train)
            eval_driver.run = common.function(eval_driver.run)

        ### Data collection run for off-policy agents (currently supporting DDQN)
        if agent_type == 'ddqn':

            # Collect initial replay data.
            logging.info(
                'Initializing replay buffer by collecting experience for %d steps with '
                'a random policy.', initial_collect_episodes)
            #might need to change for ppo
            initial_collect_driver = dynamic_episode_driver.DynamicEpisodeDriver(
                train_env,
                agent.collect_policy,
                observers=[replay_buffer.add_batch],
                num_episodes=initial_collect_episodes)

            initial_collect_driver.run()

        if agent_type == 'ddqn':
            time_step = None
            policy_state = collect_policy.get_initial_state(
                train_env.batch_size)

            timed_at_step = global_step.numpy()
            time_acc = 0

            dataset = replay_buffer.as_dataset(num_parallel_calls=3,
                                               sample_batch_size=30,
                                               num_steps=n_step +
                                               1).prefetch(3)
            iterator = iter(dataset)
            end_time = time.time() + (12 * 60 * 60)
            while time.time() < end_time:
                start_time = time.time()

                time_step, policy_state = collect_driver.run(
                    time_step=time_step,
                    policy_state=policy_state,
                )
                experience, _ = next(iterator)
                train_loss = agent.train(experience)
                time_acc += time.time() - start_time

                if global_step.numpy() % log_interval == 0:
                    logging.info('step = %d, loss = %f', global_step.numpy(),
                                 train_loss.loss)
                    steps_per_sec = (global_step.numpy() -
                                     timed_at_step) / time_acc
                    logging.info('%.3f steps/sec', steps_per_sec)
                    tf.compat.v2.summary.scalar(name='global_steps_per_sec',
                                                data=steps_per_sec,
                                                step=global_step)
                    timed_at_step = global_step.numpy()
                    time_acc = 0

                for train_metric in train_metrics:
                    train_metric.tf_summaries(train_step=global_step,
                                              step_metrics=step_metrics)

                if global_step.numpy() % 2000 == 0:
                    train_checkpointer.save(global_step=global_step.numpy())

                if global_step.numpy() % 2000 == 0:
                    policy_checkpointer.save(global_step=global_step.numpy())

        elif agent_type == 'ppo':
            collect_driver.run = common.function(collect_driver.run,
                                                 autograph=False)
            #eval_driver = common.function(eval_driver.run, autograph=False)
            agent.train = common.function(agent.train, autograph=False)
            collect_time = 0
            train_time = 0
            timed_at_step = global_step.numpy()
            end_time = time.time() + (12 * 60 * 60)
            while time.time() < end_time:
                global_step_val = global_step.numpy()
                start_time = time.time()
                collect_driver.run()
                collect_time += time.time() - start_time

                start_time = time.time()
                trajectories = replay_buffer.gather_all()
                total_loss, _ = agent.train(experience=trajectories)
                replay_buffer.clear()
                train_time += time.time() - start_time

                for train_metric in train_metrics:
                    train_metric.tf_summaries(train_step=global_step,
                                              step_metrics=step_metrics)

                if global_step_val % log_interval == 0:
                    logging.info('step = %d, loss = %f', global_step_val,
                                 total_loss)
                    steps_per_sec = ((global_step_val - timed_at_step) /
                                     (collect_time + train_time))
                    logging.info('%.3f steps/sec', steps_per_sec)
                    logging.info('collect_time = {}, train_time = {}'.format(
                        collect_time, train_time))
                    with tf.compat.v2.summary.record_if(True):
                        tf.compat.v2.summary.scalar(
                            name='global_steps_per_sec',
                            data=steps_per_sec,
                            step=global_step)

                    timed_at_step = global_step_val
                    collect_time = 0
                    train_time = 0
                if global_step.numpy() % checkpoint_interval == 0:
                    train_checkpointer.save(global_step=global_step.numpy())

                if global_step.numpy() % checkpoint_interval == 0:
                    policy_checkpointer.save(global_step=global_step.numpy())

        train_checkpointer.save(global_step=global_step.numpy())
        policy_checkpointer.save(global_step=global_step.numpy())
        num_episodes = 3
        video_filename = result_dir + '/finalVid_ppo_0_1.mp4'
        with imageio.get_writer(video_filename, fps=60) as video:
            for _ in range(num_episodes):
                time_step = eval_py_env.reset()
                video.append_data(eval_py_env.render())
                counter = 5
                while counter > 0:
                    action_step = agent.policy.action(time_step)
                    time_step = eval_py_env.step(action_step.action)
                    viddata = eval_py_env.render()
                    video.append_data(viddata)
                    video.append_data(viddata)
                    video.append_data(viddata)
                    video.append_data(viddata)
                    video.append_data(viddata)
                    if time_step.is_last():
                        eval_py_env.step([1])
                        counter -= 1
Example #36
samples = []
samples_original = []
trainingDataFolder = "../recorded_training_data-2/"
with open(trainingDataFolder + '/driving_log.csv') as csvfile:
    reader = csv.reader(csvfile)
    for line in reader:
        samples.append(line)
        samples_original.append(line)
        # adding
        if abs(float(line[3])) > 0.1:
            samples.append(line)

# Avoid running the gif creation each time
if 0:
    with imageio.get_writer('./training_movie.gif', mode='I') as writer:
        for filenameIdx in range(0, len(samples), 10):
            filename = samples_original[filenameIdx][0]
            image = imageio.imread(filename)
            writer.append_data(image)

# Plots stats of training steering angle
plt.figure(figsize=(12, 3))
plt.subplot(1, 3, 1)
showDrivingAngles(samples_original,
                  aug='original',
                  title="\ndata set with center images only")
plt.subplot(1, 3, 2)
showDrivingAngles(samples_original,
                  aug='left_right',
                  title="\ndata set with left-right images")
Example #37
def create_plot_gif(wrist_file=None, ankle_file=None, start_time=None, stop_time=None,
                    sample_rate=75, plot_period_ms=100, wrist_obj=None, ankle_obj=None,
                    output_dir=None,
                    slide_window=False, remove_gravity=False, remove_high_f=False, remove_dc=True):

    print("\nImporting data...")

    if wrist_obj is None:
        if "csv" in wrist_file:
            lw = pd.read_csv(wrist_file, skiprows=100)
        if "edf" in wrist_file:
            d = ImportEDF.GENEActiv(filepath=wrist_file, load_raw=True)

            d.sample_rate = 50

            lw = pd.DataFrame(list(zip(d.timestamps, d.x, d.y, d.z, [None for i in range(len(d.timestamps))],
                              [None for i in range(len(d.timestamps))], [None for i in range(len(d.timestamps))])))

        lw.columns = ["Timestamp", "x", "y", "z", "light", 'button', 'temperature']
        lw["Timestamp"] = pd.to_datetime(lw["Timestamp"], format="%Y-%m-%d %H:%M:%S:%f")

    if start_time is not None and stop_time is not None:
        lw = lw.loc[(lw["Timestamp"] >= pd.to_datetime(start_time)) &
                    (lw["Timestamp"] < pd.to_datetime(stop_time))]

    if ankle_obj is None:
        if "csv" in ankle_file:
            la = pd.read_csv(ankle_file, skiprows=100)
        if "edf" in ankle_file:
            d = ImportEDF.GENEActiv(filepath=ankle_file, load_raw=True)

            d.sample_rate = 50

            la = pd.DataFrame(list(zip(d.timestamps, d.x, d.y, d.z, [None for i in range(len(d.timestamps))],
                                       [None for i in range(len(d.timestamps))],
                                       [None for i in range(len(d.timestamps))])))

        la.columns = ["Timestamp", "x", "y", "z", "light", 'button', 'temperature']
        la["Timestamp"] = pd.to_datetime(la["Timestamp"], format="%Y-%m-%d %H:%M:%S:%f")

    if start_time is not None and stop_time is not None:
        la = la.loc[(la["Timestamp"] >= pd.to_datetime(start_time)) &
                    (la["Timestamp"] < pd.to_datetime(stop_time))]

    filenames = []

    # Converts cropped data to list
    time = [i / sample_rate for i in range(lw.shape[0])]
    lw_x = [i for i in lw["x"]]
    lw_y = [i for i in lw["y"]]
    lw_z = [i for i in lw["z"]]
    la_x = [i for i in la["x"]]
    la_y = [i for i in la["y"]]
    la_z = [i for i in la["z"]]

    if remove_gravity:

        print("-Filtering data to remove gravity...")

        lw_x = filter_signal(data=lw_x, filter_type="highpass", high_f=0.1, filter_order=2, sample_f=sample_rate)
        lw_y = filter_signal(data=lw_y, filter_type="highpass", high_f=0.1, filter_order=2, sample_f=sample_rate)
        lw_z = filter_signal(data=lw_z, filter_type="highpass", high_f=0.1, filter_order=2, sample_f=sample_rate)
        la_x = filter_signal(data=la_x, filter_type="highpass", high_f=0.1, filter_order=2, sample_f=sample_rate)
        la_y = filter_signal(data=la_y, filter_type="highpass", high_f=0.1, filter_order=2, sample_f=sample_rate)
        la_z = filter_signal(data=la_z, filter_type="highpass", high_f=0.1, filter_order=2, sample_f=sample_rate)

    if remove_high_f:

        print("-Filtering data to remove high frequency...")

        lw_x = filter_signal(data=lw_x, filter_type="lowpass", low_f=5, filter_order=2, sample_f=sample_rate)
        lw_y = filter_signal(data=lw_y, filter_type="lowpass", low_f=5, filter_order=2, sample_f=sample_rate)
        lw_z = filter_signal(data=lw_z, filter_type="lowpass", low_f=5, filter_order=2, sample_f=sample_rate)
        la_x = filter_signal(data=la_x, filter_type="lowpass", low_f=5, filter_order=2, sample_f=sample_rate)
        la_y = filter_signal(data=la_y, filter_type="lowpass", low_f=5, filter_order=2, sample_f=sample_rate)
        la_z = filter_signal(data=la_z, filter_type="lowpass", low_f=5, filter_order=2, sample_f=sample_rate)

    if remove_dc:
        print("\n-Removing DC component from signal...")

        lw_x = [i - np.mean(lw_x) for i in lw_x]
        lw_y = [i - np.mean(lw_y) for i in lw_y]
        lw_z = [i - np.mean(lw_z) for i in lw_z]
        la_x = [i - np.mean(la_x) for i in la_x]
        la_y = [i - np.mean(la_y) for i in la_y]
        la_z = [i - np.mean(la_z) for i in la_z]

    min_x = min([min(lw_x), min(la_x)])
    min_y = min([min(lw_y), min(la_y)])
    min_z = min([min(lw_z), min(la_z)])

    max_x = max([max(lw_x), max(la_x)])
    max_y = max([max(lw_y), max(la_y)])
    max_z = max([max(lw_z), max(la_z)])

    min_all = min([min_x, min_y, min_z])
    max_all = max([max_x, max_y, max_z])

    plot_rate = int(np.ceil(plot_period_ms / (1000 / sample_rate)))
    if plot_rate == 0:
        plot_rate = 1

    print("\n-Data will be plotted in {}ms increments...\n".format(plot_period_ms))

    for i in range(0, lw.shape[0], plot_rate):

        print("-Generating plot {} of {}...".format(int((i/plot_rate))+1, int(len(range(0, lw.shape[0], plot_rate)))))

        fig, (ax1, ax2) = plt.subplots(2, figsize=(10, 6))
        plt.subplots_adjust(right=.75, left=.07, hspace=.3)

        ax1.plot(time[:i], lw_x[:i], color='black')
        ax1.plot(time[:i], lw_y[:i], color='red')
        ax1.plot(time[:i], lw_z[:i], color='dodgerblue')
        ax1.axvline(time[i], color='limegreen')

        ax2.plot(time[:i], la_x[:i], color='black')
        ax2.plot(time[:i], la_y[:i], color='red')
        ax2.plot(time[:i], la_z[:i], color='dodgerblue')
        ax2.axvline(time[i], color='limegreen')

        ax1.set_ylim(min_all - .5, max_all + .5)
        ax2.set_ylim(min_all - .5, max_all + .5)

        if not slide_window:
            ax1.set_xlim(0, len(lw_x)/sample_rate)
            ax2.set_xlim(0, len(la_x)/sample_rate)

        if slide_window:
            if time[i] <= 12.5:
                ax1.set_xlim(0, 15)
                ax2.set_xlim(0, 15)
            if time[i] > 12.5:
                ax1.set_xlim(time[i]-7.5, time[i]+7.5)
                ax2.set_xlim(time[i]-7.5, time[i]+7.5)

        ax2.set_xlabel("Time (seconds)")
        ax1.set_ylabel("Acceleration")
        ax2.set_ylabel("Acceleration")
        ax1.set_title("Left Wrist")
        ax2.set_title("Left Ankle")
        ax1.set_ylabel("Acceleration")

        # create file name and append it to a list
        filename = f'{i}.png'
        filenames.append(filename)

        plt.savefig(output_dir + filename)
        plt.close()

    # build gif
    print("\nCombining images into gif...")
    with imageio.get_writer(output_dir + "Output.gif", mode='I') as writer:
        for filename in filenames:
            image = imageio.imread(output_dir + filename)
            writer.append_data(image)

    # Remove files
    for filename in set(filenames):
        os.remove(output_dir + filename)

    print("\nComplete.")

    return lw, la
Example #38
    video_use_gif = False

    im = cv2.imread('test_img1.jpg', 1)
    t_im = torch.from_numpy(im).cuda().permute(2, 0, 1).float()[None] / 255.

    if out_test_video:
        if video_use_gif:
            fps = 0.5
            out_wh = (im.shape[1]//2, im.shape[0]//2)
            suffix = '.gif'
        else:
            fps = 5
            out_wh = (im.shape[1], im.shape[0])
            suffix = '.mkv'
        video_last_time = time.perf_counter()
        video = imageio.get_writer('ssim_test'+suffix, fps=fps)

    # Test SSIM
    print('Training SSIM')
    rand_im = torch.randint_like(t_im, 0, 255, dtype=torch.float32) / 255.
    rand_im.requires_grad = True
    optim = torch.optim.Adam([rand_im], 0.003, eps=1e-8)
    losser = SSIM(data_range=1., channel=t_im.shape[1]).cuda()
    ssim_score = 0
    while ssim_score < 0.999:
        optim.zero_grad()
        loss = losser(rand_im, t_im)
        (-loss).sum().backward()
        ssim_score = loss.item()
        optim.step()
        r_im = np.transpose(rand_im.detach().cpu().numpy().clip(0, 1) * 255, [0, 2, 3, 1]).astype(np.uint8)[0]
Example #39
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Apr 22 12:33:34 2019

@author: lambm
"""
import imageio
import pathlib

images_folder = pathlib.Path("images/")
filenames = [f for f in images_folder.glob('*.*')]

with imageio.get_writer('animation_25.mp4', mode='I', fps=25) as writer:
    for i in range(0, len(filenames)):
        image_path = pathlib.Path("images/shape_{}.png".format(i))
        image = imageio.imread(image_path)
        writer.append_data(image)
        print(i)
Example #40
        sns.lineplot(data=result, color="#8FCACA", ax=axs[0])
        sns.lineplot(
            data=[
                con.model.predict(np.array([con.state]))[0],
                tf.nn.softmax(con.actions),
            ],
            palette={
                0: "#8FCACA",
                1: "#FFAEA5"
            },
            dashes={
                0: "",
                1: ""
            },
            ax=axs[1],
        )
        files.append(f"./nArmedBandits/results/{epoch}.png")
        plt.savefig(files[-1])
        plt.close()

    with imageio.get_writer("./nArmedBandits/results.gif", mode="I") as writer:
        for file in files:
            image = imageio.imread(file)
            writer.append_data(image)
        for _ in range(36):
            image = imageio.imread(files[-1])
            writer.append_data(image)

    for file in set(files):
        os.remove(file)
Example #41
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('-i',
                        '--filename_input',
                        type=str,
                        default=os.path.join(data_dir, 'teapot.obj'))
    parser.add_argument('-o',
                        '--filename_output',
                        type=str,
                        default=os.path.join(data_dir, 'example1.gif'))
    parser.add_argument('-g', '--gpu', type=int, default=0)
    args = parser.parse_args()

    # other settings
    camera_distance = 2.732
    elevation = 30
    texture_size = 2

    # load .obj
    vertices, faces = nr.load_obj(args.filename_input)
    vertices = vertices[
        None, :, :]  # [num_vertices, XYZ] -> [batch_size=1, num_vertices, XYZ]
    faces = faces[None, :, :]  # [num_faces, 3] -> [batch_size=1, num_faces, 3]

    # create texture [batch_size=1, num_faces, texture_size, texture_size, texture_size, RGB]
    textures = torch.ones(1,
                          faces.shape[1],
                          texture_size,
                          texture_size,
                          texture_size,
                          3,
                          dtype=torch.float32).cuda()

    # to gpu

    # create renderer
    renderer = nr.Renderer(camera_mode='look_at')

    # draw object
    loop = tqdm.tqdm(range(0, 360, 4))
    writer = imageio.get_writer(args.filename_output, mode='I')
    coords = []
    for num, azimuth in enumerate(loop):
        loop.set_description('Drawing')
        eye = nr.get_points_from_angles(camera_distance, elevation, azimuth)
        eye = (eye[0], eye[1], 2.0 * eye[2])
        renderer.eye = eye

        print(f"\nangle: {renderer.eye}\n")
        coords.append(renderer.eye)
        images, _, _ = renderer(
            vertices, faces,
            textures)  # [batch_size, RGB, image_size, image_size]
        image = images.detach().cpu().numpy()[0].transpose(
            (1, 2, 0))  # [image_size, image_size, RGB]
        writer.append_data((255 * image).astype(np.uint8))
    coords_np = np.array(coords)
    np.set_printoptions(suppress=True)
    print(coords_np)

    ax = plt.axes(projection='3d')
    ax.scatter3D(*coords_np.transpose())
    ax.set_box_aspect([1, 1, 1])
    set_axes_equal(ax)
    show_3d_axes_rgb(ax)
    plt.show()
    writer.close()
Example #42
X = np.arange(0, 26, 1)
Y = np.arange(0, 26, 1)
X, Y = np.meshgrid(X, Y)

fig = plt.figure()
ax = Axes3D(fig)
ax.view_init(25, 30)

for i in range(0, 60):
    Z = genfromtxt('csv/%s.csv' % (i * 10), delimiter=',')

    my_cmap = cm.coolwarm
    my_cmap.set_under('green', 0)
    ax.plot_surface(X, Y, Z, cmap=my_cmap, linewidth=0, vmin=0.2)
    plt.title('Anisimov (%s sec.)' % i)
    ax.set_zlabel('temperature')
    ax.set_xlabel('x')
    ax.set_ylabel('y')
    ax.set(zlim=(0, 2000))
    filename = 'frames/' + str(i) + '.png'
    plt.savefig(filename, dpi=100)
    plt.cla()
    plt.gca()

with imageio.get_writer('graph_2.gif', mode='I') as writer:
    for i in range(0, 60):
        image = imageio.imread('frames/' + str(i) + '.png')
        os.remove('frames/' + str(i) + '.png')
        writer.append_data(image)
Example #43
def imagearray2file(img_array, outpath=None, fps=30):
    '''
    :param img_array: RxCxTxwidthxheightx3
    :param outpath: the directory where T images will be dumped for each time point in range T
    :param fps: fps of the gif file
    :return:
        it will return an image list with length T
        if outpath is given as a png file, an image will be saved for each t in T.
        if outpath is given as a gif file, an animated image with T frames will be created.
    '''
    import cv2
    from human_body_prior.tools.omni_tools import makepath

    if outpath is not None:
        makepath(outpath, isfile=True)

    if not isinstance(img_array, np.ndarray) or img_array.ndim != 6:
        raise ValueError('img_array should be a numpy array of shape RxCxTxheightxwidthx3')

    R, C, T, img_h, img_w, img_c = img_array.shape

    out_images = []
    for tIdx in range(T):
        row_images = []
        for rIdx in range(R):
            col_images = []
            for cIdx in range(C):
                col_images.append(img_array[rIdx, cIdx, tIdx])
            row_images.append(np.hstack(col_images))
        t_image = np.vstack(row_images)
        out_images.append(t_image)

    if outpath is not None:
        if '.png' in outpath:
            for tIdx in range(T):
                if T > 1:
                    cur_outpath = outpath.replace('.png', '_%03d.png'%tIdx)
                else:
                    cur_outpath = outpath
                cv2.imwrite(cur_outpath, out_images[tIdx])
                while not os.path.exists(cur_outpath): continue  # wait until the snapshot is written to the disk
        elif '.gif' in outpath:
            import imageio
            with imageio.get_writer(outpath, mode='I', fps = fps) as writer:
                for tIdx in range(T):
                    img = out_images[tIdx].astype(np.uint8)
                    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
                    writer.append_data(img)
        elif '.avi' in outpath:
            fourcc = cv2.VideoWriter_fourcc(*'XVID')
            # each tiled frame is C images wide and R images tall
            video = cv2.VideoWriter(outpath, fourcc, fps, (C * img_w, R * img_h), True)
            for tIdx in range(T):
                img = out_images[tIdx].astype(np.uint8)
                img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
                video.write(img)

            video.release()
            cv2.destroyAllWindows()
        elif '.mp4' in outpath:

            from moviepy.editor import ImageSequenceClip
            animation = ImageSequenceClip(out_images, fps=fps)
            animation.write_videofile(outpath)

    return out_images
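A minimal usage sketch for imagearray2file, assuming its own dependencies (cv2, human_body_prior) are installed; the toy array and output path below are purely illustrative:

import numpy as np

# toy 1x2 grid of 30 frames of 64x64 RGB noise, values in 0..255
img_array = (np.random.rand(1, 2, 30, 64, 64, 3) * 255).astype(np.uint8)

# writes out/preview.gif with 30 tiled frames and also returns the list of tiled frames
frames = imagearray2file(img_array, outpath='out/preview.gif', fps=15)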
# Load the previously trained weights
net.load_state_dict(
    torch.load('ssd300_mAP_77.43_v2.pth',
               map_location=lambda storage, loc: storage)
)  # Load the weights of a pretrained network (ssd300_mAP_77.43_v2.pth).
# Creating the transform applied to every frame
# 1st parameter: the frame size expected by the neural network
# 2nd parameter: the mean colour values used to normalize the channels (the scale under which the network was trained)
transform = BaseTransform(net.size, (104 / 256.0, 117 / 256.0, 123 / 256.0))

# Doing some Object Detection on a video

#load the video
reader = imageio.get_reader('funny_dog.mp4')
#get the frame rate parameter
fps = reader.get_meta_data()['fps']

# output the video with detections
writer = imageio.get_writer('output.mp4', fps=fps)
for i, frame in enumerate(reader):
    # pass the network in evaluation (inference) mode into the detect function
    frame = detect(frame, net.eval(), transform)
    # frame now has the detection rectangles drawn on it

    # To append a frame to the output video
    writer.append_data(frame)
    print(i)
# close this output video
writer.close()
img = plt.imshow(tubes, interpolation='nearest')
plt.axis('off')

img.set_cmap('hot_r')
plt.savefig("visual.png", bbox_inches='tight')

for low in lows:
    flood_recursive(tubes[:], low[0], low[1])

img = plt.imshow(tubes, interpolation='nearest')
img.set_cmap('hot')

i = (i // frameRate) + 1

for j in range(i, (11 * i) // 10):
    # frame rendering is disabled below; this loop only advances j to the final frame index
    '''
    img = plt.imshow(tubes, interpolation='nearest')
    img.set_cmap('hot')
    plt.savefig(f"day9_visual/visual{j}.png", bbox_inches='tight')
    '''

filenames = []

for i in range(j + 1):
    filenames.append(f"day9_visual/visual{i}.png")

with imageio.get_writer('visual.gif', mode='I') as writer:
    for filename in filenames:
        image = imageio.imread(filename)
        writer.append_data(image)
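The snippet above relies on a flood_recursive helper defined elsewhere. A minimal sketch of such a recursive flood fill, under the assumption that tubes is a 2D numpy array of digit heights and basins are bounded by cells of value 9 (as in the Advent of Code day 9 puzzle), could look like:

import numpy as np

def flood_recursive(grid, x, y, fill=10):
    # stop at the grid border or at basin boundaries (height 9 or already-filled cells)
    if x < 0 or y < 0 or x >= grid.shape[0] or y >= grid.shape[1]:
        return
    if grid[x, y] >= 9:
        return
    grid[x, y] = fill  # mark the cell so it stands out in the plot
    flood_recursive(grid, x + 1, y, fill)
    flood_recursive(grid, x - 1, y, fill)
    flood_recursive(grid, x, y + 1, fill)
    flood_recursive(grid, x, y - 1, fill)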
Example #46
0
def main():
    args = parse_arguments()

    ###########################
    # Load mesh
    ###########################

    mesh = TriangleMesh.from_obj(args.mesh)
    vertices = mesh.vertices.cuda()
    faces = mesh.faces.int().cuda()

    # Expand such that batch size = 1

    vertices = vertices.unsqueeze(0)

    ###########################
    # Normalize mesh position
    ###########################

    vertices_max = vertices.max()
    vertices_min = vertices.min()
    vertices_middle = (vertices_max + vertices_min) / 2.
    vertices = (vertices - vertices_middle) * MESH_SIZE

    ###########################
    # Generate vertex color
    ###########################

    if not args.use_texture:
        vert_min = torch.min(vertices, dim=1, keepdim=True)[0]
        vert_max = torch.max(vertices, dim=1, keepdim=True)[0]
        colors = (vertices - vert_min) / (vert_max - vert_min)

    ###########################
    # Generate texture mapping
    ###########################

    if args.use_texture:
        uv = get_spherical_coords_x(vertices[0].cpu().numpy())
        uv = torch.from_numpy(uv).cuda()

        # Expand such that batch size = 1
        uv = uv.unsqueeze(0)

    ###########################
    # Load texture
    ###########################

    if args.use_texture:
        # Load image as numpy array
        texture = np.array(Image.open(args.texture))

        # Convert numpy array to PyTorch tensor
        texture = torch.from_numpy(texture).cuda()

        # Convert from [0, 255] to [0, 1]
        texture = texture.float() / 255.0

        # Convert to NxCxHxW layout
        texture = texture.permute(2, 0, 1).unsqueeze(0)

    ###########################
    # Render
    ###########################

    if args.use_texture:
        renderer_mode = 'Lambertian'

    else:
        renderer_mode = 'VertexColor'

    renderer = Renderer(HEIGHT, WIDTH, mode=renderer_mode)

    loop = tqdm.tqdm(list(range(0, 360, 4)))
    loop.set_description('Drawing')

    os.makedirs(args.output_path, exist_ok=True)
    writer = imageio.get_writer(os.path.join(args.output_path, 'example.gif'), mode='I')
    for azimuth in loop:
        renderer.set_look_at_parameters([90 - azimuth],
                                        [CAMERA_ELEVATION],
                                        [CAMERA_DISTANCE])

        if args.use_texture:
            predictions, _, _ = renderer(points=[vertices, faces.long()],
                                         uv_bxpx2=uv,
                                         texture_bx3xthxtw=texture)

        else:
            predictions, _, _ = renderer(points=[vertices, faces.long()],
                                         colors_bxpx3=colors)

        image = predictions.detach().cpu().numpy()[0]
        writer.append_data((image * 255).astype(np.uint8))

    writer.close()
Example #47
0
def build_gif(
    df,
    x,
    y,
    savedir,
    x_label='',
    y_label='',
    speed_double=False,
    speed_test=False,
    limits_xy=None,
    savefig_args=dict(dpi=100,
                      bbox_inches='tight',
                      pad_inches=0.05,
                      format='png'),
):
    """Create GIF animations.

    Uses a DataFrame containing the "history" of an optimization run.
    """
    import imageio
    import io
    import shutil

    if limits_xy is None:
        limits_xy = (min(df[x]), max(df[x]), min(df[y]), max(df[y]))

    with imageio.get_writer(os.path.join(savedir, '1_contour.gif'),
                            mode='I') as writer:
        for i in range(len(df)):
            if logger.isEnabledFor(logging.INFO):
                print(' GIF', i, len(df), end='\r')
            if i < 3:
                continue
            if speed_test:
                if i > 15:
                    break
            if speed_double:
                if not i % 2:
                    continue
            fig, ax = plot_contour(df.loc[:i, x],
                                   df.loc[:i, y],
                                   df.loc[:i, 'error'],
                                   x_label=x_label,
                                   y_label=y_label,
                                   z_label='Error',
                                   limits_xy=limits_xy)
            # plt.show()
            buffer = io.BytesIO()
            fig.savefig(buffer, **savefig_args)
            plt.clf()
            plt.close('all')
            buffer.seek(0)
            image = imageio.imread(buffer)
            writer.append_data(image)

        logger.info('GIF finished')

        filename = os.path.join(savedir, '2_contour_gif_end.png')
        with open(filename, 'wb') as f:
            buffer.seek(0)
            shutil.copyfileobj(buffer, f, length=131072)
            buffer.close()

    # Also create an empty starting plot for the GIF
    fig, ax = plot_contour(df[x],
                           df[y],
                           df['error'],
                           x_label=x_label,
                           y_label=y_label,
                           z_label='Error',
                           limits_xy=limits_xy,
                           plot_empty=True)
    filename = os.path.join(savedir, '0_contour_gif_start.png')
    fig.savefig(filename, **savefig_args)
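A minimal usage sketch for build_gif, assuming plot_contour and logger are available in the same module; the DataFrame below is a made-up optimization history with the columns the function expects (the two parameters plus an 'error' column):

import os
import pandas as pd

df = pd.DataFrame({
    'alpha': [0.10, 0.20, 0.35, 0.40, 0.55, 0.60],
    'beta':  [1.00, 0.85, 0.90, 0.70, 0.65, 0.60],
    'error': [5.0, 3.2, 2.1, 1.5, 1.2, 1.1],
})

savedir = 'gif_out'
os.makedirs(savedir, exist_ok=True)
build_gif(df, x='alpha', y='beta', savedir=savedir,
          x_label='alpha', y_label='beta', speed_test=True)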
Example #48
0
def rotate(func: Callable,
           points: np.ndarray,
           path: str,
           *args,
           fps: int = 24,
           duration: int = 1,
           cache_directory: str = ".rotate",
           parallelize: bool = True,
           verbose: bool = False,
           **kwargs):
    """Create rotating gif of given image.

    Parameters
    -----------------------
    func: Callable
        function that returns the figure to render.
    points: np.ndarray
        The 3D or 4D array to rotate or roto-translate.
    path: str
        path where to save the GIF.
    *args
        positional arguments to be passed to the `func` callable.
    fps: int = 24
        frames per second of the resulting animation.
    duration: int = 1
        Duration of the rotation in seconds.
    cache_directory: str = ".rotate"
        directory where the intermediate frames are stored.
    parallelize: bool = True
        whether to parallelize execution.
    verbose: bool = False
        whether to be verbose about frame creation.
    **kwargs
        keyword arguments to be passed to the `func` callable.

    """
    global conversion_command

    os.makedirs(cache_directory, exist_ok=True)
    X = MinMaxScaler(feature_range=(-1, 1)).fit_transform(points)

    total_frames = duration * fps

    tasks = [(func, X, 2 * np.pi * frame / total_frames, args, kwargs,
              "{cache_directory}/{frame}.jpg".format(
                  cache_directory=cache_directory, frame=frame))
             for frame in range(total_frames)]

    if parallelize:
        number_of_processes = cpu_count()
        with get_context("spawn").Pool(number_of_processes) as p:
            chunks_size = total_frames // number_of_processes
            loading_bar = tqdm(total=total_frames,
                               desc="Rendering frames",
                               disable=not verbose,
                               dynamic_ncols=True,
                               leave=False)
            for executed_tasks_number in p.imap(_render_frame_wrapper,
                                                chunks(tasks, chunks_size)):
                loading_bar.update(executed_tasks_number)
            loading_bar.close()
            p.close()
            p.join()
    else:
        for task in tqdm(tasks,
                         desc="Rendering frames",
                         disable=not verbose,
                         dynamic_ncols=True,
                         leave=False):
            _render_frame_wrapper([task])

    if path.endswith(".gif"):
        with imageio.get_writer(path, mode='I', fps=fps) as writer:
            for task in tqdm(tasks,
                             desc="Merging frames",
                             disable=not verbose,
                             dynamic_ncols=True,
                             leave=False):
                writer.append_data(imageio.imread(task[-1]))
        optimize(path)
    elif path.split(".")[-1] in ("webm", "mp4", "avi"):
        height, width, _ = cv2.imread(tasks[0][-1]).shape
        encoding = {
            "mp4": "MP4V",
            "avi": "FMP4",
            "webm": "vp80"
        }[path.split(".")[-1]]
        fourcc = cv2.VideoWriter_fourcc(*encoding)
        video = cv2.VideoWriter(path, fourcc, fps, (width, height))
        for task in tqdm(tasks,
                         desc="Merging frames",
                         disable=not verbose,
                         dynamic_ncols=True,
                         leave=False):
            video.write(cv2.imread(task[-1]))
        cv2.destroyAllWindows()
        video.release()
    else:
        raise ValueError("Unsupported format!")

    shutil.rmtree(cache_directory)

    if not os.path.exists(path):
        raise ValueError(
            ("The expected target path file `{}` was "
             "not created. Typically this is caused by "
             "errors in the encoding of the chosen file "
             "format. Please take a look at the log that "
             "has been printed to either the console or the jupyter "
             "kernel.").format(path))
import imageio
from tqdm import tqdm
import cv2

vpath = '/home/khanh/Downloads/64_20_poses.mp4'
gpath = './teaser.gif'

owriter = imageio.get_writer(gpath, fps=24)
vreader = imageio.get_reader(vpath)
for frm_idx, img in tqdm(enumerate(vreader)):
    if frm_idx % 5 == 0:
        img = cv2.resize(img, dsize=(0, 0), fx=0.3, fy=0.3)
        owriter.append_data(img)
owriter.close()
Example #50
0
def run_projection(network_pkl: str, in_dir: str, out_dir: str,
                   save_video: bool, seed: int, steps: int):
    """Project given image to the latent space of pretrained network pickle.
    Examples:
    python projector.py --outdir=out --target=~/mytargetimg.png \\
        --network=https://nvlabs-fi-cdn.nvidia.com/stylegan2-ada-pytorch/pretrained/ffhq.pkl
    """
    np.random.seed(seed)
    torch.manual_seed(seed)

    # Load networks.
    print('Loading networks from "%s"...' % network_pkl)
    device = torch.device('cuda')
    with dnnlib.util.open_url(network_pkl) as fp:
        G = legacy.load_network_pkl(fp)['G_ema'].requires_grad_(False).to(
            device)  # type: ignore

    img_files = img_list(in_dir)
    num_images = len(img_files)

    for image_idx in range(num_images):
        fname = basename(img_files[image_idx])
        print('Projecting image %d/%d .. %s' %
              (image_idx + 1, num_images, basename(img_files[image_idx])))
        work_dir = os.path.join(out_dir, fname)
        os.makedirs(work_dir, exist_ok=True)

        # Load target image.
        target_pil = PIL.Image.open(img_files[image_idx]).convert('RGB')
        w, h = target_pil.size
        s = min(w, h)
        target_pil = target_pil.crop(
            ((w - s) // 2, (h - s) // 2, (w + s) // 2, (h + s) // 2))
        target_pil = target_pil.resize((G.img_resolution, G.img_resolution),
                                       PIL.Image.LANCZOS)
        target_uint8 = np.array(target_pil, dtype=np.uint8)

        # Optimize projection.
        # start_time = perf_counter()
        projected_w_steps = project(
            G,
            target=torch.tensor(target_uint8.transpose([2, 0, 1]),
                                device=device),  # pylint: disable=not-callable
            num_steps=steps,
            device=device,
            verbose=True)
        # print (f'Elapsed: {(perf_counter()-start_time):.1f} s')

        # Render debug output: optional video and projected image and W vector.
        os.makedirs(out_dir, exist_ok=True)
        if save_video:
            vfile = '%s/proj.mp4' % work_dir
            video = imageio.get_writer(vfile,
                                       mode='I',
                                       fps=25,
                                       codec='libx264',
                                       bitrate='16M')
            print('Saving optimization progress video %s' % vfile)
            for projected_w in projected_w_steps:
                synth_image = G.synthesis(projected_w.unsqueeze(0),
                                          noise_mode='const')
                synth_image = (synth_image + 1) * (255 / 2)
                synth_image = synth_image.permute(0, 2, 3, 1).clamp(0, 255).to(
                    torch.uint8)[0].cpu().numpy()
                video.append_data(
                    np.concatenate([target_uint8, synth_image], axis=1))
            video.close()

        # Save final projected frame and W vector.
        target_pil.save('%s/target.jpg' % work_dir)
        projected_w = projected_w_steps[-1]
        synth_image = G.synthesis(projected_w.unsqueeze(0), noise_mode='const')
        synth_image = (synth_image + 1) * (255 / 2)
        synth_image = synth_image.permute(0, 2, 3, 1).clamp(0, 255).to(
            torch.uint8)[0].cpu().numpy()

        PIL.Image.fromarray(synth_image,
                            'RGB').save('%s/%s.jpg' % (work_dir, fname))
        np.savez('%s/%s.npz' % (work_dir, fname),
                 w=projected_w.unsqueeze(0).cpu().numpy())
def make_gif(filename):
    # the with-block closes the writer on exit, so no explicit close() is needed
    with imageio.get_writer(filename, mode='I') as writer:
        for frame_file in sorted(glob.glob('/tmp/_tmp_*.png')):
            writer.append_data(imread(frame_file))
            os.remove(frame_file)
Example #52
0
    checkpoint_dir = './training_checkpoints'
    checkpoint_prefix = os.path.join(checkpoint_dir, "ckpt")
    checkpoint = tf.train.Checkpoint(
        generator_optimizer=generator_optimizer,
        discriminator_optimizer=discriminator_optimizer,
        generator=generator,
        discriminator=discriminator)

    with file_writer.as_default():
        checkpoint.restore(tf.train.latest_checkpoint(checkpoint_dir))
        train(train_dataset, EPOCHS)

        #display_image(EPOCHS)
        anim_file = 'dcgan.gif'

        with imageio.get_writer(anim_file, mode='I') as writer:
            filenames = glob.glob('image*.png')
            filenames = sorted(filenames)
            last = -1
            for i, filename in enumerate(filenames):
                # skip more and more frames as i grows so the animation accelerates
                frame = 2 * (i**0.5)
                if round(frame) > round(last):
                    last = frame
                else:
                    continue
                image = imageio.imread(filename)
                writer.append_data(image)
            # repeat the final frame so the gif lingers on the last image
            image = imageio.imread(filename)
            writer.append_data(image)
def spectra_plot(pair):
    g_max = 5 * 10**11
    m_max = 2 * 10**12
    pri_wl, pri_spec, pri_wl_uv, pri_spec_uv, sec_wl, sec_spec, sec_wl_uv, sec_spec_uv, pair = chooser(
        pair)
    i = 0
    while i < 1000:
        temp = []
        temp_uv = []
        row = pri_spec[i]
        for num in row:
            f_num = float(num)
            temp.append(f_num)
        row_uv = pri_spec_uv[i]
        for num in row_uv:
            f_num_uv = float(num)
            temp_uv.append(f_num_uv)
        fig, ax = plt.subplots(2, 2, figsize=(20, 20))
        if pair == 'MK':
            ymax = m_max
            uv_max = 10**8
        else:
            ymax = g_max
            uv_max = 10**6
        temp_sec = []
        temp_sec_uv = []
        row_sec = sec_spec[i]
        for num in row_sec:
            f_num = float(num)
            temp_sec.append(f_num)
        row_sec_uv = sec_spec_uv[i]
        for num in row_sec_uv:
            f_num_uv = float(num)
            temp_sec_uv.append(f_num_uv)

        ax[1, 1].plot(sec_wl[:len(temp)], temp_sec[:len(sec_wl)])
        ax[1, 0].plot(sec_wl_uv, temp_sec_uv)
        ax[1, 1].set_xlim(1, 5)
        ax[1, 0].set_xlim(0.1, 0.2)
        ax[1, 1].set_ylim(0, ymax)
        ax[1, 0].set_ylim(0, uv_max)

        ax[0, 1].plot(pri_wl[:len(temp)], temp[:len(pri_wl)])
        ax[0, 0].plot(pri_wl_uv, temp_uv)
        ax[0, 1].set_xlim(1, 5)
        ax[0, 0].set_xlim(0.1, 0.2)
        ax[0, 1].set_ylim(0, ymax)
        ax[0, 0].set_ylim(0, uv_max)
        fig.savefig(
            '/gscratch/vsm/mwjl/projects/binary/scripts/scratch/wn_row' +
            str(i) + str(pair) + '.png')
        plt.close(fig)  # release the figure so ~100 figures do not stay open
        i = i + 10
    nums = range(0, 1000, 10)
    inputs2 = []
    gif_path2 = '/gscratch/vsm/mwjl/projects/binary/plots/spectra_' + str(
        pair) + '.gif'
    for i in nums:
        name = "/gscratch/vsm/mwjl/projects/binary/scripts/scratch/wn_row" + str(
            i) + str(pair) + ".png"
        inputs2.append(name)
    with imageio.get_writer(gif_path2, mode='I') as writer:
        for name in inputs2:
            writer.append_data(imageio.imread(name))
Example #54
0
def run_sim_test(type_test):
    real_robot_joints = []
    sim_robot_joints = []
    target_robot_joints = []
    real_robot_eef_rframe = []
    real_robot_eef_sframe = []
    sim_robot_eef_rframe = []
    sim_robot_eef_sframe = []

    # Create dict to hold options that will be passed to env creation call
    options = {}

    # Choose environment and add it to options
    options["env_name"] = "Lift"
    options["robots"] = ["Jaco"]

    # Choose camera
    camera = "frontview"

    n_joints = 7
    write_path = os.path.join('datasets', type_test)

    # load data
    # latin1 allows us to load python2
    real_robot = np.load(os.path.join('datasets', type_test + '.npz'),
                         allow_pickle=True,
                         encoding='latin1')
    real_eef_pos = real_robot['eef_pos']
    #real_joint_pos = np.mod(real_robot['joint_pos'], 4*np.pi)
    real_joint_pos = real_robot['joint_pos']

    real_actions = real_robot['actions']
    init_qpos = list(real_joint_pos[0][:7])
    # Choose controller
    controller_file = "jaco_joint_position_5hz.json"
    controller_fpath = os.path.join(
        os.path.split(suite.__file__)[0], 'controllers', 'config',
        controller_file)
    print('loading controller from', controller_fpath)
    # Load the desired controller
    options["controller_configs"] = load_controller_config(
        custom_fpath=controller_fpath)
    #options['initial_qposes'] = [init_qpos]

    control_freq = 2
    n_steps = len(real_actions)

    # initialize the task
    env = suite.make(
        **options,
        has_renderer=False,
        has_offscreen_renderer=True,
        ignore_done=True,
        use_camera_obs=True,
        control_freq=control_freq,
        camera_names=camera,
        camera_heights=512,
        camera_widths=512,
    )
    site = 'gripper0_grip_site'
    env = VisualizationWrapper(env)
    env.reset()
    env.robots[0].set_robot_joint_positions(init_qpos)
    env.robots[0].controller.update_initial_joints(init_qpos)

    video_writer = imageio.get_writer(write_path + '.mp4', fps=2)
    eef_site_id = env.robots[0].eef_site_id

    # Get action limits
    low, high = env.action_spec
    #env.robots[0].set_robot_joint_positions(init_real[:7])

    sim_joint_pos = env.sim.data.qpos[env.robots[0]._ref_joint_pos_indexes]
    for t in range(n_steps - 1):
        #action = np.deg2rad(real_actions[t-1])
        action = real_joint_pos[t, :7] - sim_joint_pos

        if len(action) == 7:
            action = np.hstack((action, [0]))

        obs, reward, done, _ = env.step(action)

        video_img = obs['%s_image' % camera][::-1]
        video_writer.append_data(video_img)

        # get simulator position and quaternion in real robot frame
        sim_eef_pos_rframe, sim_eef_quat_rframe = get_sim2real_posquat(env)
        sim_eef_pos_sframe, sim_eef_quat_sframe = get_sim_posquat(env)
        sim_joint_pos = env.sim.data.qpos[env.robots[0]._ref_joint_pos_indexes]
        sim_goal_joint_pos = env.robots[0].controller.goal_qpos

        sim_robot_eef_rframe.append(
            deepcopy(np.hstack((sim_eef_pos_rframe, sim_eef_quat_rframe))))
        sim_robot_eef_sframe.append(
            deepcopy(np.hstack((sim_eef_pos_sframe, sim_eef_quat_sframe))))
        sim_robot_joints.append(deepcopy(sim_joint_pos))
        target_robot_joints.append(deepcopy(sim_goal_joint_pos))

        real_eef_pos_sframe, real_eef_quat_sframe = get_real2sim_posquat(
            real_eef_pos[t, :3], real_eef_pos[t, 3:7])
        real_robot_eef_rframe.append(real_eef_pos[t])
        real_robot_eef_sframe.append(
            deepcopy(np.hstack((real_eef_pos_sframe, real_eef_quat_sframe))))
        real_robot_joints.append(real_joint_pos[t])

    f, ax = plt.subplots(7, figsize=(10, 20))
    real_robot_eef_rframe = np.array(real_robot_eef_rframe)
    real_robot_eef_sframe = np.array(real_robot_eef_sframe)
    sim_robot_eef_rframe = np.array(sim_robot_eef_rframe)
    sim_robot_eef_sframe = np.array(sim_robot_eef_sframe)
    y = np.arange(len(real_robot_eef_rframe))
    vmin = -np.pi
    vmax = np.pi
    for i in range(7):
        if not i:
            ax[i].scatter(y,
                          real_robot_eef_rframe[:, i],
                          marker='o',
                          s=4,
                          c='r',
                          label='robot_rframe')
            ax[i].scatter(y,
                          real_robot_eef_sframe[:, i],
                          marker='o',
                          s=4,
                          c='k',
                          label='robot_sframe')
            ax[i].scatter(y,
                          sim_robot_eef_rframe[:, i],
                          marker='o',
                          s=4,
                          c='g',
                          label='sim_rframe')
            ax[i].scatter(y,
                          sim_robot_eef_sframe[:, i],
                          marker='o',
                          s=4,
                          c='b',
                          label='sim_sframe')
        else:
            ax[i].scatter(y,
                          real_robot_eef_rframe[:, i],
                          marker='o',
                          s=4,
                          c='r')
            ax[i].scatter(y,
                          real_robot_eef_sframe[:, i],
                          marker='o',
                          s=4,
                          c='k')
            ax[i].scatter(y,
                          sim_robot_eef_rframe[:, i],
                          marker='o',
                          s=4,
                          c='g')
            ax[i].scatter(y,
                          sim_robot_eef_sframe[:, i],
                          marker='o',
                          s=4,
                          c='b')
        ax[i].plot(real_robot_eef_rframe[:, i], c='r')
        ax[i].plot(real_robot_eef_sframe[:, i], c='k')
        ax[i].plot(sim_robot_eef_rframe[:, i], c='g')
        ax[i].plot(sim_robot_eef_sframe[:, i], c='b')

    for i in range(4, 7):
        ax[i].set_ylim([vmin, vmax])
    ax[0].set_title('x')
    ax[1].set_title('y')
    ax[2].set_title('z')
    ax[3].set_title('qx')
    ax[4].set_title('qy')
    ax[5].set_title('qz')
    ax[6].set_title('qw')
    ax[0].legend()
    plt.savefig(write_path + '_eef.png')
    plt.close()

    f, ax = plt.subplots(7, figsize=(10, 20))
    real_robot_joints = np.array(real_robot_joints)
    sim_robot_joints = np.array(sim_robot_joints)
    target_robot_joints = np.array(target_robot_joints)
    vmin = -4 * np.pi
    vmax = 4 * np.pi
    for i in range(7):
        ax[i].set_title(i)
        if not i:
            ax[i].plot(real_robot_joints[:, i], c='b', label='real')
            ax[i].plot(sim_robot_joints[:, i], c='k', label='sim')
            ax[i].plot(target_robot_joints[:, i], c='c', label='goal')
        else:
            ax[i].plot(real_robot_joints[:, i], c='b')
            ax[i].plot(sim_robot_joints[:, i], c='k')
            ax[i].plot(target_robot_joints[:, i], c='c')
        ax[i].scatter(y, real_robot_joints[:, i], s=2, c='b')
        ax[i].scatter(y, sim_robot_joints[:, i], s=2, c='k')
        ax[i].scatter(y, target_robot_joints[:, i], s=2, c='c')

    for x in range(7):
        ax[x].set_ylim([vmin, vmax])
    ax[0].legend()
    plt.savefig(write_path + '_joints.png')
    plt.close()

    video_writer.close()
    print("Video saved to {}".format(write_path))
Example #55
0
def main():
    args = parse_arguments()

    ###########################
    # Setup model
    ###########################

    model = Model(args.mesh, args.image, args)
    model.cuda()

    ###########################
    # Optimize
    ###########################

    loop = tqdm.tqdm(range(args.epochs))
    loop.set_description('Optimizing')

    optimizer = torch.optim.Adam(
        [p for p in model.parameters() if p.requires_grad],
        lr=0.1, betas=(0.5, 0.999)
    )

    azimuth = 0.0

    os.makedirs(args.output_path, exist_ok=True)
    writer = imageio.get_writer(os.path.join(
        args.output_path, 'example3_optimization.gif'), mode='I')
    for i in loop:
        optimizer.zero_grad()

        loss = model()

        loss.backward()
        optimizer.step()

        model.renderer.eye = get_points_from_angles(
            args.camera_distance, args.elevation, azimuth)

        images, _, _ = model.renderer(
            model.vertices,
            model.faces,
            torch.tanh(model.textures)
        )

        image = images.detach()[0].permute(1, 2, 0).cpu().numpy()
        writer.append_data((255 * image).astype(np.uint8))

        azimuth = (azimuth + 4) % 360

    writer.close()

    ###########################
    # Render optimized mesh
    ###########################

    loop = tqdm.tqdm(range(0, 360, 4))
    loop.set_description('Drawing')

    os.makedirs(args.output_path, exist_ok=True)
    writer = imageio.get_writer(os.path.join(
        args.output_path, 'example3_mesh.gif'), mode='I')
    for azimuth in loop:
        model.renderer.eye = get_points_from_angles(
            args.camera_distance, args.elevation, azimuth)

        images, _, _ = model.renderer(
            model.vertices,
            model.faces,
            torch.tanh(model.textures)
        )

        image = images.detach()[0].permute(1, 2, 0).cpu().numpy()
        writer.append_data((255 * image).astype(np.uint8))

    writer.close()
Example #56
0
def create_output_products(data, task_id=None):
    """Create the final output products for this algorithm.

    Open the final dataset and metadata and generate all remaining metadata.
    Convert and write the dataset to various formats and register all values in the task model.
    Update the status and exit.

    Args:
        data: tuple in the format of processing_task function - path, metadata, and {chunk ids}

    """
    logger.info("CREATE_OUTPUT")
    full_metadata = data[1]
    dataset = xr.open_dataset(data[0], autoclose=True).astype('float64')
    dataset['wofs'] = dataset.wofs / dataset.wofs_total_clean
    nan_to_num(dataset, 0)
    dataset_masked = mask_tsm(dataset, dataset.wofs)

    task = TsmTask.objects.get(pk=task_id)

    task.result_path = os.path.join(task.get_result_path(), "average_tsm.png")
    task.clear_observations_path = os.path.join(task.get_result_path(),
                                                "clear_observations.png")
    task.water_percentage_path = os.path.join(task.get_result_path(),
                                              "water_percentage.png")
    task.data_path = os.path.join(task.get_result_path(), "data_tif.tif")
    task.data_netcdf_path = os.path.join(task.get_result_path(),
                                         "data_netcdf.nc")
    task.animation_path = os.path.join(task.get_result_path(
    ), "animation.gif") if task.animated_product.animation_id != 'none' else ""
    task.final_metadata_from_dataset(dataset_masked)
    task.metadata_from_dict(full_metadata)

    bands = ['normalized_data', 'total_clean', 'wofs']
    band_paths = [
        task.result_path, task.clear_observations_path,
        task.water_percentage_path
    ]

    dataset_masked.to_netcdf(task.data_netcdf_path)
    write_geotiff_from_xr(task.data_path, dataset_masked, bands=bands)

    for band, band_path in zip(bands, band_paths):
        write_single_band_png_from_xr(band_path,
                                      dataset_masked,
                                      band,
                                      color_scale=task.color_scales[band],
                                      fill_color=task.query_type.fill)

    if task.animated_product.animation_id != "none":
        with imageio.get_writer(task.animation_path, mode='I',
                                duration=1.0) as writer:
            valid_range = range(len(full_metadata))
            for index in valid_range:
                path = os.path.join(task.get_temp_path(),
                                    "animation_final_{}.nc".format(index))
                if os.path.exists(path):
                    png_path = os.path.join(task.get_temp_path(),
                                            "animation_{}.png".format(index))
                    animated_data = mask_tsm(
                        xr.open_dataset(path,
                                        autoclose=True).astype('float64'),
                        dataset.wofs
                    ) if task.animated_product.animation_id != "scene" else xr.open_dataset(
                        path, autoclose=True)
                    write_single_band_png_from_xr(
                        png_path,
                        animated_data,
                        task.animated_product.data_variable,
                        color_scale=task.color_scales[
                            task.animated_product.data_variable],
                        fill_color=task.query_type.fill)
                    image = imageio.imread(png_path)
                    writer.append_data(image)

    dates = list(
        map(lambda x: datetime.strptime(x, "%m/%d/%Y"),
            task._get_field_as_list('acquisition_list')))
    if len(dates) > 1:
        task.plot_path = os.path.join(task.get_result_path(), "plot_path.png")
        create_2d_plot(task.plot_path,
                       dates=dates,
                       datasets=task._get_field_as_list(
                           'clean_pixel_percentages_per_acquisition'),
                       data_labels="Clean Pixel Percentage (%)",
                       titles="Clean Pixel Percentage Per Acquisition")

    logger.info("All products created.")
    task.update_bounds_from_dataset(dataset_masked)
    task.complete = True
    task.execution_end = datetime.now()
    task.update_status(
        "OK",
        "All products have been generated. Your result will be loaded on the map."
    )
    shutil.rmtree(task.get_temp_path())
    return True
Example #57
0
# initializing dlib's face detector and facial landmark predictor
detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor('shape_predictor_68_face_landmarks.dat')

# alpha for blending
alpha = 1

# initializing opencv stuff
font = cv2.FONT_HERSHEY_SIMPLEX
cap_b = cv2.VideoCapture(args['video_background'])
cap_f = cv2.VideoCapture(args['video_face'])
# cap = cv2.VideoCapture('http://192.168.1.161/live')
ret, f = cap_f.read()
ret, b = cap_b.read()
f_size = f.shape
print(f_size[0:2])
writer = imageio.get_writer(args['output_name'] + '.mp4', fps=20.0)

# prompt the user if the face found is a good detection
face_dets = detector(f, 1)
cut = None
mask = None
face_pts = None
while True:
    if len(face_dets) == 0:
        ret, f = cap_f.read()
        face_dets = detector(f, 1)
        continue
    for (i, face) in enumerate(face_dets):
        # face = face.rect
        f_show = copy.deepcopy(f)
        (x, y, w, h) = face_utils.rect_to_bb(face)
        
    with es.rio.open(list_results[idx], 'r') as src:        
        ep.plot_bands(src.read(),  cmap='RdYlGn', ax=ax2, 
                      extent=extent, cbar=False)
        manzana.boundary.plot(ax=ax2, color='white', alpha=0.5, linewidth=1)
        ax2.axis('off')
            
    plt.tight_layout()
    plt.savefig(os.path.join(path_figures, f'rgb_{imgname}-{timename}.png'))
        
    plt.close()

# %%
# Generate the animation from the sequence of images with the Keras classification

with imageio.get_writer(movie_classkeras, mode='I', duration=0.8) as writer:
    for filename in sorted(glob.glob(os.path.join(path_figures, f'rgb_*-{timename}.png'))):
        print(filename)
        image = imageio.imread(filename)
        writer.append_data(image)

# %%

reader_moviekeras = imageio.get_reader(movie_classkeras)
writer_movie = imageio.get_writer(movie_classkeras.replace('gif', 'mp4'), fps=1)

for frame in reader_moviekeras:
    writer_movie.append_data(frame)
writer_movie.close()
Example #59
0
    # initialize the task
    env = suite.make(
        **options,
        has_renderer=False,
        has_offscreen_renderer=True,
        ignore_done=True,
        use_camera_obs=True,
        control_freq=20,
        camera_names=camera,
        camera_segmentations=segmentation_level,
        camera_heights=512,
        camera_widths=512,
    )
    env.reset()

    video_writer = imageio.get_writer(args.video_path, fps=20)

    # Get action limits
    low, high = env.action_spec

    # do visualization
    for i in range(100):
        action = 0.5 * np.random.uniform(low, high)
        obs, reward, done, _ = env.step(action)

        video_img = obs[f"{camera}_segmentation_{segmentation_level}"].squeeze(
            -1)[::-1]
        np.savetxt("/tmp/seg_{}.txt".format(i), video_img, fmt="%.2f")
        video_img = segmentation_to_rgb(video_img, args.random_colors)
        video_writer.append_data(video_img)
Example #60
0
def convert2of(video_path, model_path):
    parser = argparse.ArgumentParser()
    parser.add_argument('--small', action='store_true', help='use small model')
    parser.add_argument('--mixed_precision',
                        action='store_true',
                        help='use mixed precision')
    parser.add_argument('--alternate_corr',
                        action='store_true',
                        help='use efficient correlation implementation')
    args = parser.parse_args()

    model = torch.nn.DataParallel(RAFT(args))
    model.load_state_dict(torch.load(model_path))

    model = model.module
    model.to("cuda")
    model.eval()

    # path = os.path.join(video_path)
    subdirs = os.listdir(video_path)
    for subdir in subdirs:
        images = []

        # compute the path to the subdir
        subpath = os.path.join(video_path, subdir)
        # elems = os.listdir(subpath)
        # for elem in elems:
        #     # name = elem[:-4]
        #     path = os.path.join(subpath, elem)
        #     sample = os.listdir(path)
        #     for name in sample:
        #         print(name)
        #         ssspath = os.path.join(path, name)
        cap = cv2.VideoCapture(subpath)
        fps = cap.get(cv2.CAP_PROP_FPS)
        while True:
            ret, frame = cap.read()
            if ret:
                frame = frame[int(720 * 0.15):int(720 * 0.85),
                              int(1280 * 0.15):int(1280 * 0.85)]
                frame = cv2.resize(frame, (int(455), int(256)))

                images.append(torch.from_numpy(frame).permute(2, 0, 1).float())
            else:
                break
    # cap = de.VideoReader(video_path, width = 455, height= 256)

    # fps = len(cap)
    # print(fps)
    # shape = cap[0].shape
    # print(shape)
    # i = 0
    # for i in cap:
    #  frame = cap[i].asnumpy()
    #  i = i + 1

        print("Read frames finished")
        images = torch.stack(images, dim=0)
        images = images.to("cuda")
        padder = InputPadder(images.shape)
        images = padder.pad(images)[0]
        fourcc = cv2.VideoWriter_fourcc(*'MP42')

        image1 = images[0, None]
        image2 = images[1, None]
        start_t = time.time()
        with torch.no_grad():
            flow_low, flow_up = model(image1, image2, iters=20, test_mode=True)
            print("Each prediction cost {}s".format(time.time() - start_t))
            output_image = viz(image1, flow_up)
        # out = cv2.VideoWriter(dst_path, fourcc,
        #                       fps, (output_image.shape[1], output_image.shape[0]))
        dst_path = os.path.join(video_path, 'move_opt')
        if not os.path.exists(dst_path):
            os.makedirs(dst_path)
        dst_path = os.path.join(dst_path, subdir)
        out = imageio.get_writer(dst_path, format='mp4', mode='I', fps=fps)

        #print(output_image.shape)
        with torch.no_grad():
            for i in range(images.shape[0] - 1):

                image1 = images[i, None]
                image2 = images[i + 1, None]

                _, flow_up = model(image1, image2, iters=20, test_mode=True)
                tmp = viz(image1, flow_up)
                # tmp = cv2.cvtColor(tmp, cv2.COLOR_RGB2BGR)
                # gray = cv2.cvtColor(tmp, cv2.COLOR_RGB2GRAY)
                # tmp = cv2.cvtColor(gray, cv2.COLOR_GRAY2BGR)
                # cv2.imshow('', tmp)
                # cv2.waitKey(1)

                # out.write(tmp)
                out.append_data(tmp)
        out.close()  # finalize the mp4 file written through imageio
        cap.release()