Example #1
def sim_animation(evaluation, show=True, save=False, fname=None):
    # nrows, ncols, index
    fig = plt.figure(figsize=[12, 12])
    # best
    ax1 = fig.add_subplot(2, 2, 1)
    # remaining alphas
    ax2 = fig.add_subplot(4, 4, 3)
    ax3 = fig.add_subplot(4, 4, 4)
    ax4 = fig.add_subplot(4, 4, 7)
    ax5 = fig.add_subplot(4, 4, 8)
    # ft parameter values
    ax6 = fig.add_subplot(4, 2, 5)
    # genotype
    ax7 = fig.add_subplot(2, 4, 7)
    # network
    ax8 = fig.add_subplot(2, 4, 8)
    # ax9: additional time series (d0, d1, d2, st, stft, gt)
    ax9 = fig.add_subplot(4, 2, 7)

    # best trial, and the remaining alphas
    genotype = evaluation.genotype
    best_trial = evaluation.trials[0]
    best_trial_time = len(best_trial.data_ft)
    # sorted remaining (different alphas)
    alphas = [best_trial.alpha]
    rm_trials = []
    for trial in evaluation.trials[1:]:
        if trial.alpha not in alphas:
            alphas.append(trial.alpha)
            rm_trials.append(trial)

    # text above
    fig.suptitle("{}".format(fname), ha="center", va="center")
    time = fig.text(0.5,
                    0.92,
                    "time=0".format(best_trial.ft),
                    ha="center",
                    va="center")

    # ax1: basics, objects for agents and ir sensors
    ax1.set_title("alpha={}, ft={}".format(
        np.round(np.degrees(best_trial.alpha), 2), np.round(best_trial.ft), 2))
    ax1.set_xlim(-100, 100)
    ax1.set_ylim(-100, 100)
    ax1.set_aspect("equal")
    agents = []
    ag_colors = ["blue", "green", "red"]
    trajs = []
    irs = []
    for i in range(len(best_trial.agents)):
        ag, = ax1.plot([], [], color="black")
        agents.append(ag)
        ag_irs = []
        traj, = ax1.plot([], [], color=ag_colors[i])
        trajs.append(traj)
        for irx in best_trial.agents[i].sensors.irx:
            if irx:
                ir, = ax1.plot([], [], color="black")
                ag_irs.append(ir)
        irs.append(ag_irs)
    ags, = ax1.plot([], [], color="grey", linestyle="dashed")
    centroid, = ax1.plot([], [], marker="x", color="grey")

    # ax2-5: centroids, space and text
    ax2.set_title("alpha={}, ft={}".format(
        np.round(np.degrees(evaluation.trials[1].alpha)),
        np.round(evaluation.trials[1].ft), 2))
    ax2.set_xlim(-60, 60)
    ax2.set_ylim(-60, 60)
    cent2, = ax2.plot([], [], marker="x", color="black")
    ax2.tick_params(axis="y",
                    which="both",
                    left=False,
                    right=True,
                    labelleft=False)
    ax3.set_title("alpha={}, ft={}".format(
        np.round(np.degrees(evaluation.trials[2].alpha)),
        np.round(evaluation.trials[2].ft), 2))
    ax3.set_xlim(-60, 60)
    ax3.set_ylim(-60, 60)
    cent3, = ax3.plot([], [], marker="x", color="black")
    ax4.set_title("alpha={}, ft={}".format(
        np.round(np.degrees(evaluation.trials[3].alpha)),
        np.round(evaluation.trials[3].ft), 2),
                  y=-0.18)
    ax4.set_xlim(-60, 60)
    ax4.set_ylim(-60, 60)
    ax4.tick_params(axis="x",
                    which="both",
                    bottom=False,
                    top=True,
                    labelbottom=False)
    ax4.tick_params(axis="y",
                    which="both",
                    left=False,
                    right=True,
                    labelleft=False)
    cent4, = ax4.plot([], [], marker="x", color="black")
    ax5.set_title("alpha={}, ft={}".format(
        np.round(np.degrees(evaluation.trials[4].alpha)),
        np.round(evaluation.trials[4].ft), 2),
                  y=-0.18)
    ax5.set_xlim(-60, 60)
    ax5.set_ylim(-60, 60)
    ax5.tick_params(axis="x",
                    which="both",
                    bottom=False,
                    top=True,
                    labelbottom=False)
    cent5, = ax5.plot([], [], marker="x", color="black")

    # ax7: network neural space
    ax7.set_xlim(0, 200)
    ax7.set_ylim(0, 250)
    ax7.tick_params(axis="y",
                    which="both",
                    left=False,
                    right=True,
                    labelleft=False)
    # ax8: network
    ax8.set_xlim(0, 200)
    ax8.set_ylim(0, 250)

    # ax6: plot dist,maxdist, gt, cp, ft
    ax6.set_xlim(0, best_trial_time)
    ax6.set_ylim(0, 1.01)
    tbar, = ax6.plot([], [], color="black")
    # ax9: plot d0,d1,d2, st,stft, gt
    ax9.set_xlim(0, best_trial_time)
    ax9.set_ylim(0, 1.01)
    tbar9, = ax9.plot([], [], color="black")

    # to pause the animation and check data
    anim_running = True

    def onClick(event):
        nonlocal anim_running
        if anim_running:
            anim.event_source.stop()
            anim_running = False
        else:
            anim.event_source.start()
            anim_running = True
        xt = best_trial
        print("\nxt = trial object\n")
        import pdb
        pdb.set_trace()

    def init():
        # remaining alphas trajectories (ax2-ax5)
        for enum, xag in enumerate(rm_trials[0].agents):
            ax2.plot(xag.data.x, xag.data.y, color=ag_colors[enum])
        for enum, xag in enumerate(rm_trials[1].agents):
            ax3.plot(xag.data.x, xag.data.y, color=ag_colors[enum])
        for enum, xag in enumerate(rm_trials[2].agents):
            ax4.plot(xag.data.x, xag.data.y, color=ag_colors[enum])
        for enum, xag in enumerate(rm_trials[3].agents):
            ax5.plot(xag.data.x, xag.data.y, color=ag_colors[enum])

        # ax6
        # ycp = [cp[1] for cp in best_trial.data_cp]
        cpx = [cp[0] for cp in best_trial.data_cp] + [best_trial_time]
        cpy = [cp[1]
               for cp in best_trial.data_cp] + [best_trial.data_cp[-1][1]]
        ax6.plot(cpx, cpy, label="cp")
        # gt, st, ft
        ygt = [gt[2] for gt in best_trial.data_gt]
        ystft = [st[4] / 3 for st in best_trial.data_st]
        yft = [ft[1] / 120 for ft in best_trial.data_ft]
        ax6.plot(ygt, label="gt")
        ax6.plot(ystft, label="stft/3")
        ax6.plot(yft, label="ft/max(ft)")
        ax6.legend()

        # ax9
        #

        # ax7: sensor and motor regions
        ax7.plot([0, 200], [50, 50], color="black")
        ax7.plot([0, 200], [220, 220], color="black")
        for sx in evaluation.nx_space.sregion:
            ax7.plot(*sx.area.exterior.xy, color="black")
        for mx in evaluation.nx_space.mregion:
            ax7.plot(*mx.area.exterior.xy, color="black")
        # from genotype
        for nx in evaluation.network:
            # neural region
            npoint = plt.Circle((nx.x, nx.y),
                                radius=1,
                                fill=True,
                                color="black")
            ax7.add_patch(npoint)
            ax7.plot(*nx.area.exterior.xy, color="black")
            # input connections
            for ci in nx.l_in:
                lst = "dashed" if ci[2] < 0 else "solid"
                ax7.plot([ci[0], nx.x], [ci[1], nx.y],
                         color="blue",
                         linestyle=lst)
            # output connections
            for co in nx.l_out:
                lst = "dashed" if co[2] < 0 else "solid"
                ax7.plot([nx.x, co[0]], [nx.y, co[1]],
                         color="red",
                         linestyle=lst)

        # ax8: actual network
        ax8.plot([0, 200], [50, 50], color="black")
        ax8.plot([0, 200], [220, 220], color="black")
        for sx in evaluation.nx_space.sregion:
            ax8.plot(*sx.area.exterior.xy, color="black")
        for mx in evaluation.nx_space.mregion:
            ax8.plot(*mx.area.exterior.xy, color="black")
        # from genotype (only working connections)
        for nx in evaluation.network:
            # neurons
            npoint = plt.Circle((nx.x, nx.y),
                                radius=1,
                                fill=True,
                                color="black")
            ax8.add_patch(npoint)
            ax8.plot(*nx.area.exterior.xy, color="black")
            # input connections
            for i in range(len(nx.cx_in)):
                if nx.cx_in[i]:
                    ci = nx.l_in[i]
                    lst = "dashed" if ci[2] < 0 else "solid"
                    ax8.plot([ci[0], nx.x], [ci[1], nx.y],
                             color="blue",
                             linestyle=lst)
            # output connections
            for o in range(len(nx.cx_out)):
                if nx.cx_out[o]:
                    co = nx.l_out[o]
                    lst = "dashed" if co[2] < 0 else "solid"
                    ax8.plot([nx.x, co[0]], [nx.y, co[1]],
                             color="red",
                             linestyle=lst)
        return True

    def animate(i):
        # i: from number of savings in trial
        time.set_text("time={}".format(i))
        # at: adjusted i for savings from agents' states
        at = int(i / evaluation.dt)
        # main centroid and triangle
        centroid.set_data(best_trial.triangles[i].centroid.xy)
        ags.set_data(*best_trial.triangles[i].exterior.xy)
        # remaining centroids (can be shorter)
        if i < len(rm_trials[0].triangles):
            cent2.set_data(rm_trials[0].triangles[i].centroid.xy)
        if i < len(rm_trials[1].triangles):
            cent3.set_data(rm_trials[1].triangles[i].centroid.xy)
        if i < len(rm_trials[2].triangles):
            cent4.set_data(rm_trials[2].triangles[i].centroid.xy)
        if i < len(rm_trials[3].triangles):
            cent5.set_data(rm_trials[3].triangles[i].centroid.xy)
        # ax1 agents
        for enum, ag in enumerate(best_trial.agents):
            # agent body
            agents[enum].set_data(*ag.data.body[at].exterior.xy)
            # trajectories
            trajs[enum].set_data(ag.data.x[:at], ag.data.y[:at])
            # sensors
            ag_irs = [ir for ir in ag.data.irs[at] if ir]
            for n_ir, ir in enumerate(ag_irs):
                irs[enum][n_ir].set_data(*ir.xy)
        all_irs = [ir for irx in irs for ir in irx]
        # time mark for ax6
        tbar.set_data([i, i], [0, 1])

        #import pdb; pdb.set_trace()
        return (centroid, ags) + tuple(trajs) + tuple(agents) + tuple(all_irs)

    fig.canvas.mpl_connect('button_press_event', onClick)
    anim = animation.FuncAnimation(fig,
                                   animate,
                                   init_func=init,
                                   frames=best_trial_time,
                                   interval=10,
                                   blit=False,
                                   repeat=False)

    if save:
        # writer for saving the animation
        xwriter = animation.FFMpegWriter(fps=30)
        try:
            anim.save("{}.mp4".format(fname), writer=xwriter)
        except Exception:
            print('\ncouldn\'t save animation...')
    if show:
        plt.show()
    plt.close("all")
Example #2
    ax1.clear()
    k = k + 1
    for j in range(30):
        A = (1.0 - (4 * d_t * alpha / hs)) * tf_a[1:-1, 1:-1]
        B = (d_t * alpha / hs) * (tf_a[1:-1, :-2] + tf_a[:-2, 1:-1] +
                                  tf_a[2:, 1:-1] + tf_a[1:-1, 2:])
        tf_a[1:-1, 1:-1] = A + B
    ax1.imshow(tf_a)
    plt.subplots_adjust(top=0.99, right=0.99, left=0.05, bottom=0.01)
#    ax.margins(x=0)
    print(k)




plt.rcParams['animation.ffmpeg_path'] ='C:\\ffmpeg\\bin\\ffmpeg.exe'
FFwriter = animation.FFMpegWriter(bitrate=5000,fps=10)

ani = animation.FuncAnimation(fig, animate, frames=200)
ani.save('boxa/result_'+str(hh)+'.mp4', writer=FFwriter)

tf_aa=np.zeros([2,300,300])
#tf_aa=np.append(tf_a,alphaa,axis=1)
tf_aa[0]=tf_a
tf_aa[1]=alphaa
np.save('boxa/data/data_'+str(hh)+'.npy',tf_aa)
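Example #2 is a fragment: the indented block at the top is the body of an animate() callback, and the writer setup assumes fig, animate and the grid already exist. A self-contained sketch of the same pattern (diffusion constants, grid and output name are made-up values) could look like this:

import numpy as np
import matplotlib.pyplot as plt
from matplotlib import animation

# assumed parameters; the original values are not part of the excerpt
d_t, alpha, hs = 0.1, 1.0, 1.0
tf_a = np.zeros((300, 300))
tf_a[140:160, 140:160] = 100.0  # hot patch in the middle

fig, ax1 = plt.subplots()

def animate(k):
    # 30 explicit diffusion sub-steps per displayed frame, as in the fragment
    for _ in range(30):
        A = (1.0 - (4 * d_t * alpha / hs)) * tf_a[1:-1, 1:-1]
        B = (d_t * alpha / hs) * (tf_a[1:-1, :-2] + tf_a[:-2, 1:-1] +
                                  tf_a[2:, 1:-1] + tf_a[1:-1, 2:])
        tf_a[1:-1, 1:-1] = A + B
    ax1.clear()
    ax1.imshow(tf_a)

writer = animation.FFMpegWriter(bitrate=5000, fps=10)
ani = animation.FuncAnimation(fig, animate, frames=200)
ani.save("diffusion_sketch.mp4", writer=writer)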


Example #3
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import animation

from build_data import build_city_data, build_plot_data
from plot_data import plot_chart

cities = build_city_data()

names, pop_fns, lats, longs, colors = build_plot_data(cities)

start_year = 1901
end_year = 2012
speed_multiplier = 3

num_top_cities = 50
interval = (0.06 - 0.001 * num_top_cities) * speed_multiplier
file_name = 'output/animated_only_map_no_labels_' + str(num_top_cities) + '_' + str(start_year) + '_' + str(end_year) + '.mp4'

pop_limit = 100000

# Plots
fig = plt.figure(figsize=(10.8,10.8), tight_layout=True)
writer = animation.FFMpegWriter(fps=60, bitrate=5000)
with writer.saving(fig, file_name, dpi=100):
  for t in np.arange(start_year, end_year, interval):
    print(t)
    plot_chart(fig, t, num_top_cities, pop_limit, names, pop_fns, lats, longs, colors,
               compress_pops=False, save_img=False, in_animation=True, show_map = True, show_map_labels = False, show_chart = False)
    writer.grab_frame()

Example #4
def create_anim(cut_polyline=None,
                pvt=None,
                save_path=None,
                save_name=None,
                save_type='mp4',
                skip_frames=20):
    """Save type as 'gif' or 'mp4' """

    # for testing
    if cut_polyline is None:
        cut_polyline = test_poly()
        p = np.array(cut_polyline[0])
        import generate_pvt
        import imp
        imp.reload(generate_pvt)
        pvt = generate_pvt.GeneratePVT(cut_polyline, smooth_iters=500)

    else:
        p = np.array(cut_polyline[0])

    pvt.p = p

    cp_p_line, cp_vel_line = manage_pvt(pvt)
    cp_p_line = offset_p_line(cp_p_line)[0]
    cp_vel_line = offset_p_line(cp_vel_line)[0]

    vel_interp(pvt)
    p = pvt.p

    p, bc, bcf, tc, tcf, fudge = offset_p_line(p)

    interpolate = False
    if interpolate:
        new_ar = spread_array(p, 5)
    else:
        new_ar = p

    fig, ax = plt.subplots()  # Create a figure containing a single axes.
    fig.set_size_inches(8, 8, forward=True)
    fig.set_dpi(100)
    ax.set_xlim((bc, tc))
    ax.set_ylim((bc, tc))

    # lines ---------------------------------
    line, = ax.plot([], [], lw=4.1, label='cut_polyline',
                    color='deeppink')  # cut polyline
    p_line, = ax.plot([], [], lw=2.1, label='P polyline',
                      color='yellow')  # P polyline
    vel_line, = ax.plot([], [], lw=.5, label='vel * time',
                        color='black')  # velocity * time polyline
    xs_line, = ax.plot([], [], lw=5.0, label='x velocity',
                       color='red')  # x speed bar
    ys_line, = ax.plot([], [], lw=5.0, label='y velocity',
                       color='greenyellow')  # y speed bar
    hs_line, = ax.plot([], [], lw=5.0, label='head speed',
                       color='dodgerblue')  # head speed bar
    xj_line, = ax.plot([], [], lw=5.0, label='x jerk',
                       color='crimson')  # x jerk bar
    yj_line, = ax.plot([], [], lw=5.0, label='y jerk', color='gold')  # y jerk bar
    time_line, = ax.plot([], [], lw=5.0, label='time * 20',
                         color='purple')  # time bar

    # points --------------------------------
    cp_point, = ax.plot([0], [0], 'go', color='deeppink')  # cut_polyline
    p_point, = ax.plot([0], [0], 'go', color='yellow')  # cut_polyline
    v_point, = ax.plot([0], [0], 'go', color='black')  # cut_polyline
    x_point, = ax.plot([0], [0], 'go', color='red')
    y_point, = ax.plot([0], [0], 'go', color='greenyellow')
    # ---------------------------------------

    ax.legend(loc=2)  # Add a legend.

    idexer = np.arange(
        p.shape[0])[::skip_frames]  # skip frames to save animation time

    def animate(i):
        i = idexer[i]
        # poly lines
        line.set_data(new_ar[:, 0], new_ar[:, 1])
        p_line.set_data(cp_p_line[:, 0], cp_p_line[:, 1])
        vel_line.set_data(cp_vel_line[:, 0], cp_vel_line[:, 1])

        # bar graphs
        q = np.array([[bcf, 0.0], [bcf, 1.2]])
        r = np.array([[bcf * 2, 0.0], [bcf * 2, 1.2]])
        s = np.array([[bcf * 4, 0.0], [bcf * 4, 1.2]])
        t = np.array([[bcf * 6, 0.0], [bcf * 6, 1.2]])
        u = np.array([[bcf * 7, 0.0], [bcf * 7, 1.2]])
        v = np.array([[bcf * 9, 0.0], [bcf * 9, 1.2]])

        q[1][-1] = pvt.xV[i]
        r[1][-1] = pvt.yV[i]
        s[1][-1] = pvt.hs[i]
        t[1][-1] = pvt.xJ[i]
        u[1][-1] = pvt.yJ[i]
        v[1][-1] = pvt.ti[i] * 10

        xs_line.set_data(q[:, 0], q[:, 1])
        ys_line.set_data(r[:, 0], r[:, 1])
        hs_line.set_data(s[:, 0], s[:, 1])
        xj_line.set_data(t[:, 0], t[:, 1])
        yj_line.set_data(u[:, 0], u[:, 1])
        time_line.set_data(v[:, 0], v[:, 1])

        # points
        cp_point.set_data([new_ar[i, 0]], [new_ar[i, 1]])
        #p_point.set_data(cp_p_line[:,0][i], cp_p_line[:,1][i])
        #v_point.set_data(cp_vel_line[:,0][i], cp_vel_line[:,1][i])
        x_point.set_data([new_ar[i, 0]], [bc + fudge * 0.1])
        y_point.set_data([tc - fudge * 0.1], [new_ar[i, 1]])

        return (
            line,
            p_line,
            vel_line,
            xs_line,
            ys_line,
            hs_line,
            xj_line,
            yj_line,
            time_line,
            cp_point,
            p_point,
            v_point,
            x_point,
            y_point,
        )

    anim = animation.FuncAnimation(fig,
                                   animate,
                                   frames=idexer.shape[0],
                                   interval=20,
                                   blit=True)
    path = save_path
    name = save_name

    if path is None:
        path = os.path.expanduser('~/Desktop/')
    if save_name is None:
        name = 'PVT_movie'

    if save_type == 'gif':
        anim.save(path + name + '.gif', writer='imagemagick', fps=60)

    if save_type == 'mp4':
        anim.save(path + name + '.mp4', writer=animation.FFMpegWriter(fps=60))

    #plt.show()
    plt.close(fig)
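A hypothetical call of create_anim; with no polyline argument the function falls back to its built-in test polyline, so only the output format and frame skipping are chosen here (output paths, generate_pvt and the test data are assumed to be available):

# render the built-in test polyline to ~/Desktop/PVT_movie.mp4
create_anim(save_type='mp4', skip_frames=10)

# or with explicit data (cut polyline and PVT object assumed to exist):
# create_anim(cut_polyline=my_polyline, pvt=my_pvt,
#             save_path='/tmp/', save_name='my_cut', save_type='gif')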
Example #5
    def AnimateResult(self,
                      name,
                      sizex,
                      sizey,
                      npx,
                      npy,
                      tp_per_real_sec,
                      max_framerate,
                      flabelstart,
                      zmax=1.,
                      zmin=0.,
                      filt=False):
        fig = plt.figure(figsize=(sizex, sizey))
        ax1 = fig.add_subplot(111,
                              autoscale_on=False,
                              xlim=(0, self.DomLenX),
                              ylim=(0, self.DomLenY),
                              aspect='equal')
        #ax2 = fig.add_subplot(132,autoscale_on=False,xlim=(0,self.DomLenX), ylim=(0,self.DomLenY),aspect='equal')
        #ax3 = fig.add_subplot(133,autoscale_on=False,xlim=(0,self.DomLenX), ylim=(0,self.DomLenY),aspect='equal')
        #ax4 = fig.add_subplot(224,autoscale_on=False,xlim=(0,self.DomLen), ylim=(np.amin(self.E),np.amax(self.E)))
        x = np.linspace(0., self.DomLenX, npx, endpoint=False)
        y = np.linspace(0., self.DomLenY, npy, endpoint=False)
        Y, X = np.meshgrid(x, y)
        if zmax > 2. * np.amax(self.rho[0].RegularGrid(npx, npy)):
            zmax = np.amax(self.rho[0].RegularGrid(npx, npy))

        quad1 = ax1.pcolormesh(
            X,
            Y,
            self.rho[0].RegularGrid(npx, npy),
            vmax=zmax,
            vmin=zmin,
            cmap='jet'
        )  #; quad2 = ax2.pcolormesh(X,Y,self.energy[0].RegularGrid())
        #quad3 = ax3.pcolormesh(X,Y,self.phi[0].RegularGrid());

        ax1.set_title('Density',
                      fontsize=14)  #; ax2.set_title('KE density',fontsize=14)
        #ax3.set_title('Electric Potential',fontsize=14)

        #ax3.set_xlabel(r'$x/\lambda_D$',fontsize=13); ax4.set_xlabel(r'$x/\lambda_D$',fontsize=13)

        ax1.set_xlabel(r'$x/\lambda_D$', fontsize=13)
        ax1.set_ylabel(r'$y/\lambda_D$', fontsize=13)

        mult = int(1)
        speed = int(tp_per_real_sec * self.rho.shape[0] / self.T)
        while speed > max_framerate:
            speed = int(speed / 2.)
            mult *= 2

        numframes = int(self.rho.shape[0] / mult)
        print('Number of frames in movie: ' + str(numframes))
        print('Framerate = ' + str(speed))

        def init():
            quad1.set_array(np.ndarray([]))  #; quad2.set_array(np.ndarray([]))
            #quad3.set_data(np.ndarray([]))
            return quad1,  #quad2, quad3

        def animate(i):
            r = self.rho[mult * i].RegularGridBicubic(
                npx, npy)  #; e = self.energy[i].RegularGrid()
            if filt:
                r = FIL.BinomialFilter2D(r)
            #p = self.phi[i].RegularGrid()
            quad1.set_array(
                r[:-1, :-1].ravel())  #; quad2.set_array(e[:-1,:-1].ravel())
            #quad3.set_array(p[:-1,:-1].ravel())
            l = i + flabelstart
            fr = 'frame%03d.png' % l
            framename = 'MovieFrames/' + name + fr
            fig.savefig(framename)
            return quad1,  #quad2, quad3

        fullname = name + '.mp4'

        anim = animation.FuncAnimation(fig,
                                       animate,
                                       init_func=init,
                                       frames=numframes,
                                       blit=False)
        anim.save(fullname, writer=animation.FFMpegWriter(fps=speed))
Example #6
def animate_spike_raster(fname_spikes,
                         fname_movie_out,
                         nrn_idx=None,
                         time_mode="real",
                         time_stop=None,
                         time_window=1.0,
                         fps=30):
    """Generate a movie from the spike raster

    Parameters
    ----------
    fname_spikes: string
        input spike data text filename
        first column is sim time
        second column is real time
        subsequent columns are each neuron's spikes as would be recorded in the nengo simulator
    fname_movie_out: string
        if string, filename of output movie
    nrn_idx: list-like or none
        indices of neurons to use to generate wav file
        if None, uses all neurons, one per wav file channel
    time_mode: "real" or "sim"
        Whether to use the spike's real or simulation time
    time_stop: float or None
        if float, movie ends time_window after time_stop
        if None, movie ends after time_window past last spike
    time_window: float
        Size of time window over which to view spikes
    fps: int
        frames per second
    """
    sim_time, real_time, spk_data_raw = _load_spike_data(fname_spikes)
    if nrn_idx is not None:
        spk_data_raw = spk_data_raw[:, nrn_idx]
    n_samples, n_neurons = spk_data_raw.shape
    if time_mode == "real":
        time = real_time
    elif time_mode == "sim":
        time = sim_time

    fig = plt.figure()

    colors = plt.rcParams['axes.prop_cycle'].by_key()['color']
    ax = fig.add_subplot(111)
    y_range = 0.8
    last_spk_time = 0.
    for nrn_idx in range(n_neurons):
        y_mid = nrn_idx
        y_min = y_mid - y_range / 2.
        y_max = y_mid + y_range / 2.
        spk_idx = np.nonzero(spk_data_raw[:, nrn_idx])[0]
        spk_times = time[spk_idx]
        last_spk_time = np.max(spk_times.tolist() + [last_spk_time])
        ax.vlines(spk_times, y_min, y_max, colors[nrn_idx % len(colors)])
    ax.set_ylim(-y_range / 1.5, n_neurons - 1 + y_range / 1.5)
    ax.set_title("Spike Raster")
    ax.set_xlabel("Time (s)")
    ax.set_ylabel("Neuron Index")
    ax.set_yticks(_filter_nrn_idx_yticks(ax.get_yticks(), n_neurons))

    time_start = -time_window
    if time_stop is None:
        time_stop = last_spk_time + time_window
    else:
        time_stop = time_stop + time_window
    animation_time_total = time_stop - time_start - time_window
    animation_dt = 1. / fps
    n_frames = int(np.round(animation_time_total * fps))

    moviewriter = animation.FFMpegWriter(fps=fps)
    with moviewriter.saving(fig, fname_movie_out, dpi=100):
        for frame_idx in range(n_frames):
            time_since_start = frame_idx * animation_dt
            ax.set_xlim(time_start + time_since_start,
                        time_start + time_since_start + time_window)
            moviewriter.grab_frame()
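A hypothetical call of animate_spike_raster, assuming a spike text file with the column layout described in the docstring (file names and neuron indices are placeholders):

animate_spike_raster("spikes.txt",
                     "spike_raster.mp4",
                     nrn_idx=[0, 1, 2, 3],
                     time_mode="real",
                     time_window=2.0,
                     fps=30)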
Example #7
import numpy as np
from time import time
import multiprocessing as mp
import matplotlib.pyplot as plt
from athena_read import athdf
import matplotlib.gridspec as gridspec
from matplotlib import animation
from matplotlib import rc
import pandas as pd
from mpl_toolkits.axes_grid1.inset_locator import inset_axes

rc('text', usetex=True)
rc('font', family='serif', size=12)

# create movie writer
FFwriter = animation.FFMpegWriter(
    fps=20, extra_args=['-vcodec', 'libx264', '-pix_fmt', 'yuv420p'])

#reading in pm_trackfile using pandas
pmtrackfile = pd.read_csv("pm_trackfile.dat", delim_whitespace=True)

#reading the hdf5 data dumps
frame = athdf("BDstream.out1.00100.athdf")

#the range in R direction we want to plot, in index
rmin = 0
rmax = 384

# create np.meshgrid from the coordinates
#['x1f'] is the face-centered R direction coordinates, ['x2f'] is the face-centered Phi direction coordinates.
r, phi = np.meshgrid(frame['x1f'][rmin:rmax], frame['x2f'])
X = r * np.sin(phi)
Example #8
# Load ground-truth bounding boxes from the file (keep only the first six columns)
gt_boxes = np.loadtxt(gt_boxes_file, delimiter=',')
gt_boxes = gt_boxes[:, :6]

# Collect names of all images of the input video
in_image_names = os.listdir(path=input_path)
in_image_names.sort()

# Define bounding-box colors
colors = [(255, 0, 0), (0, 255, 0), (0, 0, 255), (255, 0, 255), (128, 0, 0),
          (0, 128, 0), (0, 0, 128), (128, 0, 128), (128, 128, 0),
          (0, 128, 128)]

# Initialize the writer object (converts frames into the video)
writer = anim.FFMpegWriter(fps=25, codec='mpeg4', bitrate=5000)
plt.figure()
fig, ax = plt.subplots(1, figsize=(12.8, 7.2), dpi=100)

# Open the stream to the output video, which will be built frame by frame
with writer.saving(fig, output_video_path, dpi=100):

    frame_number = 0

    # Load each frame of the input video
    for img_name in tqdm(in_image_names):

        frame_number += 1

        # Take next frame of the video and get detections
        pil_img = Image.open(input_path + img_name)
Example #9
import networkx as nx
import matplotlib.patches as pt
import matplotlib.pyplot as plt
import random
from matplotlib import animation
import d2dLedgerNode

Animation = False

if (Animation):
    mp4writer = animation.FFMpegWriter(fps=3, metadata=dict(artist='kshibata'), bitrate=4000)


width = 1920
height = 1080
timeLength = 20
node_count = 50
maxrange = 300
start_time = 1

ComErrorRatio = 20 # %
dataErrorRatio = 1 # %

waitTime = 8

testRequest = [{'type':'newRequest','payload':[{'type':'pointTransfer','from':'B0000002','to':'C0000003','amount':500}]}]

#
G = nx.DiGraph()
G2 = nx.DiGraph()
G2.add_node(0,Position=(0,0))
Example #10
for i in range(20):
    for j in range(10):
        c = complex(-0.6 - j / 100, -0.1 - i / 200)
        plt.axis("off")
        julia = julia_set(c, mkplot=False)
        t = ax.annotate(f"({round(c.real, 2)}, {round(c.imag, 2)})", (225, 10),
                        color="w")
        plot = ax.imshow(julia, interpolation="nearest", cmap="gnuplot2")
        ims.append((plot, t))

im_ani = animation.ArtistAnimation(fig,
                                   ims,
                                   interval=500,
                                   repeat_delay=3000,
                                   blit=True)
im_ani.save("c_choice.mp4", writer=animation.FFMpegWriter())

julia_set(complex(-0.418, -0.59), mkplot=True)
julia_set(complex(-0.593, -0.443), mkplot=True)
julia_set(complex(-0.155, -0.653), mkplot=True)
julia_set(complex(-0.715, -0.225), mkplot=True)
julia_set(complex(-0.705, -0.266), mkplot=True)


# Variants of the Julia set
def julia_set_log(c, base, mkplot=False, savefig=False, cmap="gnuplot2"):
    """
    Returns a matrix of pixel values, and optionally displays a plot
    :param c: the complex seed for the julia set fractal
    :param mkplot: whether to create the plot
    :param cmap: the matplotlib colourmap as a string
Example #11
serie_s10 = df.iloc[:, 11].tolist()
serie_s11 = df.iloc[:, 12].tolist()

##### SETTINGS #####
lista_grilla_fuerzas = crea_lista_grilla_fuerzas(timestep_min, timestep_max)

##### SETTINGS #####
total_frames = len(lista_grilla_fuerzas)
grid_x = np.linspace(xi, xf, cols)
grid_y = np.linspace(yi, yf, rows)
grid_density0 = lista_grilla_fuerzas[0]

##### PLOT #####
fig = plt.figure()
ax = plt.axes(xlim=(-1, 1), ylim=(0, 1.2))
grafica_bordes_chaleco()
levels = np.linspace(10, np.max(lista_grilla_fuerzas), 70, endpoint=True)
levbar = np.linspace(10, np.max(lista_grilla_fuerzas), 8, endpoint=True)
cont = plt.contourf(grid_x, grid_y, grid_density0, levels, cmap=plt.cm.jet)
plt.colorbar(ticks=levbar)
plt.grid(False)
plt.xlabel('$x$-location~(m)', fontsize=10)
plt.ylabel('$y$-location~(m)', fontsize=10)
plt.xlim(-0.5, 1.5)
plt.ylim(0, 1.2)
plt.subplots_adjust(bottom=0.29, right=0.98, left=0.15)

##### ANIMATION #####
anim = animation.FuncAnimation(fig, animate, frames=total_frames, repeat=False)
anim.save('{}.mp4'.format(output_filename), writer=animation.FFMpegWriter())
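The FuncAnimation call above relies on an animate() callback that is not part of the excerpt; one plausible sketch, assuming it redraws the filled contours for frame i with the same grid, levels and outline helper, is:

def animate(i):
    ax.clear()
    grafica_bordes_chaleco()  # assumed helper that redraws the vest outline
    ax.contourf(grid_x, grid_y, lista_grilla_fuerzas[i], levels, cmap=plt.cm.jet)
    ax.set_xlim(-0.5, 1.5)
    ax.set_ylim(0, 1.2)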
Example #12
                        origin='lower',
                        extent=[0, Lx, 0, Ly],
                        aspect='equal',
                        vmin=-0.1,
                        vmax=0.1)
        ims.append([im])
    gc.collect()

stop = time.time()
print("DONE!")

print("Min Energy:", np.min(U))
print("Max Energy:", np.max(U))

#
# STEP 4: VISUALIZATION!
#

if animate:
    an = anim.ArtistAnimation(fig, ims, interval=1, repeat_delay=0, blit=True)
    writer = anim.FFMpegWriter(fps=30)
    if save:
        an.save('bz_vid.mp4', writer=writer, dpi=500)

#Plot3D(xv, yv, Bz)
Plot2D(xv, yv, Bz)
Energy(U)
DivE(divE)

plt.show()
Example #13
def central_dot(filename_out, seconds, fps=30, detector=None):
    ''' Generate a video of a central dot
        running for the defined number of
        seconds, typically used as resting
        interval for visual experiments

    Parameters
    ----------
    filename_out: str,
        Path and name of the MP4 video file to be created
    seconds: int,
        Length in seconds of the video file to be created
    fps: int, optional
        Frames per second of the generated video. (default=30)
    detector: float (0-1), None, optional
        If a light detector will be used, creates a corner
        with brightness provided by the user (0-1).
        Otherwise, detector is set to None and no corner
        is drawn
    
    Returns
    -------
    (none) (output video is saved in the specified filename_out)

    See Also
    --------
    preprocess: reads a CSV file from VICON Motion Capture and
        creates a new CSV file only with the trajectories,
        changing to another reference frame, if convenient
    make_video: uses pre-processed VICON data to generate
        video of the movement at specified viewing angle
    scrambled_video: uses pre-processed VICON data to produce
        video of scrambled points (non-biological motion)
    gen_detector_intro: creates a detector calibration to be
        included at the start of experiments requiring precise 
        sychronization.

    Example
    -------
    vicon.central_dot('C:\\Users\\MyUser\\Documents\\Vicon\\rest_video.mp4',
        15,detector=0.7)
    '''
    import numpy as np
    import pandas as pd
    import matplotlib
    matplotlib.use('TkAgg')  # Needed to run on mac
    from matplotlib import pyplot as plt
    from mpl_toolkits.mplot3d import Axes3D
    from matplotlib.colors import cnames
    from matplotlib import animation
    from matplotlib import patches as patches

    numframes = seconds * fps

    #generate figure
    fig = plt.figure()
    plt.style.use('dark_background')
    fig.subplots_adjust(left=0,
                        bottom=0,
                        right=1,
                        top=1,
                        wspace=None,
                        hspace=None)
    fig.set_size_inches(13.66, 7.68, forward=True)
    ax = plt.axes()

    def animate(i):
        #plot the points
        ax.clear()
        ax.scatter(0.5, 0.5, c='w', alpha=0.7)
        # set axis limits, remove grid, set background, etc.
        ax.set_xlim(0, 1)
        ax.set_ylim(0, 1)
        if detector is not None:
            ax.add_patch(
                patches.Rectangle((0.95, 0.85),
                                  0.1,
                                  0.3,
                                  fill=True,
                                  fc=(detector, detector, detector),
                                  zorder=2,
                                  clip_on=False))
        ax.patch.set_facecolor('black')
        fig.set_facecolor('black')
        plt.axis('off')
        plt.grid(False)

    #make animation
    ani = animation.FuncAnimation(fig, animate, frames=numframes)

    #setting up animation file
    Writer = animation.writers['ffmpeg']
    writer = animation.FFMpegWriter(fps=fps,
                                    metadata=dict(artist='NeuroMat'),
                                    bitrate=1800,
                                    extra_args=['-vcodec', 'libx264'])

    #save animation
    ani.save(filename_out, writer=writer)
    plt.close()
Example #14
if booshow:
    pass
else:
    if boolog:
        logn = 'log'
    else:
        logn = 'lin'
    matplotlib.rcParams['animation.ffmpeg_path'] = 'ffmpeg'
    moviefile = filename.split('.npy')[0] + 'movie' + logn + str(
        int(time.time())) + '.mp4'
    if boompg:
        # for web compatibility
        FFwriter = animation.FFMpegWriter(bitrate=1000,
                                          codec='libx264',
                                          extra_args=[
                                              '-pix_fmt', 'yuv420p', '-preset',
                                              'slow', '-profile:v', 'baseline',
                                              '-level', '3.0'
                                          ],
                                          fps=gfps)
    else:
        # default
        FFwriter = animation.FFMpegWriter(bitrate=1000,
                                          fps=gfps,
                                          codec='libx264',
                                          extra_args=[
                                              '-crf', '5', '-preset',
                                              'veryslow', '-pix_fmt', 'yuv420p'
                                          ])

#the options are there to enforce compatibility with the Chrome web browser
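The excerpt configures the writer but the save call itself is outside it; the intended use is simply to pass FFwriter to Animation.save, along these lines (ani is assumed to be the FuncAnimation built elsewhere in the script):

ani.save(moviefile, writer=FFwriter)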
Example #15
    def WaterSurfaceProfile(self,
                            Sub,
                            PlotStart,
                            PlotEnd,
                            Interval=200,
                            XS=0,
                            XSbefore=10,
                            XSafter=10,
                            Save=False,
                            Path='',
                            SaveFrames=60):
        """
        =============================================================================
            WaterSurfaceProfile(Sub, PlotStart, PlotEnd, interval = 200, XS=0,
                                XSbefore = 10, XSafter = 10)
        =============================================================================

        Parameters
        ----------
        Sub : [Object]
            Sub-object created as a sub class from River object.
        PlotStart : [datetime object]
            starting date of the simulation.
        PlotEnd : [datetime object]
            end date of the simulation.
        Interval : [integer], optional
            speed of the animation. The default is 200.
        XS : [integer], optional
            order of a specific cross section to plot the data animation around it. The default is 0.
        XSbefore : [integer], optional
            number of cross sections to be displayed before the chosen cross section . The default is 10.
        XSafter : [integer], optional
            number of cross sections to be displayed after the chosen cross section . The default is 10.
        Save : [Boolean/string]
            different formats to save the animation: 'gif', 'avi', 'mov', 'mp4'. The default is False.
        Path : [String]
            Path where you want to save the animation, you have to include the
            extension at the end of the path.
        SaveFrames : [integer]
            number of frames per second

        in order to save a video using matplotlib you have to download ffmpeg from
        https://ffmpeg.org/ and define this path to matplotlib

        import matplotlib as mpl
        mpl.rcParams['animation.ffmpeg_path'] = "path where you saved the ffmpeg.exe/ffmpeg.exe"

        Returns
        -------
        TYPE

        """

        assert PlotStart < PlotEnd, "start Simulation date should be before the end simulation date "
        if Sub.from_beginning == 1:
            Period = Sub.Daylist[np.where(
                Sub.ReferenceIndex == PlotStart)[0][0]:np.where(
                    Sub.ReferenceIndex == PlotEnd)[0][0] + 1]
        else:
            ii = Sub.ReferenceIndex.index[np.where(
                Sub.ReferenceIndex == PlotStart)[0][0]]
            ii2 = Sub.ReferenceIndex.index[np.where(
                Sub.ReferenceIndex == PlotEnd)[0][0]]
            Period = list(range(ii, ii2 + 1))

        counter = [(i, j) for i in Period for j in hours]

        fig = plt.figure(60, figsize=(20, 10))
        gs = gridspec.GridSpec(nrows=2, ncols=6, figure=fig)
        ax1 = fig.add_subplot(gs[0, 2:6])
        ax1.set_ylim(0, int(Sub.Result1D['q'].max()))

        if XS == 0:
            # plot the whole sub-basin
            ax1.set_xlim(Sub.XSname[0] - 1, Sub.XSname[-1] + 1)
            ax1.set_xticks(Sub.XSname)
            ax1.set_xticklabels(Sub.XSname)
        else:
            # not the whole sub-basin
            FigureFirstXS = Sub.XSname[XS] - XSbefore
            FigureLastXS = Sub.XSname[XS] + XSafter
            ax1.set_xlim(FigureFirstXS, FigureLastXS)

            ax1.set_xticks(list(range(FigureFirstXS, FigureLastXS)))
            ax1.set_xticklabels(list(range(FigureFirstXS, FigureLastXS)))

        ax1.tick_params(labelsize=6)
        ax1.set_xlabel('Cross section No', fontsize=15)
        ax1.set_ylabel('Discharge (m3/s)', fontsize=15, labelpad=0.5)
        ax1.set_title('Sub-Basin' + ' ' + str(Sub.ID), fontsize=15)
        ax1.legend(["Discharge"], fontsize=15)

        Qline, = ax1.plot(
            [], [], linewidth=5
        )  #Sub.Result1D['q'][Sub.Result1D['day'] == Sub.Result1D['day'][1]][Sub.Result1D['hour'] == 1]
        ax1.grid()

        ### BC
        # Q
        ax2 = fig.add_subplot(gs[0, 1])
        ax2.set_xlim(1, 25)
        ax2.set_ylim(0, int(Sub.QBC.max().max()) + 1)

        ax2.set_xlabel('Time', fontsize=15)
        ax2.set_ylabel('Q (m3/s)', fontsize=15, labelpad=0.1)
        #ax2.yaxis.set_label_coords(-0.05, int(BC_q_T.max().max()))
        ax2.set_title("BC - Q", fontsize=20)
        ax2.legend(["Q"], fontsize=15)

        BC_q_line, = ax2.plot([], [], linewidth=5)
        BC_q_point = ax2.scatter([], [], s=300)
        ax2.grid()

        # h
        ax3 = fig.add_subplot(gs[0, 0])
        ax3.set_xlim(1, 25)
        ax3.set_ylim(float(Sub.HBC.min().min()), float(Sub.HBC.max().max()))

        ax3.set_xlabel('Time', fontsize=15)
        ax3.set_ylabel('water level', fontsize=15, labelpad=0.5)
        ax3.set_title("BC - H", fontsize=20)
        ax3.legend(["WL"], fontsize=10)

        BC_h_line, = ax3.plot(
            [], [], linewidth=5
        )  #Sub.Result1D['q'][Sub.Result1D['day'] == Sub.Result1D['day'][1]][Sub.Result1D['hour'] == 1]
        BC_h_point = ax3.scatter([], [], s=300)
        ax3.grid()

        # water surface profile
        ax4 = fig.add_subplot(gs[1, 0:6])

        if XS == 0:
            ax4.set_xlim(Sub.XSname[0] - 1, Sub.XSname[-1] + 1)
            ax4.set_xticks(Sub.XSname)
        else:
            ax4.set_xlim(FigureFirstXS, FigureLastXS)
            ax4.set_xticks(list(range(FigureFirstXS, FigureLastXS)))
            ax4.set_ylim(Sub.crosssections['gl'][FigureFirstXS],
                         Sub.crosssections['zr'][FigureLastXS] + 5)

        #ax4 = fig.add_subplot(gs[0:2,0:6])

        ax4.tick_params(labelsize=8)
        ax4.plot(Sub.XSname,
                 Sub.crosssections['zl'],
                 'k--',
                 dashes=(5, 1),
                 linewidth=2,
                 label='Left Dike')
        ax4.plot(Sub.XSname,
                 Sub.crosssections['zr'],
                 'k.-',
                 linewidth=2,
                 label='Right Dike')

        if Sub.Version == 1:
            ax4.plot(Sub.XSname,
                     Sub.crosssections['gl'],
                     'k-',
                     linewidth=5,
                     label='Bankful level')
        else:
            ax4.plot(Sub.XSname,
                     Sub.crosssections['gl'],
                     'k-',
                     linewidth=5,
                     label='Ground level')
            ax4.plot(Sub.XSname,
                     Sub.crosssections['gl'] + Sub.crosssections['dbf'],
                     'k',
                     linewidth=2,
                     label='Bankful depth')

        ax4.set_title("Water surface Profile Simulation", fontsize=15)
        ax4.legend(fontsize=15)
        ax4.set_xlabel("Profile", fontsize=15)
        ax4.set_ylabel("Elevation m", fontsize=15)
        ax4.grid()

        if XS == 0:
            day_text = ax4.annotate('Begining',
                                    xy=(Sub.XSname[0],
                                        Sub.crosssections['gl'].min()),
                                    fontsize=20)
        else:
            day_text = ax4.annotate(
                'Begining',
                xy=(FigureFirstXS + 1,
                    Sub.crosssections['gl'][FigureLastXS] + 1),
                fontsize=20)

        WLline, = ax4.plot([], [], linewidth=5)
        hLline, = ax4.plot([], [], linewidth=5)

        gs.update(wspace=0.2,
                  hspace=0.2,
                  top=0.96,
                  bottom=0.1,
                  left=0.05,
                  right=0.96)

        # animation

        def init_q():
            Qline.set_data([], [])
            WLline.set_data([], [])
            hLline.set_data([], [])
            day_text.set_text('')

            BC_q_line.set_data([], [])
            BC_h_line.set_data([], [])
            # BC_q_point and BC_h_point are scatter artists; nothing to reset here

            return Qline, WLline, hLline, day_text, BC_q_line, BC_h_line, BC_q_point, BC_h_point

        # animation function. this is called sequentially
        def animate_q(i):
            x = Sub.XSname
            y = Sub.Result1D['q'][Sub.Result1D['day'] == counter[i][0]][
                Sub.Result1D['hour'] == counter[i][1]].values

            day = Sub.ReferenceIndex.loc[counter[i][0], 'date']

            day_text.set_text('day = ' +
                              str(day + dt.timedelta(hours=counter[i][1])))
            Qline.set_data(x, y)

            y = Sub.Result1D['wl'][Sub.Result1D['day'] == counter[i][0]][
                Sub.Result1D['hour'] == counter[i][1]].values
            WLline.set_data(x, y)

            y = Sub.Result1D['h'][Sub.Result1D['day'] == counter[i][0]][
                Sub.Result1D['hour'] == counter[i]
                [1]].values * 2 + Sub.crosssections['gl'][
                    Sub.crosssections.index[len(Sub.XSname) - 1]]
            hLline.set_data(x, y)

            # BC Q (ax2)
            x = Sub.QBC.columns.values
            #    if XS == 0:
            #        y = BC_q_T.loc[Qobs.index[counter[i][0]-1]].values
            #    else:
            y = Sub.QBC.loc[Sub.ReferenceIndex.loc[counter[i][0],
                                                   'date']].values
            BC_q_line.set_data(x, y)

            # BC H (ax3)
            #    if XS == 0:
            #        y = BC_h_T.loc[Qobs.index[counter[i][0]-1]].values
            #    else:
            y = Sub.HBC.loc[Sub.ReferenceIndex.loc[counter[i][0],
                                                   'date']].values

            BC_h_line.set_data(x, y)

            #BC Q point (ax2)
            x = counter[i][1]
            #    if XS == 0:
            #        y= Qobs.index[counter[i][0]-1]
            #    else :
            y = Sub.ReferenceIndex.loc[counter[i][0], 'date']
            ax2.scatter(x, Sub.QBC[x][y])

            #BC h point (ax3)
            ax3.scatter(x, Sub.HBC[x][y])

            return Qline, WLline, hLline, day_text, BC_q_line, BC_h_line, ax2.scatter(
                x, Sub.QBC[x][y], s=300), ax3.scatter(x, Sub.HBC[x][y], s=300)

        # plt.tight_layout()

        #Writer = animation.FFMpegWriter
        #Writer= Writer(fps=30, bitrate=1800, #, metadata=dict(artist='Me')
        #               extra_args=['-vcodec', 'libx264'])
        #animation.FFMpegFileWriter(**kwargs = {"outfile" : 'basic_animation.mp4'})

        anim = animation.FuncAnimation(fig,
                                       animate_q,
                                       init_func=init_q,
                                       frames=np.shape(counter)[0],
                                       interval=Interval,
                                       blit=True)
        if Save != False:
            if Save == "gif":
                assert len(Path) >= 1 and Path.endswith(
                    ".gif"), "please enter a valid path to save the animation"
                writergif = animation.PillowWriter(fps=SaveFrames)
                anim.save(Path, writer=writergif)
            else:
                try:
                    if Save == 'avi' or Save == 'mov':
                        writervideo = animation.FFMpegWriter(fps=SaveFrames,
                                                             bitrate=1800)
                        anim.save(Path, writer=writervideo)
                    elif Save == 'mp4':
                        writermp4 = animation.FFMpegWriter(fps=SaveFrames,
                                                           bitrate=1800)
                        anim.save(Path, writer=writermp4)
                except FileNotFoundError:
                    print(
                        "please visit https://ffmpeg.org/ and download a version of ffmpeg compitable with your operating system, for more details please check the method definition"
                    )

        #anim.save('basic_animation.mp4', writer =Writer) #fps=30,
        plt.show()
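A hypothetical call of WaterSurfaceProfile; the visualisation object, Sub object, date range and output path below are placeholders, with the ffmpeg path configured as the docstring instructs:

import datetime as dt
import matplotlib as mpl

mpl.rcParams['animation.ffmpeg_path'] = "C:/ffmpeg/bin/ffmpeg.exe"  # adjust to your install

vis.WaterSurfaceProfile(Sub,  # assumed sub-basin object
                        PlotStart=dt.datetime(2000, 1, 1),
                        PlotEnd=dt.datetime(2000, 1, 10),
                        Interval=200,
                        Save='mp4',
                        Path='water_surface.mp4',
                        SaveFrames=60)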
Example #16
def wrapper(r_exp_list, position, velocity, k, m, step_size, numsteps):
    print("called wrapper")  # not Dr. Dre, sadly.  I wish I had him on dial.
    first_time = time.time()  # because I have lots of other "start_time" definitions sprinkled throughout
    for n in r_exp_list:  # iterate through a list of exponent values for r in newton's law of gravitation
        print(
            "current job: r**(-", n, ")"
        )  # when runtimes get long it's nice to know how far through you are
        # np.random.seed(1)
        # x0 = -2 + 4 * np.random.random((N_trajectories, 3))   # randomizes starting position for each trajectory
        # v0 = -1 + 2 * np.random.random((N_trajectories, 3))   # randomizes starting velocity for each trajectory
        # p0 = zip(x0,v0)                                       # iterates through and returns x0 and v0 pairs in one tuple, p0.  For ease in the list comprehension below.
        # arguments for simulate: simulate(position, velocity, k, m, n, step_size, num_steps):
        global x_t  # so that the animate function can still use this
        x_t = np.asarray(
            [
                backend.simulate(position, velocity, k, m, n, step_size,
                                 numsteps)
            ]
        )  # if you're outputting from your own function, make sure that it's an array of position vectors (also arrays!)
        # Set up figure & 3D axis for animation
        global fig  #
        fig = plt.figure()  #
        global ax  #
        ax = fig.add_axes([0, 0, 1, 1], projection='3d')  #
        ax.axis(
            'on'
        )  # sets the background axes "on".  Change to "off" if you want a more artistic/minimalistic viewing experience

        # choose a different color for each trajectory
        global colors
        colors = plt.cm.jet(np.linspace(0, 1, 1))

        # set up lines and points
        global lines
        lines = sum([ax.plot([], [], [], '-', c=c) for c in colors], [])
        global pts
        pts = sum([ax.plot([], [], [], 'o', c=c) for c in colors], [])

        # prepare the axes limits
        ax.set_xlim((-3, 3))
        ax.set_ylim((-3, 3))
        ax.set_zlim((-3, 3))

        # set point-of-view: specified by (altitude degrees, azimuth degrees)
        ax.view_init(90, 0)
        # instantiate the animator.
        start_time = time.time()
        anim = animation.FuncAnimation(fig,
                                       animate,
                                       init_func=init,
                                       frames=2000,
                                       interval=1,
                                       blit=True)
        print("anim_time is", time.time() - start_time)
        start_time = time.time()
        plt.show()
        mywriter = animation.FFMpegWriter(
            fps=60, bitrate=4000, extra_args=['-vcodec', 'libx264']
        )  # you have to install FFMpeg if you want to write out to mp4
        anim.save('single_orbit_n=' + str(n) + 'position_' + str(position) +
                  'velocity' + str(velocity) + 'timestep' + str(step_size) +
                  'numsteps' + str(numsteps) + '.mp4',
                  writer=mywriter)  # autogenerate filenames and export
        print("saved the file!")
        print("savetime is", time.time() - start_time)
        input()
        plt.close()
    print('combined runtime is', time.time() - first_time)
Example #17
def animate_tuning(fname_tuning_data,
                   fname_input_data,
                   fname_movie_out,
                   input_data_col=2,
                   time_mode="real",
                   fps=30,
                   label=True,
                   xlabel="Input"):
    """Generate a movie from the spike raster

    Parameters
    ----------
    fname_tuning_data: string
        tuning data filename
        first column is the input stimulus used to generate the tuning data
        remaining columns are the tuning data
    fname_input_data: string
        input data filename
        first column is sim time
        second column is real time
        subsequent columns are each inputs dimension
    fname_movie_out: string
        if string, filename of output movie
    input_data_col: int
        column of the input data file to animate
    time_mode: "real" or "sim"
        Whether to use real or simulation time
    fps: int
        frames per second
    label: boolean
        whether or not to label the tuning curves and display a legend
    xlabel: string
        x axis label
    """
    fig, ax = plot_tuning(fname_tuning_data,
                          show=False,
                          label=label,
                          xlabel=xlabel)
    ylim = ax.get_ylim()

    file_data = np.loadtxt(fname_input_data)
    sim_time = file_data[:, 0]
    real_time = file_data[:, 1]
    input_data = file_data[:, input_data_col]

    if time_mode == "real":
        time = real_time
    elif time_mode == "sim":
        time = sim_time

    n_frames = int(np.round(time[-1] * fps))
    time_idx = 0
    len_time = len(time)
    in_dat_line = ax.plot([input_data[time_idx], input_data[time_idx]],
                          [ylim[0], ylim[1]], 'r:')[0]
    ax.set_ylim(ylim)

    moviewriter = animation.FFMpegWriter(fps=fps)
    with moviewriter.saving(fig, fname_movie_out, dpi=100):
        for frame_idx in range(n_frames):
            curr_time = frame_idx / fps
            moved = False
            while time[time_idx] <= curr_time:
                time_idx += 1
                moved = True
            if moved:
                time_idx -= 1
            if time_idx < len_time:
                in_dat_line.set_data(
                    [input_data[time_idx], input_data[time_idx]],
                    [ylim[0], ylim[1]])
                moviewriter.grab_frame()
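A hypothetical call of animate_tuning, assuming tuning-curve and input text files in the layout described in the docstring (file names are placeholders):

animate_tuning("tuning.txt",
               "input.txt",
               "tuning_movie.mp4",
               input_data_col=2,
               time_mode="real",
               fps=30,
               xlabel="Input current")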
Example #18
def main(unused_argv):
    tic = time.time()
    inFile = Path(FLAGS.fileName).absolute()
    env = jeffRat.mocap()  #SET SKELETON/ENVIRONMENT for MOCAP HERE
    # data = parse(FLAGS.fileName, FLAGS.varName)  # Parse specified clip.
    data = parse(os.fspath(FLAGS.fileName),
                 FLAGS.varName)  # Parse specified clip.

    max_frame = min(FLAGS.max_frame, data.mocap_pos.shape[0])
    frame_step = int(data.fpsIn // FLAGS.fpsOut)
    max_num_frames = (max_frame - FLAGS.start_frame) // frame_step + 1
    qpos = np.zeros((max_num_frames, env.physics.data.qpos.size),
                    dtype=np.float64)
    qvel = np.zeros((max_num_frames, env.physics.data.qvel.size),
                    dtype=np.float64)
    qacc = np.zeros((max_num_frames, env.physics.data.qacc.size),
                    dtype=np.float64)

    xpos = np.zeros((max_num_frames, env.physics.data.xpos.shape[0],
                     env.physics.data.xpos.shape[1]),
                    dtype=np.float64)

    tendonLen = np.zeros((max_num_frames, env.physics.data.ten_length.size),
                         dtype=np.float64)
    tendonVel = np.zeros((max_num_frames, env.physics.data.ten_velocity.size),
                         dtype=np.float64)
    badFrame = []

    if FLAGS.play or FLAGS.record or not FLAGS.qOnly:
        # Set up formatting for the movie files
        video = np.zeros((max_num_frames, height, width, 3), dtype=np.uint8)
        if FLAGS.record:
            metadata = dict(title=FLAGS.model_filename + ': ' + FLAGS.fileName,
                            artist='Jeff Rhoades/Jesse Marshall/DeepMind',
                            comment=FLAGS.varName)
            writer = animation.FFMpegWriter(fps=FLAGS.fpsOut,
                                            metadata=metadata,
                                            bitrate=-1)

    print('Getting video......', end='')
    if FLAGS.silent:
        for i in range(FLAGS.start_frame, max_frame, frame_step):
            i1 = (i - FLAGS.start_frame) // frame_step
            if FLAGS.play or FLAGS.record or not FLAGS.qOnly:
                video[i1], qpos[i1], tendonLen[i1], tendonVel[i1], bF, qvel[
                    i1], qacc[i1], xpos[i1] = getFrame(data, env, i)
            else:
                qpos[i1], tendonLen[i1], tendonVel[i1], bF, qvel[i1], qacc[
                    i1], xpos[i1] = getJoints(data, env, i)
            badFrame.append(bF)
    else:
        with progressbar.ProgressBar(max_value=max_num_frames,
                                     poll_interval=5) as bar:
            for i in range(FLAGS.start_frame, max_frame, frame_step):
                i1 = (i - FLAGS.start_frame) // frame_step
                if FLAGS.play or FLAGS.record or not FLAGS.qOnly:
                    video[i1], qpos[i1], tendonLen[i1], tendonVel[
                        i1], bF, qvel[i1], qacc[i1], xpos[i1] = getFrame(
                            data, env, i)
                else:
                    qpos[i1], tendonLen[i1], tendonVel[i1], bF, qvel[i1], qacc[
                        i1], xpos[i1] = getJoints(data, env, i)
                badFrame.append(bF)
                bar.update(i1)
    print('...done.')
    toc = time.time() - tic
    vid_dt = max_num_frames // FLAGS.fpsOut
    print('%.2f x real speed' % (toc / vid_dt))
    print(str(sum(badFrame)) + ' bad frames.')

    if FLAGS.save:
        qnames = []
        qnames[:5] = [env.physics.named.data.qpos.axes[0].names[0]
                      ] * 6  #First 7 are pos and quaternion of root frame
        qnames.extend(env.physics.named.data.qpos.axes[0].names)

        tendonNames = env.physics.named.data.ten_length.axes[0].names

        out = dict()
        out['qpos'] = qpos
        out['qvel'] = qvel
        out['qacc'] = qacc
        out['xpos'] = xpos
        out['qnames'] = qnames
        out['tendonLen'] = tendonLen
        out['tendonVel'] = tendonVel
        out['tendonNames'] = tendonNames
        out['fpsIn'] = data.fpsIn
        out['fpsOut'] = FLAGS.fpsOut
        out['badFrame'] = badFrame
        out['mocap_pos'] = data.mocap_pos
        out['markerNames'] = data.bods
        out['model'] = FLAGS.model_filename
        out['medianPose'] = data.medianPose
        today = datetime.datetime.today()
        path = inFile.parents[1]
        modelFile = Path(FLAGS.model_filename)
        if FLAGS.play or FLAGS.record or not FLAGS.qOnly:
            out['vid'] = video

        if FLAGS.outName:
            outName = FLAGS.outName
        else:
            outName = os.fspath(
                Path(
                    path, "dataOutput", inFile.stem + "_via_" +
                    modelFile.stem + "_" + str(FLAGS.start_frame) + "_thru_" +
                    str(max_frame) + "_fps" + str(FLAGS.fpsOut) + "_" +
                    today.strftime('%Y%m%d_%H%M')))

        print('Saving Matlab file to ' + outName +
              '................................................')
        try:
            sio.savemat(outName, out, do_compression=True)
            print('done.')
        except Exception as err:
            print('failed: {}'.format(err))

    if FLAGS.play:
        fig = plt.figure()
        plt.waitforbuttonpress()
        ticky = time.time()
        for i in range(video.shape[0]):
            if i == 0:
                img = plt.imshow(video[i])
            else:
                img.set_data(video[i])
                fig.canvas.flush_events()
            clock_dt = time.time() - ticky
            ticky = time.time()
            # Real-time playback not always possible as clock_dt > .03
            plt.draw()
            time.sleep(max(0.01, 1 / FLAGS.fpsOut - clock_dt))
            # plt.pause(max(0.01, 1/fpsOut - clock_dt))  # Need min display time > 0.0.
        plt.waitforbuttonpress()

    if FLAGS.record:
        print('Saving mp4', end='')
        try:
            fig = plt.figure()
            img = plt.imshow(video[0])
            plt.rcParams[
                'animation.ffmpeg_path'] = r'C:\Users\RatControl\ffmpeg\bin\ffmpeg.exe'
            with writer.saving(fig, outName + '.mp4', dpi=FLAGS.dpi):
                for i in range(video.shape[0]):
                    img.set_data(video[i])
                    plt.draw()
                    writer.grab_frame()
                    print('.', end='')
            writer.finish()
            print('done.')
        except Exception as err:
            print('.......failed: {}'.format(err))
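# --- A minimal, self-contained sketch of the FFMpegWriter.saving pattern used
# in the FLAGS.record block above, stripped of the FLAGS plumbing. The frame
# array contents, figure setup and output name are placeholder assumptions.
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as animation

frames = np.random.randint(0, 255, size=(30, 120, 160, 3), dtype=np.uint8)
writer = animation.FFMpegWriter(fps=30, metadata=dict(title='demo'), bitrate=-1)
fig = plt.figure()
img = plt.imshow(frames[0])
plt.axis('off')
with writer.saving(fig, 'demo.mp4', dpi=100):
    for frame in frames:
        img.set_data(frame)
        writer.grab_frame()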
Example #19
# ===========================================================
for i in range(0, 50):
    u[0][i] = 1.0
for k in range(1, rows):
    u[k][0] = 1.0
f = 100
# ===========================================================
for m in range(1, rows):
    for n in range(1, cols - 1):
        # predictor
        ubar = u[m - 1][n] - 0.5 * h * ((u[m - 1][n + 1])**2 -
                                        (u[m - 1][n]**2))
        # corrector
        fbar = 0.5 * (ubar**2)
        fbarm = 0.5 * (u[m][n - 1]**2)
        u[m][n] = 0.5 * (u[m - 1][n] + ubar - h * (fbar - fbarm))
#ax.plot(u[100][0:101], x[0:101], c='k')
anim = animation.FuncAnimation(fig,
                               animate,
                               init_func=init,
                               interval=10,
                               frames=f)
mywriter = animation.FFMpegWriter(fps=10, codec="libx264")
anim.save('mac2.mp4', writer=mywriter)
plt.draw()
plt.show()
# FFMpegWriter block taken from:
# https://github.com/BV-DR/foamBazar/blob/master/pythonScripts/waveProbesPP.py
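# --- A minimal, self-contained sketch of the MacCormack predictor-corrector
# used above for the inviscid Burgers equation u_t + (u**2/2)_x = 0. The grid
# sizes (suggested by the commented-out plot of u[100][0:101]) and the step
# ratio h = dt/dx are assumptions, since the snippet omits its setup.
import numpy as np

rows, cols = 101, 101
h = 0.5                      # dt/dx, below the CFL limit for u <= 1
u = np.zeros((rows, cols))
u[0, :50] = 1.0              # step initial condition
u[:, 0] = 1.0                # inflow boundary

for m in range(1, rows):
    for n in range(1, cols - 1):
        # predictor: forward difference of the flux f = u**2 / 2
        ubar = u[m - 1, n] - 0.5 * h * (u[m - 1, n + 1]**2 - u[m - 1, n]**2)
        # corrector: backward difference using the already-updated neighbour
        fbar = 0.5 * ubar**2
        fbarm = 0.5 * u[m, n - 1]**2
        u[m, n] = 0.5 * (u[m - 1, n] + ubar - h * (fbar - fbarm))
# the last column keeps its initial value, as in the loop above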
    def render(self, mode="video", output_file=None):
        from matplotlib import animation
        import matplotlib.pyplot as plt

        # plt.rcParams['animation.ffmpeg_path'] = '/usr/bin/ffmpeg'
        x_offset = 0.2
        y_offset = 0.4
        cmap = plt.cm.get_cmap("hsv", 10)
        robot_color = "black"
        arrow_style = patches.ArrowStyle("->", head_length=4, head_width=2)
        display_numbers = True

        if mode == "traj":
            fig, ax = plt.subplots(figsize=(7, 7))
            ax.tick_params(labelsize=16)
            ax.set_xlim(-5, 5)
            ax.set_ylim(-5, 5)
            ax.set_xlabel("x(m)", fontsize=16)
            ax.set_ylabel("y(m)", fontsize=16)

            # add human start positions and goals
            human_colors = [cmap(i) for i in range(len(self.humans))]
            for i in range(len(self.humans)):
                human = self.humans[i]
                human_goal = mlines.Line2D(
                    [human.get_goal_position()[0]],
                    [human.get_goal_position()[1]],
                    color=human_colors[i],
                    marker="*",
                    linestyle="None",
                    markersize=15,
                )
                ax.add_artist(human_goal)
                human_start = mlines.Line2D(
                    [human.get_start_position()[0]],
                    [human.get_start_position()[1]],
                    color=human_colors[i],
                    marker="o",
                    linestyle="None",
                    markersize=15,
                )
                ax.add_artist(human_start)

            robot_positions = [
                self.states[i][0].position for i in range(len(self.states))
            ]
            human_positions = [
                [self.states[i][1][j].position for j in range(len(self.humans))]
                for i in range(len(self.states))
            ]

            for k in range(len(self.states)):
                if k % 4 == 0 or k == len(self.states) - 1:
                    robot = plt.Circle(
                        robot_positions[k],
                        self.robot.radius,
                        fill=False,
                        color=robot_color,
                    )
                    humans = [
                        plt.Circle(
                            human_positions[k][i],
                            self.humans[i].radius,
                            fill=False,
                            color=cmap(i),
                        )
                        for i in range(len(self.humans))
                    ]
                    ax.add_artist(robot)
                    for human in humans:
                        ax.add_artist(human)

                # add time annotation
                global_time = k * self.time_step
                if global_time % 4 == 0 or k == len(self.states) - 1:
                    agents = humans + [robot]
                    times = [
                        plt.text(
                            agents[i].center[0] - x_offset,
                            agents[i].center[1] - y_offset,
                            "{:.1f}".format(global_time),
                            color="black",
                            fontsize=14,
                        )
                        for i in range(self.human_num + 1)
                    ]
                    for time in times:
                        ax.add_artist(time)
                if k != 0:
                    nav_direction = plt.Line2D(
                        (self.states[k - 1][0].px, self.states[k][0].px),
                        (self.states[k - 1][0].py, self.states[k][0].py),
                        color=robot_color,
                        ls="solid",
                    )
                    human_directions = [
                        plt.Line2D(
                            (self.states[k - 1][1][i].px, self.states[k][1][i].px),
                            (self.states[k - 1][1][i].py, self.states[k][1][i].py),
                            color=cmap(i),
                            ls="solid",
                        )
                        for i in range(self.human_num)
                    ]
                    ax.add_artist(nav_direction)
                    for human_direction in human_directions:
                        ax.add_artist(human_direction)
            plt.legend([robot], ["Robot"], fontsize=16)
            plt.show()
        elif mode == "video":
            fig, ax = plt.subplots(figsize=(7, 7))
            ax.tick_params(labelsize=12)
            ax.set_xlim(-11, 11)
            ax.set_ylim(-11, 11)
            ax.set_xlabel("x(m)", fontsize=14)
            ax.set_ylabel("y(m)", fontsize=14)
            show_human_start_goal = False

            # add human start positions and goals
            human_colors = [cmap(i) for i in range(len(self.humans))]
            if show_human_start_goal:
                for i in range(len(self.humans)):
                    human = self.humans[i]
                    human_goal = mlines.Line2D(
                        [human.get_goal_position()[0]],
                        [human.get_goal_position()[1]],
                        color=human_colors[i],
                        marker="*",
                        linestyle="None",
                        markersize=8,
                    )
                    ax.add_artist(human_goal)
                    human_start = mlines.Line2D(
                        [human.get_start_position()[0]],
                        [human.get_start_position()[1]],
                        color=human_colors[i],
                        marker="o",
                        linestyle="None",
                        markersize=8,
                    )
                    ax.add_artist(human_start)
            # add robot start position
            robot_start = mlines.Line2D(
                [self.robot.get_start_position()[0]],
                [self.robot.get_start_position()[1]],
                color=robot_color,
                marker="o",
                linestyle="None",
                markersize=8,
            )
            robot_start_position = [
                self.robot.get_start_position()[0],
                self.robot.get_start_position()[1],
            ]
            ax.add_artist(robot_start)
            # add robot and its goal
            robot_positions = [state[0].position for state in self.states]
            goal = mlines.Line2D(
                [self.robot.get_goal_position()[0]],
                [self.robot.get_goal_position()[1]],
                color=robot_color,
                marker="*",
                linestyle="None",
                markersize=15,
                label="Goal",
            )
            robot = plt.Circle(
                robot_positions[0], self.robot.radius, fill=False, color=robot_color
            )
            # sensor_range = plt.Circle(robot_positions[0], self.robot_sensor_range, fill=False, ls='dashed')
            ax.add_artist(robot)
            ax.add_artist(goal)
            plt.legend([robot, goal], ["Robot", "Goal"], fontsize=14)

            # add humans and their numbers
            human_positions = [
                [state[1][j].position for j in range(len(self.humans))]
                for state in self.states
            ]
            humans = [
                plt.Circle(
                    human_positions[0][i],
                    self.humans[i].radius,
                    fill=False,
                    color=cmap(i),
                )
                for i in range(len(self.humans))
            ]

            # optionally show a number next to each human
            if display_numbers:
                human_numbers = [
                    plt.text(
                        humans[i].center[0] - x_offset,
                        humans[i].center[1] + y_offset,
                        str(i),
                        color="black",
                    )
                    for i in range(len(self.humans))
                ]

            for i, human in enumerate(humans):
                ax.add_artist(human)
                if display_numbers:
                    ax.add_artist(human_numbers[i])

            # add time annotation
            time = plt.text(
                0.4, 0.9, "Time: {}".format(0), fontsize=16, transform=ax.transAxes
            )
            ax.add_artist(time)

            # visualize attention scores
            # if hasattr(self.robot.policy, 'get_attention_weights'):
            #     attention_scores = [
            #         plt.text(-5.5, 5 - 0.5 * i, 'Human {}: {:.2f}'.format(i + 1, self.attention_weights[0][i]),
            #                  fontsize=16) for i in range(len(self.humans))]

            # compute orientation in each step and use arrow to show the direction
            radius = self.robot.radius
            orientations = []
            for i in range(self.human_num + 1):
                orientation = []
                for state in self.states:
                    agent_state = state[0] if i == 0 else state[1][i - 1]
                    if self.robot.kinematics == "unicycle" and i == 0:
                        direction = (
                            (agent_state.px, agent_state.py),
                            (
                                agent_state.px + radius * np.cos(agent_state.theta),
                                agent_state.py + radius * np.sin(agent_state.theta),
                            ),
                        )
                    else:
                        theta = np.arctan2(agent_state.vy, agent_state.vx)
                        direction = (
                            (agent_state.px, agent_state.py),
                            (
                                agent_state.px + radius * np.cos(theta),
                                agent_state.py + radius * np.sin(theta),
                            ),
                        )
                    orientation.append(direction)
                orientations.append(orientation)
                if i == 0:
                    arrow_color = "black"
                    arrows = [
                        patches.FancyArrowPatch(
                            *orientation[0], color=arrow_color, arrowstyle=arrow_style
                        )
                    ]
                else:
                    arrows.extend(
                        [
                            patches.FancyArrowPatch(
                                *orientation[0],
                                color=human_colors[i - 1],
                                arrowstyle=arrow_style
                            )
                        ]
                    )

            for arrow in arrows:
                ax.add_artist(arrow)
            global_step = 0

            if len(self.trajs) != 0:
                human_future_positions = []
                human_future_circles = []
                for traj in self.trajs:
                    human_future_position = [
                        [
                            tensor_to_joint_state(traj[step + 1][0])
                            .human_states[i]
                            .position
                            for step in range(self.robot.policy.planning_depth)
                        ]
                        for i in range(self.human_num)
                    ]
                    human_future_positions.append(human_future_position)

                for i in range(self.human_num):
                    circles = []
                    for j in range(self.robot.policy.planning_depth):
                        circle = plt.Circle(
                            human_future_positions[0][i][j],
                            self.humans[0].radius / (1.7 + j),
                            fill=False,
                            color=cmap(i),
                        )
                        ax.add_artist(circle)
                        circles.append(circle)
                    human_future_circles.append(circles)

            def update(frame_num):
                nonlocal global_step
                nonlocal arrows
                global_step = frame_num
                robot.center = robot_positions[frame_num]

                for i, human in enumerate(humans):
                    human.center = human_positions[frame_num][i]
                    if display_numbers:
                        human_numbers[i].set_position(
                            (human.center[0] - x_offset, human.center[1] + y_offset)
                        )
                for arrow in arrows:
                    arrow.remove()

                for i in range(self.human_num + 1):
                    orientation = orientations[i]
                    if i == 0:
                        arrows = [
                            patches.FancyArrowPatch(
                                *orientation[frame_num],
                                color="black",
                                arrowstyle=arrow_style
                            )
                        ]
                    else:
                        arrows.extend(
                            [
                                patches.FancyArrowPatch(
                                    *orientation[frame_num],
                                    color=cmap(i - 1),
                                    arrowstyle=arrow_style
                                )
                            ]
                        )

                for arrow in arrows:
                    ax.add_artist(arrow)
                    # if hasattr(self.robot.policy, 'get_attention_weights'):
                    #     attention_scores[i].set_text('human {}: {:.2f}'.format(i, self.attention_weights[frame_num][i]))

                time.set_text("Time: {:.2f}".format(frame_num * self.time_step))

                if len(self.trajs) != 0:
                    for i, circles in enumerate(human_future_circles):
                        for j, circle in enumerate(circles):
                            circle.center = human_future_positions[global_step][i][j]

            def plot_value_heatmap():
                if self.robot.kinematics != "holonomic":
                    print("Kinematics is not holonomic")
                    return
                # for agent in [self.states[global_step][0]] + self.states[global_step][1]:
                #     print(('{:.4f}, ' * 6 + '{:.4f}').format(agent.px, agent.py, agent.gx, agent.gy,
                #                                              agent.vx, agent.vy, agent.theta))

                # when any key is pressed draw the action value plot
                fig, axis = plt.subplots()
                speeds = [0] + self.robot.policy.speeds
                rotations = self.robot.policy.rotations + [np.pi * 2]
                r, th = np.meshgrid(speeds, rotations)
                z = np.array(self.action_values[global_step % len(self.states)][1:])
                z = (z - np.min(z)) / (np.max(z) - np.min(z))
                z = np.reshape(
                    z,
                    (
                        self.robot.policy.rotation_samples,
                        self.robot.policy.speed_samples,
                    ),
                )
                polar = plt.subplot(projection="polar")
                polar.tick_params(labelsize=16)
                mesh = plt.pcolormesh(th, r, z, vmin=0, vmax=1)
                plt.plot(rotations, r, color="k", ls="none")
                plt.grid()
                cbaxes = fig.add_axes([0.85, 0.1, 0.03, 0.8])
                cbar = plt.colorbar(mesh, cax=cbaxes)
                cbar.ax.tick_params(labelsize=16)
                plt.show()

            def print_matrix_A():
                # with np.printoptions(precision=3, suppress=True):
                #     print(self.As[global_step])
                h, w = self.As[global_step].shape
                print("   " + " ".join(["{:>5}".format(i - 1) for i in range(w)]))
                for i in range(h):
                    print(
                        "{:<3}".format(i - 1)
                        + " ".join(
                            [
                                "{:.3f}".format(self.As[global_step][i][j])
                                for j in range(w)
                            ]
                        )
                    )
                # with np.printoptions(precision=3, suppress=True):
                #     print('A is: ')
                #     print(self.As[global_step])

            def print_feat():
                with np.printoptions(precision=3, suppress=True):
                    print("feat is: ")
                    print(self.feats[global_step])

            def print_X():
                with np.printoptions(precision=3, suppress=True):
                    print("X is: ")
                    print(self.Xs[global_step])

            def on_click(event):
                if anim.running:
                    anim.event_source.stop()
                    if event.key == "a":
                        if hasattr(self.robot.policy, "get_matrix_A"):
                            print_matrix_A()
                        if hasattr(self.robot.policy, "get_feat"):
                            print_feat()
                        if hasattr(self.robot.policy, "get_X"):
                            print_X()
                        # if hasattr(self.robot.policy, 'action_values'):
                        #    plot_value_heatmap()
                else:
                    anim.event_source.start()
                anim.running ^= True

            fig.canvas.mpl_connect("key_press_event", on_click)
            anim = animation.FuncAnimation(
                fig, update, frames=len(self.states), interval=self.time_step * 500
            )
            anim.running = True

            if output_file is not None:
                # save as video
                ffmpeg_writer = animation.FFMpegWriter(
                    fps=10, metadata=dict(artist="Me"), bitrate=1800
                )
                # writer = ffmpeg_writer(fps=10, metadata=dict(artist='Me'), bitrate=1800)
                anim.save(output_file, writer=ffmpeg_writer)

                # save output file as gif if imagemagic is installed
                # anim.save(output_file, writer='imagemagic', fps=12)
            else:
                plt.show()
        else:
            raise NotImplementedError
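# --- A minimal, self-contained sketch of the pause/resume pattern used in
# on_click above: toggle a FuncAnimation with a key press via its
# event_source. The animated content (a drifting sine wave) is an assumption
# used only for illustration.
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import animation

fig, ax = plt.subplots()
x = np.linspace(0, 2 * np.pi, 200)
(line,) = ax.plot(x, np.sin(x))

def _update(frame):
    line.set_ydata(np.sin(x + 0.1 * frame))
    return (line,)

demo_anim = animation.FuncAnimation(fig, _update, frames=200, interval=30)
demo_anim.running = True

def _on_key(event):
    # any key press toggles the animation on or off
    if demo_anim.running:
        demo_anim.event_source.stop()
    else:
        demo_anim.event_source.start()
    demo_anim.running ^= True

fig.canvas.mpl_connect("key_press_event", _on_key)
plt.show()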
elif plot_histograms:
    Writer = animation.writers['ffmpeg']
    writer = Writer(fps=15, metadata=dict(artist='Me'), bitrate=1800)

    fname = f'./Figures/KDE_Histograms_bp_distribution_BN(AFTER_RELU)_H={H}_T={NumIters}_lr={learning_rate}_N={N}_Activation={activationFunc}_Initialization = {initFunc}_minibatch = {mini_batch_option}_bin_width = 0.1_2'
    ani.save(fname + '.mp4', writer=writer)
    show()

elif plot_loss:
    fname = f'./Figures/semigLog_BN(before RELU)_Training_loss_vs_time_fitted_nn_snapshots_H={H}_T={NumIters}_lr={learning_rate}_N={N}_Activation={activationFunc}_Initialization = {initFunc}_minibatch = {mini_batch_option}.pdf'
    xlabel('Epoch')
    ylabel('Loss')

elif makeVid:
    mywriter = animation.FFMpegWriter(fps=10,
                                      metadata=dict(artist='Me'),
                                      bitrate=18000)
    # fname = f'./Figures/Animated_curve_fitted_nn_snapshots_H={H}_T={NumIters}_lr={learning_rate}_N={N}_Activation={activationFunc}_Initialization = {initFunc}_minibatch = {mini_batch_option}_'+tag + f'percent_prune= {percent}'
    fname = './Figures/' + tag  #change later
    ani = animation.ArtistAnimation(fig,
                                    ims,
                                    interval=50,
                                    blit=True,
                                    repeat_delay=1000)
    ani.save(fname + '.mp4', writer=mywriter)
    # print(fname)

fname = './Figures/' + tag
if not makeVid:
    savefig(fname)
print(fname)
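# --- A minimal sketch of how an `ims` frame list for ArtistAnimation is
# typically built; the makeVid branch above assumes `ims` already exists.
# The plotted data and output name here are assumptions for illustration.
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import animation

demo_fig, demo_ax = plt.subplots()
x = np.linspace(0, 2 * np.pi, 200)
ims_demo = []
for k in range(60):
    # each frame is a list of the artists drawn for that frame
    (frame_line,) = demo_ax.plot(x, np.sin(x + 0.1 * k), color='tab:blue')
    ims_demo.append([frame_line])

demo_ani = animation.ArtistAnimation(demo_fig, ims_demo, interval=50,
                                     blit=True, repeat_delay=1000)
demo_ani.save('artist_demo.mp4', writer=animation.FFMpegWriter(fps=10))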
Example #22
    def saveAnimationToFile(self):

        self.writer = anim.FFMpegWriter(fps=15, bitrate=5000)
        self.animHandler.save("t.mp4", writer=self.writer)
        return
Example #23
        ax2.figure.canvas.draw()

    ymin, ymax = ax2.get_ylim()

    if history['cost'][i] >= ymax:
        ax2.set_ylim(ymin, history['cost'][i] + 0.5)
        ax2.figure.canvas.draw()
    elif history['cost'][i] < ymin:
        ax2.set_ylim(ymin / 2, ymax)
        ax2.figure.canvas.draw()

    line1.set_data(X, prediction)
    line2.set_data(x_cost, y_cost)

    return line1, line2


ani = animation.FuncAnimation(fig,
                              animate,
                              init_func=init,
                              frames=len(y_pred),
                              blit=True,
                              interval=1,
                              repeat=False)

# saves the animation as .mp4 (takes time, comment if needed)
mywriter = animation.FFMpegWriter(fps=60)
ani.save(f'{name} Gradient Descent.mp4', writer=mywriter)
# show the animation
plt.show()
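# --- A self-contained sketch of the "grow the y-limits as the data grows"
# pattern used in the truncated animate() above; the data (a random walk) is
# an assumption used only for illustration.
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import animation

rng = np.random.default_rng(1)
y = np.cumsum(rng.standard_normal(200))
demo_fig, demo_ax = plt.subplots()
(demo_line,) = demo_ax.plot([], [])
demo_ax.set_xlim(0, len(y))
demo_ax.set_ylim(-1, 1)

def demo_animate(i):
    ymin, ymax = demo_ax.get_ylim()
    # expand the y-limits whenever the new point falls outside them
    if y[i] >= ymax:
        demo_ax.set_ylim(ymin, y[i] + 0.5)
        demo_ax.figure.canvas.draw()
    elif y[i] < ymin:
        demo_ax.set_ylim(y[i] - 0.5, ymax)
        demo_ax.figure.canvas.draw()
    demo_line.set_data(np.arange(i + 1), y[:i + 1])
    return (demo_line,)

demo_ani = animation.FuncAnimation(demo_fig, demo_animate, frames=len(y),
                                   interval=20, blit=True, repeat=False)
plt.show()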
Example #24
def run_odex_convection_2d(config, do_plot, outfile=None):
    print('odex: 2D convection...')

    # PDE initial data
    npoints = 64
    xx = np.linspace(0,npoints-1,npoints, dtype=np.float64)
    xx,yy = np.meshgrid(xx,xx)
    u0 = np.exp(-60*((xx/npoints-.5)**2+(yy/npoints-.5)**2))

    # PDE and solver parameters
    c  = [0.5, 0.25]     # Wave speed
    k  = 1.              # Unit grid spacing
    t0 = 0.              # Simulation start time
    nsteps = 2048        # Number of time steps
    spectral = True

    # Movie writer setup
    if outfile is not None:
        moviewriter = anim.FFMpegWriter(fps=30)
        fig = plt.figure()
        ax = fig.gca(projection='3d')
        moviewriter.setup(fig, outfile, dpi=100)

        def observer(t,state):
            ax.clear()
            ax.plot_wireframe(xx, yy, state)
            ax.set_zlim(0,1)
            moviewriter.grab_frame()
    else:
        observer = None

    # Spectral gradient helper
    def islope():
        n = len(u0)
        kn = 2. * np.pi / n / k
        iw = 1j * kn * np.array(
            list(range(0, int(n / 2) + 1)) + list(range(-int(n / 2) + 1, 0)))
        return np.meshgrid(iw, iw)
    ikx, iky = islope()

    def spectral_gradient(u, k):
        U = np.fft.fft2(u)
        return np.real(np.fft.ifft2(ikx*U)), np.real(np.fft.ifft2(iky*U))

    def central_difference(u, k):
        n  = len(u)
        ux = np.empty(np.shape(u))
        uy = np.empty(np.shape(u))

        ux[:,1:n-1] = (u[:,2:n]-u[:,:n-2])/(2*k)
        ux[:,0    ] = (u[:,1  ]-u[:, n-1])/(2*k)
        ux[:,n-1  ] = (u[:,0  ]-u[:, n-2])/(2*k)

        uy[1:n-1,:] = (u[2:n,:]-u[:n-2,:])/(2*k)
        uy[0,    :] = (u[1,  :]-u[ n-1,:])/(2*k)
        uy[n-1,  :] = (u[0,  :]-u[ n-2,:])/(2*k)

        return ux, uy

    if spectral:
        derivfn = spectral_gradient
    else:
        derivfn = central_difference

    def gradient(u, k):
        return derivfn(u, k)

    # PDE system: transport
    def system(t, u):
        ux,uy = gradient(u, k)
        ut = -c[0]*ux-c[1]*uy
        return ut

    # Construct the extrapolation stepper
    stepper = odex.make_extrapolation_stepper(system, u0, config=config, parallel=True)

    # Set up maximum time step size
    hmax = k*(max(stepper.stepcounts)+1)*stepper.isbn/(2*max(c))
    if spectral: hmax = hmax/np.pi
    dt = hmax*.99

    # Step the system once to ensure one-time setup completes before profiling
    stepper.step(u0, t0, dt, 1, dense_output=None, observer=None)

    # Solve the system, profiling
    start = time.time()
    un = stepper.step(u0, t0, dt, nsteps, dense_output=None, observer=observer)
    duration = time.time()-start
    stepper.join()

    if outfile is not None:
        moviewriter.finish()
        plt.close()

    # Compute the error, print the results
    print('  duration: {}'.format(duration))

    # Compute the mean evaluation time of the PDE system
    iters = 100
    mean_eval_time = 0
    for ii in range(iters):
        start = time.time()
        system(t0,u0)
        mean_eval_time += time.time()-start
    mean_eval_time /= iters

    # Print the profiling results
    estimated_duration = nsteps*np.sum(np.array(stepper.stepcounts)+1)*mean_eval_time
    print('  mean eval duration: {}'.format(mean_eval_time))
    print('  estimated duration: {}'.format(estimated_duration))
    print('  efficiency: {0:.2f}%'.format(100*estimated_duration/duration))

    # Plot initial and final transport results
    if do_plot:
        fig = plt.figure()
        ax = fig.gca(projection='3d')
        ax.plot_wireframe(xx, yy, u0)

        fig = plt.figure()
        ax = fig.gca(projection='3d')
        ax.plot_wireframe(xx, yy, un)

        plt.grid()
        plt.show()

    return duration
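# --- A quick, self-contained check of the spectral-gradient idea used in
# run_odex_convection_2d: differentiate sin(x) on a periodic grid via the FFT
# and compare with cos(x). The grid size and domain are assumptions.
import numpy as np

n = 64
L = 2 * np.pi
x = np.arange(n) * (L / n)
u = np.sin(x)
ik = 1j * 2 * np.pi * np.fft.fftfreq(n, d=L / n)   # i * wavenumber
du = np.real(np.fft.ifft(ik * np.fft.fft(u)))
assert np.allclose(du, np.cos(x), atol=1e-10)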
Example #25
from scipy import ndimage
import matplotlib.pyplot as plt
from matplotlib import animation


def next_gen(arr):
    """
    This function applies Conway's rules and creates the next generation.
    """
    # sum of the eight neighbours of each cell
    sum_of_neighbors = ndimage.convolve(grid, kernel, mode='constant')
    # a dead cell with exactly three live neighbours becomes alive
    born = (sum_of_neighbors == 3) & (grid == 0)
    # a live cell dies unless it has exactly two or three live neighbours
    die = ((sum_of_neighbors < 2) | (sum_of_neighbors > 3)) & (grid == 1)
    # apply the rules to the grid
    grid[born] = 1
    grid[die] = 0
    # show the numpy array holding the state of the environment
    ax.imshow(grid, cmap='viridis')
    ax.grid(False)


anim = animation.FuncAnimation(fig, next_gen, interval=10,
                               frames=100)  #animates the game
plt.xticks([])
plt.yticks([])
plt.title('Conway\'s Game Of Life \n Programmed by Sydney Riemer')
plt.show()
FFwriter = animation.FFMpegWriter(fps=5)
f = r'C:\Users\sydne\Desktop\Riemer_Assignment4_P1.avi'
anim.save(f, writer=FFwriter)
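# --- The function above relies on `grid`, `kernel`, `fig` and `ax`, which the
# snippet does not show; a plausible setup (board size and live-cell density
# are assumptions) would look like this:
import numpy as np
import matplotlib.pyplot as plt

rng = np.random.default_rng(0)
grid = (rng.random((100, 100)) < 0.2).astype(int)   # random initial state
kernel = np.array([[1, 1, 1],
                   [1, 0, 1],    # zero centre: count only the eight neighbours
                   [1, 1, 1]])
fig, ax = plt.subplots()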
    def run_simulation(self):
        self.people = [
            Person('Normal', self.total_size, self.recover_period,
                   self.mortality) for i in range(self.population)
        ]

        if self.social_spacing:
            people_ss = self.people[-self.social_spacing:]

            for person in self.people:
                if person in people_ss:
                    person.social_space()

        self.people[0].contaminate()

        self.fig = plt.figure()
        self.ax = plt.axes(xlim=(0, self.total_size),
                           ylim=(0, self.total_size))
        plt.xticks([])
        plt.yticks([])

        self.d_sick, = self.ax.plot([
            person.position_x
            for person in self.people if person.status == 'Sick'
        ], [
            person.position_y
            for person in self.people if person.status == 'Sick'
        ], 'ro')
        self.d_recovered, = self.ax.plot([
            person.position_x
            for person in self.people if person.status == 'Recovered'
        ], [
            person.position_y
            for person in self.people if person.status == 'Recovered'
        ], 'yo')
        self.d_healthy, = self.ax.plot([
            person.position_x
            for person in self.people if person.status == 'Healthy'
        ], [
            person.position_y
            for person in self.people if person.status == 'Healthy'
        ], 'bo')
        self.d_dead, = self.ax.plot([
            person.position_x
            for person in self.people if person.status == 'Dead'
        ], [
            person.position_y
            for person in self.people if person.status == 'Dead'
        ], 'ko')
        self.d = [self.d_sick, self.d_healthy, self.d_recovered]

        sick = len(self.d_sick.get_data()[0])
        healthy = len(self.d_healthy.get_data()[0])
        recovered = len(self.d_recovered.get_data()[0])
        dead = len(self.d_dead.get_data()[0])

        self.data = []
        self.data.append([sick, healthy, recovered, dead])

        self.end_animation = False

        def animate(i):
            # self.people = contamination(self.people,self.contamination_rate)
            self.contamination()
            for person in self.people:
                person.update_position()

            if self.end_animation:
                plt.close()

            if len([x for x in self.people if x.is_contaminated()]) == 0:
                for person in self.people:
                    person.stop()
                self.end_animation = True

            self.d_sick.set_data([
                person.position_x
                for person in self.people if person.status == 'Sick'
            ], [
                person.position_y
                for person in self.people if person.status == 'Sick'
            ])
            self.d_recovered.set_data(
                [
                    person.position_x
                    for person in self.people if person.status == 'Recovered'
                ],
                [
                    person.position_y
                    for person in self.people if person.status == 'Recovered'
                ],
            )
            self.d_healthy.set_data([
                person.position_x
                for person in self.people if person.status == 'Healthy'
            ], [
                person.position_y
                for person in self.people if person.status == 'Healthy'
            ])
            self.d_dead.set_data([
                person.position_x
                for person in self.people if person.status == 'Dead'
            ], [
                person.position_y
                for person in self.people if person.status == 'Dead'
            ])

            sick = len(self.d_sick.get_data()[0])
            healthy = len(self.d_healthy.get_data()[0])
            recovered = len(self.d_recovered.get_data()[0])
            dead = len(self.d_dead.get_data()[0])

            self.data.append([sick, healthy, recovered, dead])

            print(
                '{} -> Sick: {}, Healthy: {}, Recovered: {}, Dead: {}, Probability: {}'
                .format(i, sick, healthy, recovered, dead,
                        (3 * dead + sick) / (self.population)))

            return self.d,

        anim = FuncAnimation(self.fig, animate, frames=10000, interval=20)

        if self.output_filename:
            mywriter = animation.FFMpegWriter(fps=60)
            try:
                output_file = self.output_filename + '_SimulationVideo.mp4'
                anim.save(output_file, writer=mywriter)
            except Exception as err:
                print('An error occurred while trying to save the '
                      'simulation video: {}'.format(err))

        plt.show()
Example #27
#!/usr/bin/python3

import scipy
from scipy.io import wavfile
import numpy as np
import utils

import matplotlib.pyplot as plt
import matplotlib.animation as animation

FPS = 30
plt.style.use('dark_background')
fig, ax = plt.subplots()
fw = animation.FFMpegWriter(FPS)
fig.set_size_inches(192, 27)

sample_rate, data = wavfile.read('alone.wav')
#mono = (data.T[0] + data.T[1]) / 2.0 # average stereo into mono
left = data.T[0]
right = data.T[1]
secs = int(len(left) / sample_rate)+1
spf = int(sample_rate / FPS)
q = sample_rate / spf

with fw.saving(fig, "plot2.mp4", 14):
    for frame in range(secs * FPS):
        print(frame)
        ax.clear()
        ax.axis('off')
        ax.set_ylim(-30000, 30000)
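        # (the scraped snippet is cut off here; a plausible continuation,
        # an assumption rather than the original code, would plot this
        # frame's chunk of samples for each channel and grab the frame)
        start = frame * spf
        ax.plot(left[start:start + spf], color='cyan')
        ax.plot(right[start:start + spf], color='magenta')
        fw.grab_frame()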
Example #28
import os
import time

import numpy as np
import scipy as sp
import scipy.linalg
from matplotlib import pyplot as plt
from matplotlib import animation

from utilities.stack_decoder import decode_cp
from utilities.cpHMM_BW import cpEM_BW

#Set Paths
root = "../results"
folder = "method_validation"
subfolder = "BW_PDF_Animation"

outpath = os.path.join(root, folder, subfolder)

plt.rcParams[
    'animation.ffmpeg_path'] = 'C:\\Users\\Nicholas\\Downloads\\ffmpeg\\ffmpeg\\bin\\ffmpeg.exe'
FFwriter = animation.FFMpegWriter()

out_name = 'bw_test'

# memory
w = 4
# Fix trace length for now
T = 200
# Number of traces per batch
batch_size = 20
R = np.array([[-.008, .009, .01],
              [.006, -.014, .025],
              [.002, .005, -.035]]) * 10.2
A = sp.linalg.expm(R)
print(A)
pi = [.2, .3, .5]
v = np.array([0.0, 25.0, 100.0])
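# Quick sanity check (an illustration added here, not part of the original
# script): the columns of R sum to zero, so expm(R) is a valid transition
# matrix whose columns sum to one.
print(np.allclose(R.sum(axis=0), 0.0))   # True
print(np.allclose(A.sum(axis=0), 1.0))   # True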
    ims[1].set_data(E_vel[0].T)
    ims[2].set_data(E_acc[0].T)
    ims[3].set_data(E_total[1].T)
    ims[4].set_data(E_vel[1].T)
    ims[5].set_data(E_acc[1].T)
    ims[6].set_data(E_total[2].T)
    ims[7].set_data(E_vel[2].T)
    ims[8].set_data(E_acc[2].T)
    ims[9].set_data(B_total[0].T)
    ims[10].set_data(B_vel[0].T)
    ims[11].set_data(B_acc[0].T)
    ims[12].set_data(B_total[1].T)
    ims[13].set_data(B_vel[1].T)
    ims[14].set_data(B_acc[1].T)
    ims[15].set_data(B_total[2].T)
    ims[16].set_data(B_vel[2].T)
    ims[17].set_data(B_acc[2].T)
    return ims,


def _init_animate():
    """Necessary for matplotlib animate."""
    pass


dt = 2*np.pi/charges[0].w/24
ani = FuncAnimation(fig, _update_animation, interval=1000/24,
                    frames=240, blit=False, init_func=_init_animate)
ani.save('Animations/'+savename+'.mp4',
         writer=animation.FFMpegWriter(fps=24), dpi=500)
#### initialize
## grid initialized with A = 1, B = 0 & small area B = 1
## Laplacian with 3x3 convolution with: 
## --> center weight: -1
## --> adjacent neighbors: .2
## --> diagonals = .05

# values for kernel
w_c = -1
w_a = 0.2 
w_d = 0.05

# kernel 
laplace_kernel = np.asarray([[w_d, w_a, w_d], [w_a, w_c, w_a], [w_d, w_a, w_d]])

# starting matrices
A, B = starting_mat2(N = N)

# run simulations 
ims = update_n2(A, B, laplace_kernel, t = t, n_sim = n_sim, N = N)

# show animation
ani = animation.ArtistAnimation(fig, ims, interval=50, blit=True,
                                repeat_delay=300)

writervideo = animation.FFMpegWriter(fps=20)
ani.save(filename = "anims/test3.mp4", writer=writervideo)

# plt show
#plt.show()
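# --- A small, self-contained check of the 3x3 stencil described above: the
# weights sum to zero and the kernel acts as a scaled (x 0.3) discrete
# Laplacian. The quadratic test field is an assumption for illustration;
# starting_mat2 / update_n2 are not shown in the snippet.
import numpy as np
from scipy import ndimage

test_kernel = np.array([[0.05, 0.2, 0.05],
                        [0.2, -1.0, 0.2],
                        [0.05, 0.2, 0.05]])
field = np.fromfunction(lambda i, j: (i - 16.0)**2 + (j - 16.0)**2, (33, 33))
lap = ndimage.convolve(field, test_kernel, mode='nearest')
# the continuous Laplacian of x**2 + y**2 is 4; away from the boundary the
# stencil gives 0.3 * 4 = 1.2
print(lap[16, 16])   # ~1.2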