Example #1
from lib.handle import Handle

from lorenz import Lorenz

l = Lorenz(params=(40, 16, 4), size=3)
h = Handle(l)
l_123 = h.lyapunov_exponent_123()   # presumably the three Lyapunov exponents of the system
print(l_123)
h.graph()
Example #2
import numpy
import pylab

from numpy import asarray

from lorenz import Lorenz

l = Lorenz(10.0, 28.0, 8.0/3.0)
dtraj, traj = [], []
DT, NSTEP = 0.01, 1000
dx, dy, dz = 0.0, 10.0, 10.0   # initial tangent-linear perturbation
x, y, z = 0.0, 10.0, 10.0      # initial state

dtraj.append((dx, dy, dz))
traj.append((x, y, z))
# March the base trajectory forward, carrying the tangent-linear
# perturbation (dx, dy, dz) alongside it.
for i in range(NSTEP):
   dx, dy, dz = l.diff(x, y, z, dx, dy, dz, DT)
   x, y, z = l.step(x, y, z, DT)
   dtraj.append((dx, dy, dz))
   traj.append((x, y, z))

# Integrate the adjoint variables backwards along the stored trajectory.
trajadj = []
xadj, yadj, zadj = 0.0, 10.0, 10.0
trajadj.append((xadj, yadj, zadj))
for i in range(NSTEP - 1, -1, -1):
   x, y, z = traj[i]
   xadj, yadj, zadj = l.adj(x, y, z, xadj, yadj, zadj, DT)
   trajadj.append((xadj, yadj, zadj))
trajadj.reverse()

# Per-step inner product of the adjoint and tangent trajectories.
aggr = (asarray(trajadj) * asarray(dtraj)).sum(axis=1)
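If l.adj implements the exact transpose of the tangent step l.diff, this inner product is conserved from step to step, so every entry of aggr should agree to rounding. A quick check, using only the arrays already computed above:

# The spread across entries measures how closely l.adj matches the
# transpose of l.diff; for an exact adjoint it is near machine precision.
print(aggr.min(), aggr.max(), aggr.max() - aggr.min())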
Example #3
from lorenz import Lorenz
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D

A = Lorenz([-1,1,0])
u1 = A.solve(50,.01)
B = Lorenz([-1.001,1.001,0.001])
u2 = B.solve(50,.01)


print('---------------------------------------------------------------')
print('',u1,'\n',u2)
print('---------------------------------------------------------------')
print(u1[0][0],u2[0][0])
print(u1[-1][0],u2[-1][0])



fig = plt.figure()
ax = fig.add_subplot(projection='3d')  # fig.gca(projection='3d') no longer works in Matplotlib >= 3.6
ax.set_xlabel("X Axis")
ax.set_ylabel("Y Axis")
ax.set_zlabel("Z Axis")
ax.set_title("Lorenz Attractor")
ax.plot(u1[:,0], u1[:,1], u1[:,2])   # plot on the 3D axes directly
ax.plot(u2[:,0], u2[:,1], u2[:,2])
plt.show()
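Because the two runs start only 1e-3 apart, the diverging endpoints printed above illustrate sensitive dependence on initial conditions. A short follow-up sketch, assuming (as the slicing above implies) that solve returns an (N, 3) NumPy array sampled every 0.01 time units, plots the separation on a log scale; the slope of the early, roughly linear segment approximates the leading Lyapunov exponent (about 0.9 for the classical parameters):

import numpy as np

sep = np.linalg.norm(u1 - u2, axis=1)   # distance between the two runs at each step
t = 0.01 * np.arange(len(sep))          # time axis matching the solver step

plt.semilogy(t, sep)
plt.xlabel("t")
plt.ylabel("|u1(t) - u2(t)|")
plt.show()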
Example #4
import numpy as np
import pickle
from lorenz import Lorenz
import matplotlib.pyplot as plt
from mpl_toolkits import mplot3d
"""   Initialize lorenz object by loading parameters from the training data file   """
with open("learning_algorithm2_training_data", "rb") as f:
    d = pickle.load(f)
sigma = d['sigma']
b = d['b']
r = d['r']
lrz = Lorenz(sigma, b, r)
lrz.X = d['X']
lrz.U = d['U']
"""  
check robustness of trained control
generates n trajectories and store their start and end state
if the end state lies within a ball of radius 0.14 of the desired state,
control objective is achieved and documented as a 1 in the task. otherwise it is documented as 0.
sum of all the tasks divided by number of tasks gives the accuracy of the learning algorithm
"""
m = 1000  # number of trajectories to generate for checking accuracy of learned algorithm
n = 1000  # number of time steps for each trajectory
lrz.dt = 0.01  # set default time step to 0.01
xstart = np.zeros((m, 3))  # stores the initial state of the m trajectories
xend = np.zeros((m, 3))  # stores the final state of the m trajectories
task = np.zeros(
    (m, 1), dtype=float
)  # for each trajectory, task stores 1 if the control objective was achieved, else 0
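The excerpt ends before the loop itself. A minimal sketch of the check described in the docstring, assuming the Lorenz API used elsewhere on this page (lrz.reset() returning a random initial state, lrz.trajectory(n, flag) returning the rolled-out states) and a hypothetical desired state x_des that the excerpt does not show:

x_des = np.zeros(3)  # hypothetical: the desired state is not given in this excerpt

for i in range(m):
    xstart[i, :] = lrz.reset()        # random initial state
    y, u, t = lrz.trajectory(n, 0)    # n steps under the trained control (flag value assumed)
    xend[i, :] = y[-1, :]
    # objective achieved if the end state lies inside the 0.14-radius ball
    task[i, 0] = 1.0 if np.linalg.norm(xend[i, :] - x_des) < 0.14 else 0.0

accuracy = task.sum() / m
print("accuracy:", accuracy)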
Example #5
from lorenz import Lorenz
from mpl_toolkits.mplot3d import Axes3D
import pickle
import matplotlib.pyplot as plt
"""   Initialize lorenz object by loading parameters from the training data file   """
with open("learning_algorithm2_training_data", "rb") as f:
    d = pickle.load(f)
sigma = d['sigma']
b = d['b']
r = d['r']
lrz = Lorenz(sigma, b, r)
lrz.X = d['X']
lrz.U = d['U']
"""  Initialize lorenz object state and compute trajectories with learning based control, lyapunov based control, 
and without any control  """
n = 6000  # number of time steps
lrz.state = [-4, -4, -1]
y_l, u_l, t_l = lrz.trajectory(n, 0)
lrz.state = [-4, -4, -1]
y_m, u_m, t_m = lrz.trajectory(n, 1)
lrz.state = [-4, -4, -1]
y_wc, t_wc = lrz.trajectory_no_control(n)
"""  trajectory visualization  """
fig = plt.figure(figsize=(8, 8))
ax = fig.add_subplot(111, projection='3d')
ax.plot(y_wc[:, 0],
        y_wc[:, 1],
        y_wc[:, 2],
        'r',
        linewidth=2,
        label="uncontrolled trajectory")
Example #6
import numpy
import pylab

from numpy import asarray

from lorenz import Lorenz

l = Lorenz(10.0, 28.0, 8.0/3.0)
DT, NSTEP = 0.01, 100

delta = 0.1
deltas = []
grads = []
# For a sequence of perturbation sizes, compare the tangent-linear
# prediction (dtraj) with a genuinely perturbed trajectory (traj1).
for i in range(5):
   dtraj = []
   traj0 = []
   traj1 = []
   dx, dy, dz = 0.0, 10.0, 10.0
   x0, y0, z0 = 0.0, 10.0, 10.0
   x1, y1, z1 = x0 + delta * dx, y0 + delta * dy, z0 + delta * dz
   
   dtraj.append((dx, dy, dz))
   traj0.append((x0, y0, z0))
   traj1.append((x1, y1, z1))
   for istep in range(NSTEP):   # renamed from i to avoid shadowing the outer loop variable
      dx, dy, dz = l.diff(x0, y0, z0, dx, dy, dz, DT)
      x0, y0, z0 = l.step(x0, y0, z0, DT)
      x1, y1, z1 = l.step(x1, y1, z1, DT)
      dtraj.append((dx, dy, dz))
      traj0.append((x0, y0, z0))
      traj1.append((x1, y1, z1))
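   # The excerpt is cut here. A plausible continuation (illustrative only):
   # compare the finite-difference growth (traj1 - traj0)/delta against the
   # tangent-linear run, record the error, and shrink delta for the next pass.
   diff_fd = (asarray(traj1) - asarray(traj0)) / delta
   err = numpy.abs(diff_fd - asarray(dtraj)).max()
   deltas.append(delta)
   grads.append(err)
   delta *= 0.5   # assumed: geometric shrinkage of the perturbation size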
Example #7
from lorenz import Lorenz
sigma = 10
rho = 28
beta = 8/3
L1 = Lorenz([-1,1,0],sigma,rho,beta)
u1 = L1.solve(50,.01)
L2 = Lorenz([-1.001,1.001,.001],sigma,rho,beta)
u2 = L2.solve(50,.01)
print(u1[0,0], u2[0,0])    # nearly identical initial x-values
print(u1[-1,0], u2[-1,0])  # final x-values have diverged by the end of the run
print(L1.df(2))
print(L1.test(u1))
Example #8
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits import mplot3d
from lorenz import Lorenz
import pickle

sigma = 10
b = 8 / 3
r = 1.5
lrz = Lorenz(sigma, b, r)  # initialize lorenz object with given parameters

n_samples = 1000  # set number of training samples
lrz.X, lrz.U = np.zeros((n_samples, 3)), np.zeros(
    (n_samples, 1))  # initialize training data to 0
"""  Training  
randomly initialize the state of the lorenz object and set lrz.X[i, :] to the initial state
lorenz object takes one step with -ve control and gets reward r1
reset the lorenz state back to starting state and take another step with +ve control which gives reward r2
Set policy lrz.U[i, 0] to -1 or 1 depending upon which policy maximizes reward
"""
for i in range(n_samples):
    lrz.X[i, :] = lrz.reset()
    lrz.step(-lrz.max_control)
    r1 = lrz.reward()
    lrz.state = lrz.X[i, :]
    lrz.step(lrz.max_control)
    r2 = lrz.reward()
    lrz.U[i, 0] = 2 * np.argmax([r1, r2]) - 1  # maps argmax 0/1 to -1/+1

# Completion assumed from the loading examples above, which read back
# exactly these keys from "learning_algorithm2_training_data".
data = {
    'sigma': sigma,
    'b': b,
    'r': r,
    'X': lrz.X,
    'U': lrz.U,
}
with open("learning_algorithm2_training_data", "wb") as f:
    pickle.dump(data, f)
Example #9
import numpy
import pylab

from numpy import asarray

from lorenz import Lorenz

l = Lorenz(10.0, 28.0, 8.0/3.0)
DT, NSTEP, NITER, STEP = 0.01, 1000, 100, 1.0E-11
ctrl = [0] * (NSTEP + 1)

obj0 = 1.0E+18
for iiter in range(NITER):
   traj = []
   x, y, z = 0.0, 10.0, 10.0
   
   traj.append((x, y, z))
   for i in range(NSTEP):
      x += ctrl[i]   # apply the control input before the time step
      x, y, z = l.step(x, y, z, DT)
      traj.append((x, y, z))
   # obj = (asarray(traj)[int(NSTEP/2):,:]**2).sum()
   obj = (x+1)**2 + (y+1)**2 + (z-20)**2   # squared distance of the final state from the target (-1, -1, 20)

   # if iiter % 100 == 0 or iiter == NITER - 1:
   #    t = asarray(traj[int(NSTEP/2):])
   #    pylab.figure()
   #    pylab.subplot(2,2,1)
   #    pylab.plot(t[:,0], t[:,1])
   #    pylab.xlabel('x'); pylab.ylabel('y')
   #    pylab.subplot(2,2,2)
Example #10
import numpy
import pylab

from lorenz import Lorenz

l = Lorenz(10.0, 28.0, 8.0/3.0)
traj = []
DT, NSTEP = 0.01, 10000
x, y, z = 0.0, 10.0, 10.0

traj.append((x, y, z))
for i in range(NSTEP):   # integrate NSTEP steps of size DT
   x, y, z = l.step(x, y, z, DT)
   traj.append((x, y, z))

traj = numpy.asarray(traj)
pylab.subplot(2,2,1)
pylab.plot(traj[:,0], traj[:,1])
pylab.xlabel('x'); pylab.ylabel('y')
pylab.subplot(2,2,2)
pylab.plot(traj[:,0], traj[:,2])
pylab.xlabel('x'); pylab.ylabel('z')
pylab.subplot(2,2,3)
pylab.plot(traj[:,1], traj[:,2])
pylab.xlabel('y'); pylab.ylabel('z')
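The excerpt stops after the third panel; to display the figure when the script is run standalone, a final call is needed:

pylab.show()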