Example #1
class serv_ESN:
    def __init__(self,):
        weight_scale = 1.0  # .8
        weight_inp = 0.2
        weight_fb = 10 ** (-3)
        alpha = 0.99  # .35#.2
        fback = False  # False
        inital_washout = 100  # 100
        padding_s = 300
        units = 28 * 28
        indim = 6
        outdim = 6

        self.esn = ESN(units, indim, outdim, weight_scale, weight_inp, weight_fb, alpha, fback)

        # self.webapp = webapp
        local_dir = os.path.dirname(__file__)
        self.esn.load(local_dir + "/trainied.pickle")
        self.stepper = self.esn.step_taped()
        self.outputs = np.zeros((3, outdim))
        print "ESN:: init"

    def serv_close(self,):
        return True

    def serv_train(self,):
        return True

    def serv_step(self, val_in):

        state, output, this = self.stepper(val_in, self.outputs, 0.0)
        output += np.random.random(output.shape)
        return state, output
Example #2
    def fit_and_predict(self, sensor_names, training_samples, training_RULs,
                        configuration_samples, configuration_RULs,
                        validation_samples, validation_RULs, testing_samples,
                        testing_RULs, testing_time):
        self.sensor_names = sensor_names  # kept so check_accuracy can size the network input
        self.configuration_samples = configuration_samples
        self.configuration_RULs = configuration_RULs
        self.validation_samples = validation_samples
        self.validation_RULs = validation_RULs
        self.testing_time = testing_time
        x = self.optimize_network()
        self.x = x
        n_reservoir, spectral_radius, sparsity, input_scaling, input_shifting, \
            output_scaling, output_shifting, state_noise = \
            x[0], x[1], x[2], x[3], x[4], x[5], x[6], x[7]
        self.esnet = ESN.ESN(n_inputs=len(sensor_names),
                             n_outputs=1,
                             n_reservoir=int(n_reservoir),
                             spectral_radius=spectral_radius,
                             sparsity=sparsity,
                             random_state=42,
                             input_shift=input_shifting,
                             input_scaling=input_scaling,
                             teacher_scaling=output_scaling,
                             teacher_forcing=True,
                             teacher_shift=output_shifting,
                             noise=state_noise)
        self.esnet.fit(np.array(training_samples), np.array(training_RULs))
        predicted_RULs = self.esnet.predict(np.array(testing_samples), True)
        # Flatten the nested prediction array into a single list of RULs
        self.predicted_RULs = [
            item for sublist in predicted_RULs for item in sublist
        ]
        self.testing_RULs = testing_RULs
Example #3
    def check_accuracy(self, x):
        n_reservoir, spectral_radius, sparsity, input_scaling, input_shifting, \
            output_scaling, output_shifting, state_noise = \
            x[0], x[1], x[2], x[3], x[4], x[5], x[6], x[7]
        esn = ESN.ESN(n_inputs=len(self.sensor_names),
                      n_outputs=1,
                      n_reservoir=int(n_reservoir),
                      spectral_radius=spectral_radius,
                      sparsity=sparsity,
                      random_state=42,
                      input_shift=input_shifting,
                      input_scaling=input_scaling,
                      teacher_scaling=output_scaling,
                      teacher_forcing=True,
                      teacher_shift=output_shifting,
                      noise=state_noise)
        esn.fit(np.array(self.configuration_samples),
                np.array(self.configuration_RULs))
        predictions = esn.predict(np.array(self.validation_samples))
        predicted_RULs = [item for sublist in predictions for item in sublist]

        errors = [
            abs(a - b) * 400.0
            for a, b in zip(predicted_RULs, self.validation_RULs)
        ]
        return np.mean(errors)
Example #4
    def __init__(self, num_params, num_inputs, num_actions):

        self.num_units = num_params//num_actions
        self.num_inputs = num_inputs
        self.num_actions = num_actions
        
        self.echo = ESN(
            N       = self.num_units,
            dt      = 1.0,
            tau     = 5.0,
            alpha   = 0.1,
            beta    = 0.9,
            epsilon = 1.0e-10)
        
        self.input_weights = np.random.randn(self.num_inputs, self.num_units)
        self.out_weights = np.random.randn(self.num_units, self.num_actions)
Example #5
class Agent:

    def __init__(self, num_params, num_inputs, num_actions):

        self.num_units = num_params//num_actions
        self.num_inputs = num_inputs
        self.num_actions = num_actions
        
        self.echo = ESN(
            N       = self.num_units,
            dt      = 1.0,
            tau     = 5.0,
            alpha   = 0.1,
            beta    = 0.9,
            epsilon = 1.0e-10)
        
        self.input_weights = np.random.randn(self.num_inputs, self.num_units)
        self.out_weights = np.random.randn(self.num_units, self.num_actions)

    def save(self, filename):
        with open(filename, "w") as f:
            yaml.dump(self, f)

    @staticmethod
    def load(filename):
        with open(filename, "r") as f:
            return yaml.load(f, Loader=yaml.UnsafeLoader)


    def setParams(self, params):

        self.out_weights = params.reshape(self.num_units, self.num_actions).copy()
    
    def reset(self):

        self.echo.reset()

    def step(self, status):
        
        units_status = np.matmul(status.reshape(1, -1), self.input_weights)
        units = self.echo.step(units_status.ravel())
        action = np.matmul(units, self.out_weights)
        action = np.hstack([ np.tanh(action[0]), sigmoid(action[1])])
        action *= [0.1, 0.5]
        return action
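A usage sketch, not part of the project: it assumes the Agent class above and its ESN/sigmoid helpers are importable, and uses num_actions = 2 to match the tanh/sigmoid split in step().

import numpy as np

# Usage sketch; sizes are illustrative only.
num_inputs, num_actions, num_units = 8, 2, 100
agent = Agent(num_params=num_units * num_actions,
              num_inputs=num_inputs,
              num_actions=num_actions)
agent.reset()
agent.setParams(np.random.randn(num_units * num_actions))   # e.g. one candidate from an evolutionary search
action = agent.step(np.random.randn(num_inputs))             # 2-element action, roughly in [-0.1, 0.1] x [0, 0.5]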
Example #6
    def __init__(self,):
        weight_scale = 1.0  # .8
        weight_inp = 0.2
        weight_fb = 10 ** (-3)
        alpha = 0.99  # .35#.2
        fback = False  # False
        inital_washout = 100  # 100
        padding_s = 300
        units = 28 * 28
        indim = 6
        outdim = 6

        self.esn = ESN(units, indim, outdim, weight_scale, weight_inp, weight_fb, alpha, fback)

        # self.webapp = webapp
        local_dir = os.path.dirname(__file__)
        self.esn.load(local_dir + "/trainied.pickle")
        self.stepper = self.esn.step_taped()
        self.outputs = np.zeros((3, outdim))
        print "ESN:: init"
Example #7
    def __init__(self, y0=0.0, goal=1.0, timesteps=1000, lmbd=1e-4, input_amplitude=50, **kargs):
        """
        y0          float:  starting point
        goal        float:  final point
        timesteps   int:    number of steps of the desired trajectory
        lmbd        float:  ridge regularization parameter
        """
        self.goal = goal
        self.y0 = y0
        self.timesteps = timesteps
        self.LMBD = lmbd
        self.input_amplitude = input_amplitude

        # Init the esn
        self.res = ESN(stime=self.timesteps, **kargs)
        # Init the weights to the ESN with sparse random values.
        # The input to the ESN is a 4-element vector:
        # [ y0, 1-y0, goal, 1-goal ]
        self.input2res_w = np.random.randn(self.res.N, 4)
        self.input2res_w *= np.random.rand(self.res.N, 4) < 0.1

        self.readout_w = np.zeros(self.res.N)
Example #8
import os
import pickle

data_dir = "./output/"
output_dir = "./nets/"
filenames = os.listdir(data_dir)
filenames.sort()
hidden_size = 10
input_size = 11
output_size = 2
steer_out = 1

#file_ind = find_ind("forza_1", filenames)
for index in range(0, len(filenames) - 3, 6):

    esn = ESN(input_size, output_size, hidden_size, bias=False)
    for file_index in range(index, index + 6):

        input_train, target_train, N_max = read_file(
            os.path.join(data_dir, filenames[file_index]), steer_out)

        epochs = 2

        #train
        for epoch in range(epochs):
            for ind in range(N_max - 1):
                inp = input_train[ind].reshape(-1, 1)
                desired_target = target_train[ind + 1].reshape(-1, 1)

                if ind >= esn.washout:
                    out = esn.online_train(inp, desired_target)
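The excerpt imports pickle but never shows the save step; here is a minimal sketch of persisting each trained net into output_dir (the filename pattern is an assumption, not taken from the project):

    # Sketch only: persist the trained net with the already-imported pickle module.
    # The filename pattern is assumed; output_dir is defined at the top of the script.
    os.makedirs(output_dir, exist_ok=True)
    with open(os.path.join(output_dir, "esn_%03d.pickle" % index), "wb") as f:
        pickle.dump(esn, f)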
Example #9
import math
import torch

from esn import ESN

# Prepare a synthetic dataset
dataset = torch.Tensor( [ math.sin(x*0.5) + 2 * round(math.cos(x*0.5)) for x in range(2000) ] )
dataset = dataset / dataset.abs().max()

# Washout length
washout = 200

# Split training set and test set
training_x = dataset[0:1200].view(-1,1)
training_y = dataset[1:1201]
test_x = dataset[1200:-1].view(-1,1)
test_y = dataset[1201:]

model = ESN(1, reservoir_size=50, contractivity_coeff=1.2, density=0.9)

# Collect states for training set
X = model(training_x)

# Washout
X = X[washout:]
Y = training_y[washout:]

# Train the model by Moore-Penrose pseudoinversion.
W = X.pinverse() @ Y

# Evaluate the model on the test set
# We pass the latest training state in order to avoid the need for another washout
X_test = model(test_x, X[-1])
predicted_test = X_test @ W
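A quick error check is a natural addition here; this is a sketch, not part of the original snippet, and it assumes predicted_test comes out as a 1-D tensor aligned with test_y (as the slicing above suggests):

# Sketch only: mean squared error of the one-step-ahead forecast on the test set.
mse = torch.mean((predicted_test - test_y) ** 2)
print("Test MSE:", mse.item())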
Example #10
    def optimize_network(self):
        x = ESN.diffev_minimize(self.check_accuracy, self.bounds, self.popsize,
                                self.mutate, self.recombination, self.maxiter)
        return x
Example #11
data = readBinary(path,
                  precision=4,
                  nsteps=total_length * time_thinning_step,
                  npoints=output_size * space_thinning_step)
data = data.getData(step=time_thinning_step).T[::space_thinning_step, :]

train_data = data[::obs_thinning_step, :train_length]
target_data = data[:, 1:train_length + 1]
test_observed = data[::obs_thinning_step,
                     train_length:train_length + test_length]
test_target = data[:, train_length + 1:train_length + test_length + 1]

model = ESN(input_size=input_size,
            output_size=output_size,
            reservoir_size=reservoir_size,
            adjacency_density=0.0006,
            spectral_radius=0.1,
            input_scale=0.5)

W_out, reservoir = model.train(train_data,
                               target_data=target_data,
                               washout=washout,
                               ridge_param=ridge_param)

predict = model.predict(reservoir,
                        test_observed,
                        ptb_func=None,
                        ptb_scale=1.0,
                        nexttime=nexttime,
                        extended_interval=1000)
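The snippet stops at the forecast; a rough evaluation could follow. This is a sketch, not from the source, and it assumes predict is array-like with the same (output_size, test_length) layout as test_target:

import numpy as np  # assumed to be available in this script

# Sketch only: RMSE of the forecast against the held-out target window.
pred = np.asarray(predict)[:, :test_target.shape[1]]
rmse = np.sqrt(np.mean((pred - test_target) ** 2))
print("test RMSE:", rmse)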
Example #12
File: gen.py Project: minbin/esn
args = parser.parse_args()

if not args.i:
    print('[ERROR] use -i FILE_NAME to set an input file')
    exit()

data = np.load(args.i)
print('loaded %d points' % len(data))

if args.t == 'dnf':
    data = np.log(data)
elif args.t == 'ig':
    data = data[0::2] / 10000 - 1.1

esn = ESN(n_in=1, n_fb=1, n_units=args.u, spectral_radius=args.s)

trainlen = args.trainlen
predlen = args.predlen
print('using: %d, predicting: %d' % (trainlen, predlen))

print('fitting...')
fit = esn.fit(np.ones(trainlen), data[:trainlen])
print('predicting...')
pred = esn.predict(np.ones(predlen), cont=True)

if args.t == 'dnf':
    data = np.exp(data)
    pred = np.exp(pred)
elif args.t == 'ig':
    data = (data + 1.1) * 10000
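Plotting the forecast against the target window is the obvious last step for gen.py; the following is a sketch, not from the source, and assumes pred is a 1-D array of length predlen and that data extends past trainlen + predlen:

# Sketch only: visual comparison of the forecast with the target window.
import matplotlib.pyplot as plt

t = range(trainlen, trainlen + predlen)
plt.plot(t, data[trainlen:trainlen + predlen], label="target")
plt.plot(t, pred, label="ESN prediction")
plt.legend()
plt.show()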
Example #13
class ESN_discrete(object):
    """
    Learn trajectories to goals.
    Changing the spatial goal also
    changes the shape of the trajectory.
    """

    def __init__(self, y0=0.0, goal=1.0, timesteps=1000, lmbd=1e-4, input_amplitude=50, **kargs):
        """
        y0          float:  starting point
        goal        float:  final point
        timesteps   int:    number of steps of the desired trajectory
        lmbd        float:  ridge regularization parameter
        """
        self.goal = goal
        self.y0 = y0
        self.timesteps = timesteps
        self.LMBD = lmbd
        self.input_amplitude = input_amplitude

        # Init the esn
        self.res = ESN(stime=self.timesteps, **kargs)
        # Init the weights to the ESN with sparse random values.
        # The input to the ESN is a 4-element vector:
        # [ y0, 1-y0, goal, 1-goal ]
        self.input2res_w = np.random.randn(self.res.N, 4)
        self.input2res_w *= np.random.rand(self.res.N, 4) < 0.1

        self.readout_w = np.zeros(self.res.N)

    def imitate_path(self, y_des):
        """
        Learn through ridge regression the weights 
        to the readout unit to reproduce the 'y_des' 
        trajectories.
        """

        Y = np.array([])
        P = np.array([]).reshape(self.res.N, 0)
        X = np.array([]).reshape(self.res.N, 0)

        for path in y_des:

            # learning start-end points
            y0 = path[0]
            goal = path[-1]

            # desired output
            y = self.interpolate(path)
            y = y - goal

            # input pattern
            p_init = np.dot(self.input2res_w, [y0, 1 - y0, goal, 1 - goal])

            # Record the reservoir activity
            self.res.reset()
            for t in range(self.timesteps):
                p = p_init * (t == 0)
                self.res.step(self.input_amplitude * p)
                self.res.store(t)
                # append to the input time-series
                P = np.hstack([P, p.reshape(self.res.N, 1)])
            # network activity time-series
            x = self.activations()
            # append activities
            X = np.hstack([X, x])
            # append desired outputs
            Y = np.hstack([Y, y])

        # Ridge regression
        w = self.readout_w
        L = self.LMBD
        # including the bias
        N = self.res.N
        w += np.dot(np.linalg.inv(np.dot(X, X.T) + L * np.eye(N, N)), np.dot(X, Y))

    def activations(self):
        return self.res.data[self.res.out_lab] * np.outer(
            np.ones(self.res.N), np.exp(-np.linspace(1, 0, self.timesteps))
        )

    def rollout(self, y0=0.0, goal=1.0):

        # input pattern
        p_init = np.dot(self.input2res_w, [y0, 1 - y0, goal, 1 - goal])

        # Record the reservoir activity
        self.res.reset()
        for t in range(self.timesteps):
            p = p_init * (t == 0)
            self.res.step(self.input_amplitude * p)
            self.res.store(t)
        # network activity time-series
        x = self.activations()

        return np.dot(self.readout_w, x) + goal

    def interpolate(self, path):

        import scipy.interpolate

        x = np.linspace(0, self.timesteps, len(path))
        y = np.zeros(self.timesteps)
        y_gen = scipy.interpolate.interp1d(x, path)

        for t in range(self.timesteps):
            y[t] = y_gen(t)
        return y
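A usage sketch (not from the source): fit the readout on a couple of hand-made demonstration paths, then roll out a trajectory toward a new goal. It assumes the underlying reservoir's **kargs defaults are acceptable and that numpy is imported as np, as in the class above.

# Usage sketch; demonstration paths and hyperparameters are illustrative only.
demo_paths = [np.linspace(0.0, 1.0, 50) ** 2,
              np.sqrt(np.linspace(0.0, 1.0, 80))]
model = ESN_discrete(timesteps=1000, lmbd=1e-4)
model.imitate_path(demo_paths)                      # ridge-regress the readout weights
trajectory = model.rollout(y0=0.0, goal=0.5)        # 1000-step trajectory ending near 0.5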
Example #14
signal.signal(signal.SIGTERM, lambda signum, frame: sys.exit(0))

PORT = os.getenv("PORT", 80)
SIZE = int(os.getenv("SIZE", 1024))
DENSITY = float(os.getenv("DENSITY", 0.1))
SPECTRAL_RADIUS = float(os.getenv("RADIUS", 0.99))
BIAS = os.getenv("BIAS", "true").lower() in (True, "true")
ACTIVATION = os.getenv("ACTIVATION", "tanh")
MODEL_PATH = os.getenv("MODEL_PATH", "models/esn")
SAVE_STEPS = int(os.getenv("SAVE_STEPS", 10000))

esn = None
try:
    print("Loading", MODEL_PATH)
    esn = ESN.load(MODEL_PATH)
    print(MODEL_PATH, "loaded")
except FileNotFoundError as e:
    print(e)
    print('"{}" not found. Creating new model.'.format(MODEL_PATH))
    esn = ESN(SIZE,
              density=DENSITY,
              spectral_radius=SPECTRAL_RADIUS,
              bias=BIAS,
              activation=ACTIVATION)
    esn.save(MODEL_PATH)

app = Flask(__name__)
api = Api(app)

step_counter = 0
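The excerpt ends right after the Api is created; a hypothetical Flask-RESTful endpoint wired to the model might look like this. The route, payload format, and the esn.step call are assumptions, not part of the source (only esn.save and SAVE_STEPS appear above).

from flask import request          # assumed to be imported alongside Flask above
from flask_restful import Resource

class StepResource(Resource):
    # Hypothetical endpoint: feed one input vector to the reservoir and return its output.
    def post(self):
        global step_counter
        payload = request.get_json(force=True)          # e.g. {"input": [...]}
        output = esn.step(payload["input"])             # assumes the ESN exposes a step() method
        step_counter += 1
        if step_counter % SAVE_STEPS == 0:
            esn.save(MODEL_PATH)                        # periodic checkpoint, as SAVE_STEPS suggests
        return {"output": [float(v) for v in output]}

api.add_resource(StepResource, "/step")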
Example #15
def test(sys, weight_scale, weight_inp, weight_fb, alpha, inital_washout, padding_s, fback=False):

	units = 28*28
	indim = 6
	outdim = 6
	

	esn = ESN(
		units, indim, outdim, weight_scale,weight_inp,weight_fb, alpha, fback
		)
	esn.load("trainied.pickle")
	stepper = esn.step_taped()
	

	dtsets = read_dataset(sys.argv[1], sys.argv[2])
	# import pdb;pdb.set_trace()
	inputs, outputs, padIdxs, idxs = dtsets[0]

	
	plot_output =[]
	plot_state =[]

	import time
	start = time.time()
	###########TRAIN
	# all_states = []
	# all_this = []
	# all_states, all_this= train(idxs, padIdxs, esn, stepper, inputs, outputs)

	# M_tonos = np.linalg.pinv(all_states)
	# # import pdb; pdb.set_trace()
	# all_this = np.arctanh(all_this)
	# W_trans = np.dot(M_tonos,all_this)
	# esn.W_out.set_value(W_trans)
	# print W_trans
	
	###########END TRAIN
	print "Time taken ", time.time() - start
	#########TESTING#############
	
	outputs1 = np.zeros(outputs.shape)
	outputs1[1:] = outputs[:-1]

	state, output, this = stepper(
		inputs, outputs, 0.)

	print(output.shape)
	plot_state.extend(state[:,:units])
	plot_output.extend(output)

	
	#########TESTING#############
	if int(sys.argv[3]) == 1:
		f, axarr = plt.subplots(4, sharex=True)
		for oid,tpt in enumerate(np.array(plot_output).transpose()):
			try:
				axarr[0].plot(tpt,label="output"+str(oid))
			except:
				pass
		axarr[0].set_title('output')
		# axarr[0].legend()
		axarr[1].plot(outputs,label="outputs")
		axarr[2].plot(plot_state,label="state")
		axarr[2].set_title('state')
		# axarr[1].legend()
		axarr[3].plot(inputs,label="inputs")
		axarr[3].set_title('inputs')
		# axarr[2].legend()
		# plt.draw()
		# plt.figure()
		# plt.plot(inputs)
		# # plt.figure()
		# plt.plot(outputs)
		plt.show()