def fire(self):
    # X = αX + (1-α)[WI*I + W*X]
    self.X = plus(times(self.X, self.alpha),
                  times(plus(dot(self.WI, self.I), dot(self.W, self.X)),
                        1 - self.alpha))
    self.X = tanh(self.X)  # sigmoid(self.X)
    # Y = WO*X
    self.Y = dot(self.WO, self.X)
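Written out, the update implemented by this fire() is the following (the tanh wraps the whole leaky mix, which the inline comment leaves implicit):

\[
X \leftarrow \tanh\big(\alpha X + (1-\alpha)(W_I I + W X)\big), \qquad Y = W_O X
\]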
def fire(self):
    # X = αX + (1-α)[WI*I + W*X]
    self.X = plus(times(self.X, self.alpha),
                  times(plus(dot(self.WI, [1] + self.I),
                             dot(self.W, [1] + self.X)),
                        1 - self.alpha))
    self.X = tanh(self.X)
    # Y = WO*X
    self.Y = dot(self.WO, [1] + self.X)
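fire() relies on a handful of list-based linear-algebra helpers (plus, times, dot, tanh, vector_len, random_vector, random_matrix) whose definitions are not shown in these listings. A minimal sketch of what they presumably look like, inferred from how they are called, assuming plain Python lists for vectors and lists of rows for matrices (the real implementations may differ):

    import math
    from random import uniform, normalvariate, randrange

    def plus(a, b):
        # element-wise sum of two vectors
        return [x + y for x, y in zip(a, b)]

    def times(a, c):
        # scale a vector by a constant
        return [x * c for x in a]

    def dot(M, v):
        # matrix-vector product: M is a list of rows, v a vector
        return [sum(m_ij * v_j for m_ij, v_j in zip(row, v)) for row in M]

    def tanh(a):
        # element-wise tanh
        return [math.tanh(x) for x in a]

    def vector_len(a):
        # Euclidean norm
        return math.sqrt(sum(x * x for x in a))

    def random_vector(n, gen):
        return [gen() for _ in range(n)]

    def random_matrix(rows, cols, gen):
        return [[gen() for _ in range(cols)] for _ in range(rows)]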
def ljapexp(q=150, iterations=40, gamma0=10**-8, sigma=0.1):
    # input, hidden and output layer size
    p = 1
    #q = 150
    r = 1  #300

    # strength of input activations
    INPUT_STRENGTH = 10**0

    def input_dist():
        return INPUT_STRENGTH * uniform(-1, 1)

    def normgen(mu, sigma):
        def n():
            return normalvariate(mu, sigma)
        return n

    def unigen(a, b):
        def u():
            return uniform(a, b)
        return u

    def zerogen():
        def zero():
            return 0
        return zero

    # matrix and initial activation randomization
    r = Reservoir(p, q, r)
    r.randomize_matrices(normgen(0, 1))
    r.WI = random_matrix(q, p + 1, unigen(-0.1, 0.1))
    r.W = random_matrix(q, q + 1, normgen(0, sigma))
    #r.randomize_vectors(normgen(0,1))

    # make a copy of the reservoir
    r2 = copy.deepcopy(r)
    #perturbation = random_vector(q)
    #perturbation = times(perturbation, gamma0 / vector_len(perturbation))

    priemersum = 0  # running sum of the per-node mean exponents
    for perturbed_node in range(q):
        r.randomize_vectors(normgen(0, 1))
        # perturb a single node by gamma0
        perturbation = [0 for _ in range(q)]
        perturbation[perturbed_node] = gamma0
        r2.X = plus(r2.X, perturbation)
        difvec = plus(times(r.X, -1), r2.X)

        exponentsum = 0
        lambdy = [0] * iterations
        for it in range(0, iterations):
            # introduce a random input
            r.I = random_vector(r.p, input_dist)
            r2.I = r.I
            # run one step
            r.fire()
            r2.fire()
            # calculate the difference
            difvec = plus(times(r.X, -1), r2.X)
            gammaK = vector_len(difvec)
            # renormalize
            r2.X = plus(r.X, times(difvec, gamma0 / gammaK))
            # record lambda
            mylog = math.log(gammaK / gamma0)
            lambdy[it] = mylog

        priemer = sum(lambdy) / iterations    # mean of the logs
        disperzia = sum([(x - priemer) * (x - priemer)
                         for x in lambdy]) / iterations    # variance of the logs
        priemersum += priemer
        print("\rnodeindex=%d" % perturbed_node, end="")

    return priemersum / q
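A possible driver for ljapexp(), not part of the original listing: sweep the weight scale sigma and print the resulting exponent estimate. The sigma grid below is purely illustrative; reading values near zero as the transition between ordered and chaotic reservoir dynamics is the usual edge-of-chaos criterion.

    # illustrative sweep over the recurrent weight scale sigma
    if __name__ == "__main__":
        for sigma in [0.05, 0.08, 0.10, 0.12, 0.15]:
            lam = ljapexp(q=150, iterations=40, sigma=sigma)
            print("\nsigma=%.2f  lambda=%.4f" % (sigma, lam))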
r.WI = random_matrix(q, p + 1, unigen(-0.1, 0.1))
r.W = random_matrix(q, q + 1, normgen(0, 10**(-0.8)))
r.randomize_vectors(normgen(0, 1))

# make a copy of the reservoir
r2 = copy.deepcopy(r)
#perturbation = random_vector(q)
#perturbation = times(perturbation, gamma0 / vector_len(perturbation))

# perturb a single randomly chosen node by gamma0
perturbation = [0] * q
perturbation[randrange(q)] = gamma0
#print(perturbation)
r2.X = plus(r2.X, perturbation)
difvec = plus(times(r.X, -1), r2.X)
#print("difvec = %s" % difvec)

f = open('output.dat', 'w')

exponentsum = 0
lambdy = [0] * ITERATIONS
for it in range(0, ITERATIONS):
    # feed the same random input to both reservoirs
    r.I = random_vector(r.p, input_dist)
    r2.I = r.I
    # run one step
    r.fire()
    r2.fire()
    # distance between the two trajectories after one step
    difvec = plus(times(r.X, -1), r2.X)
    gammaK = vector_len(difvec)
    # renormalize the perturbed copy back to distance gamma0
    r2.X = plus(r.X, times(difvec, gamma0 / gammaK))
    mylog = math.log(gammaK / gamma0)
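The listing breaks off after computing mylog. Judging from ljapexp above, the remainder presumably records each per-step log and averages them; a sketch of that assumed continuation follows (the f.write format is hypothetical):

    # assumed continuation of the loop body, mirroring ljapexp above
    lambdy[it] = mylog
    f.write("%d %f\n" % (it, mylog))  # hypothetical format for output.dat

# the mean of the per-step logs estimates the largest Lyapunov exponent
priemer = sum(lambdy) / ITERATIONS
print("lambda estimate: %f" % priemer)
f.close()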