示例#1
0
 def itera(self):
     """Run the simulation loop for ``self.params.niter`` iterations.

     NOTE(review): the visible loop body only computes the accelerations;
     the rest of the integration step appears to lie outside this chunk —
     confirm against the full file.
     """
     npart = self.initial["npart"]
     masses = self.initial["masses"]
     # Preallocate one (npart, 3, niter) array each to store every step's data.
     All_positions = np.zeros((npart, 3, self.params.niter))
     All_velocities = np.zeros((npart, 3, self.params.niter))
     posicions = self.initial["posicions_inicials"]
     vels = self.initial["velocitats_inicials"]
     logging.info("Starting simulation")
     for step in t(range(self.params.niter)):
         # Pairwise accelerations for the current positions; A and B are
         # presumably potential parameters — TODO confirm in `acceleracions`.
         accs = acceleracions(posicions, masses, self.params.A,
                              self.params.B)
示例#2
0
def optimitza(metode):
    """Run one of four optimisation strategies over the fitness landscape.

    ``metode`` selects the strategy:
      1 -- Nelder-Mead on (c, k, sigma, mu) from a fixed starting point
      2 -- Bayesian optimisation of the negated fitness over box bounds
      3 -- brute-force 30x30 grid over (sigma, mu) with c = k = 1,
           saved to ``matriu_ckfixes.npy``
      4 -- Nelder-Mead on (sigma, mu) only
    """
    shared_options = {'disp': False, 'initial_simplex': None,
                      'maxiter': None, 'xatol': 0.0001, 'return_all': False,
                      'fatol': 0.0001, 'maxfev': None}

    if metode == 1:
        # Starting point picked from the sampling grids: first c sample,
        # eleventh k sample, plus fixed sigma and mu.
        start = [np.linspace(1.0, 3.0, 50)[0],
                 np.linspace(0.1, 0.8, 50)[10],
                 1.1, 0.013]
        result = minimize(nova_fitness, start, args=(),
                          method='Nelder-Mead', tol=None, callback=print,
                          options=shared_options)
        print(result)

    if metode == 2:
        # BayesianOptimization maximises, so negate the fitness.
        bo = BayesianOptimization(
            lambda c, k, sigma, mu: -nova_fitness([c, k, sigma, mu]),
            {'c': (1.0, 3.0), 'k': (0.1, 0.8),
             'sigma': (1, 2), 'mu': (0.002, 0.01)})
        bo.maximize(init_points=200, n_iter=100)
        print(bo.res['max'])
        print(bo.res['all'])

    if metode == 3:
        # Brute-force scan over (sigma, mu) with c and k fixed at 1;
        # values are capped at 99 before being stored in the grid.
        best = 1e8
        grid = np.zeros((30, 30))
        for i, mu in enumerate(t(np.linspace(0.002, 0.02, 30))):
            for j, sigma in enumerate(np.linspace(0, 1, 30)):
                score = nova_fitness([1, 1, sigma, mu])
                grid[i][j] = min(score, 99)
                if score < best:
                    best = score
                    print(score, "es el nou minim")
        np.save("matriu_ckfixes", grid)

    if metode == 4:
        # Optimise only (sigma, mu); c and k are handled inside the fitness.
        result = minimize(nova_nova_fitness, [1, 0], args=(),
                          method='Nelder-Mead', tol=None, callback=print,
                          options=shared_options)
        print(result)
 def itera(self):
     """Integrate the particle system for ``self.params.niter`` steps.

     Records every step's state into ``self.positions``,
     ``self.velocities`` and ``self.accelerations``, each of shape
     (npart, 3, niter). The per-step update is delegated to the
     integrator selected by ``self.params.integrador``.
     """
     npart = self.initial["npart"]
     masses = self.initial["masses"]
     # Preallocate history arrays, one slice per iteration.
     self.positions = np.zeros((npart, 3, self.params.niter))
     self.velocities = np.zeros((npart, 3, self.params.niter))
     self.accelerations = np.zeros((npart, 3, self.params.niter))
     pos_act = self.initial["posicions_inicials"]
     vel_act = self.initial["velocitats_inicials"]
     integrador = integradors.string2func(self.params.integrador)
     logging.info("Començant simulació, " + str(self.params.niter) +
                  " iteracions, timestep = " + str(self.params.timestep))
     for step in t(range(self.params.niter)):
         # Record the current state before advancing it.
         self.positions[:, :, step] = pos_act
         self.velocities[:, :, step] = vel_act
         acc_act = acceleracions(pos_act, masses, self.params.A,
                                 self.params.B)
         self.accelerations[:, :, step] = acc_act
         # BUG FIX: the original passed a bare, undefined name `timestep`
         # (NameError at runtime); use the configured value instead.
         # Also dropped the unused `pos_ant` local, which aliased the
         # step-1 slice (index -1 on the first iteration).
         pos_act, vel_act = integrador(pos_act, vel_act, acc_act,
                                       self.params.timestep)
示例#4
0
opt_func = tf.train.AdamOptimizer(learn_rate)
# Clip gradients by global norm before applying them (training stability).
grads, _ = tf.clip_by_global_norm(tf.gradients(loss, tf_vars), clip_ratio)
opt_op = opt_func.apply_gradients(zip(grads, tf_vars))

# Perform training
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())

    # Baseline accuracy on the full training set before any training,
    # reported under epoch index -1.
    train_pred = sess.run(predict,
                          feed_dict={
                              features_pl: train_set,
                              keep_prob_pl: 1.0
                          })
    print(-1, " ", np.mean(np.equal(train_stances, train_pred)))

    for epoch in t(range(epochs)):
        total_loss = 0
        # Reshuffle sample indices each epoch for stochastic mini-batches.
        indices = list(range(n_train))
        r.shuffle(indices)

        for i in t(range(n_train // batch_size_train)):
            # Take the i-th contiguous slice of the shuffled index list;
            # a trailing partial batch (n_train % batch_size_train) is dropped.
            batch_indices = indices[i * batch_size_train:(i + 1) *
                                    batch_size_train]
            batch_features = [train_set[i] for i in batch_indices]
            batch_stances = [train_stances[i] for i in batch_indices]

            # NOTE(review): the feed dict is built here but the sess.run
            # that consumes it lies beyond this chunk — view appears
            # truncated; confirm against the full file.
            batch_feed_dict = {
                features_pl: batch_features,
                stances_pl: batch_stances,
                keep_prob_pl: train_keep_prob
            }
示例#5
0
import numpy as np
from tqdm import tqdm as t
from IPython import embed
import pandas as pd
import scipy
from llegeix_escriu import esc
from llitelastic import *
from random import uniform
""" Inherits the function fitness(v) from llitelastic,
which maps R^2 --> R
        (el, t0) --> R


PARAMETER SPACE TO OPTIMISE OVER:
K: 50 - 400
InitialTension: 0.5 - 4
"""
n = 4

# Evaluate two rows (2n and 2n+1) of a 10x10 (K, tension) grid and save
# them, so the full grid can be assembled from several runs.
grid = np.zeros((10, 10))
k_values = np.linspace(50, 400, grid.shape[0])
tension_values = np.linspace(0.5, 4, grid.shape[1])
for row in t([2 * n, 2 * n + 1]):
    for col in t(range(grid.shape[1])):
        grid[row][col] = fitness([k_values[row], tension_values[col]])

np.save("llits_grid" + str(n), grid)
示例#6
0
def tqdm(iterable=None, desc=None, total=None, leave=True, **kwargs):
    """tqdm preconfigured with ascii=True and ncols=100."""
    from tqdm import tqdm as _tqdm

    return _tqdm(iterable, desc=desc, total=total, leave=leave,
                 ascii=True, ncols=100, **kwargs)