def optimise_on_room(args):

    # Initialise X
    X_0 = np.zeros((args.num_bulbs, 3)).ravel()
    # Initialise Room
    # TODO: Replace hardcoded arguments with command line args
    room = Room(l=args.L,
                b=args.B,
                h=args.H,
                mesh_resolution=10,
                mesh_type='horizontal',
                plane_a=None,
                plane_b=None,
                plane_c=None,
                plane_d=None,
                plane_height=None,
                obj_weight=None,
                transform=False,
                objective_type='simple_min')

    if args.algorithm == 'steepest_descent':
        alpha_0 = np.ones_like(X_0) * 1e-2
        X_optimal = steepest_descent(X_0, alpha_0, room.objective_function,
                                     room.gradient)
    else:
        raise NotImplementedError('%s algorithm not implemented.' %
                                  args.algorithm)
    return X_optimal
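The steepest_descent routine this example calls is not shown; a minimal sketch consistent with its call signature here (initial point, per-coordinate step sizes, objective, gradient) could look like the following. The stopping rule (tol, max_iter) is an assumption, not the module's actual logic.

import numpy as np

def steepest_descent(x0, alpha, f, grad, tol=1e-8, max_iter=10000):
    """Fixed-step steepest descent: x <- x - alpha * grad(x).

    tol and max_iter are assumed defaults; the real module may
    use different stopping logic.
    """
    x = x0.copy()
    f_old = f(x)
    for _ in range(max_iter):
        x = x - alpha * grad(x)  # alpha holds per-coordinate step sizes
        f_new = f(x)
        if abs(f_new - f_old) < tol:  # stop when the objective stalls
            break
        f_old = f_new
    return x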
Example #2
def train(self, X_train, Y_train, tol=1.0E-7, algo=1, print_iter=False):
    # TODO: re-express class labels as vectors
    self.X_train = X_train
    if self.is_classification:
        # we assume we have been passed a vector of integer labels
        self.Y_train = np.zeros((Y_train.shape[0], np.amax(Y_train) + 1),
                                dtype=int)
        for i in range(Y_train.shape[0]):
            self.Y_train[i, Y_train[i, 0]] = 1
    else:
        self.Y_train = Y_train
    self.tolerance = tol
    if algo == 0:
        optimizer = steepest_descent(self)
    elif algo == 1:
        optimizer = l_bfgs(self, 20, 0, 0.5, 0)
    else:
        raise ValueError('optimizer not recognized')
    max_iter = 5000
    converged = False
    cur_iter = 0
    print('beginning optimization of neural network')
    for i in range(max_iter):
        cur_iter += 1
        converged = optimizer.next_step()
        if converged:
            break
        if print_iter:
            print("  {}  {:.12f}  {}  {}".format(
                cur_iter, optimizer.value, optimizer.error, optimizer.comment))
    if converged:
        print("  {}  {:.12f}  {}  Optimization Converged".format(
            cur_iter, optimizer.value, optimizer.error))
    else:
        print("  {}  {:.12f}  {}  Optimization Failed".format(
            cur_iter, optimizer.value, optimizer.error))
    return converged
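The loop above only relies on the optimizer exposing next_step(), value, error, and comment. A minimal steepest-descent class satisfying that interface might look like this sketch; the get_flat_params / set_flat_params / objective / gradient helpers on the network object are hypothetical names, and self.tolerance is the attribute train() sets above.

class steepest_descent:
    """Sketch of the optimizer interface used by train().

    get_flat_params / set_flat_params / objective / gradient are
    hypothetical helpers on the network object, not a known API.
    """

    def __init__(self, net, step_size=1e-2):
        self.net = net
        self.step_size = step_size
        self.value = net.objective()  # current objective value
        self.error = 0.0              # last change in the objective
        self.comment = ''

    def next_step(self):
        # One fixed-size step down the gradient of the objective.
        x = self.net.get_flat_params()
        g = self.net.gradient()
        self.net.set_flat_params(x - self.step_size * g)
        new_value = self.net.objective()
        self.error = abs(new_value - self.value)
        self.value = new_value
        self.comment = 'steepest descent step'
        # Converged when the objective stops changing.
        return self.error < self.net.tolerance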
Example #3
import sys

vertex_file = sys.argv[1]  # assumed: vertex_file is used below but its assignment was cut from this excerpt
edge_file = sys.argv[2]
poly_file = sys.argv[3]

# Parameters
lx = 9 * (2 / (3 * (3**0.5)))**0.5
ly = 4 * (2 / (3**0.5))**0.5
ka = 1.
A0 = 1.  # current preferred area for polygon
gamma = 0.04 * ka * A0  # hexagonal network
# gamma = 0.1 * ka * A0 # soft network
Lambda = 0.12 * ka * (A0**(3 / 2))  # hexagonal network
# Lambda = -0.85 * ka * A0**(3/2) # soft network
lmin = 0.2
delta_t = 0.05
eta = 1.

# get parameter dictionary
parameters = get_parameters(lx, ly, ka, gamma, Lambda, eta, lmin, delta_t)

# get vertices
vertices = read_vertices(vertex_file)

# get edges
edges = read_edges(edge_file)

# get polygons
poly_indices = read_poly_indices(poly_file)
polys = build_polygons(poly_indices, A0)

steepest_descent(vertices, edges, polys, parameters)
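In this example (and in Example #5 below) steepest_descent relaxes a 2D vertex model: vertex positions move down the gradient of the tissue energy with mobility 1/eta and time step delta_t. A heavily simplified sketch of such a driver, where energy_gradient is a hypothetical helper standing in for the real energy terms (area elasticity ka, line tension Lambda, contractility gamma) and the parameter key names are assumptions:

import numpy as np

def steepest_descent(vertices, edges, polys, parameters, n_steps=1000):
    """Overdamped relaxation: dr/dt = -(1/eta) * dE/dr, explicit Euler.

    energy_gradient is hypothetical; parameter key names are assumed.
    """
    dt = parameters['delta_t']
    eta = parameters['eta']
    r = np.asarray(vertices, dtype=float)  # (N, 2) vertex positions
    for _ in range(n_steps):
        g = energy_gradient(r, edges, polys, parameters)  # hypothetical dE/dr
        r -= (dt / eta) * g
    return r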
Example #4
#!/usr/bin/env python3

""" Testing for HW#1 question 3(b):
    testing steepest descent on a given 5x5 system
"""

from steepest_descent import steepest_descent
import numpy as np

A = np.array([
    [10,  1,  2,  3,  4],
    [ 1,  9, -1,  2, -3],
    [ 2, -1,  7,  3, -5],
    [ 3,  2,  3, 12, -1],
    [ 4, -3, -5, -1, 15]])

b = np.array([[12, -27, 14, -17, 12]]).T


if __name__ == "__main__":

    x = steepest_descent(A, b)

    print("x = ", x.T)
Example #5
lx = L[0]
ly = L[1]
ka = 1.

A0 = 1.
gamma = 0.04 * ka * A0  # hexagonal network
# gamma = 0.1 * ka * A0 # soft network
Lambda = 0.12 * ka * (A0**(3 / 2))  # hexagonal network
# Lambda = -0.85 * ka * A0**(3/2) # soft network

lmin = 0.01
delta_t = 0.05

# get parameter dictionary
parameters = get_parameters(lx, ly, ka, gamma, Lambda, lmin, delta_t)

# get vertices
vertices = read_vertices(vertex_file)

# get edges
edges = read_edges(edge_file)

# get polygons
poly_indices = read_poly_indices(poly_file)
polys = build_polygons(poly_indices, A0)

steepest_descent(vertices, edges, polys, parameters, folder)
Example #6
p = p[(len(u) - 1):len(u) - 1 + N_THETA]

# Determine the optimal Wiener filter
w_wiener = inv(R).dot(p)

# Find the optimal Wiener filter by gradient descent
mus = [1e-6]  # different step sizes
w0 = np.zeros(N_THETA)
for mu in mus:
    N = 5000  # number of iterations

    # Run optimal Wiener filtering with gradient descent.
    # The rows of Wt hold the filter at successive iterations.
    Wt = steepest_descent(R, p, w0, mu, N)

    # Compute, iteration by iteration, the mean squared error of the
    # filter coefficients relative to the optimal filter coefficients.
    mse_coeffs = np.mean((Wt - w_wiener)**2, 1)

    plt.plot(mse_coeffs, label='mu=10^%i' % math.log10(mu))

# Plotting
plt.xlabel('Iteration number')
plt.ylabel('MSE of the coefficients')
plt.title('Optimal Wiener filtering with gradient descent')
plt.grid(True)
plt.legend(loc='upper right')
plt.show()
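The steepest_descent call above iterates the classic update w <- w + mu*(p - R w), whose fixed point is the Wiener solution inv(R).dot(p); returning the full trajectory Wt, one filter per row, is inferred from how Wt is plotted. A minimal sketch under those assumptions:

import numpy as np

def steepest_descent(R, p, w0, mu, N):
    """Gradient descent on the Wiener MSE surface.

    The MSE gradient is proportional to (R w - p); the constant
    factor is absorbed into mu here. Returns the whole trajectory,
    one filter per row, matching the usage above.
    """
    W = np.zeros((N, len(w0)))
    w = np.asarray(w0, dtype=float)
    for n in range(N):
        w = w + mu * (p - R @ w)  # step toward the Wiener solution
        W[n] = w
    return W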