Example #1
def main():
    data = loadmat('ex4data1.mat')
    # The keys can be inspected with data.keys()
    y = data['y']
    X = data['X']
    # store the loaded data in X, y
    #sample = np.random.choice(X.shape[0], 100)
    #fig, ax = dd.displayData(X[sample])
    #plt.axis('off')
    #plt.show()
    weights = loadmat('ex4weights.mat')
    theta1, theta2 = weights['Theta1'], weights['Theta2']
    thetaVec = np.concatenate((np.ravel(theta1), np.ravel(theta2)))
    y = (y - 1)
    m = len(X)
    y_onehot = np.zeros((m, 10))  #5000x10
    for i in range(m):
        y_onehot[i][y[i]] = 1
    DELTA1, DELTA2 = backprop(thetaVec, 400, 25, 10, X, y_onehot, 1)
    checkNNGradients(backprop, 1)
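
The checkNNGradients call above verifies the analytic gradients from backprop against numerically estimated ones. Below is a minimal sketch of that finite-difference check; the helper name numerical_gradient and the cost-only interface are illustrative assumptions, not part of the exercise code.

import numpy as np

def numerical_gradient(cost, theta, eps=1e-4):
    # Central-difference estimate of the gradient of cost(theta).
    grad = np.zeros_like(theta)
    perturb = np.zeros_like(theta)
    for i in range(theta.size):
        perturb[i] = eps
        grad[i] = (cost(theta + perturb) - cost(theta - perturb)) / (2 * eps)
        perturb[i] = 0.0
    return grad

# If grad_analytic is the gradient returned by backprop, the relative difference
# norm(num_grad - grad_analytic) / norm(num_grad + grad_analytic) should be ~1e-9.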
Example #2
def main():
    data = loadmat("ex4data1.mat")

    y = data["y"].ravel()
    X = data["X"]

    num_entradas = X.shape[1]
    num_ocultas = 25
    num_etiquetas = 10

    # Transforms y into a matrix of vectors, where each vector is all 0s
    # except for the position given by y, which is set to 1
    # 3 ---> [0, 0, 0, 1, 0, 0, 0, 0, 0, 0]
    lenY = len(y)
    y = (y - 1)
    y_onehot = np.zeros((lenY, num_etiquetas))
    for i in range(lenY):
        y_onehot[i][y[i]] = 1

    # Random initialization of the two weight matrices
    Theta1 = pesosAleatorios(400, 25)  # (25, 401)
    Theta2 = pesosAleatorios(25, 10)  # (10, 26)

    # Read the weights from the file
    #weights = loadmat("ex4weights.mat")
    #Theta1 = weights["Theta1"] # (25, 401)
    #Theta2 = weights["Theta2"] # (10, 26)

    # Create a list of Thetas
    Thetas = [Theta1, Theta2]

    # Concatenate the weight matrices into a single vector
    unrolled_Thetas = [theta.ravel() for theta in Thetas]
    nn_params = np.concatenate(unrolled_Thetas)

    # Gradient check
    checkNNGradients(backprop, 1)
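
The one-hot loop in Examples #1 and #2 can also be written with NumPy fancy indexing. A self-contained sketch, assuming 0-based integer labels as the examples produce with y = y - 1:

import numpy as np

# Illustrative stand-ins: three 0-based labels and 10 classes.
y = np.array([2, 0, 9])
num_etiquetas = 10

# Row i of the identity matrix is the one-hot vector for class i, so fancy
# indexing builds the whole (m, num_etiquetas) matrix in one step.
y_onehot = np.eye(num_etiquetas)[y]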
Example #3
# net.randInitializeWeights()

print(Theta1.shape, Theta2.shape)
print(net.params['W1'].shape, net.params['W2'].shape)

# =============== Part 7: Implement Backpropagation ===============
#  Once your cost matches up with ours, you should proceed to implement the
#  backpropagation algorithm for the neural network. You should add to the
#  code you've written in nnCostFunction.m to return the partial
#  derivatives of the parameters.
#
print('Checking Backpropagation... ')

#  Check gradients by running checkNNGradients
checkNNGradients()

# input("Program paused. Press Enter to continue...")

# =============== Part 8: Implement Regularization ===============
#  Once your backpropagation implementation is correct, you should now
#  continue to implement the regularization with the cost and gradient.
#

print('Checking Backpropagation (Regularization) ... ')

#  Check gradients by running checkNNGradients
Lambda = 3.0
# checkNNGradients(Lambda)

# Also output the costFunction debugging values
# Concatenate the weight matrices into a single vector
unrolled_Thetas = [theta.ravel() for theta in Thetas]
params = np.concatenate(unrolled_Thetas)

# Obtain the optimal weights by training a network starting from the random weights
optTheta = opt.minimize(fun=backprop,
                        x0=params,
                        args=(num_entradas, capa_oculta, num_labels, X,
                              y_onehot, landa),
                        method='TNC',
                        jac=True,
                        options={'maxiter': 70})

# Check the gradient accuracy with checkNNGradients
print("Gradient accuracy difference: ",
      str(np.sum(checkNNGradients(backprop, 1))), ", maximum accepted = 10e-9")

# Split the optimal weights back into two matrices
Theta1Final = np.reshape(optTheta.x[:capa_oculta * (num_entradas + 1)],
                         (capa_oculta, (num_entradas + 1)))

Theta2Final = np.reshape(optTheta.x[capa_oculta * (num_entradas + 1):],
                         (num_labels, (capa_oculta + 1)))

# h, the network's output using the optimal weights
a1, z2, a2, z3, h = PropagacionHaciaDelante(X, Theta1Final, Theta2Final)

# Compute the neural network's accuracy
print("{0:.2f}% accuracy".format(calcAciertos(y, h)))
Example #5
m1, n1 = np.shape(initial_Theta1)
m2, n2 = np.shape(initial_Theta2)

# Unroll parameters
initial_nn_params = np.r_[(initial_Theta1.ravel().reshape(m1 * n1, 1),
                           initial_Theta2.ravel().reshape(m2 * n2, 1))]

## =============== Part 7: Implement Backpropagation ===============
#  Once your cost matches up with ours, you should proceed to implement the backpropagation algorithm
#  for the neural network. You should add to the code you've written in nnCostFunction.m to
#  return the partial derivatives of the parameters.

print('Checking Backpropagation... \n')

#  Check gradients by running checkNNGradients
checkNNGradients.checkNNGradients()

input('Program paused. Press enter to continue.')

## =============== Part 8: Implement Regularization ===============
#  Once your backpropagation implementation is correct, you should now continue to
#  implement the regularization with the cost and gradient.

print('Checking Backpropagation (w/ Regularization) ...\n ')

# Check gradients by running checkNNGradients
xlambda = 3
checkNNGradients.checkNNGradients(xlambda)

# Also output the costFunction debugging values
debug_J, debug_grad = nnCostFunction.regularization(nn_params,
Example #6
print("\nChecking Cost Function with Reguralization ... \n")

lambd = 3.0
checkNNCost(lambd)

print('This value should be about 2.1433733821')


input('\nProgram paused. Press enter to continue!!!')

# ================================ Step 7: Implement Backpropagation  ================================

print("\nChecking Backpropagation without Regularization ...\n")

lambd = 0.0
checkNNGradients(lambd)
input('\nProgram paused. Press enter to continue!!!')


# ================================ Step 8: Implement Backpropagation with Regularization ================================

print("\nChecking Backpropagation with Regularization ...\n")

lambd = 3.0
checkNNGradients(lambd)

input('\nProgram paused. Press enter to continue!!!')


# ================================ Step 9: Training Neural Networks & Prediction ================================
print("\nTraining Neural Network... \n")
Example #7
    return cost, gradient


X, y = Data_Management.load_mat("ex4data1.mat")

#indexRand = np.random.randint(0, 5001, 100)
#displayData.displayData(X[indexRand[:]])
#plt.show()

weights = loadmat('ex4weights.mat')
theta1, theta2 = weights['Theta1'], weights['Theta2']
theta1 = pesos_aleat(np.shape(theta1)[1]-1,np.shape(theta1)[0])
theta2 = pesos_aleat(np.shape(theta2)[1]-1,np.shape(theta2)[0])


#a1, a2, a3 = propagation(X, theta1, theta2)

theta_vector = np.concatenate((np.ravel(theta1), np.ravel(theta2)))


fmin = sciMin(fun=backdrop, x0=theta_vector,
              args=(np.shape(X)[1], 25, 10, X, y, learning_rate),
              method='TNC', jac=True,
              options={'maxiter': 70})

#backdrop(theta_vector, np.shape(X)[1], 25, 10, X, y, learning_rate)
print(check.checkNNGradients(backdrop, 1e-4))
#print(J(X, transform_y(y, 10), a3, 10, theta1, theta2))

#print(pesos_aleat(5, 6))
Example #8
print('Initializing Neural Network Parameters ...\n')
initial_Theta1 = randInitializeWeights(input_layer_size, hidden_layer_size)
initial_Theta2 = randInitializeWeights(hidden_layer_size, num_labels)

# Unroll parameters
initial_nn_params = np.hstack(
    (initial_Theta1.T.ravel(), initial_Theta2.T.ravel()))


# =============== Part 7: Implement Backpropagation ===============
# Once your cost matches up with ours, you should proceed to implement the
# backpropagation algorithm for the neural network. You should add to the
# code you've written in nnCostFunction.py to return the partial
# derivatives of the parameters.
print('Checking Backpropagation...')
checkNNGradients()


# =============== Part 8: Implement Regularization ===============
# Once your backpropagation implementation is correct, you should now
# continue to implement the regularization with the cost and gradient.
print('Checking Backpropagation (w/ Regularization) ...')

lamda = 3
checkNNGradients(lamda)
debug_J, _ = nnCostFunction(nn_params, input_layer_size,
                            hidden_layer_size, num_labels, X, y, lamda)

print('Cost at (fixed) debugging parameters (w/ lambda = 3): %f' % debug_J)
print('(this value should be about 0.576051)\n')
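
randInitializeWeights is a course-provided helper that breaks symmetry by sampling each weight uniformly from a small interval around zero. A sketch under that assumption (epsilon_init = 0.12 is the value commonly used in this exercise):

import numpy as np

def rand_initialize_weights(l_in, l_out, epsilon_init=0.12):
    # (l_out, 1 + l_in) weight matrix, including the bias column, with entries
    # drawn uniformly from [-epsilon_init, epsilon_init].
    return np.random.rand(l_out, 1 + l_in) * 2 * epsilon_init - epsilon_init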
Example #9
initial_Theta2 = randInitializeWeights(hidden_layer_size, num_labels)

# Unroll parameters
initial_nn_params = hstack((initial_Theta1.ravel(order='F'),
                            initial_Theta2.ravel(order='F')))

## =============== Part 7: Implement Backpropagation ===============
#  Once your cost matches up with ours, you should proceed to implement the
#  backpropagation algorithm for the neural network. You should add to the
#  code you've written in nnCostFunction.py to return the partial
#  derivatives of the parameters.
#
print '\nChecking Backpropagation...'

#  Check gradients by running checkNNGradients
checkNNGradients()

print 'Program paused. Press enter to continue.'
raw_input()


## =============== Part 8: Implement Regularization ===============
#  Once your backpropagation implementation is correct, you should now
#  continue to implement the regularization with the cost and gradient.
#

print '\nChecking Backpropagation (w/ Regularization) ...'

#  Check gradients by running checkNNGradients
lambda_ = 3
checkNNGradients(lambda_)
Example #10
    print "Part 6: Initializing Parameters\n"

    init_W1 = utils.randInitializeWeights(conf.INPUT_LAYER_SIZE,
                                          conf.HIDDEN_LAYER_SIZE)
    init_W2 = utils.randInitializeWeights(conf.HIDDEN_LAYER_SIZE,
                                          conf.NUM_LABELS)
    init_W = np.hstack((init_W1.flatten(0), init_W2.flatten(0)))
    init_W = init_W.reshape((len(init_W), 1))

    print "Part 7: Implement Regularization\n"

    print "Checking Backpropagation\n"
    LEARN_RATE = 3
    from checkNNGradients import checkNNGradients
    checkNNGradients(LEARN_RATE)
    J, _ = NN.nnCostFunction(W, conf.INPUT_LAYER_SIZE, conf.HIDDEN_LAYER_SIZE,
                             conf.NUM_LABELS, X, y, LEARN_RATE)
    print ("Cost at parameters (loaded from w1.txt and w2.txt): %f"
           "\n(this value should be about 0.576051)\n") % J

    print "Part 8: Training NN\n"

    def costFunc(p):
        return NN.nnCostFunction(p, conf.INPUT_LAYER_SIZE,
                                 conf.HIDDEN_LAYER_SIZE, conf.NUM_LABELS,
                                 X, y, conf.PART8_LEARN_RATE)

    nn_params = NN.trainNN(costFunc, init_W, conf.MAX_ITER)
    W1 = np.reshape(nn_params[:conf.HIDDEN_LAYER_SIZE *
                              (conf.INPUT_LAYER_SIZE + 1)],
Example #11
X = data['X']
Y = data['y']
Y = Y.astype(int)

num_etiquetas = 10
y = codificaY(Y, num_etiquetas)
y = y.astype(int)

num_entradas = 400
num_ocultas = 25
params_1 = pesosAleatorios(num_entradas, num_ocultas)
params_2 = pesosAleatorios(num_ocultas, num_etiquetas)
params_rn = np.concatenate((np.ravel(params_1), np.ravel(params_2)))
reg = 1

print(chk.checkNNGradients(backprop, 1))
#print(backprop(params_rn, num_entradas, num_ocultas, num_etiquetas,X, Y, reg))

res = opt.minimize(fun=backprop_rec, x0=params_rn, args=(num_entradas, num_ocultas, num_etiquetas, X, y, reg),
                    method="TNC", jac = True, options={"maxiter":70})

thetas = res.x
theta1 = np.reshape(thetas[:(num_ocultas * (num_entradas + 1))], (num_ocultas, (num_entradas+1)))
theta2 = np.reshape(thetas[num_ocultas * (num_entradas + 1):], (num_etiquetas, (num_ocultas + 1)))

#weights = loadmat("ex4weights.mat")
#theta1, theta2 = weights["Theta1"], weights["Theta2"]


print(calculate_precision(theta1, theta2, X, Y))
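
calculate_precision needs the network's forward pass. A minimal sketch of the two-layer feedforward with sigmoid activations, under the usual ex4 layout where the first column of each Theta holds the bias weights (the names sigmoid and forward are illustrative):

import numpy as np

def sigmoid(z):
    return 1.0 / (1.0 + np.exp(-z))

def forward(X, theta1, theta2):
    # Two-layer feedforward; returns the (m, num_labels) output activations.
    m = X.shape[0]
    a1 = np.hstack((np.ones((m, 1)), X))      # add bias unit to the input layer
    a2 = sigmoid(a1 @ theta1.T)               # hidden-layer activations
    a2 = np.hstack((np.ones((m, 1)), a2))     # add bias unit to the hidden layer
    return sigmoid(a2 @ theta2.T)             # output layer (hypothesis)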
Example #12
initial_Theta2 = randInitializeWeights(hidden_layer_size, num_labels)

# Unroll parameters
initial_nn_params = np.concatenate(
    (initial_Theta1.flatten(), initial_Theta2.flatten()))

# =============== Part 7: Implement Backpropagation ===============
# add in nnCostFunction.py to return the partial derivatives of the parameters.

print('\nChecking Backpropagation... \n')

# Check gradients by running checkNNGradients
checkNNGradients()

input('\nProgram paused. Press enter to continue.\n')

# =============== Part 8: Implement Regularization ===============

print('\nChecking Backpropagation (w/ Regularization) ... \n')

#  Check gradients by running checkNNGradients
lambda_param = 3
checkNNGradients(lambda_param)

# Also output the costFunction debugging values

debug_J, _ = nnCostFunction(nn_params, input_layer_size, hidden_layer_size,
                            num_labels, X, y, lambda_param)
Example #13
def ex4():
    ## Machine Learning Online Class - Exercise 4 Neural Network Learning

    #  Instructions
    #  ------------
    #
    #  This file contains code that helps you get started on the
    #  linear exercise. You will need to complete the following functions
    #  in this exercise:
    #
    #     sigmoidGradient.m
    #     randInitializeWeights.m
    #     nnCostFunction.m
    #
    #  For this exercise, you will not need to change any code in this file,
    #  or any other files other than those mentioned above.
    #

    ## Initialization
    #clear ; close all; clc

    ## Setup the parameters you will use for this exercise
    input_layer_size = 400  # 20x20 Input Images of Digits
    hidden_layer_size = 25  # 25 hidden units
    num_labels = 10  # 10 labels, from 1 to 10
    # (note that we have mapped "0" to label 10)

    ## =========== Part 1: Loading and Visualizing Data =============
    #  We start the exercise by first loading and visualizing the dataset.
    #  You will be working with a dataset that contains handwritten digits.
    #

    # Load Training Data
    print('Loading and Visualizing Data ...')

    mat = scipy.io.loadmat('ex4data1.mat')
    X = mat['X']
    y = mat['y'].ravel()
    m = X.shape[0]

    # Randomly select 100 data points to display
    sel = np.random.choice(m, 100, replace=False)

    displayData(X[sel, :])
    plt.savefig('figure1.png')

    print('Program paused. Press enter to continue.')
    #pause;

    ## ================ Part 2: Loading Parameters ================
    # In this part of the exercise, we load some pre-initialized
    # neural network parameters.

    print('\nLoading Saved Neural Network Parameters ...')

    # Load the weights into variables Theta1 and Theta2
    mat = scipy.io.loadmat('ex4weights.mat')
    Theta1 = mat['Theta1']
    Theta2 = mat['Theta2']

    # Unroll parameters
    nn_params = np.concatenate([Theta1.ravel(), Theta2.ravel()])

    ## ================ Part 3: Compute Cost (Feedforward) ================
    #  To the neural network, you should first start by implementing the
    #  feedforward part of the neural network that returns the cost only. You
    #  should complete the code in nnCostFunction.m to return cost. After
    #  implementing the feedforward to compute the cost, you can verify that
    #  your implementation is correct by verifying that you get the same cost
    #  as us for the fixed debugging parameters.
    #
    #  We suggest implementing the feedforward cost *without* regularization
    #  first so that it will be easier for you to debug. Later, in part 4, you
    #  will get to implement the regularized cost.
    #
    print('\nFeedforward Using Neural Network ...')

    # Weight regularization parameter (we set this to 0 here).
    lambda_value = 0

    J = nnCostFunction(nn_params, input_layer_size, hidden_layer_size,
                       num_labels, X, y, lambda_value)[0]

    print(
        'Cost at parameters (loaded from ex4weights): %f \n(this value should be about 0.287629)'
        % J)

    print('\nProgram paused. Press enter to continue.')
    #pause;

    ## =============== Part 4: Implement Regularization ===============
    #  Once your cost function implementation is correct, you should now
    #  continue to implement the regularization with the cost.
    #

    print('\nChecking Cost Function (w/ Regularization) ... ')

    # Weight regularization parameter (we set this to 1 here).
    lambda_value = 1

    J = nnCostFunction(nn_params, input_layer_size, hidden_layer_size,
                       num_labels, X, y, lambda_value)[0]

    print(
        'Cost at parameters (loaded from ex4weights): %f \n(this value should be about 0.383770)'
        % J)

    print('Program paused. Press enter to continue.')
    #pause;

    ## ================ Part 5: Sigmoid Gradient  ================
    #  Before you start implementing the neural network, you will first
    #  implement the gradient for the sigmoid function. You should complete the
    #  code in the sigmoidGradient.m file.
    #

    print('\nEvaluating sigmoid gradient...')

    g = sigmoidGradient(np.array([1, -0.5, 0, 0.5, 1]))
    print('Sigmoid gradient evaluated at [1 -0.5 0 0.5 1]:')
    print(formatter('%f ', g))
    print('\n')

    print('Program paused. Press enter to continue.')
    #pause;

    ## ================ Part 6: Initializing Parameters ================
    #  In this part of the exercise, you will be starting to implement a two
    #  layer neural network that classifies digits. You will start by
    #  implementing a function to initialize the weights of the neural network
    #  (randInitializeWeights.m)

    print('\nInitializing Neural Network Parameters ...')

    initial_Theta1 = randInitializeWeights(input_layer_size, hidden_layer_size)
    initial_Theta2 = randInitializeWeights(hidden_layer_size, num_labels)

    # Unroll parameters
    initial_nn_params = np.concatenate(
        [initial_Theta1.ravel(),
         initial_Theta2.ravel()])

    ## =============== Part 7: Implement Backpropagation ===============
    #  Once your cost matches up with ours, you should proceed to implement the
    #  backpropagation algorithm for the neural network. You should add to the
    #  code you've written in nnCostFunction.m to return the partial
    #  derivatives of the parameters.
    #
    print('\nChecking Backpropagation... ')

    #  Check gradients by running checkNNGradients
    checkNNGradients()

    print('\nProgram paused. Press enter to continue.')
    #pause;

    ## =============== Part 8: Implement Regularization ===============
    #  Once your backpropagation implementation is correct, you should now
    #  continue to implement the regularization with the cost and gradient.
    #

    print('\nChecking Backpropagation (w/ Regularization) ... ')

    #  Check gradients by running checkNNGradients
    lambda_value = 3
    checkNNGradients(lambda_value)

    # Also output the costFunction debugging values
    debug_J = nnCostFunction(nn_params, input_layer_size, hidden_layer_size,
                             num_labels, X, y, lambda_value)[0]

    print(
        '\n\nCost at (fixed) debugging parameters (w/ lambda = 3): %f \n(this value should be about 0.576051)\n\n'
        % debug_J)

    print('Program paused. Press enter to continue.')
    #pause;

    ## =================== Part 8: Training NN ===================
    #  You have now implemented all the code necessary to train a neural
    #  network. To train your neural network, we will now use "fmincg", which
    #  is a function which works similarly to "fminunc". Recall that these
    #  advanced optimizers are able to train our cost functions efficiently as
    #  long as we provide them with the gradient computations.
    #
    print('\nTraining Neural Network... ')

    #  After you have completed the assignment, change the MaxIter to a larger
    #  value to see how more training helps.
    options = {'maxiter': 50}

    #  You should also try different values of lambda
    lambda_value = 1

    # Create "short hand" for the cost function to be minimized
    costFunction = lambda p: nnCostFunction(
        p, input_layer_size, hidden_layer_size, num_labels, X, y, lambda_value)

    # Now, costFunction is a function that takes in only one argument (the
    # neural network parameters)
    res = optimize.minimize(costFunction,
                            initial_nn_params,
                            jac=True,
                            method='TNC',
                            options=options)
    nn_params = res.x

    # Obtain Theta1 and Theta2 back from nn_params
    Theta1 = nn_params[0:hidden_layer_size * (input_layer_size + 1)].reshape(
        hidden_layer_size, input_layer_size + 1)

    Theta2 = nn_params[hidden_layer_size * (input_layer_size + 1):].reshape(
        num_labels, hidden_layer_size + 1)

    print('Program paused. Press enter to continue.')
    #pause;

    ## ================= Part 9: Visualize Weights =================
    #  You can now "visualize" what the neural network is learning by
    #  displaying the hidden units to see what features they are capturing in
    #  the data.

    print('\nVisualizing Neural Network... ')

    displayData(Theta1[:, 1:])
    plt.savefig('figure2.png')

    print('\nProgram paused. Press enter to continue.')
    #pause;

    ## ================= Part 10: Implement Predict =================
    #  After training the neural network, we would like to use it to predict
    #  the labels. You will now implement the "predict" function to use the
    #  neural network to predict the labels of the training set. This lets
    #  you compute the training set accuracy.

    pred = predict(Theta1, Theta2, X)

    print('\nTraining Set Accuracy: %f' % (np.mean(
        (pred == y).astype(int)) * 100))
def main():
    ''' Main function  '''

    ## %% =========== Part 1: Loading and Visualizing Data =============
    #%  We start the exercise by first loading and visualizing the dataset. 
    #%  You will be working with a dataset that contains handwritten digits.
    #%


    # Read the Matlab data
    m, n, X, y = getMatlabTrainingData()

    # number of features
    input_layer_size = n    
 

    # Select some random images from X
    print('Selecting random examples of the data to display.\n')
    sel = np.random.permutation(m)
    sel = sel[0:100]
    
    #  Re-work the data orientation of each training example
    image_size = 20
    XMatlab = np.copy(X) # Need a deep copy, not just the reference
    for i in range(m): 
        XMatlab[i, :] = XMatlab[i, :].reshape(image_size, image_size).transpose().reshape(1, image_size*image_size)

    # display the sample images
    displayData(XMatlab[sel, :])

    # Print Out the labels for what is being seen. 
    print('These are the labels for the data ...\n')
    print(y[sel, :].reshape(10, 10))

    # Pause program
    print("Program paused. Press Ctrl-D to continue.\n")
    code.interact(local=dict(globals(), **locals()))
    print(" ... continuing\n ")  


#%% ================ Part 2: Loading Parameters ================
#% In this part of the exercise, we load some pre-initialized 
# % neural network parameters.

    print('\nLoading Saved Neural Network Parameters ...\n')

    # Load the weights into variables Theta1 and Theta2
    import scipy.io as sio
    fnWeights = '/home/jennym/Kaggle/DigitRecognizer/ex4/ex4weights.mat'
    weights = sio.loadmat(fnWeights)
    Theta1 = weights['Theta1']
    Theta2 = weights['Theta2']

    #% Unroll parameters 
    nn_params = np.hstack((Theta1.ravel(order='F'), Theta2.ravel(order='F')))

#%% ================ Part 3: Compute Cost (Feedforward) ================
#%  To the neural network, you should first start by implementing the
#%  feedforward part of the neural network that returns the cost only. You
#%  should complete the code in nnCostFunction.m to return cost. After
#%  implementing the feedforward to compute the cost, you can verify that
#%  your implementation is correct by verifying that you get the same cost
#%  as us for the fixed debugging parameters.
#%
#%  We suggest implementing the feedforward cost *without* regularization
#%  first so that it will be easier for you to debug. Later, in part 4, you
#%  will get to implement the regularized cost.
#%
    print('\nFeedforward Using Neural Network ...\n')

    #% Weight regularization parameter (we set this to 0 here).
    MLlambda = 0.0

    # Kludge: put y back to the Matlab version, then adjust to use python
    #  indexing later into y_matrix
    y[(y == 0)] = 10
    y = y - 1
    J, _ = nnCostFunction(nn_params, input_layer_size, hidden_layer_size,
                   num_labels, X, y, MLlambda)

    print('Cost at parameters (loaded from ex4weights): ' + str(J) + 
          '\n (this value should be about 0.287629)\n')

    # Pause
    print("Program paused. Press Ctrl-D to continue.\n")
    code.interact(local=dict(globals(), **locals()))
    print(" ... continuing\n ")  

#%% =============== Part 4: Implement Regularization ===============
#%  Once your cost function implementation is correct, you should now
#%  continue to implement the regularization with the cost.
#%

    print('\nChecking Cost Function (with Regularization) ... \n')

    # % Weight regularization parameter (we set this to 1 here).
    MLlambda = 1.0

    J, _ = nnCostFunction(nn_params, input_layer_size, hidden_layer_size,
                   num_labels, X, y, MLlambda)

    print('Cost at parameters (loaded from ex4weights): ' + str(J) +
         '\n(this value should be about 0.383770)\n');

    # Pause
    print("Program paused. Press Ctrl-D to continue.\n")
    code.interact(local=dict(globals(), **locals()))
    print(" ... continuing\n ")  


#%% ================ Part 5: Sigmoid Gradient  ================
#%  Before you start implementing the neural network, you will first
#%  implement the gradient for the sigmoid function. You should complete the
#%  code in the sigmoidGradient.m file.
#%

    print('\nEvaluating sigmoid gradient...\n')
    g = sigmoidGradient(np.array([1, -0.5,  0,  0.5, 1]))
    print('Sigmoid gradient evaluated at [1 -0.5 0 0.5 1]:\n  ')
    print(g)
    print('\n\n')

    # Pause
    print("Program paused. Press Ctrl-D to continue.\n")
    code.interact(local=dict(globals(), **locals()))
    print(" ... continuing\n ")  

 
#%% ================ Part 6: Initializing Parameters ================
#%  In this part of the exercise, you will be starting to implement a two
#%  layer neural network that classifies digits. You will start by
#%  implementing a function to initialize the weights of the neural network
#%  (randInitializeWeights.m)

    print('\nInitializing Neural Network Parameters ...\n')

    initial_Theta1 = randInitializeWeights(input_layer_size, hidden_layer_size)
    initial_Theta2 = randInitializeWeights(hidden_layer_size, num_labels)

    #% Unroll parameters
    initial_nn_params = np.hstack(( initial_Theta1.ravel(order = 'F'),
                                   initial_Theta2.ravel(order = 'F')))
    # Pause
    print("Program paused. Press Ctrl-D to continue.\n")
    code.interact(local=dict(globals(), **locals()))
    print(" ... continuing\n ")  


#%% =============== Part 7: Implement Backpropagation ===============
#%  Once your cost matches up with ours, you should proceed to implement the
#%  backpropagation algorithm for the neural network. You should add to the
#%  code you've written in nnCostFunction.m to return the partial
#%  derivatives of the parameters.
#%
    print('\nChecking Backpropagation... \n')

    #%  Check gradients by running checkNNGradients
    checkNNGradients()

    # Pause
    print("Program paused. Press Ctrl-D to continue.\n")
    code.interact(local=dict(globals(), **locals()))
    print(" ... continuing\n ")  

#%% =============== Part 8: Implement Regularization ===============
#%  Once your backpropagation implementation is correct, you should now
#%  continue to implement the regularization with the cost and gradient.
#%

    print('\nChecking Backpropagation (w/ Regularization) ... \n')

    #%  Check gradients by running checkNNGradients
    MLlambda = 3
    checkNNGradients(MLlambda)

    # Pause
    print("Program paused. Press Ctrl-D to continue.\n")
    code.interact(local=dict(globals(), **locals()))
    print(" ... continuing\n ")  

    #% Also output the costFunction debugging values
    debug_J, _  = nnCostFunction(nn_params, input_layer_size,
                          hidden_layer_size, num_labels, X, y, MLlambda)

    print('\n\n Cost at (fixed) debugging parameters (w/ lambda = ' + 
          '{0}): {1}'.format(MLlambda, debug_J))
    print('\n  (this value should be about 0.576051)\n\n')

    # Pause
    print("Program paused. Press Ctrl-D to continue.\n")
    code.interact(local=dict(globals(), **locals()))
    print(" ... continuing\n ")

#%% =================== Part 8b: Training NN ===================
#%  You have now implemented all the code necessary to train a neural 
#%  network. To train your neural network, we will now use "fmincg", which
#%  is a function which works similarly to "fminunc". Recall that these
#%  advanced optimizers are able to train our cost functions efficiently as
#%  long as we provide them with the gradient computations.
#%
    print ('\nTraining Neural Network... \n')

    #%  After you have completed the assignment, change the MaxIter to a larger
    #%  value to see how more training helps.
    #% jkm change maxIter from 50-> 400
    options = {'maxiter': MAXITER}

    #%  You should also try different values of lambda
    MLlambda = 1

    #% Create "short hand" for the cost function to be minimized
    costFunc = lambda p: nnCostFunction(p, input_layer_size, hidden_layer_size,
                               num_labels, X, y, MLlambda)

    #% Now, costFunction is a function that takes in only one argument (the
    #% neural network parameters)

    '''
    NOTES: Call scipy optimize minimize function
        method : str or callable, optional Type of solver. 
           CG -> Minimization of scalar function of one or more variables 
                 using the conjugate gradient algorithm.

        jac : bool or callable, optional Jacobian (gradient) of objective function. 
              Only for CG, BFGS, Newton-CG, L-BFGS-B, TNC, SLSQP, dogleg, trust-ncg. 
              If jac is a Boolean and is True, fun is assumed to return the gradient 
              along with the objective function. If False, the gradient will be 
              estimated numerically. jac can also be a callable returning the 
              gradient of the objective. In this case, it must accept the same 
              arguments as fun.
        callback : callable, optional. Called after each iteration, as callback(xk), 
              where xk is the current parameter vector.
'''
    # Setup a callback for displaying the cost at the end of each iteration 
    class Callback(object): 
        def __init__(self): 
            self.it = 0 
        def __call__(self, p): 
            self.it += 1 
            print "Iteration %5d | Cost: %e" % (self.it, costFunc(p)[0]) 
 
   
    result = sci.minimize(costFunc, initial_nn_params, method='CG', 
                   jac=True, options=options, callback=Callback()) 
    nn_params = result.x 
    cost = result.fun 
 
    # matlab: [nn_params, cost] = fmincg(costFunction, initial_nn_params, options);

    #% Obtain Theta1 and Theta2 back from nn_params
    Theta1 = np.reshape(nn_params[:hidden_layer_size * (input_layer_size + 1)],
               (hidden_layer_size, (input_layer_size + 1)), 
                order = 'F')

    Theta2 = np.reshape(nn_params[hidden_layer_size * (input_layer_size + 1):], 
               (num_labels, (hidden_layer_size + 1)), 
               order = 'F')  


    # Pause
    print("Program paused. Press Ctrl-D to continue.\n")
    code.interact(local=dict(globals(), **locals()))
    print(" ... continuing\n ")


#%% ================= Part 9: Visualize Weights =================
#%  You can now "visualize" what the neural network is learning by 
#%  displaying the hidden units to see what features they are capturing in 
#%  the data.#

    print('\nVisualizing Neural Network... \n')

    displayData(Theta1[:, 1:])

    # Pause
    print("Program paused. Press Ctrl-D to continue.\n")
    code.interact(local=dict(globals(), **locals()))
    print(" ... continuing\n ")


#%% ================= Part 10: Implement Predict =================
#%  After training the neural network, we would like to use it to predict
#%  the labels. You will now implement the "predict" function to use the
#%  neural network to predict the labels of the training set. This lets
#%  you compute the training set accuracy.

    pred = predict(Theta1, Theta2, X)

    # JKM - my array was column stacked - don't understand why this works
    pp = np.row_stack(pred)
    accuracy = np.mean(np.double(pp == y)) * 100

    print('\nTraining Set Accuracy: {0} \n'.format(accuracy))

    # Pause
    print("Program paused. Press Ctrl-D to continue.\n")
    code.interact(local=dict(globals(), **locals()))
    print(" ... continuing\n ")

  
# ========================================

    # All Done!
    return
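
Example #13 unrolls the weights with ravel(order='F') and reshapes them back with order='F'; mixing C and Fortran order is a common source of silent bugs when porting from MATLAB. A small self-contained round-trip check, assuming the (25, 401) and (10, 26) shapes used in this exercise:

import numpy as np

Theta1 = np.arange(25 * 401, dtype=float).reshape(25, 401)
Theta2 = np.arange(10 * 26, dtype=float).reshape(10, 26)

# Unroll in column-major (Fortran) order, as the MATLAB-style code does ...
nn_params = np.hstack((Theta1.ravel(order='F'), Theta2.ravel(order='F')))

# ... and reshape back with the same order; the round trip is exact.
T1 = np.reshape(nn_params[:25 * 401], (25, 401), order='F')
T2 = np.reshape(nn_params[25 * 401:], (10, 26), order='F')
assert np.array_equal(T1, Theta1) and np.array_equal(T2, Theta2)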
Example #15
# Unroll parameters
initial_Theta1 = np.reshape(initial_Theta1, initial_Theta1.size, order='F')
initial_Theta2 = np.reshape(initial_Theta2, initial_Theta2.size, order='F')
initial_nn_params = np.hstack((initial_Theta1, initial_Theta2))


#%% =============== Part 6: Implement Backpropagation ===============
#  Now you will implement the backpropagation algorithm for the neural 
#  network. You should add code to nnCostFunction.m to return the partial
#  derivatives of the parameters.
#
if debugMode:
    print('Checking Backpropagation...')
    
    #  Check gradients by running checkNNGradients
    checkNNGradients()
    
    input('Program paused. Press enter to continue')


#%% =============== Part 7: Implement Regularization ===============
#  Once your backpropagation implementation is correct, you should now
#  continue to implement the regularization gradient.
#

    print('Checking Backpropagation (w/ Regularization) ... ')
    #
    ##  Check gradients by running checkNNGradients
    lambda_value = 3
    checkNNGradients(lambda_value)
    
Example #16
                                  hidden_layer_size, num_labels, X, y, Lambda)
D1, D2 = reshapeParams(flattenedD1D2)

checkGradient(myThetas, [D1, D2], X, y)

raw_input("Program paused. Press Enter to continue...")

## =============== Part 8: Implement Regularization ===============
#  Once your backpropagation implementation is correct, you should now
#  continue to implement the regularization with the cost and gradient.

print 'Checking Backpropagation (w/ Regularization) ... '

#  Check gradients by running checkNNGradients
Lambda = 3.0
checkNNGradients(Lambda)

# Also output the costFunction debugging values
debug_J, _ = nnCostFunction(nn_params, input_layer_size, hidden_layer_size,
                            num_labels, X, y, Lambda)

print 'Cost at (fixed) debugging parameters (w/ lambda = 3): %f (this value should be about 0.576051)\n\n' % debug_J

raw_input("Program paused. Press Enter to continue...")

## =================== Part 8: Training NN ===================
#  You have now implemented all the code necessary to train a neural
#  network. To train your neural network, we will now use "fmincg", which
#  is a function which works similarly to "fminunc". Recall that these
#  advanced optimizers are able to train our cost functions efficiently as
#  long as we provide them with the gradient computations.
Example #17
# Unroll parameters
initial_nn_params = np.concatenate((initial_Theta1.reshape(initial_Theta1.size,
                                                           order='F'),
                                    initial_Theta2.reshape(initial_Theta2.size,
                                                           order='F')))

## =============== Part 7: Implement Backpropagation ===============
#  Once your cost matches up with ours, you should proceed to implement the
#  backpropagation algorithm for the neural network. You should add to the
#  code you've written in nnCostFunction.m to return the partial
#  derivatives of the parameters.
#
print('Checking Backpropagation... ')

#  Check gradients by running checkNNGradients
cnng.checkNNGradients()

input('Program paused. Press enter to continue.\n')

## =============== Part 8: Implement Regularization ===============
#  Once your backpropagation implementation is correct, you should now
#  continue to implement the regularization with the cost and gradient.
#

print('\nChecking Backpropagation (w/ Regularization) ... \n')

#  Check gradients by running checkNNGradients
lambda_reg = 3
cnng.checkNNGradients(lambda_reg)

# Also output the costFunction debugging values
Example #18
initial_Theta2 = randInitializeWeights(hidden_layer_size, num_labels)

# Unroll parameters
initial_nn_params = hstack((initial_Theta1.ravel(order='F'),
                            initial_Theta2.ravel(order='F')))

## =============== Part 7: Implement Backpropagation ===============
#  Once your cost matches up with ours, you should proceed to implement the
#  backpropagation algorithm for the neural network. You should add to the
#  code you've written in nnCostFunction.py to return the partial
#  derivatives of the parameters.
#
print('\nChecking Backpropagation...')

#  Check gradients by running checkNNGradients
checkNNGradients()

print('\nProgram paused. Press enter to continue.')
input()


## =============== Part 8: Implement Regularization ===============
#  Once your backpropagation implementation is correct, you should now
#  continue to implement the regularization with the cost and gradient.
#

print('Checking Backpropagation (w/ Regularization) ...')

#  Check gradients by running checkNNGradients
lambda_ = 3
checkNNGradients(lambda_)
Example #19
Theta2 = np.array(ex4Thetas['Theta2'])

nn_params = np.append(Theta1.flatten('F'), Theta2.flatten('F'), axis=0)

lam = 1

[J, grad] = nnCostFunctionVec(nn_params, input_layer_size, hidden_layer_size,
                              num_labels, X, y, lam)

print('Cost at parameters loaded from ex4Weights: ', J,
      ' \n(this value should be about 0.383770)\n ')

g = sigmoidGradient(np.array([-1, -0.5, 0, 0.5, 1]))
print("Sigmoid gradient evaluated at [-1, -0.5, 0, 0.5, 1]: ", g)

checkNNGradients(lam)

initial_Theta1 = randInitializeWeights(input_layer_size, hidden_layer_size)
initial_Theta2 = randInitializeWeights(hidden_layer_size, num_labels)

nn_params = np.append(initial_Theta1.flatten('F'), initial_Theta2.flatten('F'))

costFunc = lambda p: nnCostFunctionVec(p,
                                       input_layer_size,
                                       hidden_layer_size,
                                       num_labels,
                                       Xtraining,
                                       ytraining,
                                       lam,
                                       returnType='J')
gradFunc = lambda p: nnCostFunctionVec(p,
initial_Theta2 = randInitializeWeights(hidden_layer_size+1, num_labels)

# Unroll parameters
initial_nn_params = np.hstack((initial_Theta1.T.ravel(), initial_Theta2.T.ravel()))


## =============== Part 7: Implement Backpropagation ===============
#  Once your cost matches up with ours, you should proceed to implement the
#  backpropagation algorithm for the neural network. You should add to the
#  code you've written in nnCostFunction.m to return the partial
#  derivatives of the parameters.
#
print 'Checking Backpropagation... '

#  Check gradients by running checkNNGradients
checkNNGradients()

raw_input("Program paused. Press Enter to continue...")


## =============== Part 8: Implement Regularization ===============
#  Once your backpropagation implementation is correct, you should now
#  continue to implement the regularization with the cost and gradient.
#

print 'Checking Backpropagation (w/ Regularization) ... '

#  Check gradients by running checkNNGradients
Lambda = 3.0
checkNNGradients(Lambda)
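
sigmoidGradient, called in Example #19, implements the derivative g'(z) = g(z) * (1 - g(z)) used during backpropagation. A minimal NumPy version:

import numpy as np

def sigmoid(z):
    return 1.0 / (1.0 + np.exp(-z))

def sigmoid_gradient(z):
    # Element-wise derivative of the sigmoid: g'(z) = g(z) * (1 - g(z)).
    g = sigmoid(z)
    return g * (1 - g)

# sigmoid_gradient(np.array([-1, -0.5, 0, 0.5, 1])) peaks at 0.25 for z = 0.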
Example #21
    # Unroll parameters
    initial_nn_params = np.hstack(
        (initial_Theta1.flatten(), initial_Theta2.flatten()))
    print(initial_nn_params.shape)

    # =============== Part 7: Implement Backpropagation ===============
    # Once your cost matches up with ours, you should proceed to implement the
    # backpropagation algorithm for the neural network. You should add to the
    # code you've written in nnCostFunction.m to return the partial
    # derivatives of the parameters.
    #

    print('Checking Backpropagation...')

    #  Check gradients by running checkNNGradients
    checkNNGradients()

    input('Program paused. Press enter to continue.')

    # =============== Part 8: Implement Regularization ===============
    # Once your backpropagation implementation is correct, you should now
    # continue to implement the regularization with the cost and gradient.
    #

    print('Checking Backpropagation (w/ Regularization) ...')

    #  Check gradients by running checkNNGradients
    lmbda = 3
    checkNNGradients(lmbda)

    # Also output the costFunction debugging values
Example #22
print "\nChecking Cost Function with Reguralization ... \n"

lambd = 3.0
checkNNCost(lambd)

print 'This value should be about 2.1433733821'


raw_input('\nProgram paused. Press enter to continue!!!')

# ================================ Step 7: Implement Backpropagation  ================================

print "\nChecking Backpropagation without Regularization ...\n"

lambd = 0.0
checkNNGradients(lambd)
raw_input('\nProgram paused. Press enter to continue!!!')


# ================================ Step 8: Implement Backpropagation with Regularization ================================

print "\nChecking Backpropagation with Regularization ...\n"

lambd = 3.0
checkNNGradients(lambd)

raw_input('\nProgram paused. Press enter to continue!!!')


# ================================ Step 9: Training Neural Networks & Prediction ================================
print "\nTraining Neural Network... \n"
initial_Theta1 = randInitializeWeights(input_layer_size, hidden_layer_size)
initial_Theta2 = randInitializeWeights(hidden_layer_size, num_labels)

# Unroll parameters
initial_nn_params = np.hstack((initial_Theta1.ravel(), initial_Theta2.ravel()))

## =============== Part 7: Implement Backpropagation ===============
#  Once your cost matches up with ours, you should proceed to implement the
#  backpropagation algorithm for the neural network. You should add to the
#  code you've written in nnCostFunction.m to return the partial
#  derivatives of the parameters.
#
print('\nChecking Backpropagation... \n')

#  Check gradients by running checkNNGradients
checkNNGradients()

### =============== Part 8: Implement Regularization ===============
##  Once your backpropagation implementation is correct, you should now
##  continue to implement the regularization with the cost and gradient.
##

print('\nChecking Backpropagation (w/ Regularization) ... \n')

#  Check gradients by running checkNNGradients
lmbda = 3
checkNNGradients(lmbda)

# Also output the costFunction debugging values
debug_J  = nnCostFunction(nn_params, input_layer_size, \
                          hidden_layer_size, num_labels, X, y, lmbda)
Example #24
# Unroll parameters
initial_nn_params = np.hstack((initial_Theta1.flatten(),initial_Theta2.flatten()))


# =============== Part 7: Implement Backpropagation ===============
#  Once your cost matches up with ours, you should proceed to implement the
#  backpropagation algorithm for the neural network. You should add to the
#  code you've written in nnCostFunction.m to return the partial
#  derivatives of the parameters.
#

print('\nChecking Backpropagation... \n')

#  Check gradients by running checkNNGradients
checkNNGradients(0)


'''
%% =============== Part 8: Implement Regularization ===============
%  Once your backpropagation implementation is correct, you should now
%  continue to implement the regularization with the cost and gradient.
%
'''

print('\nChecking Backpropagation (w/ Regularization) ... \n')

#  Check gradients by running checkNNGradients
_lambda = 3
checkNNGradients(_lambda)
# Unroll parameters
initial_nn_params = np.r_[initial_Theta1.ravel(), initial_Theta2.ravel()]

print("init nn params shape", initial_nn_params.shape)
pause()

"""## Part 7: Implement Backpropagation ===============
  Once your cost matches up with ours, you should proceed to implement the
  backpropagation algorithm for the neural network. You should add to the
  code you've written in nnCostFunction.m to return the partial
  derivatives of the parameters."""

print('\nChecking Backpropagation... \n')

#  Check gradients by running checkNNGradients
checkNNGradients()

print('\nProgram paused. Press enter to continue.\n')
pause()


"""## Part 8: Implement Regularization ===============
  Once your backpropagation implementation is correct, you should now
  continue to implement the regularization with the cost and gradient.
"""

print('\nChecking Backpropagation (w/ Regularization) ... \n')

#  Check gradients by running checkNNGradients
reg_lambda = 3
checkNNGradients(reg_lambda)
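
All of the examples above pass a backprop-style cost function to checkNNGradients or to scipy.optimize.minimize with jac=True, i.e. a function that returns (cost, gradient) for the unrolled parameters. Below is a compact sketch of that interface for the two-layer network of this exercise, following the standard delta rules from the course (the name backprop_sketch and the C-order unrolling are assumptions; individual examples differ in these details).

import numpy as np

def sigmoid(z):
    return 1.0 / (1.0 + np.exp(-z))

def backprop_sketch(params, n_in, n_hidden, n_labels, X, y_onehot, reg):
    # Return (cost, unrolled gradient) for a two-layer sigmoid network.
    Theta1 = params[:n_hidden * (n_in + 1)].reshape(n_hidden, n_in + 1)
    Theta2 = params[n_hidden * (n_in + 1):].reshape(n_labels, n_hidden + 1)
    m = X.shape[0]

    # Forward pass
    a1 = np.hstack((np.ones((m, 1)), X))
    z2 = a1 @ Theta1.T
    a2 = np.hstack((np.ones((m, 1)), sigmoid(z2)))
    h = sigmoid(a2 @ Theta2.T)

    # Regularized cross-entropy cost (bias columns excluded from the penalty)
    cost = (-np.sum(y_onehot * np.log(h) + (1 - y_onehot) * np.log(1 - h)) / m
            + reg / (2 * m) * (np.sum(Theta1[:, 1:] ** 2) + np.sum(Theta2[:, 1:] ** 2)))

    # Backward pass: output-layer and hidden-layer deltas
    d3 = h - y_onehot
    d2 = (d3 @ Theta2)[:, 1:] * sigmoid(z2) * (1 - sigmoid(z2))
    grad1 = d2.T @ a1 / m
    grad2 = d3.T @ a2 / m
    grad1[:, 1:] += reg / m * Theta1[:, 1:]
    grad2[:, 1:] += reg / m * Theta2[:, 1:]

    return cost, np.concatenate((grad1.ravel(), grad2.ravel()))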