Example 1
import time

import numpy as np
from scipy.optimize import minimize as sciMin

# random_weights, backdrop, forward_prop, evaluateLearning, lambda_,
# XArr, YArr and models_times are assumed to be defined elsewhere in
# the module this function comes from.
def NeuralNet(_data):
    # Note: _data is unused; the function works on the module-level
    # XArr/YArr arrays instead.
    _X = XArr.copy()
    _y = YArr.copy()

    _y = np.reshape(_y, (np.shape(_y)[0], 1))

    num_entradas = np.shape(_X)[1]
    num_ocultas = 25
    numTags = 1

    the1 = random_weights(num_entradas, num_ocultas)
    the2 = random_weights(num_ocultas, numTags)

    theta_vector = np.concatenate((np.ravel(the1), np.ravel(the2)))
    start = time.time()
    thetas = sciMin(fun=backdrop, x0=theta_vector,
                    args=(num_entradas, num_ocultas, numTags, _X, _y, lambda_),
                    method='TNC', jac=True, options={'maxiter': 70}).x
    end = time.time()

    the1 = np.reshape(thetas[:num_ocultas*(num_entradas+1)], (num_ocultas, (num_entradas+1)))
    the2 = np.reshape(thetas[num_ocultas* (num_entradas+1):], (numTags, (num_ocultas+1)))

    a, c = evaluateLearning(_y, forward_prop(_X, the1, the2)[0])

    print("----------------------------------------------\n")
    print("\n TRAINING EXECUTION TIME:", end - start, "seconds")
    
    print("PREDICTION NEURAL_NETWORK: " + str(a) + " %")
    models_times[1] = a
    print("----------------------------------------------\n")
Example 2
    # Method taken from a larger class; load_csv, pesos_aleat, backdrop,
    # forward_propagate, checkLearned, lambda_, np and sciMin are assumed
    # to be available in the enclosing scope.
    def doIt(data):
        X, y = load_csv(data)
        X = np.array(X)
        y = np.array(y)
        y = np.reshape(y, (np.shape(y)[0], 1))

        # Split the samples by class (apparently kept for plotting; the
        # subsets are not used again in this snippet).
        # ------------------------------
        legendPos = np.where(y == 1)
        legendX = X[legendPos[0]]

        normiePos = np.where(y == 0)
        normieX = X[normiePos[0]]
        # ------------------------------

        num_entradas = np.shape(X)[1]
        num_ocultas = 25
        num_etiquetas = 1

        theta1 = pesos_aleat(num_entradas, num_ocultas)
        theta2 = pesos_aleat(num_ocultas, num_etiquetas)

        theta_vector = np.concatenate((np.ravel(theta1), np.ravel(theta2)))

        thetas = sciMin(fun=backdrop,
                        x0=theta_vector,
                        args=(num_entradas, num_ocultas, num_etiquetas, X, y,
                              lambda_),
                        method='TNC',
                        jac=True,
                        options={
                            'maxiter': 70
                        }).x

        theta1 = np.reshape(thetas[:num_ocultas * (num_entradas + 1)],
                            (num_ocultas, (num_entradas + 1)))
        theta2 = np.reshape(thetas[num_ocultas * (num_entradas + 1):],
                            (num_etiquetas, (num_ocultas + 1)))
        a, c = checkLearned(y, forward_propagate(X, theta1, theta2)[4])

        print("Neural network accuracy: " + str(a) + " %")
Example 3
    return cost, gradient  # end of the cost/gradient function (truncated above)


X, y = Data_Management.load_mat("ex4data1.mat")

#indexRand = np.random.randint(0, 5001, 100)
#displayData.displayData(X[indexRand[:]])
#plt.show()

weights = loadmat('ex4weights.mat')
theta1, theta2 = weights['Theta1'], weights['Theta2']
# The pre-trained weights are only used to read off the layer sizes;
# training starts from fresh random weights.
theta1 = pesos_aleat(np.shape(theta1)[1] - 1, np.shape(theta1)[0])
theta2 = pesos_aleat(np.shape(theta2)[1] - 1, np.shape(theta2)[0])


#a1, a2, a3 = propagation(X, theta1, theta2)

theta_vector = np.concatenate((np.ravel(theta1), np.ravel(theta2)))


fmin = sciMin(fun=backdrop, x0=theta_vector,
              args=(np.shape(X)[1], 25, 10, X, y, learning_rate),
              method='TNC', jac=True,
              options={'maxiter': 70})

#backdrop(theta_vector, np.shape(X)[1], 25, 10, X, y, learning_rate)
print(check.checkNNGradients(backdrop, 1e-4))
#print(J(X, transform_y(y, 10), a3, 10, theta1, theta2))

#print(pesos_aleat(5, 6))
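The implementation of check.checkNNGradients is not shown; a common way to implement such a check is to compare the analytic gradient returned by the cost function against central finite differences. A sketch of that idea (the function name and signature here are assumptions):

import numpy as np

def numerical_gradient(cost_fn, theta, eps=1e-4):
    # cost_fn(theta) -> scalar cost; perturb one coordinate at a time.
    # O(n) cost evaluations, so only run this on a tiny network.
    grad = np.zeros_like(theta)
    for i in range(theta.size):
        step = np.zeros_like(theta)
        step[i] = eps
        grad[i] = (cost_fn(theta + step) - cost_fn(theta - step)) / (2 * eps)
    return grad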
Example 4
weights = loadmat('ex4weights.mat')
theta1, theta2 = weights['Theta1'], weights['Theta2']
# As in the previous example, the loaded weights only provide the layer
# sizes; the starting weights themselves are random.
theta1 = pesos_aleat(np.shape(theta1)[1] - 1, np.shape(theta1)[0])
theta2 = pesos_aleat(np.shape(theta2)[1] - 1, np.shape(theta2)[0])
num_entradas = np.shape(X)[1]
num_ocultas = 25
num_etiquetas = 10

theta_vector = np.concatenate((np.ravel(theta1), np.ravel(theta2)))

thetas = sciMin(fun=backdrop,
                x0=theta_vector,
                args=(num_entradas, num_ocultas, num_etiquetas, X,
                      transform_y(y, num_etiquetas), lambda_),
                method='TNC',
                jac=True,
                options={
                    'maxiter': 70
                }).x

theta1 = np.reshape(thetas[:num_ocultas * (num_entradas + 1)],
                    (num_ocultas, (num_entradas + 1)))
theta2 = np.reshape(thetas[num_ocultas * (num_entradas + 1):],
                    (num_etiquetas, (num_ocultas + 1)))

print("Precision de la red neuronal: " +
      str(checkLearned(y,
                       forward_propagate(X, theta1, theta2)[4])) + " %")

#print(check.checkNNGradients(backdrop, 0))
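Unlike the binary examples above, this one has 10 classes, so the labels are one-hot encoded with transform_y before being passed to the cost function. A plausible implementation, assuming the usual ex4data1.mat convention that labels run 1..10 (with 10 standing for digit 0); the body is an assumption, only the name comes from the source:

import numpy as np

def transform_y(y, num_labels):
    # One-hot encode 1-based labels into an (m, num_labels) matrix.
    m = y.shape[0]
    Y = np.zeros((m, num_labels))
    Y[np.arange(m), y.ravel().astype(int) - 1] = 1
    return Y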
Example 5

    # Fragment: this snippet starts mid-function; theta1 is presumably
    # initialised with pesos_aleat(num_entradas, num_ocultas) just above.
    theta2 = pesos_aleat(num_ocultas, num_etiquetas)

    theta_vector = np.concatenate((np.ravel(theta1), np.ravel(theta2)))

    # Learning-curve bookkeeping: per-subset validation and training errors,
    # plus the best thetas seen so far.
    auxErr = []
    auxErrTr = []
    thetaMin1 = None
    thetaMin2 = None
    errorMin = float("inf")

    for i in range(1, np.shape(trainX)[0]):
        thetas = sciMin(fun=backdrop,
                        x0=theta_vector,
                        args=(num_entradas, num_ocultas, num_etiquetas,
                              trainX[:i], trainY[:i], lambda_),
                        method='TNC',
                        jac=True,
                        options={
                            'maxiter': 70
                        }).x

        theta1 = np.reshape(thetas[:num_ocultas * (num_entradas + 1)],
                            (num_ocultas, (num_entradas + 1)))
        theta2 = np.reshape(thetas[num_ocultas * (num_entradas + 1):],
                            (num_etiquetas, (num_ocultas + 1)))

        auxErr.append(
            J(validationX, validationY,
              forward_propagate(validationX, theta1, theta2)[4], num_etiquetas,
              theta1, theta2))
        # The original snippet is cut off here; the training-error append is
        # assumed to mirror the validation one.
        auxErrTr.append(
            J(trainX[:i], trainY[:i],
              forward_propagate(trainX[:i], theta1, theta2)[4], num_etiquetas,
              theta1, theta2))
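The loop collects validation and training errors for growing training-set sizes, i.e. a learning curve. The snippet stops before using them; a typical follow-up (assumed, using matplotlib inside the same function) would plot both curves:

import matplotlib.pyplot as plt

sizes = range(1, len(auxErr) + 1)
plt.figure()
plt.plot(sizes, auxErrTr, label="training error")
plt.plot(sizes, auxErr, label="validation error")
plt.xlabel("number of training examples")
plt.ylabel("cost J")
plt.legend()
plt.show()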
Example 6
# mu and sigma come from normalising the training polynomial features
# earlier in the (truncated) script.
XPoly = Data_Management.add_column_left_of_matrix(XPoly)
XPolyVal = Normalization.normalize2(generate_polynom_data(Xval, 8), mu, sigma)
XPolyVal = Data_Management.add_column_left_of_matrix(XPolyVal)
XPolyTest = Normalization.normalize2(generate_polynom_data(Xtest, 8), mu, sigma)
XPolyTest = Data_Management.add_column_left_of_matrix(XPolyTest)

lambdaAux = [0, 0.001, 0.003, 0.01, 0.03, 0.1, 0.3, 1, 3, 10]

error_array = np.array([], dtype=float)
error_array_val = np.array([], dtype=float)
thetas = np.array([], dtype=float)
for l in range(len(lambdaAux)):
    theta = np.ones(XPoly.shape[1], dtype=float)
    
    theta_min = sciMin(fun=minimizar, x0=theta,
                       args=(XPoly, y, lambdaAux[l]),
                       method='TNC', jac=True,
                       options={'maxiter': 70}).x
    
    error_array = np.append(error_array, J(theta_min, XPoly, y, lambdaAux[l]))
    error_array_val = np.append(error_array_val, J(theta_min, XPolyVal, yval, lambdaAux[l]))
    thetas = np.append(thetas, theta_min)

lambdaIndex = np.argmin(error_array_val)  # lambda with lowest validation error
plt.figure()
draw_plot(lambdaAux, error_array)
draw_plot(lambdaAux, error_array_val)
plt.show()

theta = np.ones(XPoly.shape[1], dtype=float)
theta_min = sciMin(fun=minimizar, x0=theta,
                   args=(XPoly, y, lambdaAux[lambdaIndex]),
                   method='TNC', jac=True,
                   options={'maxiter': 70}).x
# The original snippet is cut off here; the trailing arguments are assumed
# to mirror the sciMin call in the loop above.
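With the best lambda selected on the validation split, a natural last step (assumed; ytest is not shown in the snippet) is to report the error of the retrained model on the held-out test set:

test_error = J(theta_min, XPolyTest, ytest, 0)  # report unregularised error
print("Test error for lambda =", lambdaAux[lambdaIndex], ":", test_error)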