Example #1
def funcaoCustoRegressaoLogistica(theta, X, y):
    theta = np.matrix(theta)
    X = np.matrix(X)
    y = np.matrix(y)
    grad0 = np.multiply(-y, np.log(sigmoide(X * theta.T)))
    grad1 = np.multiply((1 - y), np.log(1 - sigmoide(X * theta.T)))
    return np.sum(grad0 - grad1) / (len(X))
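Every example on this page calls a sigmoide helper that the listing does not show. A minimal sketch of what it is assumed to look like (Example #7 is the exception, passing an extra slope constant b):

import numpy as np

def sigmoide(z):
    # logistic function 1 / (1 + exp(-z)), applied element-wise
    return 1.0 / (1.0 + np.exp(-z))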
Example #2
def custo_reglog(theta, X, y):
    theta = np.matrix(theta)
    X = np.matrix(X)
    y = np.matrix(y)
    grad0 = np.multiply(-y, np.log(sigmoide(X * theta.T)))
    grad1 = np.multiply((1 - y), np.log(1 - sigmoide(X * theta.T)))
    return np.sum(grad0 - grad1) / (len(X))
Example #3
def custo_reglog(theta, X, y):
    # convert theta, X and y to matrices
    theta = np.matrix(theta)
    X = np.matrix(X)
    y = np.matrix(y)
    # first term of the logistic-regression cost; when y = 0, grad0 = 0
    grad0 = np.multiply(-y, np.log(sigmoide(X * theta.T)))
    # second term of the cost; when y = 1, grad1 = 0
    grad1 = np.multiply((1 - y), np.log(1 - sigmoide(X * theta.T)))
    # cost J from the grad0 and grad1 terms and the size of the data set
    return np.sum(grad0 - grad1) / (len(X))
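As a quick sanity check of this cost: at theta = 0 the hypothesis is 0.5 for every example, so the cost should be ln(2) ≈ 0.693 regardless of the data. A hedged sketch with made-up toy values:

import numpy as np

X_toy = np.array([[1., 0.5], [1., -1.2], [1., 2.0]])  # bias column plus one feature
y_toy = np.array([[1.], [0.], [1.]])
theta0 = np.zeros(2)

print(custo_reglog(theta0, X_toy, y_toy))  # approx. 0.6931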
Example #4
def costFunctionReg(theta, X, y, alpha):

    theta = np.matrix(theta)
    X = np.matrix(X)
    y = np.matrix(y)

    grad0 = np.multiply(-y, np.log(sigmoide(X * theta.T)))
    grad1 = np.multiply((1 - y), np.log(1 - sigmoide(X * theta.T)))
    # regularization term: alpha / (2 * m) times the squared thetas (theta_0 excluded)
    reg = (alpha / (2 * len(X))) * np.sum(
        np.asarray(theta[:, 1:theta.shape[1]])**2)

    return np.sum(grad0 - grad1) / (len(X)) + reg
Example #5
def custo_reglog_reg(theta, X, y, _lambda):
    m = len(X)
    theta = np.matrix(theta)
    X = np.matrix(X)
    y = np.matrix(y)
    
    grad0 = np.multiply(-y, np.log(sigmoide(X * theta.T)))
    grad1 = np.multiply((1 - y), np.log(1 - sigmoide(X * theta.T)))
    
    # theta0 is excluded from the regularization
    theta_j = theta[:,1:]
    regularizacao = (_lambda / (2 * m)) * np.sum(np.dot(theta_j.T,theta_j))     
    return np.sum((grad0 - grad1) / m) + regularizacao
Example #6
def costFunctionReg(theta, X, y, lamb):
    theta = np.matrix(theta)
    X = np.matrix(X)
    y = np.matrix(y)

    m = len(X)
    h = sigmoide(X * theta.T)

    grad0 = np.multiply(-y, np.log(h))
    grad1 = np.multiply((1 - y), np.log(1 - h))
    grad_sum = np.sum(grad0 - grad1) / m
    reg_term = lamb * np.sum(np.power(theta[:, 1:], 2)) / (2 * m)
    J = grad_sum + reg_term

    # gradient (theta_0 is not regularized)
    grad = np.zeros((1, theta.shape[1]))

    erro = h - y
    term = np.multiply(erro, X)
    gd_term = np.sum(term, axis=0) / m

    grad[:, 1:] = gd_term[:, 1:] + (theta[:, 1:] / m) * lamb
    grad[:, 0] = gd_term[:, 0]

    return J, grad
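Because this version returns both J and grad, it fits optimizers that accept a combined cost/gradient callable. A hedged usage sketch; theta_ini, X, y and the wrapper name custo_e_grad are illustrative, not from the source:

import numpy as np
import scipy.optimize as opt

def custo_e_grad(t, X, y, lamb):
    # flatten the 2-D gradient so it matches the shape of t
    J, g = costFunctionReg(t, X, y, lamb)
    return J, np.asarray(g).flatten()

# jac=True tells minimize the callable returns (cost, gradient)
res = opt.minimize(custo_e_grad, theta_ini, args=(X, y, 1.0),
                   jac=True, method='TNC')
theta_otimo = res.x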
Example #7
def salidasy(input_vector, w, funCapas):
    y = []

    # the algorithm also keeps the input as an output; the -1 bias entry is included
    y.append(input_vector)

    b = 1  # sigmoid slope constant
    niveles = len(w)

    input_capa = input_vector

    for i in range(niveles):
        z = w[i] @ input_capa
        y_aux = np.zeros((np.size(z), 1), float)

        for j in range(np.size(z)):
            if funCapas[i] == "sigmoid":
                y_aux[j] = sigm.sigmoide(z[j], b)
            else:
                y_aux[j] = z[j]

        # prepend the -1 bias entry for the next layer
        y_aux = np.insert(y_aux, 0, -1, 0)
        y.append(y_aux)
        input_capa = y_aux

    return y
Example #8
def plot_boundary(theta, grau):
    x = np.linspace(-1,1.5,50)
    y = np.linspace(-0.8,1.2,50)
    
    xx, yy = np.meshgrid(x, y)

    theta = np.matrix(theta)
    
    X_poly = mapFeature(xx.ravel(), yy.ravel(), grau)
    
    Z = sigmoide(X_poly.dot(theta.T))
    Z = Z.reshape(xx.shape)
    
    plt.title('lambda = 1')
    plt.contour(x, y, Z, [0.5], linewidths=1, colors='green')
    
    legendas = [Line2D([0], [0], marker='+', color='k', lw=0, label='Accepted (y = 1)'),
                Line2D([0], [0], marker='o', color='y', lw=0, label='Rejected (y = 0)'),
                Line2D([0], [0], color='g', lw=2, label='Decision Boundary')]
    
    plt.legend(handles=legendas)
    
    dirname = os.path.dirname(__file__)
    plt.savefig(os.path.join(dirname, 'plot4.2.png'))
    plt.show()
Example #9
def gradientReg(theta, X, y, lmd):

    m = len(y)

    # vectorized regularized gradient; np.r_ prepends a 0 so theta_0 is not regularized
    grad = (1 / m) * X.T.dot(sigmoide(X.dot(theta.reshape(-1, 1))) - y) + (
        lmd / m) * np.r_[[[0]], theta[1:].reshape(-1, 1)]

    return grad.flatten()
Example #10
def costFunctionReg(theta, X, y, lamb):
    # convert theta, X and y to matrices
    theta = np.matrix(theta)
    X = np.matrix(X)
    y = np.matrix(y)

    # first term of the logistic-regression cost; when y = 0, grad0 = 0
    grad0 = np.multiply(-y, np.log(sigmoide(X * theta.T)))
    # second term of the cost; when y = 1, grad1 = 0
    grad1 = np.multiply((1 - y), np.log(1 - sigmoide(X * theta.T)))

    # regularization term (theta_0 excluded)
    reg = np.sum(np.square(theta[:, 1:].T)) * lamb

    # cost J from the grad0 and grad1 terms plus the regularization term
    J = (np.sum(grad0 - grad1) / (len(y))) + reg / (len(y) * 2)

    return J
Example #11
def costFunctionReg(theta, X, y, lmd):

    m = len(y)

    h_theta = sigmoide(X.dot(theta.reshape(-1, 1)))
    J = -1. / m * (y.T.dot(np.log(h_theta)) + (1 - y).T.dot(np.log(1 - h_theta)))
    J_reg = lmd / (2 * m) * (theta[1:] ** 2).sum()
    J = J_reg + J

    return J[0][0]
Example #12
def gd_reglog(theta, X, y):
    theta = np.matrix(theta)
    X = np.matrix(X)
    y = np.matrix(y)

    parametros = int(theta.ravel().shape[1])
    grad = np.zeros(parametros)

    erro = sigmoide(X * theta.T) - y

    # per-parameter gradient of the unregularized logistic cost
    for i in range(parametros):
        term = np.multiply(erro, X[:,i])
        grad[i] = np.sum(term) / len(X)

    return grad
Example #13
def cg_log_Reg(theta, x, y, landa):

    m = len(y)
    n = x.shape[1]
    theta = theta.reshape(len(theta), 1)
    grad = np.zeros([theta.size, 1])

    g = sigmoide(np.dot(x, theta))
    J = ((1 / m) * np.sum(np.dot(-y.T, np.log(g)) - np.dot((1 - y).T, np.log(1 - g)))
         + (landa / (2 * m)) * np.sum(np.power(theta[1:], 2)))

    # gradient for theta_0 (not regularized)
    grad[0] = (1 / m) * np.dot((g - y).T, x[:, 0])

    # gradient for the remaining thetas; each column of x is one feature
    grad[1:n] = (np.dot(x[:, 1:n].T, (g - y))) / m + (landa / m) * theta[1:n]

    return J, grad
Example #14
def gradRegLog(theta, X, y, alpha):
    theta = np.matrix(theta)
    X = np.matrix(X)
    y = np.matrix(y)

    parametros = int(theta.ravel().shape[1])
    grad = np.zeros(parametros)

    erro = sigmoide(X * theta.T) - y

    for i in range(parametros):
        term = np.multiply(erro, X[:, i])

        if i == 0:
            grad[i] = np.sum(term) / len(X)
        else:
            # regularized gradient for every theta except theta_0
            grad[i] = (np.sum(term) / len(X)) + ((alpha / len(X)) * theta[0, i])

    return grad
Example #15
def cg_logistic(theta, x, y):

    # initial values
    m = len(y)  # number of training examples
    theta = theta.reshape(len(theta), 1)

    # sigmoid hypothesis
    g = sigmoide(np.dot(x, theta))
    J = (1 / m) * (
        np.sum(np.dot(-y.T, np.log(g)) - np.dot((1 - y).T, np.log(1 - g))))

    # vectorized gradient, equivalent to summing (g - y) * x_j per feature
    grad = np.dot(x.T, (g - y)) / m

    return J, grad
Example #16
def gd_reglog_reg(theta, X, y, _lambda):
    m = len(X)
    theta = np.matrix(theta)
    X = np.matrix(X)
    y = np.matrix(y)

    parametros = int(theta.ravel().shape[1])
    grad = np.zeros(parametros)

    erro = sigmoide(X * theta.T) - y

    for i in range(parametros):
        term = np.multiply(erro, X[:,i])
        if (i != 0):
            regularizacao = ((_lambda / m) * theta[:,i])
            grad[i] = (np.sum(term) / m) + regularizacao
        else:
            grad[i] = np.sum(term) / m 

    return grad
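This gradient and the cost from Example #5 form the pair usually handed to scipy's optimizers. A hedged usage sketch; theta_ini, X and y are assumed to be prepared elsewhere:

import scipy.optimize as opt

# fmin_tnc minimizes custo_reglog_reg, using gd_reglog_reg as its gradient
resultado = opt.fmin_tnc(func=custo_reglog_reg, x0=theta_ini,
                         fprime=gd_reglog_reg, args=(X, y, 1.0))
theta_otimo = resultado[0]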
Example #17
def plotDecision(data, X, theta, filename):

    # decision-boundary computation
    x1_min, x1_max = X[:, 1].min(), X[:, 1].max()
    x2_min, x2_max = X[:, 2].min(), X[:, 2].max()
    xx1, xx2 = np.meshgrid(np.linspace(x1_min, x1_max),
                           np.linspace(x2_min, x2_max))
    poly = PolynomialFeatures(6)
    h = sigmoide(
        poly.fit_transform(np.c_[xx1.ravel(), xx2.ravel()]).dot(theta))
    h = h.reshape(xx1.shape)

    # scatter plot of the data

    positivo = data[data['Aceito'].isin([1])]
    negativo = data[data['Aceito'].isin([0])]

    fig, ax = plt.subplots(figsize=(8, 6))
    ax.axis([-1, 1.5, -0.8, 1.2])
    ax.scatter(positivo['Teste 1'],
               positivo['Teste 2'],
               s=50,
               c='k',
               marker='+',
               label='y=1')
    ax.scatter(negativo['Teste 1'],
               negativo['Teste 2'],
               s=50,
               c='y',
               marker='o',
               label='y=0')
    ax.contour(xx1, xx2, h, 1, linewidths=1, colors='g')
    ax.legend()
    ax.set_xlabel('Microchip Test 1')
    ax.set_ylabel('Microchip Test 2')

    if not os.path.exists(os.path.dirname(filename)):
        os.makedirs(os.path.dirname(filename))

    plt.savefig(filename)
    plt.show()
Example #18
def gdFunction(theta, X, y, lamb):
    # convert theta, X and y to matrices
    theta = np.matrix(theta)
    X = np.matrix(X)
    y = np.matrix(y)
    # number of theta parameters
    parametros = int(theta.ravel().shape[1])
    # gradient vector
    grad = np.zeros(parametros)

    erro = sigmoide(X * theta.T) - y

    for i in range(parametros):
        term = np.multiply(erro, X[:, i])
        if i == 0:
            grad[i] = np.sum(term) / len(y)
        else:
            gdreg = (lamb / len(y)) * theta[0, i]
            grad[i] = (np.sum(term) / len(y)) + gdreg

    return grad
Example #19
def costFunctionReg(theta, X, y, lambda_):
    '''
    Computes the regularized gradient for each value of theta
    '''
    m = len(y)

    theta_shape = theta.shape[0]
    grad = np.zeros(theta_shape)

    # accumulate into grad so later iterations do not read
    # partially updated theta values
    for theta_index in range(theta_shape):
        value = 0
        for line in range(m):
            z = sigmoide(theta.T.dot(X[line]))
            value += (z - y[line]) * X[line][theta_index]
        value /= m

        # regularization is added once per theta, never for theta_0
        if theta_index > 0:
            value += (lambda_ / m) * theta[theta_index]

        grad[theta_index] = value

    theta = grad
    J = custoRegLog_Norm(theta, X, y, lambda_)

    return theta
Example #20
def predizer(theta, X):

    probabilidade = sigmoide(X * theta.T)

    return [1 if x >= 0.5 else 0 for x in probabilidade]
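A hedged sketch of how predizer might be used to report training accuracy; theta_otimo, X and y are assumed to come from a previous optimization step:

import numpy as np

predicoes = predizer(np.matrix(theta_otimo), np.matrix(X))
acertos = [1 if p == int(a) else 0 for (p, a) in zip(predicoes, np.ravel(y))]
print('accuracy = {0:.0f}%'.format(100 * sum(acertos) / len(acertos)))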
Example #21
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import scipy.optimize as opt
from sigmoide import sigmoide

# Calling the sigmoide function from main, as requested
# in the assignment statement
print(sigmoide(np.array([0, 2])))


def funcaoCustoRegressaoLogistica(theta, X, y):
    theta = np.matrix(theta)
    X = np.matrix(X)
    y = np.matrix(y)
    grad0 = np.multiply(-y, np.log(sigmoide(X * theta.T)))
    grad1 = np.multiply((1 - y), np.log(1 - sigmoide(X * theta.T)))
    return np.sum(grad0 - grad1) / (len(X))


def gradiente_descendente(theta, X, y):
    theta = np.matrix(theta)
    X = np.matrix(X)
    y = np.matrix(y)

    parametros = int(theta.ravel().shape[1])
    grad = np.zeros(parametros)

    erro = sigmoide(X * theta.T) - y

    for i in range(parametros):
        term = np.multiply(erro, X[:, i])
        grad[i] = np.sum(term) / len(X)

    return grad
Example #22
print(f'The cost found by fmin is: {costo}')
print('The expected value is approximately: 0.203\n')
print('theta from fmin: \n')
print(theta_min)
print('Expected thetas (approximate):\n')
print(' -25.161\n 0.206\n 0.201\n')

# decision-boundary plot
limite(theta_min, datos)
graficar(datos)
plt.show()

# prediction
probabilidad = sigmoide(np.array(np.dot([1, 45, 85], theta_min.T)))
print(
    f'For a student with scores of 45 and 85, the predicted admission probability is {probabilidad}'
)
print('Expected value: 0.775 +/- 0.002\n\n')

# accuracy of the algorithm
p = prediccion(theta_min, x)
y = datos[2]
precision = pd.concat([y, p], axis=1)
precision.columns = ['real', 'prediccion']
# fraction of correct predictions, as a percentage
valor_precision = np.mean(precision['real'] == precision['prediccion']) * 100

print(f'The accuracy of the algorithm is: {valor_precision}')
print('The expected accuracy is (approx.): 89.0\n')
print('\n')
def predict(X, theta):
    # threshold the sigmoid probabilities at 0.5
    y_pred = sigmoide(X * np.matrix(theta).T)
    return np.where(y_pred < 0.5, 0, 1)