def findBestFitLine_using_gd(self, learning_rate=0.01):

        # Normalizing the input before applying Gradient Descent
        x_temp_toScale = np.concatenate((self.X_train[:, 1:], self.Y_train), 1)
        normalScalar = NormalScalar.NormalScalar()
        normaled_val = normalScalar.fit_transform(x_temp_toScale)
        X_train = np.append([[1]] * len(normaled_val), normaled_val[:, :-1], 1)
        Y_train = normaled_val[:, -1:]

        # Applying Gradient Descent to the cost function J
        (m, n) = X_train.shape
        J = lambda theta: ((X_train @ theta.reshape(n, 1) - Y_train).T @ (
            X_train @ theta.reshape(n, 1) - Y_train))[0][0] / (2 * m)
        gd = GradientDescent.GradientDescent()
        theta = gd.gradientDescent(J,
                                   np.array([0 for i in range(n)]),
                                   learning_rate=learning_rate,
                                   delta_val=0.000001,
                                   iterations=10000)

        # Inverse Scaling the value of theta
        theta[0] = normalScalar.std[-1] * (
            theta[0] -
            (sum(normalScalar.mean[:-1] *
                 (theta[1:] / normalScalar.std[:-1])))) + normalScalar.mean[-1]
        theta[1:] = (theta[1:] / normalScalar.std[:-1]) * normalScalar.std[-1]

        return theta
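
The inverse-scaling block above maps coefficients learned on standardized inputs back to the original units. A minimal NumPy check of that identity, independent of NormalScalar (the data here is made up for illustration):

import numpy as np

# If y' = (y - mu_y)/s_y and x' = (x - mu_x)/s_x, then y' = t0 + t1*x' is the same line as
# y = [s_y*(t0 - mu_x*t1/s_x) + mu_y] + (t1*s_y/s_x)*x, which is the transform applied to theta.
rng = np.random.default_rng(0)
x = rng.normal(3.0, 2.0, size=100)
y = 4.0 + 1.5 * x                          # true line: intercept 4, slope 1.5
mu_x, s_x, mu_y, s_y = x.mean(), x.std(), y.mean(), y.std()
t0, t1 = 0.0, 1.0                          # exact fit on standardized data (correlation is 1)
w1 = t1 * s_y / s_x                        # -> 1.5
w0 = s_y * (t0 - mu_x * t1 / s_x) + mu_y   # -> 4.0
print(w0, w1)
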
Example #2
    def fitByGradintDescent(self,
                            init: np.array,
                            maxloop: int = 10000000,
                            sep: float = 0.00001,
                            eps: float = 1e-10):
        import GradientDescent
        gradientdescent = GradientDescent.GradientDescent(
            self.GetLossValue, self.DLossFun)
        gradientdescent.search(init=init, maxloop=maxloop, sep=sep, eps=eps)
        self.theta_ = gradientdescent.ans_
    def _getGrad(self):
        k = len(self.X[0])
        # w0 = np.zeros(k).reshape(-1, 1)
        w0 = np.random.uniform(-1, 1, k).reshape(-1, 1)
        
        # difffunc = lambda w: np.add(np.subtract(self.X.T.dot(self.X).dot(w), self.X.T.dot(self.t)), 0.5*self.lamb*np.sign(w))
        difffunc = lambda w: -2.0*(self.X.T).dot(self.t) + 2.0*(self.X.T).dot(self.X).dot(w) + self.lamb*np.sign(w)

        func = lambda w: ((self.t - self.X.dot(w)).T).dot(self.t - self.X.dot(w)) + self.lamb*np.sum(np.absolute(w))

        return gd.GradientDescent(self.grad_eta, func, difffunc, k, x0 = w0, eps = self.grad_eps, Nsteps = self.grad_steps, ylim = self.grad_ylim)
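
For reference, the L1-regularized least-squares objective that func and difffunc describe can also be minimized with a plain subgradient loop; this sketch uses only NumPy and assumes nothing about the gd.GradientDescent class used above:

import numpy as np

def lasso_subgradient_descent(X, t, lamb=0.1, eta=1e-4, steps=5000):
    # Minimize ||t - X w||^2 + lamb * ||w||_1 with fixed-step subgradient descent.
    w = np.zeros((X.shape[1], 1))
    for _ in range(steps):
        grad = -2.0 * X.T @ (t - X @ w) + lamb * np.sign(w)   # same expression as difffunc
        w -= eta * grad
    return w
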
Example #4
    def est_k_s(self, threshold):
        # index 0 is the shape parameter, i.e. -k
        # index 2 is the scale parameter, i.e. a or sigma
        if self.OptMethod == "gd":
            # version using GradientDescent
            theta = GradientDescent.GradientDescent(X=self.X,
                                                    step=0.0001,
                                                    acc=10e-06,
                                                    maxIter=1000,
                                                    showdetail=False,
                                                    threshold=threshold)
        elif self.OptMethod == "pso":
            # version using PSO
            theta = PSOptim.OptByPSO(
                X=self.X, threshold=threshold
            )  # requires the pyswarm package, or its code in the same directory
        shape = np.sum(np.log(1 - theta * self.X)) / self.lenX()
        scale = -1 * shape / theta
        return (shape, scale)
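
Whatever optimizer produces theta, the last two lines recover the generalized Pareto shape and scale from it; the same step written as a stand-alone NumPy function, with made-up exceedances for illustration:

import numpy as np

def gpd_shape_scale(x, theta):
    # shape = mean(log(1 - theta*x)), scale = -shape/theta, exactly as in est_k_s above
    shape = np.mean(np.log(1.0 - theta * x))
    return shape, -shape / theta

print(gpd_shape_scale(np.array([0.5, 1.2, 2.3, 0.8]), theta=-0.1))
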
Example #5
def main():
    ## linux machine
    # df = load_data("../../data/baby-weights-dataset.csv")
    ## windows machine
    df = load_data(
        "C:\\Users\\51606\\Desktop\\dstoolkit\\data\\baby-weights-dataset.csv")
    print("Data has been loaded!")
    scaler = StandardScaler()
    X = scaler.fit_transform(df.drop("BWEIGHT", axis=1))
    X = np.insert(X, 0, 1, axis=1)
    y = df["BWEIGHT"]
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3)
    reg = gd.GradientDescent(method="sgd")
    print(reg.algorithm)
    print("***Training the model...***")
    reg.fit(X_train, y_train)
    print("***Training done!***")
    print(reg.weights)
    print(f"The number of iterations to stop: {reg.num_iter}")
    print(f"The corresponding training MSE: {reg.mse}")
    y_pred = reg.predict(X_test)
    print(f"The testing MSE: {mse(y_test,y_pred)}")
Example #6

# Rosenbrock
def f(x, a=1, b=5):
    y = (a - x[0])**2 + b * (x[1] - x[0]**2)**2
    return y


x = sp.IndexedBase('x')
gradients = np.array([sp.diff(f(x), x[i]) for i in range(2)])
grads = sp.lambdify(x, gradients, 'numpy')

x_ = np.array([-2, 2])
alpha = 1E-2

gd.GradientDescent(f, grads, x_, alpha)

gd.ConjugateGradient(f, grads, x_)

############################################
""" 문제해결형 과제 (2) """
############################################
""" Branin Function """


def draw_branin(levels):

    a = 1
    b = 5.1 / (4 * np.pi**2)
    c = 5 / np.pi
    r = 6
    # This is required so that the changes are picked up properly by the Jupyter notebook
    importlib.reload(GrD)

    N_samples = 1000
    [X, Y] = randomSampleGenerator(N_samples)
    print(
        'Sample logistic function which is being predicted: y = 3 + 5*x1 - 2*x2'
    )
    [X_train,
     Y_train] = [X[0:int(0.7 * N_samples), :], Y[0:int(0.7 * N_samples)]]
    [X_test, Y_test] = [
        X[int(0.7 * N_samples):N_samples, :], Y[int(0.7 * N_samples):N_samples]
    ]

    GD = GrD.GradientDescent(method="batch",
                             loss="logloss",
                             lr=0.001,
                             epochs=10000)
    print('\n\nGradient descent logloss: batch with no adaptive learning')
    coeff = GD.fit(X_train, Y_train)
    print('Optimized Coefficients:', coeff)
    YPredict = GD.predict(X_test)
    print('R squared score: ', GD.score(Y_test, YPredict))

    GD = GrD.GradientDescent(method="batch",
                             loss="logloss",
                             lr=0.001,
                             epochs=10000,
                             adaptive=True)
    print('\n\nGradient descent logloss: batch with adaptive learning')
    coeff = GD.fit(X_train, Y_train)
    print('Optimized Coefficients:', coeff)
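
The a, b, c, r constants at the top of draw_branin are the standard Branin-function constants; for reference, the full function they belong to looks like this (a sketch of the standard form, not this project's plotting code):

import numpy as np

def branin(x1, x2, a=1, b=5.1 / (4 * np.pi**2), c=5 / np.pi, r=6, s=10, t=1 / (8 * np.pi)):
    # Standard Branin(-Hoo) test function; a, b, c, r match the constants above.
    return a * (x2 - b * x1**2 + c * x1 - r)**2 + s * (1 - t) * np.cos(x1) + s

# its three global minima (value ~0.397887)
print(branin(-np.pi, 12.275), branin(np.pi, 2.275), branin(9.42478, 2.475))
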
Example #8
import GradientDescent as gd


# Rosenbrock
def f(x, a=1, b=100):
    y = (a - x[0])**2 + b * (x[1] - x[0]**2)**2
    return y


x = sp.IndexedBase('x')
gradients = np.array([sp.diff(f(x), x[i]) for i in range(2)])
grads = sp.lambdify(x, gradients, 'numpy')

x_ = np.array([-2., 2.])

gd.GradientDescent(f, grads, x_, alpha=1E-1, verbose=True)

gd.ConjugateGradient(f, grads, x_)

gd.momentum(f, grads, x_, alpha=7E-4, verbose=True)

gd.nesterov(f, grads, x_, alpha=7E-4, verbose=True)
""" Huge-variate : e.g. Deep Learning """
gd.adagrad(f, grads, x_, alpha=3.05E-0, verbose=True)
""" RMSProp: Geoffrey Hinton """


def rmsprop(f,
            grads,
            x,
            alpha,
            beta=0.9,
            eps=1e-8,
            n_iter=10000):
    # The body is a minimal sketch (the original listing is cut off here, so this is an
    # assumption; f is kept only for signature symmetry with the optimizers above):
    # keep a running mean of squared gradients and scale each step by its square root.
    v = np.zeros_like(x, dtype=float)
    for _ in range(n_iter):
        g = np.asarray(grads(x), dtype=float)
        v = beta * v + (1 - beta) * g**2
        x = x - alpha * g / (np.sqrt(v) + eps)
    return x
Example #9
resultsPTest = p.test(testInput, testOutput)

print("Perceptron accuracy for train set : " + str(np.sum([x[0] for x in resultsPTrain[0]])/np.sum([x[1] for x in resultsPTrain[0]])))
print("Perceptron accuracy for test set : " + str(np.sum([x[0] for x in resultsPTest[0]])/np.sum([x[1] for x in resultsPTest[0]])))

IO.PlotCM(resultsPTrain[1], save = True, fileName = "perceptronConfusionTrain")
IO.PlotCM(resultsPTest[1], save = True, fileName = "perceptronConfusionTest")

'''
Assignment 5:
'''

'''
The different activation functions are in commented-out lines in the module GradientDescent.py
'''
gd = GD.GradientDescent()

weights = np.random.rand(9)
weights = weights * 2 - 1   # scale the random initial weights into [-1, 1]

learningRate = 0.5

mseplot = []
accuracyplot = []

for i in range(1000):
    weights = weights - learningRate * gd.grdmse(weights)
    mseplot.append(gd.mse(weights))

    a = np.abs(np.around(gd.xor_net(0,0,weights)) - 0)
    b = np.abs(np.around(gd.xor_net(0,1,weights)) - 1)
    c = np.abs(np.around(gd.xor_net(1,0,weights)) - 1)
    d = np.abs(np.around(gd.xor_net(1,1,weights)) - 0)
    # (assumption) the original listing is cut off after the first two XOR cases; the remaining
    # two cases and the accuracy bookkeeping below are a minimal completion
    accuracyplot.append(4 - (a + b + c + d))
Example #10
def RunLinRegressGradientDescent(x, y):
    learningRate = 0.02
    initCoefEstimate = np.array([1.0, 1.0])
    coefs = gd.GradientDescent(ComputeGradient, x, y, learningRate,
                               initCoefEstimate)
    gp.Plot2DResults('Gradient Descent', x, y, coefs)
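
ComputeGradient is defined elsewhere in this project; purely as an illustration, a gradient of half the mean squared error for the two-coefficient model y ≈ c0 + c1*x could look like this (the name and conventions are assumptions):

import numpy as np

def compute_gradient_linear(coefs, x, y):
    # Hypothetical gradient of (1/2)*MSE for y ~ coefs[0] + coefs[1]*x (illustration only).
    residual = coefs[0] + coefs[1] * x - y
    return np.array([np.mean(residual), np.mean(residual * x)])
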
Example #11
def RunLogRegressGradientDescent(x, y):
    learningRate = 0.1
    initCoefEstimate = np.ones([1, x.shape[1]])
    [errors, coefs] = gd.GradientDescent(ComputeGradient, x, y, learningRate,
                                         initCoefEstimate)
    return coefs
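
For the logistic case, ComputeGradient would instead be a log-loss gradient; a hypothetical version matching the (1, n_features) coefficient shape used above (again an assumption about conventions, not this project's actual function):

import numpy as np

def compute_gradient_logistic(coefs, x, y):
    # Hypothetical log-loss gradient for p = sigmoid(x @ coefs.T) (illustration only).
    p = 1.0 / (1.0 + np.exp(-(x @ coefs.T)))          # shape (n_samples, 1)
    return ((p - y.reshape(-1, 1)).T @ x) / len(x)    # shape (1, n_features)
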
Example #12
import DataInit, AddBias, GradientDescent, Normalization, runMachine
import os.path
import numpy as np

BASE = os.path.dirname(os.path.abspath(__file__))

print(os.path.join(BASE, "DataNew1.csv"))
Path = os.path.join(BASE, "DataNew1.csv")

# Path = "..\MachineLearningMF\Data.txt"
'''create helper objects'''
data = DataInit.DataInit()
CostFuntion = runMachine.runMachine()
GDescent = GradientDescent.GradientDescent()
Norm = Normalization.Normalization()
AddBias1 = AddBias.AddBias()
'''load csv'''
data.loader(Path)  # remove commas
'''theta init'''
'''initialized optimal theta 17/9/2019'''
# test: enter theta values per district
# theta = np.array([[theta0], [theta1], [theta2], [theta3], [theta4]])
theta = np.array([[0], [0], [0], [0], [0]])
'''Normalize'''
data.x, mu, sigma = Norm.featureNormalize(data)
'''Add Bias Column'''
data = AddBias1.addB(data)
'''remove annotations when you compute theta again'''
'''run Gradient descent and Cost function'''
theta = GDescent.runGradient(data, theta, 0.0001, 100000)  # extract the theta values
theta = theta.reshape(1, 5)  # reshape for matrix multiplication
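
With theta reshaped to (1, 5), predicting a single bias-augmented row is a matrix product; a minimal sketch with made-up (already-normalized) feature values:

# (sketch) predict one observation with the fitted theta; x_new values are made up
x_new = np.array([[1.0], [0.2], [-0.5], [1.3], [0.7]])   # bias term + 4 normalized features
y_pred = (theta @ x_new).item()
print(y_pred)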