def gradient_descent(eps=1e-5, alpha=.001):
    import numpy as np
    from zs import normalization
    import math
    my_data = np.genfromtxt('Data1.csv', delimiter=',')
    out = normalization(my_data)

    my_data = np.insert(out, 0, 1, axis=1)  # prepend a bias column of ones
    num1, num2 = my_data.shape
    train = math.ceil(.7 * num1)
    test = num1 - train

    np.random.shuffle(my_data)
    training, testing = my_data[:train, :], my_data[train:, :]

    x_train = training[:, :-1]
    x_test = testing[:, :-1]
    y_train = training[:, -1]
    y_test = testing[:, -1]

    i = 0
    w = np.zeros(x_train.shape[1])
    # precompute X^T y and X^T X once; the gradient is then X^T X w - X^T y
    x_t = x_train.transpose()
    b = np.dot(x_t, y_train)
    xtx = np.dot(x_t, x_train)
    while True:
        # batch gradient step: w <- w - alpha * (X^T X w - X^T y)
        Djw = np.dot(xtx, w) - b
        w_train = w - alpha * Djw

        i = i + 1
        # stop once successive weight vectors differ by less than eps
        err = np.linalg.norm(w_train - w, 2)
        w = np.copy(w_train)
        if err < eps:
            break

    yp = np.dot(x_test, w_train)
    # RMS error on the held-out split: sqrt of the mean squared residual
    error2 = np.sum((yp - y_test)**2) / test
    rms = np.sqrt(error2)
    np.savetxt('w_winequality.csv', w_train, delimiter=',')
    print(rms)
    return rms
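
Every example in this listing imports normalization from a local zs module that is never shown. Judging by the module name, it is most likely a column-wise z-score; a minimal sketch of such a helper (an assumption, not the original code):

import numpy as np

def normalization(data):
    # z-score: center each column, then scale by its standard deviation
    return (data - data.mean(axis=0)) / data.std(axis=0)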
Example #2
"""
Created on Fri Aug 17 17:46:30 2018

@author: SRIKANT
"""

import numpy as np
import math
from gradient_def import gradient_descent
from rms import rms_value
from zs import normalization


my_data = np.genfromtxt('Data2.csv', delimiter=',')

# normalize the features only, then re-attach the target column
my_data = np.concatenate(
    (normalization(my_data[:, :-1]),
     my_data[:, -1].reshape(-1, 1)),
    axis=1)

alpha_arr = np.array([])
my_data = np.insert(my_data, 0, 1, axis=1)  # prepend a bias column of ones
for h in range(0, 5):

    np.random.shuffle(my_data)
    num1, num2 = my_data.shape
    train = math.ceil(.7 * num1)
    test = num1 - train
    training, testing = my_data[:train, :], my_data[train:, :]
    main_x = training[:, :-1]
    main_y = training[:, -1]
    rows, columns = training.shape
    k = 10  # number of folds
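
Example #2 is cut off before the cross-validation loop that k = 10 sets up. Continuing inside the h loop, the folds might be sliced like this, assuming equal-sized contiguous folds (a hypothetical continuation, not the original code):

    fold_size = rows // k
    for fold in range(k):
        # hold out one contiguous block as the validation fold
        val_idx = np.arange(fold * fold_size, (fold + 1) * fold_size)
        val_x, val_y = main_x[val_idx], main_y[val_idx]
        # the remaining rows form this fold's training split
        tr_x = np.delete(main_x, val_idx, axis=0)
        tr_y = np.delete(main_y, val_idx, axis=0)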
Example #3
"""
@author: srikant nayak
"""

import numpy as np
from zs import normalization
from ridge_regression import ridge_regression1
import matplotlib.pyplot as plt
from mpl_toolkits import mplot3d
import math
from sympy import plot
from sympy.plotting import plot3d
from sympy import symbols

my_data = np.genfromtxt('data1.csv', delimiter=',')
out = normalization(my_data)
my_data = np.insert(out, 0, 1, axis=1)
num1, num2 = my_data.shape
train = math.ceil(.7 * num1)
test = num1 - train
np.random.shuffle(my_data)
training, testing = my_data[:train, :], my_data[train:, :]
x_train = training[:, :-1]
x_test = testing[:, :-1]
y_train = training[:, -1]
y_test = testing[:, -1]

zz = my_data[:, 2]
xt = my_data[:, :-1]
Example #4
"""
Created on Sun Oct 14 02:00:37 2018

@author: srikant nayak
"""

import numpy as np
from sklearn.model_selection import train_test_split
from zs import normalization
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt

my_data = np.genfromtxt('data1.csv', delimiter=',')

x_data = my_data[0:, 0:-1]
x_norm = normalization(x_data)
x = np.insert(x_norm, 0, 1, axis=1)

y = my_data[:, -1]
# hold out 30% of the rows for testing
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.3)
#def ridge_regression1(x_train,y_train,eps,alpha,lamda):
column = x.shape[1]
w = np.zeros(x.shape[1])
w_nxt = np.zeros(x.shape[1])
eps = .00001
alpha = .001
lamda = .00390  # L2 (ridge) penalty weight
iteration = 0
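
Example #4 stops right after initializing its hyper-parameters. Given the commented-out ridge_regression1 signature and the lamda penalty, the missing descent loop presumably resembles Example #1's batch update plus the L2 term; a sketch under that assumption:

while True:
    # ridge gradient: X^T (X w - y) + lamda * w
    grad = np.dot(x_train.T, np.dot(x_train, w) - y_train) + lamda * w
    w_nxt = w - alpha * grad
    iteration += 1
    # stop when successive iterates differ by less than eps
    if np.linalg.norm(w_nxt - w, 2) < eps:
        break
    w = np.copy(w_nxt)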
Example #5
import numpy as np
from zs import normalization
from gradient_def import gradient_descent
import matplotlib.pyplot as plt
from mpl_toolkits import mplot3d
import pandas as pd
from sympy import plot
from sympy.plotting import plot3d
from sympy import symbols
my_data = np.genfromtxt('Data2.csv', delimiter=',')
my_data1 = np.insert(my_data, 0, 1, axis=1)  # raw copy with a bias column

my_data = np.concatenate(
    (normalization(my_data[:, :-1]), my_data[:, -1].reshape(
        my_data[:, -1].shape[0], 1)),
    axis=1)

zz = my_data[:, 2]
xt = my_data1[:, :-1]
ys = my_data[:, 1]
xs = my_data[:, 0]

#ax.scatter(x,y,z)
#ax.plot3D(x,y,z)

# wbest: fitted weight vector, computed earlier in the full script (omitted here)
x2 = int(wbest[1])
x1 = int(wbest[2])
x0 = int(wbest[0])
x, y = symbols('x y')
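
The snippet ends before anything is drawn. With x and y declared as sympy symbols and x0, x1, x2 taken from the fitted weights, the plane is presumably rendered with plot3d; the axis ranges below are placeholders:

plot3d(x0 + x1 * x + x2 * y, (x, -3, 3), (y, -3, 3))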
Example #6
# -*- coding: utf-8 -*-
"""
Created on Sun Sep  2 15:56:01 2018

@author: srikant nayak
"""

#def gradient_descent(eps=1e-5,alpha=1e-3):
import numpy as np
import matplotlib.pyplot as plt
from zs import normalization
import math
my_data = np.genfromtxt('Data2.csv', delimiter=',')
out = normalization(my_data)
my_data = np.insert(out, 0, 1, axis=1)  # prepend a bias column of ones
num1, num2 = my_data.shape
train = math.ceil(.7 * num1)
test = num1 - train

np.random.shuffle(my_data)
training, testing = my_data[:train, :], my_data[train:, :]

x_train = training[:, :-1]
x_test = testing[:, :-1]
y_train = training[:, -1]
y_test = testing[:, -1]
eps = .00001
Example #7
"""
@author: srikant nayak
"""
# direct method

from gradient_def import gradient_descent
import numpy as np
from zs import normalization
import math
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt

my_data = np.genfromtxt('data2.csv', delimiter=',')

x = my_data[0:, 0:-1]
y = my_data[:, -1]
X = np.power(x, 8)  # raise every feature to the 8th power
X = normalization(X)
X = np.insert(X, 0, 1, axis=1)  # bias column

xtrain, x_test, y_train, y_test = train_test_split(X, y, test_size=0.3)

alpha = .00001
eps = .00001

w_old = np.zeros(xtrain.shape[1])
w_new = np.zeros(xtrain.shape[1])

# X^T X does not change across iterations, so compute it once
xtx = np.dot(xtrain.T, xtrain)

while True:

    xtxw = np.dot(xtx, w_old)
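    # assumed continuation (the listing is cut off here), mirroring the
    # update and stopping test of Example #1:
    grad = xtxw - np.dot(xtrain.T, y_train)  # gradient: X^T X w - X^T y
    w_new = w_old - alpha * grad
    if np.linalg.norm(w_new - w_old, 2) < eps:
        break
    w_old = np.copy(w_new)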