Example #1
def run_softmax_gradient_check(X, Y, numargs):
    '''Check the analytic softmax gradient against a numerical estimate.'''
    # Bind the data so the loss and its gradient become functions of the weights only
    f = prepare_for_gradient_check(ms.loss_softmax, X, Y)
    df = prepare_for_gradient_check(ms.gradient_softmax_batch, X, Y)
    # Start the check from a zero weight vector (one weight per feature);
    # a random start, e.g. np.random.rand(numargs), would work as well.
    w = np.zeros(X.shape[1])
    print(gradient_check(f, df, numargs, x=[w]))
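
prepare_for_gradient_check and the ms.loss_softmax / ms.gradient_softmax_batch functions are defined elsewhere in the repository and are not shown here. One plausible reading, assuming the loss takes the weight vector as its first argument, is that the helper simply binds the data:

def prepare_for_gradient_check(func, X, Y):
    # Hypothetical sketch: fix the dataset so the result depends only on the weights w
    return lambda w: func(w, X, Y)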



###TESTING########################################################################
def sumSquare(x):
    '''f(x) = x1^2 + x2^2'''
    return x[0]**2 + x[1]**2

def dSumSquare(x):
    '''Gradient of sumSquare: (2*x1, 2*x2)'''
    return (2*x[0], 2*x[1])

if __name__ == '__main__':
    # Load the MNIST arrays; only the test split is used for the check below
    full_trainarray = np.load('data/numpy/trainarray.npy')
    full_trainlabel = np.load('data/numpy/trainlabel.npy')
    full_testarray  = np.load('data/numpy/testarray.npy')
    full_testlabel  = np.load('data/numpy/testlabel.npy')

    X_test, Y_test = fn.preprocess_data(full_testarray, full_testlabel, True)
    # run_logistic_gradient_check is defined elsewhere in this module;
    # the number of arguments to check equals the number of features.
    run_logistic_gradient_check(X_test, Y_test, X_test.shape[1])
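
gradient_check itself is also defined elsewhere. A minimal sketch of a central-difference checker compatible with the call gradient_check(f, df, numargs, x=[w]) above could look as follows; the tolerance, the boolean return value, and the treatment of x as a one-element list are assumptions rather than the repository's actual implementation:

import numpy as np

def gradient_check(f, df, numargs, x, eps=1e-5, tol=1e-6):
    '''Compare the analytic gradient df against central finite differences of f.'''
    w = np.asarray(x[0], dtype=float)
    analytic = np.asarray(df(w), dtype=float)
    numeric = np.zeros(numargs)
    for i in range(numargs):
        step = np.zeros_like(w)
        step[i] = eps
        numeric[i] = (f(w + step) - f(w - step)) / (2 * eps)
    # Use a relative error so the test is meaningful at different gradient scales
    denom = np.maximum(np.abs(analytic[:numargs]) + np.abs(numeric), 1e-12)
    return np.max(np.abs(analytic[:numargs] - numeric) / denom) < tol

With the test functions above, gradient_check(sumSquare, dSumSquare, 2, x=[np.array([2.0, 3.0])]) should return True.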
Example #2
        self.reset()
        w = np.random.rand(X_train.shape[1])
        pass

    # Plot convergence results
    def plot_convergence(self):
        pass

    # Reset state before starting a new optimization method
    def reset(self):
        self.log_lkhd = np.array([0], dtype=np.float64)   # log-likelihood history
        self.stepsz = np.array([0], dtype=np.float64)     # step-size history
        self.params = np.array([0], dtype=np.float64)     # parameter history
        # Initialize the weights from a randomly chosen column of the training data
        self.w = self.X_train[:, np.random.randint(1, self.X_train.shape[1], 1)]
        self.gradient = []


if __name__=='__main__':
    # Load dataset from MNIST
    full_trainarray = np.load(os.path.join('data','numpy','trainarray.npy'))
    full_trainlabel = np.load(os.path.join('data','numpy','trainlabel.npy'))
    full_testarray  = np.load(os.path.join('data','numpy','testarray.npy' ))
    full_testlabel  = np.load(os.path.join('data','numpy','testlabel.npy' ))
    
    
    X_train, Y_train = fn.preprocess_data(full_trainarray, full_trainlabel)
    X_test, Y_test = fn.preprocess_data(full_testarray, full_testlabel)
    gdo = GradientDescent(X_train, Y_train, X_test, Y_test)
    gdo.batch_gd()
    gdo.plot_convergence()
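
batch_gd and plot_convergence are stubs in this excerpt, so the main block only wires the pieces together. As a rough, standalone illustration of what one full-batch update loop typically looks like (assuming a logistic loss, labels in {0, 1}, and a fixed step size; none of this is taken from the GradientDescent class itself):

import numpy as np

def batch_gd_sketch(X, y, lr=0.1, n_iters=100):
    '''Illustrative full-batch gradient descent for logistic regression.'''
    w = np.zeros(X.shape[1])
    for _ in range(n_iters):
        p = 1.0 / (1.0 + np.exp(-X @ w))    # predicted probabilities
        grad = X.T @ (p - y) / X.shape[0]   # gradient of the mean log loss
        w -= lr * grad                      # step against the full-batch gradient
    return w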