    def train(self, X, Y):
        """
        Train a linear model using gradient descent, based on code in
        module gd.
        """

        # get the relevant options
        lossFn   = self.opts['lossFunction']         # loss function to optimize
        lambd    = self.opts['lambda']               # regularizer is (lambd / 2) * ||w||^2
        numIter  = self.opts['numIter']              # how many iterations of gd to run
        stepSize = self.opts['stepSize']             # what should be our GD step size?

        # define our objective function based on loss, lambd and (X,Y)
        def func(w):
            # should compute obj = loss(w) + (lambd/2) * norm(w)^2
            # per-example predictions via numpy broadcasting: Yhat_n = w . x_n
            Yhat = sum(w * X, axis=1)
            # regularized objective: loss plus (lambd/2) * ||w||^2
            obj  = lossFn.loss(Y, Yhat) + (lambd / 2) * dot(w, w)
            # return the objective
            return obj

        # define our gradient function based on loss, lambd and (X,Y)
        def grad(w):
            # should compute gr = grad(w) + lambd * w
            # per-example predictions, then loss gradient plus the regularizer gradient
            Yhat = sum(w * X, axis=1)
            gr   = lossFn.lossGradient(X, Y, Yhat) + lambd * w
            return gr

        # run gradient descent; our initial point will just be our
        # weight vector
        w, trajectory = gd(func, grad, self.weights, numIter, stepSize)

        # store the weights and trajectory
        self.weights = w
        self.trajectory = trajectory
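Each train() variant on this page delegates the optimization loop to gd(func, grad, w0, numIter, stepSize) from module gd, which is not shown here. A minimal sketch of what such a routine might look like, assuming it returns the final weights together with a trajectory of objective values (the real module gd may differ):

import numpy as np

def gd(func, grad, w0, numIter, stepSize):
    """Plain gradient descent with a fixed step size (illustrative sketch only)."""
    w = np.asarray(w0, dtype=float)
    trajectory = [func(w)]              # objective at the starting point
    for it in range(numIter):
        w = w - stepSize * grad(w)      # step against the gradient
        trajectory.append(func(w))      # record the objective after the step
    return w, np.array(trajectory)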
    def train(self, X, Y):
        """
        Train a linear model using gradient descent, based on code in
        module gd.
        """

        # get the relevant options
        lossFn   = self.opts['lossFunction']  # loss function to optimize
        lambd    = self.opts['lambda']        # regularizer is (lambd / 2) * ||w||^2
        numIter  = self.opts['numIter']       # how many iterations of gd to run
        stepSize = self.opts['stepSize']      # what should be our GD step size?

        # define our objective function based on loss, lambd and (X,Y)
        def func(w):
            # should compute obj = loss(w) + (lambd/2) * norm(w)^2
            # per-example predictions; if w is still the scalar 0 used as the
            # initial weights, every prediction is 0
            Yhat = []
            for x in X:
                if np.isscalar(w):
                    Yhat.append(0)
                else:
                    Yhat.append(dot(w, x))

            Yhat = np.array(Yhat)

            # regularized objective: loss plus (lambd/2) * ||w||^2
            obj = lossFn.loss(Y, Yhat) + (lambd / 2) * norm(w) ** 2

            # return the objective
            return obj

        # define our gradient function based on loss, lambd and (X,Y)
        def grad(w):
            # should compute gr = grad(w) + lambd * w
            # per-example predictions, with the same scalar-initial-weights guard as in func
            Yhat = []
            for x in X:
                if np.isscalar(w):
                    Yhat.append(0)
                else:
                    Yhat.append(dot(w, x))

            Yhat = np.array(Yhat)

            # loss gradient plus the regularizer gradient lambd * w
            gr = lossFn.lossGradient(X, Y, Yhat) + lambd * w

            return gr

        # run gradient descent; our initial point will just be our
        # weight vector
        w, trajectory = gd(func, grad, self.weights, numIter, stepSize)

        # store the weights and trajectory
        self.weights = w
        self.trajectory = trajectory
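Every snippet also assumes a loss object exposing loss(Y, Yhat) and lossGradient(X, Y, Yhat); Example #7 below names SquaredLoss explicitly. A squared-loss sketch under that assumed interface (the actual loss classes in the course code may differ):

import numpy as np

class SquaredLoss:
    """Illustrative squared loss matching the loss/lossGradient interface used above."""

    def loss(self, Y, Yhat):
        # 0.5 * sum_n (yhat_n - y_n)^2
        return 0.5 * np.sum((np.asarray(Yhat) - np.asarray(Y)) ** 2)

    def lossGradient(self, X, Y, Yhat):
        # gradient of the loss with respect to w, given Yhat = X w:
        # sum_n (yhat_n - y_n) * x_n
        return np.dot(np.asarray(Yhat) - np.asarray(Y), X)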
Example #3
    def train(self, X, Y):
        """
        Train a linear model using gradient descent, based on code in
        module gd.
        """

        # get the relevant options
        lossFn = self.opts['lossFunction']  # loss function to optimize
        lambd = self.opts['lambda']  # regularizer is (lambd / 2) * ||w||^2
        numIter = self.opts['numIter']  # how many iterations of gd to run
        stepSize = self.opts['stepSize']  # what should be our GD step size?

        # define our objective function based on loss, lambd and (X,Y)
        def func(w):
            # should compute obj = loss(w) + (lambd/2) * norm(w)^2

            Yhat = []

            # for each example get the prediction
            for i in xrange(len(X)):
                Yhat.append(dot(w, X[i]))

            # get the objective from the loss function and the predictions
            obj = lossFn.loss(Y, Yhat) + (lambd / 2) * norm(w)**2

            # return the objective
            return obj

        # define our gradient function based on loss, lambd and (X,Y)
        def grad(w):
            # should compute gr = grad(w) + lambd * w

            Yhat = []

            # get the predictions for each example
            for i in xrange(len(X)):
                Yhat.append(dot(w, X[i]))

            # get the gradient from the loss function, X, Y, and the predictions
            gr = lossFn.lossGradient(X, Y, Yhat) + lambd * w

            return gr

        # run gradient descent; our initial point will just be our
        # weight vector
        self.weights = zeros(len(X[0]))
        w, trajectory = gd(func, grad, self.weights, numIter, stepSize)

        # store the weights and trajectory
        self.weights = w
        self.trajectory = trajectory
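All of these func/grad pairs compute the same regularized objective; written out, with L the loss from opts['lossFunction'] and \lambda = lambd:

f(w) = L\big(Y, \hat{Y}(w)\big) + \frac{\lambda}{2}\lVert w\rVert^2,
\qquad \hat{y}_n = w \cdot x_n,
\qquad \nabla f(w) = \nabla_w L\big(Y, \hat{Y}(w)\big) + \lambda w .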
Example #4
    def train(self, X, Y):
        """
        Train a linear model using gradient descent, based on code in
        module gd.
        """

        # get the relevant options
        lossFn = self.opts['lossFunction']  # loss function to optimize
        lambd = self.opts['lambda']  # regularizer is (lambd / 2) * ||w||^2
        numIter = self.opts['numIter']  # how many iterations of gd to run
        stepSize = self.opts['stepSize']  # what should be our GD step size?

        self.weights = zeros(size(X, 1))

        # print('X: ', X.shape)
        # print('Y: ', Y.shape)
        # print('w: ', self.weights.shape)

        # define our objective function based on loss, lambd and (X,Y)
        def func(w):
            # should compute obj = loss(w) + (lambd/2) * norm(w)^2

            Yhat = dot(X, w)

            obj = lossFn.loss(Y, Yhat) + (lambd / 2) * (linalg.norm(w)**2)

            # return the objective
            return obj

        # define our gradient function based on loss, lambd and (X,Y)
        def grad(w):
            # should compute gr = grad(w) + lambd * w

            Yhat = dot(X, w)

            gr = lossFn.lossGradient(X, Y, Yhat) + lambd * w

            return gr

        # run gradient descent; our initial point will just be our
        # weight vector
        w, trajectory = gd(func, grad, self.weights, numIter, stepSize)

        # store the weights and trajectory
        self.weights = w
        self.trajectory = trajectory
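For context, a hypothetical end-to-end run of these pieces on synthetic data, assuming the gd and SquaredLoss sketches from earlier on this page are in scope (data and hyperparameters here are illustrative only):

import numpy as np

np.random.seed(0)
X = np.random.randn(50, 3)                  # 50 examples, 3 features
w_true = np.array([1.0, -2.0, 0.5])
Y = X @ w_true + 0.1 * np.random.randn(50)  # noisy linear targets

lossFn, lambd = SquaredLoss(), 0.01

def func(w):
    # regularized objective, exactly as in the train() methods above
    return lossFn.loss(Y, X @ w) + (lambd / 2) * np.dot(w, w)

def grad(w):
    # its gradient: loss gradient plus lambd * w
    return lossFn.lossGradient(X, Y, X @ w) + lambd * w

w, trajectory = gd(func, grad, np.zeros(3), 200, 0.01)
print(w, trajectory[-1])                    # learned weights, final objective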
    def train(self, X, Y):
        """
        Train a linear model using gradient descent, based on code in
        module gd.
        """
        if (self.weights == 0):
            self.weights = zeros(X.shape[1])

        # get the relevant options
        lossFn = self.opts['lossFunction']  # loss function to optimize
        lambd = self.opts['lambda']  # regularizer is (lambd / 2) * ||w||^2
        numIter = self.opts['numIter']  # how many iterations of gd to run
        stepSize = self.opts['stepSize']  # what should be our GD step size?

        # define our objective function based on loss, lambd and (X,Y)
        def func(w):
            # should compute obj = loss(w) + (lambd/2) * norm(w)^2
            # per-example predictions: Yhat_i = w . x_i
            Yhat = []
            for i in range(len(Y)):
                Yhat.append(dot(w, X[i, :]))

            # regularized objective: loss plus (lambd/2) * ||w||^2
            obj = lossFn.loss(Y, Yhat) + (lambd / 2) * norm(w) ** 2

            # return the objective
            return obj

        # define our gradient function based on loss, lambd and (X,Y)
        def grad(w):
            # should compute gr = grad(w) + lambd * w
            # per-example predictions: Yhat_i = w . x_i
            Yhat = []
            for i in range(len(Y)):
                Yhat.append(dot(w, X[i, :]))

            # loss gradient plus the regularizer gradient lambd * w
            gr = lossFn.lossGradient(X, Y, Yhat) + lambd * w

            return gr

        # run gradient descent; our initial point will just be our
        # weight vector
        w, trajectory = gd(func, grad, self.weights, numIter, stepSize)

        # store the weights and trajectory
        self.weights = w
        self.trajectory = trajectory
Example #6
	def train(self, X, Y):
		"""
		Train a linear model using gradient descent, based on code in
		module gd.
		"""

		# get the relevant options
		lossFn   = self.opts['lossFunction']  # loss function to optimize
		lambd    = self.opts['lambda']        # regularizer is (lambd / 2) * ||w||^2
		numIter  = self.opts['numIter']       # how many iterations of gd to run
		stepSize = self.opts['stepSize']      # what should be our GD step size?

		self.weights = zeros( len( X[0] ) )
		# define our objective function based on loss, lambd and (X,Y)
		def func(w):
			# should compute obj = loss(w) + (lambd/2) * norm(w)^2
			Yhat = dot(w, X.T)

			obj = lossFn.loss(Y, Yhat) + (lambd / 2) * norm(w) ** 2

			# return the objective
			return obj

		# define our gradient function based on loss, lambd and (X,Y)
		def grad(w):
			# should compute gr = grad(w) + lambd * w
			Yhat = dot(w, X.T)

			gr = lossFn.lossGradient(X, Y, Yhat) + lambd * w
			
			return gr

		# run gradient descent; our initial point will just be our
		# weight vector
		# pdb.set_trace()
		w, trajectory = gd(func, grad, self.weights, numIter, stepSize)

		# store the weights and trajectory
		self.weights = w
		self.trajectory = trajectory
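Example #6 computes the predictions as dot(w, X.T), while other snippets use dot(X, w), sum(w * X, axis=1), or a per-example loop; for a plain (n, d) numpy array X and a (d,) weight vector w these are all the same vector. A quick check (a sketch under that assumption):

import numpy as np

X = np.random.randn(5, 3)
w = np.random.randn(3)

a = np.dot(X, w)                           # dot(X, w), as in Example #4
b = np.dot(w, X.T)                         # dot(w, X.T), as in Example #6
c = np.sum(w * X, axis=1)                  # broadcasting, as in the first snippet
d = np.array([np.dot(w, x) for x in X])    # per-example loop, as in Example #3
assert np.allclose(a, b) and np.allclose(a, c) and np.allclose(a, d)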
Example #7
    def train(self, X, Y):
        """
        Train a linear model using gradient descent, based on code in
        module gd.
        """

        # get the relevant options
        lossFn = self.opts['lossFunction']  # loss function to optimize
        lambd = self.opts['lambda']  # regularizer is (lambd / 2) * ||w||^2
        numIter = self.opts['numIter']  # how many iterations of gd to run
        stepSize = self.opts['stepSize']  # what should be our GD step size?

        # define our objective function based on loss, lambd and (X,Y)
        def func(w):
            # should compute obj = loss(w) + (lambd/2) * norm(w)^2
            Yhat = dot(X, w)

            sqloss = SquaredLoss()

            # regularized objective: squared loss plus (lambd/2) * ||w||^2
            obj = sqloss.loss(Y, Yhat) + (lambd / 2) * norm(w) ** 2

            # return the objective
            return obj

        # define our gradient function based on loss, lambd and (X,Y)
        def grad(w):
            # should compute gr = grad(w) + lambd * w
            Yhat = dot(X, w)

            sqloss = SquaredLoss()

            # loss gradient plus the regularizer gradient lambd * w
            gr = sqloss.lossGradient(X, Y, Yhat) + lambd * w

            return gr

        # run gradient descent; our initial point will just be our
        # weight vector
        w, trajectory = gd(func, grad, self.weights, numIter, stepSize)

        # store the weights and trajectory
        self.weights = w
        self.trajectory = trajectory
Example #8
    def train(self, X, Y):
        """
        Train a linear model using gradient descent, based on code in
        module gd.
        """

        # get the relevant options
        lossFn   = self.opts['lossFunction']         # loss function to optimize
        lambd    = self.opts['lambda']               # regularizer is (lambd / 2) * ||w||^2
        numIter  = self.opts['numIter']              # how many iterations of gd to run
        stepSize = self.opts['stepSize']             # what should be our GD step size?

        # define our objective function based on loss, lambd and (X,Y)
        def func(w):
            # should compute obj = loss(w) + (lambd/2) * norm(w)^2
            # per-example predictions: Yhat = X w
            Yhat = dot(X, w)

            # regularized objective: loss plus (lambd/2) * ||w||^2
            obj  = lossFn.loss(Y, Yhat) + (lambd / 2) * dot(w, w)

            # return the objective
            return obj

        # define our gradient function based on loss, lambd and (X,Y)
        def grad(w):
            # should compute gr = grad(w) + lambd * w
            # per-example predictions, then loss gradient plus the regularizer gradient
            Yhat = dot(X, w)

            gr   = lossFn.lossGradient(X, Y, Yhat) + lambd * w

            return gr

        # run gradient descent; our initial point will just be our
        # weight vector
        w, trajectory = gd(func, grad, self.weights, numIter, stepSize)

        # store the weights and trajectory
        self.weights = w
        self.trajectory = trajectory
Example #9
    def train(self, X, Y):

        lossFn = self.opts['lossFunction']  # loss function to optimize
        lambd = self.opts['lambda']  # regularizer is (lambd / 2) * ||w||^2
        numIter = self.opts['numIter']  # how many iterations of gd to run
        stepSize = self.opts['stepSize']  # what should be our GD step size?

        self.weights = zeros(size(X, 1))

        def func(w):
            # obj = loss(w) + (lambd/2) * norm(w)^2
            Yhat = X @ w
            obj = lossFn.loss(Y, Yhat) + (lambd / 2) * norm(w) * norm(w)
            return obj

        def grad(w):
            # gr = grad(w) + lambd * w
            Yhat = X @ w
            gr = lossFn.lossGradient(X, Y, Yhat) + lambd * w
            return gr

        w, trajectory = gd(func, grad, self.weights, numIter, stepSize)
        self.weights = w
        self.trajectory = trajectory
def test_gd(f, df, dim):
    package_ans(gd(f, df, np.transpose([np.zeros(dim)]), lambda i: 0.01, 1000))
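test_gd calls a different gd interface than the train() methods above: the initial point is a (dim, 1) column vector, the step size is a function of the iteration index, and the whole result is handed to package_ans (presumably an autograder helper, not shown). A sketch of a gd matching that calling convention (an assumption; the real implementation and its return value may differ):

import numpy as np

def gd(f, df, x0, step_size_fn, max_iter):
    """Gradient descent with an iteration-dependent step size (illustrative sketch only)."""
    x = x0.copy()
    fs, xs = [f(x)], [x.copy()]          # objective values and iterates
    for i in range(max_iter):
        x = x - step_size_fn(i) * df(x)  # step size may change with the iteration index
        fs.append(f(x))
        xs.append(x.copy())
    return x, fs, xs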