# These snippets assume module-level imports along the lines of:
#   import numpy as np
#   import pacmanPlot, plotUtil, graphicsUtils
def train(self, trainingData, trainingLabels, validationData, validationLabels, showPlot=True, showPacmanPlot=True):
        """
        Stochastic gradient descent to learn self.weights
        """
        numDimensions = trainingData[0].size
        
        if showPlot:
            # Initialize list to store loss per iteration for plotting later
            trainingLossPerIteration = []
                        
        self.weights = []
        for i in xrange(len(self.legalLabels)):
            self.weights.append(np.zeros(numDimensions))
        
        # Stochastic gradient descent
        for itr in xrange(self.max_iterations):
                
            for (datum, label) in zip(trainingData, trainingLabels):
                # We have a list of arrays of weights here, instead of a matrix,
                # so we end up looping over labels
                dw = self.der_loss_dw(datum, label, self.weights)
                for j in range(len(self.legalLabels)):
                    self.weights[j] = self.weights[j] - self.alpha*dw[j]

            if showPlot:
                predictions = self.classify(validationData)
                accuracyCount = sum(p == l for p, l in zip(predictions, validationLabels))
                print "Performance on validation set for iteration %d: (%.1f%%)" % (itr, 100.0*accuracyCount/len(validationLabels))

                trainingLoss = self.classificationLoss(trainingData, trainingLabels)
                trainingLossPerIteration.append(trainingLoss)
                
                plotUtil.plotCurve(range(len(trainingLossPerIteration)), trainingLossPerIteration, 2, "Training Loss")
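The per-label update above calls self.der_loss_dw, which is not defined in this snippet. A minimal sketch, assuming a softmax (multinomial logistic) loss over self.legalLabels (the actual loss may differ), could look like this:

def der_loss_dw(self, datum, label, weights):
    """
    Hypothetical gradient sketch: softmax cross-entropy loss, returning one
    gradient array per label in self.legalLabels. An assumption, not the
    original implementation.
    """
    scores = np.array([np.dot(w, datum) for w in weights])
    scores -= scores.max()  # subtract the max score for numerical stability
    probs = np.exp(scores) / np.exp(scores).sum()
    dw = []
    for j, lbl in enumerate(self.legalLabels):
        # d(loss)/dw_j = (p_j - 1{lbl == label}) * x
        indicator = 1.0 if lbl == label else 0.0
        dw.append((probs[j] - indicator) * datum)
    return dw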
# Variant that learns a single weight vector (e.g. logistic regression)
# and can animate the decision boundary with a Pacman plot.
def train(self, trainingData, trainingLabels, validationData, validationLabels, showPlot=True, showPacmanPlot=True):
        """
        Stochastic gradient descent to learn self.weights
        """
        numDimensions = trainingData[0].size
        
        # Initializes weights to zero
        self.weights = np.zeros(numDimensions)
        
        if showPlot:
            # Initialize list to store loss per iteration for plotting later
            trainingLossPerIteration = []
            # Initial loss
            trainingLoss = self.classificationLoss(trainingData, trainingLabels)
            trainingLossPerIteration.append(trainingLoss)

            # Check for an offset (bias) term: if every datum ends in a
            # constant 1, the last dimension is the bias and is not a
            # plottable data dimension
            plotDims = numDimensions - 1
            for datum in trainingData:
                if datum[-1] != 1:
                    plotDims += 1
                    break
                 
            if showPacmanPlot and plotDims <= 2:
                if plotDims == 2:
                    pacmanDisplay = pacmanPlot.PacmanPlotClassification2D()
                    pacmanDisplay.plot(trainingData[:,:plotDims], trainingLabels)
                else:
                    pacmanDisplay = pacmanPlot.PacmanPlotLogisticRegression1D()
                    pacmanDisplay.plot(trainingData[:,0], trainingLabels)

                graphicsUtils.sleep(0.1)
            
        # Stochastic gradient descent
        for itr in xrange(self.max_iterations):
                
            for (datum, label) in zip(trainingData, trainingLabels):
                self.weights = stochasticGradientDescentUpdate(datum, label, self.weights, self.alpha, self.der_loss_dw)

            if showPlot:
                predictions = self.classify(validationData)
                accuracyCount = sum(p == l for p, l in zip(predictions, validationLabels))
                print "Performance on validation set for iteration %d: (%.1f%%)" % (itr, 100.0*accuracyCount/len(validationLabels))

                trainingLoss = self.classificationLoss(trainingData, trainingLabels)
                trainingLossPerIteration.append(trainingLoss)
                
                if plotDims <= 2:
                    if showPacmanPlot:
                        pacmanDisplay.setWeights(self.weights)
                        graphicsUtils.sleep(0.1)
                    else:
                        if plotDims == 2:
                            plotUtil.plotClassification2D(trainingData[:,:plotDims], trainingLabels, self.weights, 1)
                        else:
                            plotUtil.plotLogisticRegression1D(trainingData[:,:plotDims], trainingLabels, self.weights, 1)
                plotUtil.plotCurve(range(len(trainingLossPerIteration)), trainingLossPerIteration, 2, "Training Loss")

        if showPlot and showPacmanPlot:
            graphicsUtils.end_graphics()
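Both variants of train delegate the weight update to stochasticGradientDescentUpdate, which is also not shown in this snippet. Judging from the call sites, a minimal sketch is a single gradient step; anything beyond that (learning-rate schedules, regularization) would be an assumption.

def stochasticGradientDescentUpdate(datum, label, weights, alpha, der_loss_dw):
    """
    One SGD step on a single example: w <- w - alpha * dL/dw.
    Minimal sketch inferred from the call sites above.
    """
    return weights - alpha * der_loss_dw(datum, label, weights)

In the list-of-arrays variant the update is applied per label instead, which is why that version loops over self.legalLabels rather than calling this helper.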
    def trainGradient(self,
                      trainingData,
                      regressionData,
                      numIterations,
                      showPlot=True,
                      showPacmanPlot=True):
        print 'Training with gradient ...'

        if showPlot:
            # Initialize list to store loss per iteration for plotting later
            trainingLossPerIteration = []

            if showPacmanPlot:
                pacmanDisplay = pacmanPlot.PacmanPlotRegression()
                pacmanDisplay.plot(trainingData, regressionData)
                graphicsUtils.sleep(0.1)

        # Initializes weights to zero
        numDimensions = trainingData[0].size
        self.weights = np.zeros(numDimensions)

        # Stochastic gradient descent
        for i in xrange(numIterations):
            # Report progress every 10 iterations; parentheses are required,
            # since % binds tighter than +
            if (i + 1) % 10 == 0:
                print "Iteration " + str(i + 1) + " of " + str(numIterations)

            for (datum, label) in zip(trainingData, regressionData):
                self.weights = stochasticGradientDescentUpdate(
                    datum, label, self.weights, self.alpha, self.der_loss_dw)

            if showPlot:
                trainingLoss = self.regressionLoss(trainingData,
                                                   regressionData)
                trainingLossPerIteration.append(trainingLoss)

                if showPacmanPlot:
                    pacmanDisplay.setWeights(self.weights)
                    graphicsUtils.sleep(0.05)
                else:
                    plotUtil.plotRegression(trainingData, regressionData,
                                            self.weights, 1)
                    plotUtil.plotCurve(range(len(trainingLossPerIteration)),
                                       trainingLossPerIteration, 2,
                                       "Training Loss")
        if showPlot and showPacmanPlot:
            graphicsUtils.end_graphics()
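For the regression setting, self.der_loss_dw must return the gradient of the regression loss for a single example. A minimal sketch, assuming a per-example squared-error loss (w . x - y)^2 (an assumption, since the loss is not shown here):

def der_loss_dw(self, datum, label, weights):
    """
    Hypothetical gradient of a per-example squared-error loss
    (np.dot(weights, datum) - label)**2; an assumption, not the original.
    """
    residual = np.dot(weights, datum) - label
    return 2.0 * residual * datum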