Code Example #1
File: neuralnetworksA4.py  Project: jayzym/cs445
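    # Note: this snippet is a method from a larger class definition. Running it
    # presumes module-level imports along the lines of the following (the helper
    # module imported as `ml`, which supplies scg() and makeIndicatorVars(), is
    # not named anywhere on this page):
    #   import sys, time
    #   import numpy as np
    #   from copy import copy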
    def train(self,
              X,
              T,
              nIterations=100,
              verbose=False,
              weightPrecision=0,
              errorPrecision=0,
              saveWeightsHistory=False):

        if self.Xmeans is None:
            self.Xmeans = X.mean(axis=0)
            self.Xstds = X.std(axis=0)
            self.Xconstant = self.Xstds == 0
            self.XstdsFixed = copy(self.Xstds)
            self.XstdsFixed[self.Xconstant] = 1
        X = self._standardizeX(X)

        if T.ndim == 1:
            T = T.reshape((-1, 1))

        if self.Tmeans is None:
            self.Tmeans = T.mean(axis=0)
            self.Tstds = T.std(axis=0)
            self.Tconstant = self.Tstds == 0
            self.TstdsFixed = copy(self.Tstds)
            self.TstdsFixed[self.Tconstant] = 1
        self.classes = np.unique(T)
        Tindicators = ml.makeIndicatorVars(T)

        startTime = time.time()

        scgresult = ml.scg(self._pack(self.Vs, self.W),
                           self._objectiveF,
                           self._gradientF,
                           X,
                           Tindicators,
                           xPrecision=weightPrecision,
                           fPrecision=errorPrecision,
                           nIterations=nIterations,
                           verbose=verbose,
                           ftracep=True,
                           xtracep=saveWeightsHistory)

        self._unpack(scgresult['x'])
        self.reason = scgresult['reason']
        self.errorTrace = scgresult['ftrace']  # * self.Tstds # to _unstandardize the MSEs
        self.numberOfIterations = len(self.errorTrace)
        self.trained = True
        self.weightsHistory = scgresult['xtrace'] if saveWeightsHistory else None
        self.trainingTime = time.time() - startTime
        return self
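
All three examples standardize the inputs with _standardizeX() before training, but that helper is not shown on this page. Below is a minimal sketch of what it and the inverse target transform presumably look like, inferred from the statistics the snippets store (Xmeans, XstdsFixed, Xconstant, and their T counterparts); the actual project code may differ.

    def _standardizeX(self, X):
        # Center and scale each input column. Constant columns (std == 0)
        # divide by the placeholder value 1 stored in XstdsFixed and are
        # then zeroed explicitly, avoiding division by zero.
        result = (X - self.Xmeans) / self.XstdsFixed
        result[:, self.Xconstant] = 0.0
        return result

    def _standardizeT(self, T):
        # The same transform for targets, as used by Code Example #2 below.
        result = (T - self.Tmeans) / self.TstdsFixed
        result[:, self.Tconstant] = 0.0
        return result

    def _unstandardizeT(self, Ts):
        # Invert the target transform to report predictions in original units.
        return self.Tstds * Ts + self.Tmeans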
Code Example #2
    def train(self, X, T, nIterations=100, verbose=False):

        if self.Xmeans is None:
            self.Xmeans = X.mean(axis=0)
            self.Xstds = X.std(axis=0)
            self.Xconstant = self.Xstds == 0
            self.XstdsFixed = copy(self.Xstds)
            self.XstdsFixed[self.Xconstant] = 1
        X = self._standardizeX(X)

        if T.ndim == 1:
            T = T.reshape((-1, 1))

        if self.Tmeans is None:
            self.Tmeans = T.mean(axis=0)
            self.Tstds = T.std(axis=0)
            self.Tconstant = self.Tstds == 0
            self.TstdsFixed = copy(self.Tstds)
            self.TstdsFixed[self.Tconstant] = 1
        T = self._standardizeT(T)

        # Local functions used by scg()

        def objectiveF(w):
            self._unpack(w)
            Y, _ = self._forward_pass(X)
            return 0.5 * np.mean((Y - T)**2)

        def gradF(w):
            self._unpack(w)
            Y, Z = self._forward_pass(X)
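            # d/dY of 0.5 * np.mean((Y - T)**2): the mean averages over all
            # N*K matrix entries, hence the division by N*K in the next line.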
            delta = (Y - T) / (X.shape[0] * T.shape[1])
            dVs, dW = self._backward_pass(delta, Z)
            return self._pack(dVs, dW)

        scgresult = ml.scg(self._pack(self.Vs, self.W),
                           objectiveF,
                           gradF,
                           nIterations=nIterations,
                           verbose=verbose,
                           ftracep=True)

        self._unpack(scgresult['x'])
        self.reason = scgresult['reason']
        self.errorTrace = np.sqrt(scgresult['ftrace'])  # * self.Tstds # to unstandardize the MSEs
        self.numberOfIterations = len(self.errorTrace)
        self.trained = True
        return self
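
Because objectiveF and gradF are hand-derived, a finite-difference comparison is a cheap way to confirm they agree before handing them to scg(). Here is a small, self-contained sketch; objectiveF, gradF, and the packed weight vector w are the names used in the example above.

    import numpy as np

    def numerical_gradient(f, w, eps=1e-6):
        # Central-difference approximation of df/dw, one component at a time.
        g = np.zeros_like(w)
        for i in range(w.size):
            w_plus, w_minus = w.copy(), w.copy()
            w_plus[i] += eps
            w_minus[i] -= eps
            g[i] = (f(w_plus) - f(w_minus)) / (2 * eps)
        return g

    # Usage: np.allclose(numerical_gradient(objectiveF, w), gradF(w), atol=1e-5)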
Code Example #3
    def train(self, X, T, nIterations=100, verbose=False):
        if self.Xmeans is None:
            self.Xmeans = X.mean(axis=0)
            self.Xstds = X.std(axis=0)
            self.Xconstant = self.Xstds == 0
            self.XstdsFixed = copy(self.Xstds)
            self.XstdsFixed[self.Xconstant] = 1
        X = self._standardizeX(X)

        self.classes, counts = np.unique(T, return_counts=True)
        self.mostCommonClass = self.classes[np.argmax(counts)]  # to break ties

        if self.no != len(self.classes):
            raise ValueError(
                ' In NeuralNetworkClassifier, the number of outputs must equal\n'
                ' the number of classes in the training data. The given number of outputs\n'
                ' is %d and number of classes is %d. Try changing the number of outputs in the\n'
                ' call to NeuralNetworkClassifier().' % (self.no, len(self.classes)))
        T = makeIndicatorVars(T)

        # Local functions used by gradientDescent.scg()
        def objectiveF(w):
            self._unpack(w)
            Y, _ = self._forward_pass(X)
            Y = self._multinomialize(Y)
            Y[Y == 0] = sys.float_info.epsilon
            return -np.mean(T * np.log(Y))

        def gradF(w):
            self._unpack(w)
            Y, Z = self._forward_pass(X)
            Y = self._multinomialize(Y)
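            # With softmax outputs and cross-entropy loss, the output-layer
            # error simplifies to (Y - T); dividing by N*K matches the mean
            # taken in objectiveF.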
            delta = (Y - T) / (X.shape[0] * (T.shape[1]))
            dVs, dW = self._backward_pass(delta, Z)
            return self._pack(dVs, dW)

        scgresult = ml.scg(self._pack(self.Vs, self.W),
                           objectiveF,
                           gradF,
                           nIterations=nIterations,
                           ftracep=True,
                           verbose=verbose)

        self._unpack(scgresult['x'])
        self.reason = scgresult['reason']
        self.errorTrace = scgresult['ftrace']
        self.numberOfIterations = len(self.errorTrace) - 1
        self.trained = True
        return self
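
Two helpers that Code Example #3 depends on are not shown: _multinomialize(), which maps raw network outputs to class probabilities, and makeIndicatorVars(), which one-hot encodes the targets. The sketches below assume a standard softmax and indicator encoding; the course versions may differ in detail.

    import numpy as np

    def _multinomialize(self, Y):
        # Presumed softmax: exponentiate and normalize each row so the
        # outputs form a probability distribution over the classes.
        # Subtracting the row maximum first keeps exp() from overflowing.
        expY = np.exp(Y - Y.max(axis=1, keepdims=True))
        return expY / expY.sum(axis=1, keepdims=True)

    def makeIndicatorVars(T):
        # T is expected to be a column vector of class labels, shape (N, 1).
        # Broadcasting against the (K,) array of unique classes yields an
        # (N, K) matrix with a 1 in the column of each sample's class.
        return (T == np.unique(T)).astype(float)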