Example #1
    # Requires numpy (imported as np) and the EvaluateRBM helper in the enclosing module.
    def minibatchTrain(self, train, test, rtest, batch_size):
        self.nn = self.RBM.nn
        slearn_rate = self.RBM.modelArgs.learn_rate
        max_iter = self.RBM.modelArgs.max_iter
        CD = self.RBM.modelArgs.CD
        lamda = self.RBM.modelArgs.lamda
        momentum = self.RBM.modelArgs.momentum
        min_learn_rate = self.RBM.modelArgs.min_learn_rate

        dW_old = np.zeros(self.nn.weights[0].shape)
        dv_old = np.zeros(self.nn.layers[0].bias.shape)
        dh_old = np.zeros(self.nn.layers[1].bias.shape)
        evaluate = EvaluateRBM(self.RBM)

        m, n = train.shape
        # Build batch boundaries; fold a small trailing remainder into the previous batch.
        batches = list(range(0, m, batch_size))
        if batches[-1] != m:
            if (m - batches[-1]) < (batch_size / 2.0):
                batches[-1] = m
            else:
                batches.append(m)
        for i in range(max_iter):
            if i > 50:
                # After 50 epochs, take more Gibbs steps and use heavier momentum.
                CD = 3
                momentum = 0.9
            for j in range(len(batches) - 1):
                start = batches[j]
                end = batches[j + 1]
                learn_rate = slearn_rate / (end - start)
                learn_rate = max(learn_rate, min_learn_rate)

                # Positive phase: hidden activations driven by the training rows.
                vispos = train[start:end, :]
                visneg = vispos.copy()
                hidpos = self.RBM.getHiddenActivation(vispos)
                hidneg = hidpos
                # Negative phase: CD Gibbs steps reconstructing the same sparse entries.
                for k in range(CD):
                    visneg_data = self.RBM.getVisibleActivation(hidneg, vispos)
                    visneg.data = visneg_data
                    hidneg = self.RBM.getHiddenActivation(visneg)

                # Contrastive-divergence gradients with momentum and L2 weight decay.
                dW = momentum * dW_old + learn_rate *\
                    ((vispos.T * hidpos) -
                     (visneg.T * hidneg) - lamda * self.nn.weights[0])
                dvbias = momentum * dv_old + learn_rate *\
                    ((vispos - visneg).sum(axis=0) -
                     lamda * self.nn.layers[0].bias)
                dhbias = momentum * dh_old + 0.1 * learn_rate *\
                    ((hidpos - hidneg).sum(axis=0) -
                     lamda * self.nn.layers[1].bias)

                dW_old = dW
                dv_old = dvbias
                dh_old = dhbias

                self.nn.weights[0] += dW
                self.nn.layers[0].bias += dvbias
                self.nn.layers[1].bias += dhbias
            if i % 5 == 0:
                # Decay the base learning rate and report RMSE/MAE every 5 epochs.
                slearn_rate *= 0.95
                print(evaluate.calculateRMSEandMAE(train, test, rtest))
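
The helpers getHiddenActivation and getVisibleActivation are not shown in this snippet, and the training matrix is sparse ratings data. As a rough, hypothetical illustration of what one contrastive-divergence update amounts to, the sketch below runs a single CD-1 step for a plain binary RBM on dense NumPy arrays; the sigmoid units, the function names, and the dense layout are assumptions, and momentum and the per-batch learning-rate scaling used above are left out for brevity.

import numpy as np


def sigmoid(x):
    return 1.0 / (1.0 + np.exp(-x))


def cd1_step(v_pos, W, vbias, hbias, learn_rate=0.01, lamda=0.001):
    # One CD-1 update for a binary RBM on a dense (batch, n_visible) array.
    h_pos = sigmoid(v_pos @ W + hbias)      # positive phase
    v_neg = sigmoid(h_pos @ W.T + vbias)    # one Gibbs step down ...
    h_neg = sigmoid(v_neg @ W + hbias)      # ... and back up
    # Contrastive-divergence gradients with L2 weight decay.
    dW = learn_rate * (v_pos.T @ h_pos - v_neg.T @ h_neg - lamda * W)
    dv = learn_rate * (v_pos - v_neg).sum(axis=0)
    dh = learn_rate * (h_pos - h_neg).sum(axis=0)
    return W + dW, vbias + dv, hbias + dh


rng = np.random.default_rng(0)
W = rng.normal(scale=0.01, size=(6, 4))
vbias, hbias = np.zeros(6), np.zeros(4)
v_batch = (rng.random((8, 6)) > 0.5).astype(float)
W, vbias, hbias = cd1_step(v_batch, W, vbias, hbias)
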
Example #2
    # Requires numpy (imported as np) and the EvaluateRBM helper in the enclosing module.
    def sgdTrain(self, train, test, rtest):
        self.nn = self.RBM.nn
        learn_rate = self.RBM.modelArgs.learn_rate
        max_iter = self.RBM.modelArgs.max_iter
        CD = self.RBM.modelArgs.CD
        lamda = self.RBM.modelArgs.lamda
        momentum = self.RBM.modelArgs.momentum

        dW_old = np.zeros(self.nn.weights[0].shape)
        dv_old = np.zeros(self.nn.layers[0].bias.shape)
        dh_old = np.zeros(self.nn.layers[1].bias.shape)
        evaluate = EvaluateRBM(self.RBM)
        # traindata = train.data
        # testdata = test.data

        m, n = train.shape
        for i in range(max_iter):
            if i > 50:
                # After 50 epochs, take more Gibbs steps and use heavier momentum.
                CD = 3
                momentum = 0.9
            for j in range(m):
                # Positive phase: one SGD update per training row.
                vispos = train.getrow(j)
                visneg = vispos.copy()
                hidpos = self.RBM.getHiddenActivation(vispos)
                hidneg = hidpos
                # Negative phase: CD Gibbs steps reconstructing the same sparse entries.
                for k in range(CD):
                    visneg_data = self.RBM.getVisibleActivation(hidneg, vispos)
                    visneg.data = visneg_data
                    hidneg = self.RBM.getHiddenActivation(visneg)

                # Contrastive-divergence gradients with momentum and L2 weight decay.
                dW = momentum * dW_old + learn_rate *\
                    ((vispos.T * hidpos) -
                     (visneg.T * hidneg) - lamda * self.nn.weights[0])
                dvbias = momentum * dv_old + learn_rate *\
                    ((vispos - visneg).sum(axis=0) -
                     lamda * self.nn.layers[0].bias)
                dhbias = momentum * dh_old + 0.1 * learn_rate *\
                    ((hidpos - hidneg).sum(axis=0) -
                     lamda * self.nn.layers[1].bias)

                dW_old = dW
                dv_old = dvbias
                dh_old = dhbias

                self.nn.weights[0] += dW
                self.nn.layers[0].bias += dvbias
                self.nn.layers[1].bias += dhbias
            if i % 5 == 0:
                # Decay the learning rate and report RMSE/MAE every 5 epochs.
                learn_rate *= 0.95
                print(evaluate.calculateRMSEandMAE(train, test, rtest))
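
EvaluateRBM and its calculateRMSEandMAE method are not included in these snippets. Judging by the name, the call reports root-mean-square error and mean absolute error on the held-out ratings; the stand-alone sketch below is a hypothetical stand-in for that kind of computation, not the actual EvaluateRBM interface.

import numpy as np


def rmse_and_mae(predicted, actual):
    # RMSE and MAE over aligned 1-D arrays of predicted and true ratings.
    predicted = np.asarray(predicted, dtype=float)
    actual = np.asarray(actual, dtype=float)
    errors = predicted - actual
    return np.sqrt(np.mean(errors ** 2)), np.mean(np.abs(errors))


print(rmse_and_mae([3.8, 2.1, 4.6], [4.0, 2.0, 5.0]))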