Example #1
    def test(self, inp, k=10):

        # squash the input into (0, 1) and threshold it to get the initial visible state
        vp0 = sigm(inp)

        v0 = 1 * (vp0 > 0.5)

        vs = [v0]
        hs = []

        for _ in range(k):

            ## Gibbs sampling sweep

            # forward
            hp0 = sigm(np.dot(v0, self.w) + self.hbias)
            h0 = rng.binomial(1, hp0)
            hs.append(h0)

            # backward
            vp1 = sigm(np.dot(h0, self.w.T) + self.vbias)
            v1 = 1 * (vp1 > 0.5)
            vs.append(v1)

            v0 = v1.copy()

        return vs, hs
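
These snippets call a module-level sigm helper and a random source rng that are not shown on this page, and they assume numpy is imported as np. A minimal sketch of plausible stand-ins (assumed, not taken from the original project):

import numpy as np

rng = np.random.default_rng(0)  # assumed stand-in for the module-level random source

def sigm(x):
    # assumed numerically stable logistic sigmoid
    x = np.asarray(x, dtype=float)
    out = np.empty_like(x)
    pos = x >= 0
    out[pos] = 1.0 / (1.0 + np.exp(-x[pos]))
    e = np.exp(x[~pos])
    out[~pos] = e / (1.0 + e)
    return out

With stand-ins like these in place, test(inp, k) binarizes the input, runs k Gibbs sweeps through the model's weights and biases, and returns the visited visible and hidden states.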
Example #2
    def step(self, inp):

        vp0 = sigm(inp)

        v0 = 1 * (vp0 > 0.5)

        ## Gibbs sampling: v0 -> h0 -> v1 -> hp1

        # forward
        hp0 = sigm(np.dot(v0, self.w) + self.hbias)
        h0 = rng.binomial(1, hp0)

        # backward
        vp1 = sigm(np.dot(h0, self.w.T) + self.vbias)
        v1 = rng.binomial(1, vp1)

        # forward
        hp1 = sigm(np.dot(v1, self.w) + self.hbias)
        h1 = rng.binomial(1, hp1)

        # learn: CD-1 parameter updates
        self.w += self.eta * (np.matmul(v0.T, hp0) - np.matmul(v1.T, hp1))
        self.vbias += self.eta * np.mean(v0 - v1, 0)
        self.hbias += self.eta * np.mean(hp0 - hp1, 0)

        # error
        return np.mean((v1 - v0)**2)
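
The three updates in step are the standard one-step contrastive divergence (CD-1) rule for what appears to be a restricted Boltzmann machine. In the snippet's notation, with the bar denoting a mean over the batch:

\Delta W = \eta \left( v_0^{\top} \hat{h}_0 - v_1^{\top} \hat{h}_1 \right), \qquad
\Delta b_v = \eta \, \overline{v_0 - v_1}, \qquad
\Delta b_h = \eta \, \overline{\hat{h}_0 - \hat{h}_1}

Here \hat{h}_0 = hp0 and \hat{h}_1 = hp1 are the hidden activation probabilities and v_1 is the sampled reconstruction. Note that, as in the code, the weight update sums over the batch while the bias updates average over it; using the probabilities rather than sampled hidden states in the outer products is a common variance-reduction choice.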
Example #3
    def calc_grad(self, w_G, xbias_NG, y_N):
        ''' Compute gradient of total loss for training logistic regression.

        Args
        ----
        w_G : 1D array, size G (G = n_features_including_bias)
            Combined vector of weights and bias
        xbias_NG : 2D array, size N x G (n_examples x n_features_including_bias)
            Input features, with last column of all ones
        y_N : 1D array, size N
            Binary labels for each example (either 0 or 1)

        Returns
        -------
        grad_wrt_w_G : 1D array, size G
            Entry g contains derivative of loss with respect to w_G[g]
        '''
        N = float(y_N.size)
        denom = N * np.log(2)

        # gradient of the L2 penalty term
        grad_L2_wrt_w_G = self.alpha * w_G
        # gradient of the log loss term, computed as (y - p)^T X so that
        # subtracting it below yields X^T (p - y)
        p = sigm(np.dot(xbias_NG, w_G))
        p = np.asarray(p).reshape(-1)
        grad_logloss_wrt_w_G = np.dot(y_N - p, xbias_NG)
        grad_logloss_wrt_w_G = np.asarray(grad_logloss_wrt_w_G).reshape(-1)
        return (grad_L2_wrt_w_G - grad_logloss_wrt_w_G) / denom
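
A quick way to sanity-check this gradient is a central finite-difference comparison. The loss below is inferred from the code (base-2 log loss averaged over examples, plus an L2 penalty of 0.5 * alpha * ||w||^2 that also covers the bias entry); it is an assumption, not shown on this page:

import numpy as np

def sigm(x):
    return 1.0 / (1.0 + np.exp(-np.clip(x, -30.0, 30.0)))  # clipped for stability

def loss(w_G, xbias_NG, y_N, alpha):
    # assumed loss matching calc_grad: (L2 penalty + cross-entropy) / (N * log 2)
    p = sigm(xbias_NG @ w_G)
    ce = -(y_N * np.log(p) + (1 - y_N) * np.log(1 - p)).sum()
    return (0.5 * alpha * w_G @ w_G + ce) / (y_N.size * np.log(2))

rng = np.random.default_rng(0)
X = np.column_stack([rng.normal(size=(5, 2)), np.ones(5)])  # last column all ones
y = rng.integers(0, 2, size=5).astype(float)
w = rng.normal(size=3)
alpha = 0.1

# same expression calc_grad returns: (alpha*w + X^T (p - y)) / (N * log 2)
analytic = (alpha * w + X.T @ (sigm(X @ w) - y)) / (y.size * np.log(2))
eps = 1e-6
numeric = np.array([(loss(w + eps * e, X, y, alpha) - loss(w - eps * e, X, y, alpha)) / (2 * eps)
                    for e in np.eye(3)])
print(np.allclose(analytic, numeric))  # expected: True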
Example #4
    def predict_proba(self, x_NF):
        ''' Produce soft probabilistic predictions for provided input features

        Args
        ----
        x_NF : 2D array, size N x F (n_examples x n_features_excluding_bias)
            Input features (one row per example).

        Returns
        -------
        yproba_N2 : 2D array, size N x 2
            First column gives probability of zero label (negative)
            Second column gives probability of one label (positive)
            Each entry is a non-negative probability value within (0.0, 1.0)
            Each row sums to one
        '''
        # split the combined parameter vector into feature weights and bias
        wx = self.w_G[:-1]
        b = self.w_G[-1]

        # positive-class probability via a numerically stable logistic sigmoid,
        # then its complement for the negative class
        yproba1_N1 = sigm(np.dot(x_NF, wx) + b)
        yproba0_N1 = 1.0 - yproba1_N1
        yproba_N2 = np.column_stack([yproba0_N1, yproba1_N1])
        return yproba_N2
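
A quick self-contained check of the same probability computation outside the class (the weights and inputs below are made up for illustration):

import numpy as np

def sigm(x):
    return 1.0 / (1.0 + np.exp(-np.clip(x, -30.0, 30.0)))  # clipped for stability

w_G = np.array([0.8, -0.4, 0.1])           # two feature weights plus a bias term
x_NF = np.array([[1.0, 2.0],
                 [-3.0, 0.5]])

wx, b = w_G[:-1], w_G[-1]
yproba1_N = sigm(np.dot(x_NF, wx) + b)     # P(y = 1) for each example
yproba_N2 = np.column_stack([1.0 - yproba1_N, yproba1_N])
print(yproba_N2)
print(yproba_N2.sum(axis=1))               # each row sums to 1.0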