Code example #1
    def propagate(self, clamps=('input', 'transformation')):
        """Spreads activation through a network"""
        k = self.config.sigmoid_smoothing

        # First propagate forward from input to hidden layer
        h_input = self.x @ self.w_xh

        # Then propagate forward from transformation to hidden layer
        h_input += self.t @ self.w_th

        # Then propagate backward from output to hidden layer
        h_input += self.o @ self.w_ho.T

        # Then propagate backward from ca(t2) to hidden layer
        h_input += self.z @ self.w_hz.T

        # And add biases
        h_input += self.b_h

        # I thought it was wrong to update the hidden layer's activations here
        # (rather than at the end of this routine), since doing so affects the
        # calculations that follow, and the forward and backward passes then no
        # longer happen simultaneously. But now I believe it is correct: the new
        # activations form the basis of the 'reconstructions' (Restricted
        # Boltzmann Machine terminology), the network's attempt to reconstruct
        # the inputs from the hidden layer.
        self.h = sigmoid(h_input, k)

        # if input is free, propagate from hidden layer to input
        if 'input' not in clamps:
            # Propagate from the hidden layer to the input layer
            x_input = self.h @ self.w_xh.T
            # Add bias
            x_input += self.b_x
            self.x = sigmoid(x_input, k)

        # if transformation is free, propagate from hidden layer to transformation input
        if 'transformation' not in clamps:
            # Propagate from the hidden layer to the transformation layer
            t_input = self.h @ self.w_th.T
            # Add bias
            t_input += self.b_t
            self.t = sigmoid(t_input, k)

        # if output is free, propagate from hidden layer to output
        if 'output' not in clamps:
            # Propagate from the hidden layer to the output layer
            o_input = self.h @ self.w_ho
            # Add bias
            o_input += self.b_o
            self.o = sigmoid(o_input, k)

        # if output transformation is free, propagate from hidden layer to output
        if 'output_transformation' not in clamps:
            # Propagate from the hidden layer to the output transformation layer
            z_input = self.h @ self.w_hz
            # Add bias
            z_input += self.b_z
            self.z = sigmoid(z_input, k)

        # The Smolensky propagation described at
        # http://www.scholarpedia.org/article/Boltzmann_machine#Restricted_Boltzmann_machines
        # repeats the update of the hidden layer
        if self.config.smolensky_propagation:
            # First propagate forward from input to hidden layer
            h_input = self.x @ self.w_xh

            # Then propagate forward from transformation to hidden layer
            h_input += self.t @ self.w_th

            # Then propagate backward from output to hidden layer
            h_input += self.o @ self.w_ho.T

            # Then propagate backward from ca(t2) to hidden layer
            h_input += self.z @ self.w_hz.T

            # And add biases
            h_input += self.b_h

            self.h = sigmoid(h_input, k)
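
Both propagate methods call sigmoid(h_input, k) with a smoothing parameter k read from self.config.sigmoid_smoothing, but the helper itself never appears in these listings. Below is a minimal sketch of a plausible definition, assuming k acts as a gain that scales the net input before the logistic squashing; the author's actual implementation may differ.

import numpy as np

def sigmoid(x, k=1.0):
    # Logistic function with gain/smoothing parameter k: k = 1 gives the
    # standard sigmoid, larger k sharpens it towards a step function and
    # smaller k flattens it. (Hypothetical form, inferred from the call sites.)
    return 1.0 / (1.0 + np.exp(-k * x))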
Code example #2
    def output(self):
        # Theano version of the output-layer pass (T is conventionally
        # theano.tensor): affine map from the hidden activations X, then
        # the sigmoid squashing.
        return sigmoid(T.dot(self.X, self.W_yh) + self.b_y)
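
Code example #2 is written against Theano, unlike the NumPy-style @ operator used in the other two examples; sigmoid here could be theano.tensor.nnet.sigmoid or the author's own helper. For comparison, a NumPy sketch of the same output-layer pass as a method body, assuming the sigmoid helper above and attributes with the same names:

import numpy as np

def output(self):
    # Same computation without Theano: affine map from the hidden
    # activations X to the output layer, then the logistic squashing.
    return sigmoid(np.dot(self.X, self.W_yh) + self.b_y)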
Code example #3
    def propagate_smolensky(self, clamps=('input', 'transformation')):
        """Spreads activation through a network"""
        k = self.config.sigmoid_smoothing

        # First propagate forward from input to hidden layer
        h_input = self.x @ self.w_xh

        # Then propagate forward from transformation to hidden layer
        h_input += self.t @ self.w_th

        # Then propagate backward from output to hidden layer
        h_input += self.o @ self.w_ho.T

        # And add biases
        h_input += self.b_h

        # I thought it was wrong to update the hidden layer's activations here
        # (rather than at the end of this routine), since doing so affects the
        # calculations that follow, and the forward and backward passes then no
        # longer happen simultaneously. But now I believe it is correct: the new
        # activations form the basis of the 'reconstructions' (Restricted
        # Boltzmann Machine terminology), the network's attempt to reconstruct
        # the inputs from the hidden layer.
        self.h = sigmoid(h_input, k)

        # If input is free, compute its net input (it is not applied yet;
        # all free layers are updated simultaneously further down)
        if 'input' not in clamps:
            # Collect input from the hidden, transformation and output layers
            x_input = self.h @ self.w_xh.T
            x_input += self.t @ self.w_xt.T
            x_input += self.o @ self.w_xo.T

            # Add bias
            x_input += self.b_x

        # If transformation is free, compute its net input
        if 'transformation' not in clamps:
            t_input = self.h @ self.w_th.T
            t_input += self.x @ self.w_xt
            t_input += self.o @ self.w_to.T

            # And add biases
            t_input += self.b_t

        # If output is free, compute its net input
        if 'output' not in clamps:
            # Collect input from the hidden, input and transformation layers
            o_input = self.h @ self.w_ho
            o_input += self.x @ self.w_xo
            o_input += self.t @ self.w_to

            # Add bias
            o_input += self.b_o

        # Now apply the new activations to every free layer at once, so the
        # updates are simultaneous with respect to the pre-update state
        if 'input' not in clamps:
            self.x = sigmoid(x_input, k)

        if 'transformation' not in clamps:
            self.t = sigmoid(t_input, k)

        if 'output' not in clamps:
            self.o = sigmoid(o_input, k)

        # Finally repeat the hidden-layer update (the Smolensky step):
        # propagate forward from input to hidden layer
        h_input = self.x @ self.w_xh

        # Then propagate forward from transformation to hidden layer
        h_input += self.t @ self.w_th

        # Then propagate backward from output to hidden layer
        h_input += self.o @ self.w_ho.T

        # And add biases
        h_input += self.b_h

        self.h = sigmoid(h_input, k)
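
Neither listing shows how propagate is driven, so here is a small, self-contained sketch of the clamping semantics on random weights. Every name, shape and value below is an assumption made for illustration; only the weight naming convention (w_xh, w_th, w_ho) and the clamps argument are taken from the code above.

import numpy as np

def sigmoid(x, k=1.0):
    return 1.0 / (1.0 + np.exp(-k * x))

class TinyNet:
    # Toy network with input (x), transformation (t), hidden (h) and
    # output (o) layers, mirroring the weight names used above.

    def __init__(self, n_x=4, n_t=3, n_h=5, n_o=4, seed=0):
        rng = np.random.default_rng(seed)
        self.w_xh = rng.normal(0.0, 0.1, (n_x, n_h))
        self.w_th = rng.normal(0.0, 0.1, (n_t, n_h))
        self.w_ho = rng.normal(0.0, 0.1, (n_h, n_o))
        self.b_h = np.zeros(n_h)
        self.b_o = np.zeros(n_o)
        self.x = rng.random(n_x)   # input layer
        self.t = rng.random(n_t)   # transformation layer
        self.o = np.zeros(n_o)     # output layer, initially blank

    def propagate(self, clamps=('input', 'transformation')):
        # The hidden layer always collects input from every visible layer.
        h_in = self.x @ self.w_xh + self.t @ self.w_th + self.o @ self.w_ho.T
        self.h = sigmoid(h_in + self.b_h)
        # Only layers NOT named in clamps ("free" layers) are overwritten.
        if 'output' not in clamps:
            self.o = sigmoid(self.h @ self.w_ho + self.b_o)

net = TinyNet()
net.propagate(clamps=('input', 'transformation'))  # x, t clamped; o settles
print(net.o.round(3))

Repeated calls with the output layer free implement the settling that the full code performs; clamping 'output' as well would leave every layer except the hidden one fixed.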