Example #1
    def _initialize_theta(self):
        filter_shape = self.filter_shape
        image_shape = self.image_shape
        poolsize = self.poolsize

        # Fan-in/fan-out of the convolution stage, used to scale the
        # random initialization.
        conv_in = np.prod(filter_shape[1:])
        conv_out = filter_shape[0] * np.prod(filter_shape[2:])
        pool_out = conv_out // poolsize**2

        # Feature-map size after a 'valid' convolution, then after
        # non-overlapping poolsize x poolsize downsampling.
        conv_map_size = image_shape[-1] - filter_shape[-1] + 1
        assert conv_map_size > 0
        pool_map_size = conv_map_size // poolsize
        assert pool_map_size > 0

        # Filter weights and biases, stored as Theano shared variables.
        self.conv_w = shared(
            nn_random_paramters(conv_in, conv_out, shape=filter_shape))
        self.conv_b = shared(
            nn_random_paramters(conv_in, conv_out,
                                shape=(filter_shape[0], 1, 1)))
        self.pool_w = shared(
            nn_random_paramters(conv_out, pool_out,
                                shape=(filter_shape[0], 1, 1)))
        self.pool_b = shared(
            nn_random_paramters(conv_out, pool_out,
                                shape=(filter_shape[0], 1, 1)))
        self.output_shape = (image_shape[0], filter_shape[0],
                             pool_map_size, pool_map_size)

        return [self.conv_w, self.conv_b, self.pool_w, self.pool_b]
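
As a quick check of the shape arithmetic above, here is a minimal sketch with assumed values (a 28x28 single-channel input, 20 filters of 5x5, 2x2 pooling; none of these numbers come from the snippet itself):

    image_shape = (None, 1, 28, 28)   # assumed input shape
    filter_shape = (20, 1, 5, 5)      # assumed filter shape
    poolsize = 2                      # assumed pooling factor

    conv_map_size = image_shape[-1] - filter_shape[-1] + 1  # 28 - 5 + 1 = 24
    pool_map_size = conv_map_size // poolsize               # 24 // 2 = 12
    output_shape = (image_shape[0], filter_shape[0],
                    pool_map_size, pool_map_size)           # (None, 20, 12, 12)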
Example #2
    def __init__(self, features, values):
        assert isinstance(features, np.ndarray) and features.ndim >= 1
        assert isinstance(values, np.ndarray)
        assert features.shape[0] == values.shape[0]

        self.size = values.shape[0]
        # Keep the data in Theano shared variables (so it can live on the
        # GPU) and expose symbolic views cast back to the original dtypes.
        origin_X = shared(features)
        origin_y = shared(values.flatten())
        self.origin = (origin_X, origin_y)
        self.X = T.cast(origin_X, features.dtype.name)
        self.y = T.cast(origin_y, values.dtype.name)
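
A minimal usage sketch, assuming the snippet belongs to a dataset wrapper class (the name Dataset below is hypothetical, as are the array shapes):

    import numpy as np

    features = np.random.rand(100, 5)                # 100 rows, 5 features
    values = np.random.randint(0, 2, size=(100, 1))  # 100 labels
    data = Dataset(features, values)                 # Dataset: assumed name
    # data.X and data.y are symbolic Theano views over the shared storage.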
Example #3
    def __init__(self, input_size, hidden_layers_size, output_size,
                 output_layer=LogisticRegression, lamda=0):
        """
        Parameters:
        ------
        inpute_size: int
            The number of input layer units.

        hidden_layers_size: list
            A list of numbers of units in each hidden layer.

        output_size: int
            The number of output layer units.

        output_layer: object

        lamda: float
            Parameter used for regularization. Set 0 to disable regularize.
        """
        assert isinstance(hidden_layers_size, list) and \
            len(hidden_layers_size) > 0

        self.lamda = shared(lamda, name='lamda')

        # Splice the hidden layer sizes between the input and output sizes,
        # e.g. 784, [256, 64], 10 -> [784, 256, 64, 10].
        self.layers_size = [input_size, output_size]
        self.layers_size[1:-1] = hidden_layers_size
        self.hidden_layers = [
            HiddenLayer(self.layers_size[i-1], self.layers_size[i],
                        self.lamda, activation=tanh)
            for i in range(1, len(self.layers_size)-1)
        ]
        self.output_layer = output_layer(
            self.layers_size[-2], self.layers_size[-1], self.lamda.get_value())
        self.layers = self.hidden_layers + [self.output_layer]
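
A minimal usage sketch of this constructor (the layer sizes are illustrative; the class name MLP is taken from Example #4, which calls it):

    # A 784-input network with two hidden tanh layers and a 10-way output;
    # lamda=0.1 enables regularization.
    net = MLP(784, [256, 64], 10, lamda=0.1)
    # net.layers_size is [784, 256, 64, 10]: two HiddenLayers followed by
    # one LogisticRegression output layer.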
Example #4
    def __init__(self, input_size, conv_layers, hidden_layers, output_size,
                 output_layer=LogisticRegression, conv_filter_size=5,
                 conv_pool_size=2, lamda=0):
        """
        Parameters:
        ------
        inpute_size: int
            The width of input image.(assume that input image is square)

        conv_layers: list
            A list of numbers of feature maps in each convlution layer.

        hidden_layers: list
            A list of numbers of units in each hidden layer
            of fully connected mlp.

        output_size: int
            The number of output layer units.

        output_layer: object

        conv_filter_size: int
            The convlution factor.

        conv_pool_size: int
            The downsampling (pooling) factor.

        lamda: float
            Parameter used for regularization. Set 0 to disable regularize.
        """
        self.lamda = shared(lamda, name='lamda')
        image_shape = (None, 1, input_size, input_size)
        self.conv_layers = []
        for c in conv_layers:
            # Each stage convolves c square filters over the previous
            # stage's feature maps, then pools; its output shape becomes
            # the next stage's input shape.
            filter_shape = (
                c, image_shape[1], conv_filter_size, conv_filter_size
            )
            cp = LeNetConvPoolLayer(
                filter_shape, image_shape, lamda, conv_pool_size)
            image_shape = cp.output_shape
            self.conv_layers.append(cp)

        self.mlp = MLP(np.prod(image_shape[1:]), hidden_layers, output_size,
                       output_layer=output_layer, lamda=self.lamda.get_value())
        self.layers = self.conv_layers + [self.mlp]
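
A minimal usage sketch under assumed names and sizes (the class name CNN is a guess; the snippet does not show it):

    # 28x28 inputs, two conv-pool stages with 20 and 50 feature maps,
    # one 500-unit hidden layer, 10 output classes.
    cnn = CNN(28, [20, 50], [500], 10, lamda=0.1)
    # With the default 5x5 filters and 2x2 pooling: 28 -> 24 -> 12 after
    # stage one, 12 -> 8 -> 4 after stage two, so the MLP input is
    # 50 * 4 * 4 = 800 units.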
Example #5
    def __init__(self, n_in, n_out, lamda):
        """
        n_in: int
            Number of input units, the dimension of the space
            in which the input lie.

        n_out: int
            Number of output units, the dimension of the space
            in which the output lie.

        lamda: theano like
            Parameter used for regularization. Set 0 to disable regularize.
        """
        self.n_in = n_in
        self.n_out = n_out
        self.lamda = lamda

        # All parameters live in one matrix: row 0 holds the bias,
        # the remaining n_in rows hold the weights.
        self._theta = shared(self._initialize_theta())
        self.w = self._theta[1:, :]
        self.b = self._theta[0, :]
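
A short sketch of the parameter layout this slicing assumes (the shapes are illustrative):

    import numpy as np

    n_in, n_out = 3, 2
    theta = np.zeros((n_in + 1, n_out))  # one matrix packs bias and weights
    b = theta[0, :]    # bias row, shape (2,)
    w = theta[1:, :]   # weight rows, shape (3, 2)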
Example #6
    def __init__(self, features, labels, lamda=0):
        lamda = shared(lamda, name='lamda')
        super(LogisticRegression, self).__init__(features, labels, lamda)
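
A minimal usage sketch (the data shapes are assumed):

    import numpy as np

    features = np.random.rand(100, 5)              # 100 samples, 5 features
    labels = np.random.randint(0, 2, size=(100,))  # binary labels
    clf = LogisticRegression(features, labels)     # lamda defaults to 0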