Ejemplo n.º 1
0
    def __init__(self,
                 input,
                 input_shape,
                 batch_size,
                 rng,
                 n_out,
                 activation=T.tanh):
        """Softmax output layer: zero-initialized affine map + softmax.

        input: symbolic input minibatch
        input_shape: tuple whose first entry is the input dimensionality
        batch_size: number of examples per batch
        rng: random number generator (stored, unused here)
        n_out: number of output classes
        activation: unused by this layer (output is always softmax)
        """
        Layer.__init__(self)

        # bookkeeping shared by all layers
        self.rng = rng
        self.input = input
        self.batch_size = batch_size

        # zero-initialized weight matrix and bias vector
        n_in = input_shape[0]
        self.W = init.init_zero((n_in, n_out))
        self.b = init.init_zero((n_out, ))

        # class probabilities and the hard (argmax) prediction
        self.output = T.nnet.softmax(T.dot(input, self.W) + self.b)
        self.y_pred = T.argmax(self.output, axis=1)
        self.params = [self.W, self.b]
        self.output_shape = (n_out, )
Ejemplo n.º 2
0
    def __init__(self,
                 input,
                 input_shape,
                 batch_size,
                 rng,
                 n_filters,
                 filter=(2, 2),
                 activation=T.tanh,
                 poolsize=(2, 2)):
        """Convolutional layer: 'valid' 2D convolution followed by activation.

        input: the 4D input tensor
        input_shape: (channels, dim_x, dim_y); a 2-tuple is promoted to a
            single-channel 3-tuple
        batch_size: number of examples per batch
        n_filters: number of output feature maps
        filter: spatial shape of each filter
        poolsize: pool shape, used only to scale the weight initialization
        """
        Layer.__init__(self)

        # bookkeeping shared by all layers
        self.rng = rng
        self.input = input
        self.batch_size = batch_size

        # promote a 2D (dim_x, dim_y) description to single-channel 3D
        if len(input_shape) == 2:
            input_shape = (1, ) + input_shape
        self.input_shape = input_shape

        # a 'valid' convolution shrinks each spatial dim by filter_dim - 1
        self.output_shape = (n_filters,
                             input_shape[1] + 1 - filter[0],
                             input_shape[2] + 1 - filter[1])
        self.filter_shape = (n_filters, ) + filter

        # conv2d wants (n_filters, n_channels_in, filter_dimx, filter_dimy)
        conv_filter_shape = (n_filters, input_shape[0]) + filter
        # ... and (batch_size, n_channels_in, img_dim_x, img_dim_y)
        conv_image_shape = (batch_size, input_shape[0]) + input_shape[1:]

        # random filters; one zero bias per output feature map
        self.W = init.init_conv(rng, conv_filter_shape, poolsize)
        self.b = init.init_zero((self.filter_shape[0], ))

        conv_out = conv.conv2d(
            input=input,
            filters=self.W,
            filter_shape=conv_filter_shape,
            image_shape=conv_image_shape)  # !!! OPTIMIZATable

        # broadcast the bias across batch and both spatial dimensions
        self.output = activation(conv_out +
                                 self.b.dimshuffle('x', 0, 'x', 'x'))

        self.params = [self.W, self.b]
Ejemplo n.º 3
0
    def __init__(self,
                 input,
                 input_shape,
                 batch_size,
                 rng,
                 n_out,
                 activation=T.tanh):
        """Fully-connected hidden layer: activation(input . W + b).

        input: symbolic input minibatch
        input_shape: shape of one input example (flattened to get fan-in)
        batch_size: number of examples per batch
        rng: random number generator for the weight initialization
        n_out: number of output units
        activation: elementwise nonlinearity applied to the affine output
        """
        Layer.__init__(self)

        # bookkeeping shared by all layers
        self.rng = rng
        self.input = input
        self.batch_size = batch_size

        self.input_shape = input_shape
        self.output_shape = (n_out, )

        # flatten the per-example shape into a single fan-in size
        n_in = np.prod(input_shape)
        self.W = init.init_standard(rng, (n_in, n_out))
        # conventional 4x weight scaling when the nonlinearity is a sigmoid
        if activation == theano.tensor.nnet.sigmoid:
            self.W.set_value(self.W.get_value() * 4)
        self.b = init.init_zero((n_out, ))

        # affine transform followed by the nonlinearity
        self.output = activation(T.dot(input, self.W) + self.b)

        self.params = [self.W, self.b]
Ejemplo n.º 4
0
    def __init__(self,  input, input_shape, batch_size, rng, n_out, activation=T.tanh, dim=10):
        """Low-rank fully-connected layer.

        The weight matrix is factorized as W = W_l . W_r with
        W_l: (n_in, dim) and W_r: (dim, n_out); only the two factors
        (plus the bias) are trained.

        input: symbolic input minibatch
        input_shape: shape of one input example (flattened to get fan-in)
        batch_size: number of examples per batch
        rng: random number generator for the weight initialization
        n_out: number of output units
        activation: elementwise nonlinearity applied to the affine output
        dim: rank of the factorization
        """
        # initialize with Layer parent
        Layer.__init__(self)

        # standard stuff
        self.rng = rng
        self.input = input
        self.batch_size = batch_size

        # set shapes
        self.input_shape = input_shape
        self.output_shape = (n_out,)

        # define the model parameters: the two low-rank factors
        n_in = np.prod(input_shape)
        self.W_l = init.init_standard(rng, (n_in, dim))
        self.W_r = init.init_standard(rng, (dim, n_out))
        if activation == theano.tensor.nnet.sigmoid:
            # FIX: the factors were referenced as bare `W_l`/`W_r`, which are
            # not bound locally -- that raised NameError whenever a sigmoid
            # activation was used. Operate on the bound attributes instead
            # (matching the non-factorized hidden layer's handling).
            self.W_l.set_value(self.W_l.get_value() * 4)
            self.W_r.set_value(self.W_r.get_value() * 4)
        b = init.init_zero((n_out,))
        # effective weight matrix is the product of the factors
        self.W = T.dot(self.W_l, self.W_r)
        self.b = b

        # define output
        lin_output = T.dot(input, self.W) + self.b
        self.output = activation(lin_output)

        # only the factors and the bias are trainable
        self.params = [self.W_l, self.W_r, self.b]
Ejemplo n.º 5
0
    def __init__(self, input, input_shape, batch_size, rng, n_out, activation=T.tanh):
        """Softmax output layer with zero-initialized parameters.

        input: symbolic input minibatch
        input_shape: tuple whose first entry is the input dimensionality
        batch_size: number of examples per batch
        rng: random number generator (stored, unused here)
        n_out: number of output classes
        activation: unused by this layer (output is always softmax)
        """
        Layer.__init__(self)

        # bookkeeping shared by all layers
        self.rng = rng
        self.input = input
        self.batch_size = batch_size

        # zero-initialized affine parameters
        n_in = input_shape[0]
        self.W = init.init_zero((n_in, n_out))
        self.b = init.init_zero((n_out, ))

        # class probabilities and the hard (argmax) prediction
        self.output = T.nnet.softmax(T.dot(input, self.W) + self.b)
        self.y_pred = T.argmax(self.output, axis=1)
        self.params = [self.W, self.b]
        self.output_shape = (n_out, )
Ejemplo n.º 6
0
    def __init__(self, input, input_shape, batch_size, rng, n_filters, filter=(2,2), activation=T.tanh, poolsize=(2,2)):
        """Convolutional layer: 'valid' 2D convolution plus activation.

        input: the 4D input tensor
        input_shape: (channels, dim_x, dim_y); a 2-tuple is promoted to a
            single-channel 3-tuple
        batch_size: number of examples per batch
        n_filters: number of output feature maps
        filter: spatial shape of each filter
        poolsize: pool shape, used only to scale the weight initialization
        """
        Layer.__init__(self)

        # bookkeeping shared by all layers
        self.rng = rng
        self.input = input
        self.batch_size = batch_size

        # promote a 2D (dim_x, dim_y) description to single-channel 3D
        if len(input_shape) == 2:
            input_shape = (1,) + input_shape
        self.input_shape = input_shape

        # 'valid' convolution shrinks each spatial dim by filter_dim - 1
        out_x = input_shape[1] + 1 - filter[0]
        out_y = input_shape[2] + 1 - filter[1]
        self.output_shape = (n_filters, out_x, out_y)
        self.filter_shape = (n_filters,) + filter

        # conv2d wants (n_filters, n_channels_in, filter_dimx, filter_dimy)
        conv_filter_shape = (n_filters, input_shape[0]) + filter
        # ... and (batch_size, n_channels_in, img_dim_x, img_dim_y)
        conv_image_shape = (batch_size, input_shape[0]) + input_shape[1:]

        # random filters; one zero bias per output feature map
        self.W = init.init_conv(rng, conv_filter_shape, poolsize)
        self.b = init.init_zero((self.filter_shape[0],))

        conv_out = conv.conv2d(input=input, filters=self.W,
                filter_shape=conv_filter_shape,
                image_shape=conv_image_shape) # !!! OPTIMIZATable

        # broadcast the bias across batch and both spatial dimensions
        self.output = activation(conv_out + self.b.dimshuffle('x', 0, 'x', 'x'))

        self.params = [self.W, self.b]
Ejemplo n.º 7
0
    def __init__(self, input, input_shape, batch_size, rng, prob=0.5):
        """Masking (dropout-style) layer: output = input * mask.

        The mask is populated by reset(); `prob` presumably controls the
        masking probability there -- confirm against reset()'s definition.
        Shape passes through unchanged and the layer has no trainable
        parameters.
        """
        Layer.__init__(self)
        # take part in the network's randomize-reset cycle
        self.options.append('randomize-reset')

        # pass-through shape bookkeeping
        self.input = input
        self.input_shape = input_shape
        self.output_shape = input_shape
        self.rng = rng
        self.prob = prob

        # allocate the mask storage, then fill it via reset()
        self.mask = init.init_zero(self.output_shape)
        self.reset()

        self.output = self.input * self.mask
        self.params = []
Ejemplo n.º 8
0
    def __init__(self, input, input_shape, batch_size, rng, scale=0.1):
        """Additive-noise layer: output = input + noise.

        The noise tensor is populated by reset(); `scale` presumably sets
        its magnitude there -- confirm against reset()'s definition. Shape
        passes through unchanged and the layer has no trainable parameters.
        """
        Layer.__init__(self)
        # take part in the network's randomize-reset cycle
        self.options.append('randomize-reset')

        # pass-through shape bookkeeping
        self.input = input
        self.input_shape = input_shape
        self.output_shape = input_shape
        self.rng = rng
        self.scale = scale

        # allocate the noise storage, then fill it via reset()
        self.noise = init.init_zero(self.output_shape)
        self.reset()

        self.output = self.input + self.noise
        self.params = []
Ejemplo n.º 9
0
    def __init__(self, input, input_shape, batch_size, rng, scale=0.1):
        """Layer that perturbs its input with additive noise.

        reset() is responsible for drawing the noise values; `scale`
        presumably controls their magnitude -- verify in reset(). The
        layer is shape-preserving and exposes no trainable parameters.
        """
        Layer.__init__(self)
        # register for the network-driven randomize-reset cycle
        self.options.append('randomize-reset')

        # input/output shapes are identical for this layer
        self.input = input
        self.input_shape = input_shape
        self.output_shape = input_shape
        self.rng = rng
        self.scale = scale

        # zero-allocate the noise buffer; reset() draws the actual values
        self.noise = init.init_zero(self.output_shape)
        self.reset()

        self.output = self.input + self.noise
        self.params = []
Ejemplo n.º 10
0
    def __init__(self, input, input_shape, batch_size, rng, prob=0.5):
        """Layer that multiplies its input by a mask (dropout-style).

        reset() is responsible for drawing the mask values; `prob`
        presumably controls the masking probability -- verify in reset().
        The layer is shape-preserving and exposes no trainable parameters.
        """
        Layer.__init__(self)
        # register for the network-driven randomize-reset cycle
        self.options.append('randomize-reset')

        # input/output shapes are identical for this layer
        self.input = input
        self.input_shape = input_shape
        self.output_shape = input_shape
        self.rng = rng
        self.prob = prob

        # zero-allocate the mask buffer; reset() draws the actual values
        self.mask = init.init_zero(self.output_shape)
        self.reset()

        self.output = self.input * self.mask
        self.params = []