class SingleLayerConvNN(object):
    """
    A single-layer convolutional neural network, attempting to recreate the
    Single Layer Model from [Gray10]:
    http://www.dbs.ifi.lmu.de/~yu_k/dgray_eccv2010final.pdf
    """
    def __init__(self, rng, x_data, batch_size, image_shape, filter_shape, 
                 pool_size):
        """
        Build the model

        type: numpy.random.RandomState
        parameter: rng: A random number generator from which the weights will be
                        initialised.
        
        type: theano.tensor
        parameter: x_data: The input data to learn from

        type: 4-tuple or 4 element list
        parameter: image_shape: (batch size, number of feature maps, image
                                height, image width)

        type: 4-tuple or 4 element list
        parameter: filter_shape: (number of filters, number of feature
                                  maps, filter height, filter width)

        type: 2-tuple or 2 element list
        parameter: pool_size: (maxpool height, maxpool width)
        """
        self.image_shape = image_shape
        # Reshape the flattened input into a 4D tensor of
        # (batch size, feature maps, height, width) for the convolution.
        self.layer_0_input = x_data.reshape(image_shape)
        self.layer_0 = ConvPoolLayer(rng=rng,
                                     x_data=self.layer_0_input,
                                     image_shape=image_shape,
                                     filter_shape=filter_shape,
                                     pool_size=pool_size)
        # Flatten each example's pooled feature maps into one row so they
        # can be fed to the regression layer.
        self.regression_input = self.layer_0.output.flatten(2)
        output_size = calculate_output_size(image_shape,
                                            filter_shape,
                                            pool_size)
        output_length = calculate_output_length(image_shape[2],
                                                filter_shape[2],
                                                pool_size[0])
        self.regression = TheanoLeastSquaresRegression(
            self.regression_input.T,
            output_size * filter_shape[0],
            output_length)
        # L1 and L2 regularisation terms over all learnable weights.
        self.l1 = abs(self.layer_0.W).sum() + abs(self.regression.theta).sum()
        self.l2 = (self.layer_0.W ** 2).sum() + (self.regression.theta ** 2).sum()
        self.params = self.layer_0.params + self.regression.params

    def to_xml(self, document, parent):
        net = document.createElement('net')
        net.setAttribute('name', 'single layer')
        net.setAttribute('creator', 'eldog')
        parent.appendChild(net)

        src_plane = document.createElement('plane')
        src_plane.setAttribute('id', 'src')
        src_plane.setAttribute('type', 'source')
        src_plane.setAttribute('featuremapsize', '%dx%d' 
                               % (self.image_shape[2], self.image_shape[3]))
        net.appendChild(src_plane)

        self.layer_0.to_xml(document, net)
        self.regression.to_xml(document, net)
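
# A minimal construction sketch (hedged): the batch size, image, filter and
# pooling shapes below are illustrative assumptions, not values taken from
# [Gray10]. Wrapped in a function so importing this module stays side-effect
# free.
def _example_build_single_layer_conv_nn():
    import numpy
    import theano.tensor as T
    rng = numpy.random.RandomState(1234)
    x = T.matrix('x')  # each row is one flattened 32x32 greyscale image
    return SingleLayerConvNN(rng, x,
                             batch_size=20,
                             image_shape=(20, 1, 32, 32),
                             filter_shape=(10, 1, 5, 5),
                             pool_size=(2, 2))
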
def eigface_sgd(data_file_name, n_eigs=100, learning_rate=1e-18,
                reg_lambda=0.1, display=False):
    train_data, test_data, image_names = old_load_images(data_file_name)
    eig_face = EigenFace.from_file(train_data[0], data_file_name, n_eigs)
    train_data[0] = eig_face.project_to_face_space(train_data[0])
    test_data[0] = eig_face.project_to_face_space(test_data[0])
    train_data[0] = get_face_space(data_file_name, 'train_x', train_data[0],
                                   eig_face)
    test_data[0] = get_face_space(data_file_name, 'test_x', test_data[0],
                                  eig_face)

    n_features, n_training_examples = train_data[0].shape
    #n_features += 1 # we're going to add the ones on
    n_test_examples = test_data[0].shape[1]
    #train_data[0] = prepend_ones(train_data[0])
    #test_data[0] = prepend_ones(test_data[0])

    train_data = to_theano_shared(train_data)
    test_data = to_theano_shared(test_data)

    x_train, y_train = train_data
    x_test, y_test = test_data

    x = T.matrix('x')
    y = T.vector('y')

    tlsr = TheanoLeastSquaresRegression(x, n_features, n_training_examples,
                                        reg_lambda=reg_lambda)
    cost = tlsr.cost(y)
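    # The cost is assumed to be TheanoLeastSquaresRegression's regularised
    # least-squares objective, roughly
    #     J(theta, b) = 1/(2m) * sum_i (theta.T x_i + b - y_i)^2
    #                   + reg_lambda * ||theta||^2
    # The exact form lives in that class; this comment is only for orientation.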
    test_model = theano.function([], outputs=cost,
                                 givens={x: x_test[:], y: y_test[:]})

    # Symbolic gradients of the cost with respect to each parameter.
    g_theta = T.grad(cost, tlsr.theta)
    g_bias = T.grad(cost, tlsr.bias)
    # Gradient descent update rule: p <- p - learning_rate * grad(cost, p)
    updates = [(tlsr.theta, tlsr.theta - learning_rate * g_theta),
               (tlsr.bias, tlsr.bias - learning_rate * g_bias)]
    train_model = theano.function([], outputs=cost, updates=updates,
                                  givens={x: x_train[:], y: y_train[:]})

    current_cost = train_model()
    logging.info('initial cost %f' % current_cost)
    old_cost = 0
    iterations = 0
    logging.info('beginning stochastic gradient descent')
    # Run until the improvement in cost between updates falls below tolerance.
    # Note that each call to train_model() substitutes the whole training set,
    # so each iteration is a full-batch gradient step.
    while abs(current_cost - old_cost) > 1e-6:
        old_cost = current_cost
        current_cost = train_model()
        if iterations % 1000 == 0:
            logging.info('iteration % 9d cost % 9f' % (iterations, current_cost))
        iterations += 1

    error = test_model()
    theta = tlsr.theta.get_value()
    bias = tlsr.bias.get_value()

    # Print the results
    logging.info('training cost minimised: %f' % current_cost)
    logging.info('test error: %f' % error)

    # Save our weights should we ever need them again
    theta_file_name = '%s.pickle' % append_timestamp_to_file_name('weights')
    logging.info('writing weights to %s' % theta_file_name)
    save_pickle((theta, bias), theta_file_name)
    y = y_test.get_value().tolist()
    y = [float(value) for value in y]
    plot_correlation(x_test.get_value(), y, image_names,
                     'linear regression', 'linear-regression')
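
# A minimal invocation sketch (hedged): 'faces.csv' and the logging setup are
# illustrative; substitute whatever data file old_load_images expects.
if __name__ == '__main__':
    logging.basicConfig(level=logging.INFO)
    eigface_sgd('faces.csv', n_eigs=100, reg_lambda=0.1)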