Example #1
def train_spectra(nu_max, modes, nhidden, learn_rate, W=None, bhid=None, bvis=None):
    import numpy as np
    import pandas as pd
    from dA_class import dA

    # load the simulated spectrum for this nu_max / mode combination
    pathname = "/home/rakesh/Data/Spec_numax_%d_modes_%d.csv" % (nu_max, modes)
    df = pd.read_csv(pathname)

    # clean signal: the l = 0, 1, 2 mode profiles shaped by the Gaussian
    # envelope, normalized to unit maximum
    sig = np.asarray((df.l0 + df.l1 + df.l2) * df.GaussProf)
    sig /= np.max(sig)

    # noisy signal, scaled by the same factor as the clean signal so the
    # two stay on a common scale
    sig_noise = np.asarray((df.l0 + df.l1 + df.l2) * df.GaussProf + df.noise)
    sig_noise /= np.max(sig)
    randnum = np.random.RandomState(123)

    # build the denoising autoencoder, passing pre-trained parameters
    # through only when the caller supplied them
    if W is None and bhid is None and bvis is None:
        enc = dA(numpy_rng=randnum, input=sig, n_visible=sig.size, n_hidden=nhidden)
    else:
        enc = dA(numpy_rng=randnum, input=sig, n_visible=sig.size, n_hidden=nhidden,
                 W=W, bhid=bhid, bvis=bvis)

    # symbolic cost and parameter updates for one training step
    cost, update = enc.get_cost_updates(sig_noise, learning_rate=learn_rate)

    return cost, update
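A minimal sketch of how the returned symbolic pair might be compiled and run, assuming dA.get_cost_updates follows the usual Theano-tutorial interface (the argument values below are illustrative, and nu_max/modes must match an existing CSV file):

import theano

# hypothetical call; all numeric values are placeholders
cost, updates = train_spectra(nu_max=100, modes=5, nhidden=500, learn_rate=0.1)

# the signal arrays are baked into the graph as constants, so the
# compiled training step takes no inputs
train_step = theano.function(inputs=[], outputs=cost, updates=updates)
for epoch in range(15):
    print("epoch %d, cost %f" % (epoch, train_step()))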
Example #2
    def __init__(
        self,
        input,
        numpy_rng,
        theano_rng=None,
        n_ins=784,
        hidden_layers_sizes=[500, 500],
        n_outs=10,
        W=None,
        bhid=None,
        bvis=None,
    ):
        """ This class is made to support a variable number of layers.

        :type numpy_rng: numpy.random.RandomState
        :param numpy_rng: numpy random number generator used to draw initial
                    weights

        :type theano_rng: theano.tensor.shared_randomstreams.RandomStreams
        :param theano_rng: Theano random generator; if None is given one is
                           generated based on a seed drawn from `rng`

        :type n_ins: int
        :param n_ins: dimension of the input to the sdA

        :type n_layers_sizes: list of ints
        :param n_layers_sizes: intermediate layers size, must contain
                               at least one value

        :type n_outs: int
        :param n_outs: dimension of the output of the network

"""

        self.sigmoid_layers = []
        self.dA_layers = []
        self.params = []
        self.n_layers = len(hidden_layers_sizes)
        self.x = input
        self.bvis = bvis

        assert self.n_layers > 0

        # build a Theano RNG only when the caller did not supply one
        if theano_rng is None:
            theano_rng = RandomStreams(numpy_rng.randint(2 ** 30))
        for i in range(self.n_layers):
            # construct the sigmoidal layer

            # the size of the input is either the number of hidden units of
            # the layer below or the input size if we are on the first layer
            if i == 0:
                input_size = n_ins
            else:
                input_size = hidden_layers_sizes[i - 1]

            # the input to this layer is either the activation of the hidden
            # layer below or the input of the SdA if you are on the first
            # layer
            if i == 0:
                layer_input = self.x
            else:
                layer_input = self.sigmoid_layers[-1].output
	
            if W is None and bvis is None and bhid is None:
                sigmoid_layer = HiddenLayer(rng=numpy_rng,
                                            input=layer_input,
                                            n_in=input_size,
                                            n_out=hidden_layers_sizes[i],
                                            activation=T.nnet.sigmoid)
            else:
                sigmoid_layer = HiddenLayer(rng=numpy_rng,
                                            input=layer_input,
                                            n_in=input_size,
                                            n_out=hidden_layers_sizes[i],
                                            W=W[i],
                                            b=bhid[i],
                                            activation=T.nnet.sigmoid)
            # add the layer to our list of layers
            self.sigmoid_layers.append(sigmoid_layer)
            # we are going to only declare that the parameters of the
            # sigmoid_layers are parameters of the StackedDAA
            # the visible biases in the dA are parameters of those
            # dA, but not the SdA
            self.params.extend(sigmoid_layer.params)

            # Construct a denoising autoencoder that shares weights with this
            # layer
            if bvis is None:
                dA_layer = dA(numpy_rng=numpy_rng,
                              input=layer_input,
                              n_visible=input_size,
                              n_hidden=hidden_layers_sizes[i],
                              W=sigmoid_layer.W,
                              bhid=sigmoid_layer.b,
                              bvis=None)
            else:
                dA_layer = dA(numpy_rng=numpy_rng,
                              input=layer_input,
                              n_visible=input_size,
                              n_hidden=hidden_layers_sizes[i],
                              W=sigmoid_layer.W,
                              bhid=sigmoid_layer.b,
                              bvis=self.bvis[i])

            self.dA_layers.append(dA_layer)
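A minimal construction sketch; SdA is assumed to be the class this __init__ belongs to, and HiddenLayer, dA, RandomStreams, and T are assumed to come from the usual Theano deep-learning-tutorial modules. The layer sizes are illustrative:

import numpy
import theano.tensor as T

# hypothetical usage; sizes are placeholders
x = T.matrix('x')  # symbolic minibatch, one example per row
rng = numpy.random.RandomState(123)

sda = SdA(input=x,
          numpy_rng=rng,
          n_ins=441,
          hidden_layers_sizes=[1000, 500],
          n_outs=10)

print("stacked %d dA layers" % len(sda.dA_layers))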
Example #3
import os
import sys
import timeit

import numpy
import theano.tensor as T
from theano import function

from dA_class import dA


def test_dA(learning_rate, training_epochs,
            sig, sig_noise, chunks, batch_size=20, n_ins=441, n_hidden=1000):

    """
    Train a denoising autoencoder on pairs of clean and noisy signals.

    :type learning_rate: float
    :param learning_rate: learning rate used for training the denoising
                          autoencoder

    :type training_epochs: int
    :param training_epochs: number of epochs used for training

    :type sig: theano.shared
    :param sig: clean signals, one training example per row

    :type sig_noise: theano.shared
    :param sig_noise: noisy versions of the same signals

    :type chunks: int
    :param chunks: number of training examples to draw minibatches from
    """
    pulsations = sig
    observations = sig_noise

    # compute the number of minibatches available in the training set
    n_train_batches = pulsations.get_value(borrow=True).shape[0] // batch_size
    print("Size of batch is %d" % batch_size)
    # allocate symbolic variables for the data
    index = T.lscalar()           # index to a [mini]batch
    x = T.matrix('pulsations')    # clean signals
    y = T.matrix('Observations')  # noisy signals

    ####################################
    # BUILDING THE MODEL NO CORRUPTION #
    ####################################

    rng = numpy.random.RandomState(123)

    da = dA(
        numpy_rng=rng,
        input=x,
        n_visible=n_ins,
        n_hidden=n_hidden
    )
    cost, updates = da.get_cost_updates(corrupted_input=y, learning_rate=learning_rate)

    # compile one training step: given a batch index, feed the matching
    # slices of clean and noisy signals and return the cost
    train_da = function(
        inputs=[index],
        outputs=[cost],
        updates=updates,
        givens={
            y: observations[index * batch_size: (index + 1) * batch_size, :],
            x: pulsations[index * batch_size: (index + 1) * batch_size, :]
        }
    )

    start_time = timeit.default_timer()

    ############
    # TRAINING #
    ############
    # override the batch count with the caller-supplied number of examples
    n_train_batches = chunks

    # go through training epochs
    cos = []
    for epoch in range(training_epochs):
        st_epoch = timeit.default_timer()

        # go through the training set, one minibatch at a time
        c = []
        for batch_index in range(int(n_train_batches / batch_size)):
            c.append(train_da(batch_index))

        cos.append(numpy.mean(c))
        en_epoch = timeit.default_timer()

        print('Training epoch %d in %3.3f s' % (epoch, (en_epoch - st_epoch)))

    end_time = timeit.default_timer()

    training_time = (end_time - start_time)

    print('The no corruption code for file ' +
          os.path.split(__file__)[1] +
          ' ran for %3.2f s' % training_time, file=sys.stderr)

    return cos, updates
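A minimal driver sketch, assuming sig and sig_noise are wrapped as Theano shared variables (the function calls .get_value() on them). The data here is purely synthetic and the shapes are illustrative:

import numpy
import theano

# hypothetical data: 200 examples of length-441 signals
clean = numpy.random.rand(200, 441).astype(theano.config.floatX)
noisy = (clean + 0.1 * numpy.random.randn(200, 441)).astype(theano.config.floatX)

sig = theano.shared(clean, borrow=True)
sig_noise = theano.shared(noisy, borrow=True)

# chunks=200 means 200 examples, i.e. 10 minibatches of 20 per epoch
costs, updates = test_dA(learning_rate=0.1, training_epochs=15,
                         sig=sig, sig_noise=sig_noise, chunks=200)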