Exemplo n.º 1
0
def finetune_sa(train_data, train_masks, numbatches, n_epochs, pretrainedSA,
                **args):
    '''
        Fine-tunes a pretrained stacked autoencoder end-to-end.

        :param train_data: 2-D numpy array of training samples (rows = samples)
        :param train_masks: 2-D numpy array of target masks, row-aligned with
            train_data
        :param numbatches: number of minibatches the training set is split into
        :param n_epochs: number of training epochs
        :param pretrainedSA: pretrained stacked-autoencoder object exposing
            get_cost_updates() and symbolic inputs X / Y
        :param args: named arguments forwarded to get_cost_updates
            (e.g. learning rate)

        RETURNS: the fine-tuned stacked autoencoder (trained in place)
    '''
    finetunedSA = pretrainedSA

    traindim = train_data.shape
    # Floor division: true division would yield a float batch size and break
    # the integer slice indices used in the `givens` mapping below.
    batch_size = traindim[0] // numbatches

    index = T.lscalar()

    # Move the data into shared variables so minibatches can be sliced on
    # the compute device inside the compiled function.
    train_data = theano.shared(train_data)
    train_masks = theano.shared(train_masks)

    cost, updates = finetunedSA.get_cost_updates(**args)

    train_model = theano.function(
        inputs=[index],
        outputs=cost,
        updates=updates,
        givens={
            finetunedSA.X:
            train_data[index * batch_size:(index + 1) * batch_size],
            finetunedSA.Y:
            train_masks[index * batch_size:(index + 1) * batch_size]
        })

    HL.iterate_epochs(n_epochs, numbatches, train_model, finetunedSA)

    return finetunedSA
Exemplo n.º 2
0
def pretrain_sa(train_data, train_masks, numbatches, n_epochs, model_class,
                **args):
    '''
        Pretrains a stacked autoencoder layer by layer, then trains its
        logistic-regression output layer.

        :param train_data: 2-D numpy array of training samples (rows = samples)
        :param train_masks: 2-D numpy array of target masks, row-aligned with
            train_data
        :param numbatches: number of minibatches the training set is split into
        :param n_epochs: number of training epochs per layer
        :param model_class: stacked-autoencoder class to instantiate
        :param args: named arguments forwarded to each layer's
            get_cost_updates (e.g. learning rate)

        RETURNS: the pretrained stacked-autoencoder instance
    '''

    X = T.matrix('X')
    Y = T.matrix('Y')
    index = T.lscalar('index')

    traindim = train_data.shape
    # Floor division: a float batch size would break the integer slice
    # indices used in the `givens` mappings below.
    batch_size = traindim[0] // numbatches

    train_data = theano.shared(train_data)
    train_masks = theano.shared(train_masks)

    # Fixed seed so pretraining is reproducible across runs.
    rng = np.random.RandomState(123)
    theano_rng = RandomStreams(rng.randint(2**30))

    model_object = model_class(inputs=X,
                               masks=Y,
                               numpy_rng=rng,
                               theano_rng=theano_rng,
                               n_ins=4096,
                               hidden_layers_sizes=[200, 100, 100, 200],
                               n_outs=4096)

    # Greedy layer-wise pretraining: each autoencoder layer is trained in
    # turn on the (unlabelled) input data.
    for autoE in model_object.AutoEncoder_layers:
        # get the cost and the updates list
        cost, updates = autoE.get_cost_updates(**args)
        # compile the theano function
        train_model = theano.function(
            inputs=[index],
            outputs=cost,
            updates=updates,
            givens={
                X: train_data[index * batch_size:(index + 1) * batch_size]
            })
        HL.iterate_epochs(n_epochs, numbatches, train_model, autoE)

    # Supervised training of the logistic output layer against the masks.
    logReg = model_object.logLayer
    logcost, logupdates = logReg.get_cost_updates(**args)
    train_model = theano.function(
        inputs=[index],
        outputs=logcost,
        updates=logupdates,
        givens={
            X: train_data[index * batch_size:(index + 1) * batch_size],
            Y: train_masks[index * batch_size:(index + 1) * batch_size]
        })

    HL.iterate_epochs(n_epochs, numbatches, train_model, logReg)

    return model_object
Exemplo n.º 3
0
def train_logreg(train_data, train_masks, numbatches,
                 n_epochs, model_class, **args):

    """
    Trains a logistic-regression model on minibatches from the training
    images and returns its learned parameters.

    :param train_data: 2-D numpy array of training samples (rows = samples)
    :param train_masks: 2-D numpy array of target masks, row-aligned with
        train_data
    :param numbatches: number of minibatches the training set is split into
    :param n_epochs: number of training iterations
    :param model_class: class of model to train
    :param args: any named inputs required by the cost function


    RETURNS: (weights, bias) arrays from the trained model
    """

    traindim = train_data.shape
    # Floor division: a float batch size would break the integer slice
    # indices used in the `givens` mapping below.
    batch_size = traindim[0] // numbatches

    X = T.matrix('X')
    Y = T.matrix('Y')
    index = T.lscalar()

    train_data = theano.shared(train_data)
    train_masks = theano.shared(train_masks)

    model_object = model_class(
            input=X,
            masks=Y,
            n_in=100,
            n_out=1024)

    cost, updates = model_object.get_cost_updates(**args)

    train_model = theano.function(inputs=[index], outputs=cost, updates=updates,
                                  givens={X: train_data[index * batch_size:(index + 1) * batch_size],
                                          Y: train_masks[index * batch_size:(index + 1) * batch_size]})

    # go through training epochs
    # Pass the trained instance (not the class), consistent with the other
    # training helpers in this file.
    HL.iterate_epochs(n_epochs, numbatches, train_model, model_object)

    weights = model_object.W.get_value()
    # get_value must be *called*; referencing it bare returned the bound
    # method instead of the bias array.
    bias = model_object.b.get_value()

    return weights, bias
Exemplo n.º 4
0
def train_ac(train_data, numbatches, n_epochs, model_class, **args):
    """
    Trains an auto-encoder model for initialising weights for the CNN layer
    of the model, taking as input random minibatches from the training images.

    :param train_data: 2-D numpy array of training samples (rows = samples)
    :param numbatches: number of minibatches the training set is split into
    :param n_epochs: number of training iterations
    :param model_class: class of model to train
    :param args: any named inputs required by the cost function


    RETURNS: final arrays of hidden weights and biases from the trained model
    """

    traindim = train_data.shape
    # Floor division: a float batch size would break the integer slice
    # indices used in the `givens` mapping below.
    batch_size = traindim[0] // numbatches

    X = T.matrix('X')
    index = T.lscalar()

    train_data = theano.shared(train_data)
    # Fixed seed so training is reproducible across runs.
    rng = np.random.RandomState(123)
    theano_rng = RandomStreams(rng.randint(2**30))

    model_object = model_class(numpy_rng=rng,
                               theano_rng=theano_rng,
                               input=X,
                               n_visible=121,
                               n_hidden=100)

    cost, updates = model_object.get_cost_updates(**args)

    train_model = theano.function(
        inputs=[index],
        outputs=cost,
        updates=updates,
        givens={X: train_data[index * batch_size:(index + 1) * batch_size]})

    # go through training epochs
    # Pass the trained instance (not the class), consistent with the other
    # training helpers in this file.
    HL.iterate_epochs(n_epochs, numbatches, train_model, model_object)

    W_hid = model_object.Whid.get_value()
    b_hid = model_object.bhid.get_value()

    return W_hid, b_hid
Exemplo n.º 5
0
def train_ac(train_data, numbatches, n_epochs, model_class, **args):

    """
    Trains an auto-encoder model for initialising weights for the CNN layer
    of the model, taking as input random minibatches from the training images.

    :param train_data: 2-D numpy array of training samples (rows = samples)
    :param numbatches: number of minibatches the training set is split into
    :param n_epochs: number of training iterations
    :param model_class: class of model to train
    :param args: any named inputs required by the cost function


    RETURNS: final arrays of hidden weights and biases from the trained model
    """

    traindim = train_data.shape
    # Floor division: a float batch size would break the integer slice
    # indices used in the `givens` mapping below.
    batch_size = traindim[0] // numbatches

    X = T.matrix('X')
    index = T.lscalar()

    train_data = theano.shared(train_data)
    # Fixed seed so training is reproducible across runs.
    rng = np.random.RandomState(123)
    theano_rng = RandomStreams(rng.randint(2 ** 30))

    model_object = model_class(numpy_rng=rng,
            theano_rng=theano_rng,
            input=X,
            n_visible=121,
            n_hidden=100)

    cost, updates = model_object.get_cost_updates(**args)

    train_model = theano.function(inputs=[index], outputs=cost, updates=updates,
                                   givens={X: train_data[index * batch_size:(index + 1) * batch_size]})

    # go through training epochs
    # Pass the trained instance (not the class), consistent with the other
    # training helpers in this file.
    HL.iterate_epochs(n_epochs, numbatches, train_model, model_object)

    W_hid = model_object.Whid.get_value()
    b_hid = model_object.bhid.get_value()

    return W_hid, b_hid