Example #1
    transform = theano.function([minibatch], cae(minibatch))

    print "Transformed data:"
    print numpy.histogram(transform(data))
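For readers new to Theano, `theano.function` is what compiles the symbolic graph `cae(minibatch)` into an ordinary callable. A self-contained toy showing the same pattern (illustrative only; a sigmoid stands in for the encoder):

    import numpy
    import theano
    import theano.tensor as T

    x = T.dmatrix('x')                           # symbolic minibatch
    f = theano.function([x], T.nnet.sigmoid(x))  # compile, like `transform`
    print(numpy.histogram(f(numpy.random.randn(10, 4))))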

    # We'll now create a stacked denoising autoencoder. First, we change
    # the number of hidden units to be a list. This tells the
    # build_stacked_ae function how many layers to make.
    stack_conf = conf.copy()
    stack_conf['nhids'] = [20, 20, 10]
    # Choose which layers are contracting autoencoders (CAEs) and which
    # are regular denoising autoencoders.
    stack_conf['contracting'] = [True, False, True]
    stack_conf['anneal_start'] = None # Don't anneal these learning rates
    scae = build_stacked_ae(nvis=stack_conf['nvis'],
                            nhids=stack_conf['nhids'],
                            act_enc=stack_conf['act_enc'],
                            act_dec=stack_conf['act_dec'],
                            contracting=stack_conf['contracting'])

    # To pretrain it, we'll use a different SGDOptimizer for each layer.
    optimizers = []
    thislayer_input = [minibatch]
    for layer in scae.layers():
        cost = SquaredError(layer)(thislayer_input[0],
                                   layer.reconstruct(thislayer_input[0])
                                   ).mean()
        if isinstance(layer, ContractiveAutoencoder):
            cost += layer.contraction_penalty(thislayer_input[0]).mean()
        opt = SGDOptimizer(layer.params(),
                           stack_conf['base_lr'],
                           stack_conf['anneal_start'])
        optimizers.append((opt, cost))
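The snippet stops just before the step that makes the stack greedy: each trained layer's encoding becomes the input to the next layer. A minimal sketch of that step, assuming each layer is callable on a symbolic input the way `cae(minibatch)` is above:

        # Sketch (assumption): pretrain the next layer on this
        # layer's encoding of the current input.
        thislayer_input = [layer(thislayer_input[0])]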
Example #2
    # Add a regularization term to each layer's cost function:
    # - Layer1 : l1_penalty with sparse_penalty = 0.02
    # - Layer2 : sqr_penalty with sparsity_target = 0.3
    #            and sparsity_target_penalty = 0.001
    # - Layer3 : l1_penalty with sparse_penalty = 0.1
    
    sda_conf['solution'] = ['l1_penalty', 'sqr_penalty', 'l1_penalty']
    sda_conf['sparse_penalty'] = [0.02, 0, 0.1]
    sda_conf['sparsity_target'] = [0, 0.3, 0]
    sda_conf['sparsity_target_penalty'] = [0, 0.001, 0]
    
    sda_conf['anneal_start'] = None # Don't anneal these learning rates
    sda = build_stacked_ae(sda_conf['nvis'], sda_conf['nhid'],
                           sda_conf['act_enc'], sda_conf['act_dec'],
                           corruptor=corruptor, contracting=False,
                           solution=sda_conf['solution'],
                           sparse_penalty=sda_conf['sparse_penalty'],
                           sparsity_target=sda_conf['sparsity_target'],
                           sparsity_target_penalty=sda_conf['sparsity_target_penalty'])
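To make the two penalty types concrete, here is a self-contained toy (an illustration under assumptions, not the library's internals); `h` stands in for a layer's hidden activations and the constants mirror `sda_conf` above:

    import numpy
    import theano
    import theano.tensor as T

    h = T.dmatrix('h')                                  # hidden activations
    l1_cost = 0.02 * abs(h).mean()                      # 'l1_penalty' (layer 1)
    rho, lam = 0.3, 0.001                               # 'sqr_penalty' targets
    sqr_cost = lam * T.sqr(h.mean(axis=0) - rho).sum()  # (layer 2)
    penalties = theano.function([h], [l1_cost, sqr_cost])
    print(penalties(numpy.random.rand(5, 20)))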

    # To pretrain it, we'll use a different SGDOptimizer for each layer.
    optimizers = []
    thislayer_input = [minibatch]
    for layer in sda.layers():
        cost = SquaredError(layer)(thislayer_input[0],
                                   layer.reconstruct(thislayer_input[0])
                                   ).mean()
        opt = SGDOptimizer(layer.params(), sda_conf['base_lr'],
                           sda_conf['anneal_start'])
        optimizers.append((opt, cost))
        # Retrieve a Theano function for training this layer.
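The snippet ends before that function is compiled. A hedged completion using plain Theano SGD updates (an assumption: the original code presumably delegates this to SGDOptimizer, whose exact API is not shown here; `T` is `theano.tensor`):

        # Sketch (assumption): one plain SGD step per call, base_lr only.
        grads = T.grad(cost, layer.params())
        updates = [(p, p - sda_conf['base_lr'] * g)
                   for p, g in zip(layer.params(), grads)]
        train_fn = theano.function([minibatch], cost, updates=updates)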