Example #1
    def _gan(self,
             dataset=None,
             params=None,
             optimizer_params=None,
             cook=True,
             root='.',
             verbose=1):
        """
        This function is a demo example of a generative adversarial network.
        This is example code; you should study it rather than merely run it.

        Args:
            dataset: Supply a dataset.
            params: Initialize the network with these parameters, if supplied.
            optimizer_params: Overrides the default optimizer setup, if supplied.
            cook: ``True`` (default) cooks the network after building; if ``False``, the
                  network is returned uncooked.
            root: Location to save outputs.
            verbose: Similar to the rest of the toolbox.

        Returns:
            net: A Network object.

        Notes:
            This is not set up properly and therefore does not learn at the moment. This network
            mimics Ian Goodfellow's original code and implementation for MNIST, adapted from his
            source code: https://github.com/goodfeli/adversarial/blob/master/mnist.yaml . It might
            not be a perfect replication, but I tried to stay as close as I could.
        """
        if dataset is None:
            dataset = self.dataset[-1]
        else:
            self.dataset.append(dataset)

        if verbose >= 2:
            print(".. Creating the initial GAN network")

        input_params = None

        if optimizer_params is None:
            optimizer_params = {
                "momentum_type": 'false',
                "momentum_params": (0.55, 0.9, 20),
                "regularization": (0.00001, 0.00001),
                "optimizer_type": 'adam',
                "id": "main"
            }

        dataset_params = {"dataset": dataset, "type": 'xy', "id": 'data'}

        visualizer_params = {
            "root": root + '/visualizer/gan_' + str(self.increment),
            "frequency": 1,
            "sample_size": 225,
            "rgb_filters": True,
            "debug_functions": False,
            "debug_layers": True,
            "id": 'main'
        }

        resultor_params = {
            "root": root + "/resultor/gan_" + str(self.increment),
            "id": "resultor"
        }

        regularize = True
        batch_norm = True
        dropout_rate = 0.5
        # initialize the network
        gan_net = gan(borrow=True, verbose=verbose)

        gan_net.add_module(type='datastream',
                           params=dataset_params,
                           verbose=verbose)

        gan_net.add_module(type='visualizer',
                           params=visualizer_params,
                           verbose=verbose)

        gan_net.add_module(type='resultor',
                           params=resultor_params,
                           verbose=verbose)
        self.mini_batch_size = gan_net.datastream['data'].mini_batch_size

        #z - latent space created by random layer
        gan_net.add_layer(
            type='random',
            id='z',
            num_neurons=(self.mini_batch_size, 32),
            distribution='normal',
            mu=0,
            sigma=1,
            # limits = (0,1),
            verbose=verbose)

        # Generator layers
        if params is not None:
            input_params = params['G1']

        gan_net.add_layer(type="dot_product",
                          origin="z",
                          id="G1",
                          num_neurons=1200,
                          activation='relu',
                          regularize=regularize,
                          batch_norm=batch_norm,
                          input_params=input_params,
                          verbose=verbose)

        if params is not None:
            input_params = params['G2']
        gan_net.add_layer(type="dot_product",
                          origin="G1",
                          id="G2",
                          num_neurons=5408,
                          activation='relu',
                          regularize=regularize,
                          batch_norm=batch_norm,
                          input_params=input_params,
                          verbose=verbose)

        gan_net.add_layer(type="unflatten",
                          origin="G2",
                          id="G2-unflatten",
                          shape=(13, 13, 32),
                          batch_norm=batch_norm,
                          verbose=verbose)

        if params is not None:
            input_params = params['G3']
        gan_net.add_layer(type="deconv",
                          origin="G2-unflatten",
                          id="G3",
                          num_neurons=32,
                          filter_size=(3, 3),
                          output_shape=(28, 28, 32),
                          activation='relu',
                          regularize=regularize,
                          batch_norm=batch_norm,
                          input_params=input_params,
                          stride=(2, 2),
                          verbose=verbose)

        if params is not None:
            input_params = params['G4']
        gan_net.add_layer(type="deconv",
                          origin="G3",
                          id="G4",
                          num_neurons=32,
                          filter_size=(3, 3),
                          output_shape=(30, 30, 64),
                          activation='relu',
                          regularize=regularize,
                          batch_norm=batch_norm,
                          input_params=input_params,
                          stride=(1, 1),
                          verbose=verbose)

        if params is not None:
            input_params = params['G(z)']
        gan_net.add_layer(type="deconv",
                          origin="G4",
                          id="G(z)",
                          num_neurons=64,
                          filter_size=(3, 3),
                          output_shape=(32, 32, 3),
                          activation='tanh',
                          regularize=regularize,
                          stride=(1, 1),
                          input_params=input_params,
                          verbose=verbose)

        #x - inputs come from dataset 1 X 3072
        gan_net.add_layer(
            type="input",
            id="x",
            verbose=verbose,
            # if you didn't add a dataset module, now is the time.
            datastream_origin='data',
            mean_subtract=False)

        #D(x) - Contains params theta_d creates features 1 X 800.
        # Discriminator Layers
        # add first convolutional layer
        if params is not None:
            input_params = params['D1-x']
        gan_net.add_layer(type="conv_pool",
                          origin="x",
                          id="D1-x",
                          num_neurons=20,
                          filter_size=(5, 5),
                          pool_size=(2, 2),
                          activation='relu',
                          regularize=regularize,
                          batch_norm=batch_norm,
                          input_params=input_params,
                          verbose=verbose)

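        # D1-z runs the same discriminator stage on G(z); passing D1-x's
        # params as input_params ties the weights, so real and generated
        # images are filtered identically. The D2/D3/D4 pairs below share
        # weights the same way.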
        gan_net.add_layer(type="conv_pool",
                          origin="G(z)",
                          id="D1-z",
                          num_neurons=20,
                          filter_size=(5, 5),
                          pool_size=(2, 2),
                          activation='relu',
                          regularize=regularize,
                          batch_norm=batch_norm,
                          input_params=gan_net.dropout_layers["D1-x"].params,
                          verbose=verbose)

        if params is not None:
            input_params = params['D2-x']
        gan_net.add_layer(type="conv_pool",
                          origin="D1-x",
                          id="D2-x",
                          num_neurons=50,
                          filter_size=(3, 3),
                          pool_size=(2, 2),
                          activation='relu',
                          regularize=regularize,
                          batch_norm=batch_norm,
                          input_params=input_params,
                          verbose=verbose)

        gan_net.add_layer(
            type="conv_pool",
            origin="D1-z",
            # origin = "G(z)",
            id="D2-z",
            num_neurons=50,
            filter_size=(3, 3),
            pool_size=(2, 2),
            activation='relu',
            regularize=regularize,
            batch_norm=batch_norm,
            input_params=gan_net.dropout_layers["D2-x"].params,
            verbose=verbose)

        if params is not None:
            input_params = params['D3-x']
        gan_net.add_layer(type="dot_product",
                          id="D3-x",
                          origin="D2-x",
                          num_neurons=1200,
                          activation='relu',
                          regularize=regularize,
                          batch_norm=batch_norm,
                          dropout_rate=dropout_rate,
                          input_params=input_params,
                          verbose=verbose)

        gan_net.add_layer(type="dot_product",
                          id="D3-z",
                          origin="D2-z",
                          input_params=gan_net.dropout_layers["D3-x"].params,
                          num_neurons=1200,
                          activation='relu',
                          regularize=regularize,
                          batch_norm=batch_norm,
                          dropout_rate=dropout_rate,
                          verbose=verbose)

        if params is not None:
            input_params = params['D4-x']
        gan_net.add_layer(type="dot_product",
                          id="D4-x",
                          origin="D3-x",
                          num_neurons=1200,
                          activation='relu',
                          regularize=regularize,
                          batch_norm=batch_norm,
                          dropout_rate=dropout_rate,
                          input_params=input_params,
                          verbose=verbose)

        gan_net.add_layer(type="dot_product",
                          id="D4-z",
                          origin="D3-z",
                          input_params=gan_net.dropout_layers["D4-x"].params,
                          num_neurons=1200,
                          activation='relu',
                          regularize=regularize,
                          dropout_rate=dropout_rate,
                          batch_norm=batch_norm,
                          verbose=verbose)

        #C(D(x)) - This is the opposite of C(D(G(z))), real

        if params is not None:
            input_params = params['D(x)']
        gan_net.add_layer(type="dot_product",
                          id="D(x)",
                          origin="D4-x",
                          num_neurons=1,
                          activation='sigmoid',
                          regularize=regularize,
                          input_params=input_params,
                          verbose=verbose)

        #C(D(G(z))) fake - the classifier for fake/real that always predicts fake
        gan_net.add_layer(type="dot_product",
                          id="D(G(z))",
                          origin="D4-z",
                          num_neurons=1,
                          activation='sigmoid',
                          regularize=regularize,
                          input_params=gan_net.dropout_layers["D(x)"].params,
                          verbose=verbose)

        #C(D(x)) - This is the opposite of C(D(G(z))), real

        if params is not None:
            input_params = params['softmax']
        gan_net.add_layer(type="classifier",
                          id="softmax",
                          origin="D4-x",
                          num_classes=10,
                          regularize=regularize,
                          input_params=input_params,
                          activation='softmax',
                          verbose=verbose)

        # objective layers
        # discriminator objective
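        # For reference (editorial note, not in the original): these tensor
        # layers implement the least squares GAN losses,
        #   L_D = 0.5 * E_x[(D(x) - 1)^2] + 0.5 * E_z[D(G(z))^2]
        #   L_G = 0.5 * E_z[(D(G(z)) - 1)^2]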
        gan_net.add_layer(
            type="tensor",
            input=0.5 * T.mean(T.sqr(gan_net.layers['D(x)'].output - 1)) +
            0.5 * T.mean(T.sqr(gan_net.layers['D(G(z))'].output)),
            input_shape=(1,),
            id="discriminator_task")

        gan_net.add_layer(
            type="objective",
            id="discriminator_obj",
            origin="discriminator_task",
            layer_type='value',
            objective=gan_net.dropout_layers['discriminator_task'].output,
            datastream_origin='data',
            verbose=verbose)
        #generator objective
        gan_net.add_layer(type="tensor",
                          input=0.5 *
                          T.mean(T.sqr(gan_net.layers['D(G(z))'].output - 1)),
                          input_shape=(1, ),
                          id="objective_task")
        gan_net.add_layer(
            type="objective",
            id="generator_obj",
            layer_type='value',
            origin="objective_task",
            objective=gan_net.dropout_layers['objective_task'].output,
            datastream_origin='data',
            verbose=verbose)

        #softmax objective.
        gan_net.add_layer(type="objective",
                          id="classifier_obj",
                          origin="softmax",
                          objective="nll",
                          layer_type='discriminator',
                          datastream_origin='data',
                          verbose=verbose)

        # from yann.utils.graph import draw_network
        # draw_network(net.graph, filename = 'gan.png')
        # gan_net.pretty_print()

        if cook:
            # gan_net.datastream['data'].batches2train = 10
            # gan_net.datastream['data'].batches2validate = 2
            # gan_net.datastream['data'].batches2test = 1

            gan_net.cook(
                objective_layers=[
                    "classifier_obj", "discriminator_obj", "generator_obj"
                ],
                optimizer_params=optimizer_params,
                discriminator_layers=["D1-x", "D2-x", "D3-x", "D4-x"],
                generator_layers=["G1", "G2", "G3", "G4", "G(z)"],
                classifier_layers=["D1-x", "D2-x", "D3-x", "D4-x", "softmax"],
                softmax_layer="softmax",
                game_layers=("D(x)", "D(G(z))"),
                verbose=verbose)
        return gan_net
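
Since ``_gan`` is a method, calling it needs a host object that supplies the
attributes it reads. A minimal usage sketch follows; ``GanPantry``,
``cook_cifar10`` and ``dataset_location()`` are illustrative assumptions, not
part of the listing above:

    # Hedged usage sketch: GanPantry is a hypothetical host class carrying the
    # state _gan expects; cook_cifar10 is assumed from yann.special.datasets.
    from yann.special.datasets import cook_cifar10

    class GanPantry(object):
        def __init__(self):
            self.dataset = []    # _gan appends the supplied dataset here
            self.increment = 0   # namespaces the visualizer/resultor roots

    GanPantry._gan = _gan        # attach the method defined above

    data = cook_cifar10(verbose=1)                    # cook CIFAR 10 to disk
    net = GanPantry()._gan(dataset=data.dataset_location(), verbose=2)
    net.train(epochs=(20, ), k=2, verbose=2)          # train the cooked GAN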
Example #2
def shallow_gan(dataset=None, verbose=1):
    """
    This function is a demo example of a generative adversarial network.
    This is example code; you should study it rather than merely run it.

    Args:
        dataset: Supply a dataset.
        verbose: Similar to the rest of the toolbox.
    """
    optimizer_params = {
        "momentum_type": 'polyak',
        "momentum_params": (0.65, 0.9, 50),
        "regularization": (0.000, 0.000),
        "optimizer_type": 'rmsprop',
        "id": "main"
    }

    dataset_params = {"dataset": dataset, "type": 'xy', "id": 'data'}

    visualizer_params = {
        "root": '.',
        "frequency": 1,
        "sample_size": 225,
        "rgb_filters": False,
        "debug_functions": False,
        "debug_layers": True,
        "id": 'main'
    }

    # initialize the network
    net = gan(borrow=True, verbose=verbose)

    net.add_module(type='datastream', params=dataset_params, verbose=verbose)

    net.add_module(type='visualizer',
                   params=visualizer_params,
                   verbose=verbose)

    #z - latent space created by random layer
    net.add_layer(type='random',
                  id='z',
                  num_neurons=(100, 32),
                  distribution='normal',
                  mu=0,
                  sigma=1,
                  verbose=verbose)

    #x - inputs come from dataset 1 X 784
    net.add_layer(
        type="input",
        id="x",
        verbose=verbose,
        # if you didn't add a dataset module, now is the time.
        datastream_origin='data',
        mean_subtract=False)

    net.add_layer(
        type="dot_product",
        origin="z",
        id="G(z)",
        num_neurons=784,
        activation='tanh',
        verbose=verbose)  # This layer is the one that creates the images.

    #D(x) - Contains params theta_d creates features 1 X 800.
    net.add_layer(type="dot_product",
                  id="D(x)",
                  origin="x",
                  num_neurons=800,
                  activation='relu',
                  regularize=True,
                  verbose=verbose)

    net.add_layer(type="dot_product",
                  id="D(G(z))",
                  origin="G(z)",
                  input_params=net.dropout_layers["D(x)"].params,
                  num_neurons=800,
                  activation='relu',
                  regularize=True,
                  verbose=verbose)

    #C(D(x)) - This is the opposite of C(D(G(z))), real
    net.add_layer(type="dot_product",
                  id="real",
                  origin="D(x)",
                  num_neurons=1,
                  activation='sigmoid',
                  verbose=verbose)

    #C(D(G(z))) fake - the classifier for fake/real that always predicts fake
    net.add_layer(
        type="dot_product",
        id="fake",
        origin="D(G(z))",
        num_neurons=1,
        activation='sigmoid',
        # again, share parameters with the "real" layer
        input_params=net.dropout_layers["real"].params,
        verbose=verbose)

    #C(D(x)) - This is the opposite of C(D(G(z))), real
    net.add_layer(type="classifier",
                  id="softmax",
                  origin="D(x)",
                  num_classes=10,
                  activation='softmax',
                  verbose=verbose)

    # objective layers
    # discriminator objective
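    # For reference (editorial note, not in the original): these tensor layers
    # implement Goodfellow et al.'s losses,
    #   L_D = -0.5 * E_x[log D(x)] - 0.5 * E_z[log(1 - D(G(z)))]
    #   L_G = -0.5 * E_z[log D(G(z))]   (the non-saturating generator loss)
    # where the 'real' layer outputs D(x) and 'fake' outputs D(G(z)).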
    net.add_layer(
        type="tensor",
        input=-0.5 * T.mean(T.log(net.layers['real'].output)) -
        0.5 * T.mean(T.log(1 - net.layers['fake'].output)),
        input_shape=(1,),
        id="discriminator_task")

    net.add_layer(type="objective",
                  id="discriminator_obj",
                  origin="discriminator_task",
                  layer_type='value',
                  objective=net.dropout_layers['discriminator_task'].output,
                  datastream_origin='data',
                  verbose=verbose)
    #generator objective
    net.add_layer(type="tensor",
                  input=-0.5 * T.mean(T.log(net.layers['fake'].output)),
                  input_shape=(1, ),
                  id="objective_task")
    net.add_layer(type="objective",
                  id="generator_obj",
                  layer_type='value',
                  origin="objective_task",
                  objective=net.dropout_layers['objective_task'].output,
                  datastream_origin='data',
                  verbose=verbose)

    #softmax objective.
    net.add_layer(type="objective",
                  id="classifier_obj",
                  origin="softmax",
                  objective="nll",
                  layer_type='discriminator',
                  datastream_origin='data',
                  verbose=verbose)

    from yann.utils.graph import draw_network
    draw_network(net.graph, filename='gan.png')
    net.pretty_print()

    net.cook(objective_layers=[
        "classifier_obj", "discriminator_obj", "generator_obj"
    ],
             optimizer_params=optimizer_params,
             discriminator_layers=["D(x)"],
             generator_layers=["G(z)"],
             classifier_layers=["D(x)", "softmax"],
             softmax_layer="softmax",
             game_layers=("fake", "real"),
             verbose=verbose)

    learning_rates = (0.05, 0.01)

    net.train(epochs=(20, ),
              k=2,
              pre_train_discriminator=3,
              validate_after_epochs=1,
              visualize_after_epochs=1,
              training_accuracy=True,
              show_progress=True,
              early_terminate=True,
              learning_rates=learning_rates,
              verbose=verbose)

    return net
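
A usage sketch for ``shallow_gan``; the ``cook_mnist`` helper and its
``dataset_location()`` accessor are assumptions based on yann's dataset
tutorials, not part of the listing above:

    # Hedged usage sketch, assuming yann's cook_mnist dataset helper.
    from yann.special.datasets import cook_mnist

    data = cook_mnist(verbose=1)    # cooks MNIST into yann's on-disk format
    net = shallow_gan(dataset=data.dataset_location(), verbose=2)
    # shallow_gan cooks and trains internally, so the returned net is ready
    # for testing or for sampling from the 'G(z)' layer.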
Example #3
def deep_deconvolutional_lsgan(dataset,
                              regularize = True,
                              batch_norm = True,
                              dropout_rate = 0.5,
                              verbose = 1 ):
    """
    This function is a demo example of a generative adversarial network.
    This is example code; you should study it rather than merely run it.
    This method uses a few deconvolutional layers as was used in the DCGAN paper.
    It is set up to produce images of size 32 X 32.

    Args:
        dataset: Supply a dataset.
        regularize: ``True`` (default) supplied to layer arguments
        batch_norm: ``True`` (default) supplied to layer arguments
        dropout_rate: ``0.5`` (default) supplied to layer arguments
        verbose: Similar to the rest of the toolbox.

    Returns:
        net: A Network object.

    Notes:
        This method is set up for SVHN / CIFAR 10.
        This is an implementation of the least squares GAN with a = 0, b = 1 and c = 1
        (equation 9 of [1]).
        [1] Least Squares Generative Adversarial Networks, Xudong Mao, Qing Li, Haoran Xie, Raymond Y.K. Lau, Zhen Wang
    """
    if verbose >=2:
        print (".. Creating a GAN network")

    optimizer_params =  {        
                "momentum_type"       : 'polyak',             
                "momentum_params"     : (0.55, 0.9, 20),      
                "regularization"      : (0.00001, 0.00001),       
                "optimizer_type"      : 'adagrad',                
                "id"                  : "main"
                        }


    dataset_params  = {
                            "dataset"   : dataset,
                            "type"      : 'xy',
                            "id"        : 'data'
                    }

    visualizer_params = {
                    "root"       : '.',
                    "frequency"  : 1,
                    "sample_size": 225,
                    "rgb_filters": True,
                    "debug_functions" : False,
                    "debug_layers": False,  
                    "id"         : 'main'
                        }  
                    
    # initialize the network
    net = gan (      borrow = True,
                    verbose = verbose )                       
    
    net.add_module ( type = 'datastream', 
                    params = dataset_params,
                    verbose = verbose )    
    
    net.add_module ( type = 'visualizer',
                    params = visualizer_params,
                    verbose = verbose 
                    ) 

    #z - latent space created by random layer
    net.add_layer(type = 'random',
                        id = 'z',
                        num_neurons = (500,32), 
                        distribution = 'normal',
                        mu = 0,
                        sigma = 1,
                        limits = (0,1),
                        verbose = verbose)

    # Generator layers
    net.add_layer ( type = "dot_product",
                    origin = "z",
                    id = "G1",
                    num_neurons = 1200,
                    activation = 'relu',
                    regularize = regularize,
                    batch_norm = batch_norm,
                    verbose = verbose
                    ) 

    net.add_layer ( type = "dot_product",
                    origin = "G1",
                    id = "G2",
                    num_neurons = 5408,
                    activation = 'relu',
                    regularize = regularize,
                    batch_norm = batch_norm,
                    verbose = verbose
                    )

    net.add_layer ( type = "unflatten",
                    origin = "G2",
                    id = "G2-unflatten",
                    shape = (13, 13, 32),
                    batch_norm = batch_norm,
                    verbose = verbose
                    )

    net.add_layer ( type = "deconv",
                    origin = "G2-unflatten",
                    id = "G3",
                    num_neurons = 32,
                    filter_size = (3,3),
                    output_shape = (28,28,32),
                    activation = 'relu',
                    regularize = regularize,    
                    batch_norm = batch_norm,
                    stride = (2,2),
                    verbose = verbose
                    )

    net.add_layer ( type = "deconv",
                    origin = "G3",
                    id = "G(z)",
                    num_neurons = 32,
                    filter_size = (5,5),
                    output_shape = (32,32,3),
                    activation = 'tanh',
                    # regularize = regularize,    
                    stride = (1,1),
                    verbose = verbose
                    )
    
    #x - inputs come from dataset 1 X 784
    net.add_layer ( type = "input",
                    id = "x",
                    verbose = verbose, 
                    datastream_origin = 'data', # if you didn't add a dataset module, now is 
                                                # the time. 
                    mean_subtract = False )

    #D(x) - Contains params theta_d creates features 1 X 800. 
    # Discriminator Layers
    # add first convolutional layer

    net.add_layer ( type = "conv_pool",
                    origin = "x",
                    id = "D1-x",
                    num_neurons = 20,
                    filter_size = (5,5),
                    pool_size = (2,2),
                    activation = 'relu',
                    regularize = regularize,
                    batch_norm = batch_norm,                    
                    verbose = verbose
                    )

    net.add_layer ( type = "conv_pool",
                    origin = "G(z)",
                    id = "D1-z",
                    num_neurons = 20,
                    filter_size = (5,5),
                    pool_size = (2,2),
                    activation = 'relu',
                    regularize = regularize,
                    batch_norm = batch_norm,
                    input_params = net.dropout_layers["D1-x"].params,
                    verbose = verbose
                    )
    
    net.add_layer ( type = "conv_pool",
                    origin = "D1-x",
                    # origin = "x",
                    id = "D2-x",
                    num_neurons = 50,
                    filter_size = (3,3),
                    pool_size = (2,2),
                    activation = 'relu',
                    regularize = regularize,
                    batch_norm = batch_norm,                    
                    verbose = verbose
                    )      

    net.add_layer ( type = "conv_pool",
                    origin = "D1-z",
                    # origin = "G(z)",
                    id = "D2-z",
                    num_neurons = 50,
                    filter_size = (3,3),
                    pool_size = (2,2),
                    activation = 'relu',
                    regularize = regularize,
                    batch_norm = batch_norm,                    
                    input_params = net.dropout_layers["D2-x"].params,
                    verbose = verbose
                    )      

    net.add_layer ( type = "dot_product",
                    id = "D3-x",
                    origin = "D2-x",
                    num_neurons = 1200,
                    activation = 'relu',
                    regularize = regularize,  
                    batch_norm = batch_norm,
                    dropout_rate = dropout_rate,                                                       
                    verbose = verbose
                    )

    net.add_layer ( type = "dot_product",
                    id = "D3-z",
                    origin = "D2-z",
                    input_params = net.dropout_layers["D3-x"].params, 
                    num_neurons = 1200,
                    activation = 'relu',
                    regularize = regularize,
                    batch_norm = batch_norm,
                    dropout_rate = dropout_rate,                       
                    verbose = verbose
                    )

    net.add_layer ( type = "dot_product",
                    id = "D4-x",
                    origin = "D3-x",
                    num_neurons = 1200,
                    activation = 'relu',
                    regularize = regularize,       
                    batch_norm = batch_norm,
                    dropout_rate = dropout_rate,                                                                         
                    verbose = verbose
                    )

    net.add_layer ( type = "dot_product",
                    id = "D4-z",
                    origin = "D3-z",
                    input_params = net.dropout_layers["D4-x"].params, 
                    num_neurons = 1200,
                    activation = 'relu',
                    regularize = regularize,
                    dropout_rate = dropout_rate,          
                    batch_norm = batch_norm,                    
                    verbose = verbose
                    )

    #C(D(x)) - This is the opposite of C(D(G(z))), real
    net.add_layer ( type = "dot_product",
                    id = "D(x)",
                    origin = "D4-x",
                    num_neurons = 1,
                    activation = 'sigmoid',
                    regularize = regularize,
                    verbose = verbose
                    )

    #C(D(G(z))) fake - the classifier for fake/real that always predicts fake 
    net.add_layer ( type = "dot_product",
                    id = "D(G(z))",
                    origin = "D4-z",
                    num_neurons = 1,
                    activation = 'sigmoid',
                    regularize = regularize,
                    input_params = net.dropout_layers["D(x)"].params,                   
                    verbose = verbose
                    )

    
    #C(D(x)) - This is the opposite of C(D(G(z))), real
    net.add_layer ( type = "classifier",
                    id = "softmax",
                    origin = "D4-x",
                    num_classes = 10,
                    regularize = regularize,
                    activation = 'softmax',
                    verbose = verbose
                )
    
    # objective layers 
    # discriminator objective 
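    # For reference (editorial note, not in the original): with a = 0, b = 1,
    # c = 1, equation 9 of [1] gives the losses these tensor layers implement:
    #   L_D = 0.5 * E_x[(D(x) - b)^2] + 0.5 * E_z[(D(G(z)) - a)^2]
    #   L_G = 0.5 * E_z[(D(G(z)) - c)^2]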
    net.add_layer (type = "tensor",
                    input =  0.5 * T.mean(T.sqr(net.layers['D(x)'].output-1)) + \
                                0.5 * T.mean(T.sqr(net.layers['D(G(z))'].output)),
                    input_shape = (1,),
                    id = "discriminator_task"
                    )

    net.add_layer ( type = "objective",
                    id = "discriminator_obj",
                    origin = "discriminator_task",
                    layer_type = 'value',
                    objective = net.dropout_layers['discriminator_task'].output,
                    datastream_origin = 'data', 
                    verbose = verbose
                    )
    #generator objective 
    net.add_layer (type = "tensor",
                    input =  0.5 * T.mean(T.sqr(net.layers['D(G(z))'].output-1)),
                    input_shape = (1,),
                    id = "objective_task"
                    )
    net.add_layer ( type = "objective",
                    id = "generator_obj",
                    layer_type = 'value',
                    origin = "objective_task",
                    objective = net.dropout_layers['objective_task'].output,
                    datastream_origin = 'data', 
                    verbose = verbose
                    )   
    
    #softmax objective.    
    net.add_layer ( type = "objective",
                    id = "classifier_obj",
                    origin = "softmax",
                    objective = "nll",
                    layer_type = 'discriminator',
                    datastream_origin = 'data', 
                    verbose = verbose
                    )
    
    # from yann.utils.graph import draw_network
    # draw_network(net.graph, filename = 'gan.png')    
    net.pretty_print()

    net.cook (  objective_layers = ["classifier_obj", "discriminator_obj", "generator_obj"],
                optimizer_params = optimizer_params,
                discriminator_layers = ["D1-x", "D2-x","D3-x","D4-x"],
                generator_layers = ["G1","G2","G3","G(z)"], 
                classifier_layers = ["D1-x", "D2-x","D3-x","D4-x","softmax"],                                                
                softmax_layer = "softmax",
                game_layers = ("D(x)", "D(G(z))"),
                verbose = verbose )
                    
    learning_rates = (0.04, 0.01)

    net.train( epochs = (20, ), 
            k = 2, 
            pre_train_discriminator = 0,
            validate_after_epochs = 1,
            visualize_after_epochs = 1,
            training_accuracy = True,
            show_progress = True,
            early_terminate = True,
            learning_rates = learning_rates,
            verbose = verbose)

    return net
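
A usage sketch for the LSGAN above; ``cook_cifar10`` and
``dataset_location()`` are assumed yann helpers, named here for illustration:

    # Hedged usage sketch: the docstring says this network is set up for
    # SVHN / CIFAR 10, so a cooked CIFAR 10 dataset is assumed.
    from yann.special.datasets import cook_cifar10

    data = cook_cifar10(verbose=1)
    net = deep_deconvolutional_lsgan(dataset=data.dataset_location(),
                                     regularize=True,
                                     batch_norm=True,
                                     dropout_rate=0.5,
                                     verbose=2)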
Example #4
def deep_deconvolutional_gan(dataset, verbose=1):
    """
    This function is a demo example of a generative adversarial network.
    This is example code; you should study it rather than merely run it.
    This method uses a few deconvolutional layers as was used in the DCGAN paper.

    Args:
        dataset: Supply a dataset.
        verbose: Similar to the rest of the toolbox.

    Returns:
        net: A Network object.

    Notes:
        This is not set up properly and therefore does not learn at the moment. This network
        mimics Ian Goodfellow's original code and implementation for MNIST, adapted from his
        source code: https://github.com/goodfeli/adversarial/blob/master/mnist.yaml . It might
        not be a perfect replication, but I tried to stay as close as I could.
    """
    if verbose >= 2:
        print(".. Creating a GAN network")

    optimizer_params = {
        "momentum_type": 'polyak',
        "momentum_params": (0.5, 0.7, 20),
        "regularization": (0.000, 0.000),
        "optimizer_type": 'rmsprop',
        "id": "main"
    }

    dataset_params = {"dataset": dataset, "type": 'xy', "id": 'data'}

    visualizer_params = {
        "root": '.',
        "frequency": 1,
        "sample_size": 225,
        "rgb_filters": False,
        "debug_functions": False,
        "debug_layers": False,
        "id": 'main'
    }

    # initialize the network
    net = gan(borrow=True, verbose=verbose)

    net.add_module(type='datastream', params=dataset_params, verbose=verbose)

    net.add_module(type='visualizer',
                   params=visualizer_params,
                   verbose=verbose)

    #z - latent space created by random layer
    net.add_layer(
        type='random',
        id='z',
        num_neurons=(100, 10),
        distribution='normal',
        mu=0,
        sigma=1,
        # limits = (0,1),
        verbose=verbose)

    # Generator layers
    net.add_layer(
        type="dot_product",
        origin="z",
        id="G1",
        num_neurons=1200,
        activation='relu',
        regularize=True,
        # batch_norm = True,
        verbose=verbose)

    net.add_layer(
        type="dot_product",
        origin="G1",
        id="G2",
        num_neurons=1440,
        activation='relu',
        regularize=True,
        # batch_norm = True,
        verbose=verbose)

    net.add_layer(type="unflatten",
                  origin="G2",
                  id="G2-unflatten",
                  shape=(12, 12, 10),
                  verbose=verbose)

    net.add_layer(type="deconv",
                  origin="G2-unflatten",
                  id="G3",
                  num_neurons=10,
                  filter_size=(3, 3),
                  output_shape=(26, 26, 20),
                  activation='relu',
                  regularize=True,
                  stride=(2, 2),
                  verbose=verbose)

    net.add_layer(
        type="deconv",
        origin="G3",
        id="G(z)",
        num_neurons=20,
        filter_size=(3, 3),
        output_shape=(28, 28, 1),
        activation='tanh',
        # regularize = True,
        stride=(1, 1),
        verbose=verbose)

    #x - inputs come from dataset 1 X 784
    net.add_layer(
        type="input",
        id="x",
        verbose=verbose,
        # if you didn't add a dataset module, now is the time.
        datastream_origin='data',
        mean_subtract=False)

    #D(x) - Contains params theta_d creates features 1 X 800.
    # Discriminator Layers
    # add first convolutional layer

    net.add_layer(type="conv_pool",
                  origin="x",
                  id="D1-x",
                  num_neurons=20,
                  filter_size=(5, 5),
                  pool_size=(2, 2),
                  activation='relu',
                  regularize=True,
                  verbose=verbose)

    net.add_layer(type="conv_pool",
                  origin="G(z)",
                  id="D1-z",
                  num_neurons=20,
                  filter_size=(5, 5),
                  pool_size=(2, 2),
                  activation='relu',
                  regularize=True,
                  input_params=net.dropout_layers["D1-x"].params,
                  verbose=verbose)

    net.add_layer(
        type="conv_pool",
        origin="D1-x",
        # origin = "x",
        id="D2-x",
        num_neurons=50,
        filter_size=(3, 3),
        pool_size=(2, 2),
        activation='relu',
        regularize=True,
        verbose=verbose)

    net.add_layer(
        type="conv_pool",
        origin="D1-z",
        # origin = "G(z)",
        id="D2-z",
        num_neurons=50,
        filter_size=(3, 3),
        pool_size=(2, 2),
        activation='relu',
        regularize=True,
        input_params=net.dropout_layers["D2-x"].params,
        verbose=verbose)

    net.add_layer(
        type="dot_product",
        id="D3-x",
        origin="D2-x",
        num_neurons=1200,
        activation='relu',
        regularize=True,
        # batch_norm = True,
        dropout_rate=0.5,
        verbose=verbose)

    net.add_layer(
        type="dot_product",
        id="D3-z",
        origin="D2-z",
        input_params=net.dropout_layers["D3-x"].params,
        num_neurons=1200,
        activation='relu',
        regularize=True,
        # batch_norm = True,
        dropout_rate=0.5,
        verbose=verbose)

    net.add_layer(
        type="dot_product",
        id="D4-x",
        origin="D3-x",
        num_neurons=1200,
        activation='relu',
        regularize=True,
        # batch_norm = True,
        dropout_rate=0.5,
        verbose=verbose)

    net.add_layer(
        type="dot_product",
        id="D4-z",
        origin="D3-z",
        input_params=net.dropout_layers["D4-x"].params,
        num_neurons=1200,
        activation='relu',
        regularize=True,
        dropout_rate=0.5,
        # batch_norm = True,
        verbose=verbose)

    #C(D(x)) - This is the opposite of C(D(G(z))), real
    net.add_layer(type="dot_product",
                  id="D(x)",
                  origin="D4-x",
                  num_neurons=1,
                  activation='sigmoid',
                  verbose=verbose)

    #C(D(G(z))) fake - the classifier for fake/real that always predicts fake
    net.add_layer(type="dot_product",
                  id="D(G(z))",
                  origin="D4-z",
                  num_neurons=1,
                  activation='sigmoid',
                  input_params=net.dropout_layers["D(x)"].params,
                  verbose=verbose)

    #C(D(x)) - This is the opposite of C(D(G(z))), real
    net.add_layer(type="classifier",
                  id="softmax",
                  origin="D4-x",
                  num_classes=10,
                  activation='softmax',
                  verbose=verbose)

    # objective layers
    # discriminator objective
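    # For reference (editorial note, not in the original): these tensor layers
    # implement Goodfellow et al.'s losses,
    #   L_D = -0.5 * E_x[log D(x)] - 0.5 * E_z[log(1 - D(G(z)))]
    #   L_G = -0.5 * E_z[log D(G(z))]   (the non-saturating generator loss)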
    net.add_layer(
        type="tensor",
        input=-0.5 * T.mean(T.log(net.layers['D(x)'].output)) -
        0.5 * T.mean(T.log(1 - net.layers['D(G(z))'].output)),
        input_shape=(1,),
        id="discriminator_task")

    net.add_layer(type="objective",
                  id="discriminator_obj",
                  origin="discriminator_task",
                  layer_type='value',
                  objective=net.dropout_layers['discriminator_task'].output,
                  datastream_origin='data',
                  verbose=verbose)
    #generator objective
    net.add_layer(type="tensor",
                  input=-0.5 * T.mean(T.log(net.layers['D(G(z))'].output)),
                  input_shape=(1, ),
                  id="objective_task")
    net.add_layer(type="objective",
                  id="generator_obj",
                  layer_type='value',
                  origin="objective_task",
                  objective=net.dropout_layers['objective_task'].output,
                  datastream_origin='data',
                  verbose=verbose)

    #softmax objective.
    net.add_layer(type="objective",
                  id="classifier_obj",
                  origin="softmax",
                  objective="nll",
                  layer_type='discriminator',
                  datastream_origin='data',
                  verbose=verbose)

    # from yann.utils.graph import draw_network
    # draw_network(net.graph, filename = 'gan.png')
    net.pretty_print()

    net.cook(objective_layers=[
        "classifier_obj", "discriminator_obj", "generator_obj"
    ],
             optimizer_params=optimizer_params,
             discriminator_layers=["D1-x", "D2-x", "D3-x", "D4-x"],
             generator_layers=["G1", "G2", "G3", "G(z)"],
             classifier_layers=["D1-x", "D2-x", "D3-x", "D4-x", "softmax"],
             softmax_layer="softmax",
             game_layers=("D(x)", "D(G(z))"),
             verbose=verbose)

    learning_rates = (0.00004, 0.001)

    net.train(epochs=(20, ),
              k=1,
              pre_train_discriminator=0,
              validate_after_epochs=1,
              visualize_after_epochs=1,
              training_accuracy=True,
              show_progress=True,
              early_terminate=True,
              learning_rates=learning_rates,
              verbose=verbose)

    return net
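
The generator here emits 28 X 28 X 1 images, so MNIST is the natural input.
A brief usage sketch, again assuming yann's ``cook_mnist`` helper and its
``dataset_location()`` accessor:

    # Hedged usage sketch; cook_mnist and dataset_location() are assumptions.
    from yann.special.datasets import cook_mnist

    data = cook_mnist(verbose=1)
    net = deep_deconvolutional_gan(dataset=data.dataset_location(), verbose=2)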
Example #5
    			"rgb_filters": <bool> flag. if True a 3D-RGB rendition of the CNN
                    			filters is rendered. Default value is False.
    			"debug_functions" : <bool> visualize train and test and other theano functions.
                        		default is False. Needs pydot and dv2viz to be installed.
    			"debug_layers" : <bool> Will print layer activities from input to that layer
                     			output. ( this is almost always useless because test debug
                     			function will combine all these layers and print directly.)
    			"id"         : id of the visualizer
                		}
		Returns: A visualizer object.

"""
                      
    # intitialize the network with a datastream, visualizer and an optimizer

    net = gan (     borrow = True,
                    verbose = verbose )     

"""
      add_module(type, params=None, verbose=2): used to add a module to net

            type: which module to add. Options are 'datastream', 'visualizer', 'optimizer' and 'resultor'
            params: dicitionary as used above
	    verbose: similar to the rest of the toolbox

"""                  
    
    
    net.add_module ( type = 'datastream', 
                     params = dataset_params,
                     verbose = verbose )
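
A hedged completion of the fragment, mirroring the other examples on this
page; ``visualizer_params`` is assumed to have been defined in the part of
the snippet that did not survive:

    # Continuation sketch: attach the visualizer whose params the docstring
    # fragment above describes. visualizer_params is assumed defined earlier.
    net.add_module(type='visualizer',
                   params=visualizer_params,
                   verbose=verbose)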