Example 1
    def create_reconstruction_image(self, input_data):
        """
        Adds noise to an input and saves an image from the reconstruction running the input through the computation
        graph.
        """
        n_examples = len(input_data)
        xs_test = input_data
        noisy_xs_test = self.f_noise(input_data)
        reconstructed = self.run(noisy_xs_test)
        # Stack rows of originals, noisy inputs, and reconstructions for tiling
        width, height = closest_to_square_factors(n_examples)
        stacked = numpy.vstack([
            numpy.vstack([
                xs_test[i * width:(i + 1) * width],
                noisy_xs_test[i * width:(i + 1) * width],
                reconstructed[i * width:(i + 1) * width]
            ]) for i in range(height)
        ])
        number_reconstruction = PIL.Image.fromarray(
            tile_raster_images(stacked, (self.image_height, self.image_width),
                               (height, 3 * width)))

        save_path = os.path.join(self.outdir, 'gsn_reconstruction.png')
        save_path = os.path.realpath(save_path)
        number_reconstruction.save(save_path)
        log.info("saved output image to %s", save_path)
Example 2
    def create_reconstruction_image(self, input_data):
        """
        Adds noise to an input and saves an image from the reconstruction running the input through the computation
        graph.
        """
        n_examples = len(input_data)
        xs_test = input_data
        noisy_xs_test = self.f_noise(input_data)
        reconstructed = self.run(noisy_xs_test)
        # Stack rows of originals, noisy inputs, and reconstructions for tiling
        width, height = closest_to_square_factors(n_examples)
        stacked = numpy.vstack(
            [numpy.vstack([xs_test[i * width: (i + 1) * width],
                           noisy_xs_test[i * width: (i + 1) * width],
                           reconstructed[i * width: (i + 1) * width]])
             for i in range(height)])
        number_reconstruction = PIL.Image.fromarray(
            tile_raster_images(stacked, (self.image_height, self.image_width), (height, 3*width))
        )

        save_path = os.path.join(self.outdir, 'gsn_reconstruction.png')
        save_path = os.path.realpath(save_path)
        number_reconstruction.save(save_path)
        log.info("saved output image to %s", save_path)
Example 3
def run_sequence(sequence=0):
    log.info("Creating RNN-GSN for sequence %d!" % sequence)

    # grab the MNIST dataset
    mnist = MNIST(sequence_number=sequence, concat_train_valid=True)
    outdir = "outputs/rnngsn/mnist_%d/" % sequence

    rng = numpy.random.RandomState(1234)
    mrg = RandomStreams(rng.randint(2**30))
    rnngsn = RNN_GSN(layers=2,
                     walkbacks=4,
                     input_size=28 * 28,
                     hidden_size=1000,
                     tied_weights=True,
                     rnn_hidden_size=100,
                     weights_init='uniform',
                     weights_interval='montreal',
                     rnn_weights_init='identity',
                     mrg=mrg,
                     outdir=outdir)
    # load pretrained rbm on mnist
    # rnngsn.load_gsn_params('outputs/trained_gsn_epoch_1000.pkl')
    # make an optimizer to train it (AdaDelta is a good default)
    optimizer = AdaDelta(model=rnngsn,
                         dataset=mnist,
                         n_epoch=200,
                         batch_size=100,
                         minimum_batch_size=2,
                         learning_rate=1e-6,
                         save_frequency=1,
                         early_stop_length=200)
    # optimizer = SGD(model=rnngsn,
    #                 dataset=mnist,
    #                 n_epoch=300,
    #                 batch_size=100,
    #                 minimum_batch_size=2,
    #                 learning_rate=.25,
    #                 lr_decay='exponential',
    #                 lr_factor=.995,
    #                 momentum=0.5,
    #                 nesterov_momentum=True,
    #                 momentum_decay=False,
    #                 save_frequency=20,
    #                 early_stop_length=100)

    crossentropy = Monitor('crossentropy',
                           rnngsn.get_monitors()['noisy_recon_cost'],
                           test=True)
    error = Monitor('error', rnngsn.get_monitors()['mse'], test=True)

    # perform training!
    optimizer.train(monitor_channels=[crossentropy, error])
    # use the generate function!
    log.debug("generating images...")
    generated, ut = rnngsn.generate(initial=None, n_steps=400)

    # Construct image
    image = Image.fromarray(
        tile_raster_images(X=generated,
                           img_shape=(28, 28),
                           tile_shape=(20, 20),
                           tile_spacing=(1, 1)))
    image.save(outdir + "rnngsn_mnist_generated.png")
    log.debug('saved rnngsn_mnist_generated.png')

    # Construct image from the weight matrix
    image = Image.fromarray(
        tile_raster_images(X=rnngsn.weights_list[0].get_value(borrow=True).T,
                           img_shape=(28, 28),
                           tile_shape=closest_to_square_factors(
                               rnngsn.hidden_size),
                           tile_spacing=(1, 1)))
    image.save(outdir + "rnngsn_mnist_weights.png")

    log.debug("done!")

    del mnist
    del rnngsn
    del optimizer
Example 4
def run_sequence(sequence=0):
    log.info("Creating RNN-GSN for sequence %d!" % sequence)

    # grab the MNIST dataset
    mnist = MNIST(sequence_number=sequence, concat_train_valid=True)
    outdir = "outputs/rnngsn/mnist_%d/" % sequence

    rng = numpy.random.RandomState(1234)
    mrg = RandomStreams(rng.randint(2 ** 30))
    rnngsn = RNN_GSN(layers=2,
                     walkbacks=4,
                     input_size=28 * 28,
                     hidden_size=1000,
                     tied_weights=True,
                     rnn_hidden_size=100,
                     weights_init='uniform',
                     weights_interval='montreal',
                     rnn_weights_init='identity',
                     mrg=mrg,
                     outdir=outdir)
    # load pretrained rbm on mnist
    # rnngsn.load_gsn_params('outputs/trained_gsn_epoch_1000.pkl')
    # make an optimizer to train it (AdaDelta is a good default)
    optimizer = AdaDelta(model=rnngsn,
                         dataset=mnist,
                         n_epoch=200,
                         batch_size=100,
                         minimum_batch_size=2,
                         learning_rate=1e-6,
                         save_frequency=1,
                         early_stop_length=200)
    # optimizer = SGD(model=rnngsn,
    #                 dataset=mnist,
    #                 n_epoch=300,
    #                 batch_size=100,
    #                 minimum_batch_size=2,
    #                 learning_rate=.25,
    #                 lr_decay='exponential',
    #                 lr_factor=.995,
    #                 momentum=0.5,
    #                 nesterov_momentum=True,
    #                 momentum_decay=False,
    #                 save_frequency=20,
    #                 early_stop_length=100)

    crossentropy = Monitor('crossentropy', rnngsn.get_monitors()['noisy_recon_cost'], test=True)
    error = Monitor('error', rnngsn.get_monitors()['mse'], test=True)

    # perform training!
    optimizer.train(monitor_channels=[crossentropy, error])
    # use the generate function!
    log.debug("generating images...")
    generated, ut = rnngsn.generate(initial=None, n_steps=400)



    # Construct image
    image = Image.fromarray(
        tile_raster_images(
            X=generated,
            img_shape=(28, 28),
            tile_shape=(20, 20),
            tile_spacing=(1, 1)
        )
    )
    image.save(outdir + "rnngsn_mnist_generated.png")
    log.debug('saved rnngsn_mnist_generated.png')

    # Construct image from the weight matrix
    image = Image.fromarray(
        tile_raster_images(
            X=rnngsn.weights_list[0].get_value(borrow=True).T,
            img_shape=(28, 28),
            tile_shape=closest_to_square_factors(rnngsn.hidden_size),
            tile_spacing=(1, 1)
        )
    )
    image.save(outdir + "rnngsn_mnist_weights.png")

    log.debug("done!")

    del mnist
    del rnngsn
    del optimizer
Example 5
    def __init__(self, inputs_hook=None, hiddens_hook=None, params_hook=None, outdir='outputs/gsn/',
                 input_size=None, hidden_size=1000,
                 layers=2, walkbacks=4,
                 visible_activation='sigmoid', hidden_activation='tanh',
                 input_sampling=True, mrg=RNG_MRG.MRG_RandomStreams(1),
                 tied_weights=True,
                 weights_init='uniform', weights_interval='montreal', weights_mean=0, weights_std=5e-3,
                 bias_init=0.0,
                 cost_function='binary_crossentropy', cost_args=None,
                 add_noise=True, noiseless_h1=True,
                 hidden_noise='gaussian', hidden_noise_level=2, input_noise='salt_and_pepper', input_noise_level=0.4,
                 noise_decay='exponential', noise_annealing=1,
                 image_width=None, image_height=None,
                 **kwargs):
        """
        Initialize a GSN.

        Parameters
        ----------
        inputs_hook : Tuple of (shape, variable)
            Routing information for the model to accept inputs from elsewhere. This is used for linking
            different models together (e.g. setting the Softmax model's input layer to the DAE's hidden layer gives a
            newly supervised classification model). For now, it needs to include the shape information (normally the
            dimensionality of the input i.e. n_in).
        hiddens_hook : Tuple of (shape, variable)
            Routing information for the model to accept its hidden representation from elsewhere.
            This is used for linking different models together (e.g. setting the DAE model's hidden layers to the RNN's
            output layer gives a generative recurrent model.) For now, it needs to include the shape
            information (normally the dimensionality of the hiddens i.e. n_hidden).
        params_hook : List(theano shared variable)
            A list of model parameters (shared theano variables) that you should use when constructing
            this model (instead of initializing your own shared variables). This parameter is useful when you want to
            have two versions of the model that use the same parameters - such as a training model with dropout applied
            to layers and one without for testing, where the parameters are shared between the two.
        outdir : str
            The directory you want outputs (parameters, images, etc.) to save to. If None, nothing will
            be saved.
        input_size : int
            The size (dimensionality) of the input to the DAE. If shape is provided in `inputs_hook`, this is optional.
            The :class:`Model` requires an `output_size`, which gets set to this value because the DAE is an
            unsupervised model. The output is a reconstruction of the input.
        hidden_size : int
            The size (dimensionality) of the hidden layer for the DAE. Generally, you want it to be larger than
            `input_size`, which is known as *overcomplete*.
        visible_activation : str or callable
            The nonlinear (or linear) visible activation to perform after the dot product from hiddens -> visible layer.
            This activation function should be appropriate for the input unit types, i.e. 'sigmoid' for binary inputs.
            See opendeep.utils.activation for a list of available activation functions. Alternatively, you can pass
            your own function to be used as long as it is callable.
        hidden_activation : str or callable
            The nonlinear (or linear) hidden activation to perform after the dot product from visible -> hiddens layer.
            See opendeep.utils.activation for a list of available activation functions. Alternatively, you can pass
            your own function to be used as long as it is callable.
        layers : int
            The number of hidden layers to use.
        walkbacks : int
            The number of walkbacks to perform (the variable K in Bengio's paper above). A walkback is a Gibbs sample
            from the DAE, which means the model generates inputs in sequence, where each generated input is compared
            to the original input to create the reconstruction cost for training. For running the model, the very last
            generated input in the Gibbs chain is used as the output.
        input_sampling : bool
            During walkbacks, whether to sample from the generated input to create a new starting point for the next
            walkback (next step in the Gibbs chain). This generally makes walkbacks more effective by making the
            process more stochastic - more likely to find spurious modes in the model's representation.
        mrg : random
            A random number generator that is used when adding noise into the network and for sampling from the input.
            I recommend using Theano's sandbox.rng_mrg.MRG_RandomStreams.
        tied_weights : bool
            DAE has two weight matrices - W from input -> hiddens and V from hiddens -> input. This boolean
            determines if V = W.T, which 'ties' V to W and reduces the number of parameters necessary during training.
        weights_init : str
            Determines the method for initializing model weights. See opendeep.utils.nnet for options.
        weights_interval : str or float
            If Uniform `weights_init`, the +- interval to use. See opendeep.utils.nnet for options.
        weights_mean : float
            If Gaussian `weights_init`, the mean value to use.
        weights_std : float
            If Gaussian `weights_init`, the standard deviation to use.
        bias_init : float
            The initial value to use for the bias parameter. Most often, the default of 0.0 is preferred.
        cost_function : str or callable
            The function to use when calculating the reconstruction cost of the model. This should be appropriate
            for the type of input, i.e. use 'binary_crossentropy' for binary inputs, or 'mse' for real-valued inputs.
            See opendeep.utils.cost for options. You can also specify your own function, which needs to be callable.
        cost_args : dict
            Any additional named keyword arguments to pass to the specified `cost_function`.
        add_noise : bool
            Whether to add noise to (corrupt) the input before passing it through the computation graph during training.
            This should most likely be set to the default of True, because this is a *denoising* autoencoder after all.
        noiseless_h1 : bool
            Whether to keep the first hidden layer noiseless (i.e. not corrupt it) during computation.
        hidden_noise : str
            What type of noise to use for corrupting the hidden layer (if not `noiseless_h1`). See opendeep.utils.noise
            for options. This should be appropriate for the hidden unit activation, i.e. Gaussian for tanh or other
            real-valued activations, etc.
        hidden_noise_level : float
            The amount of noise to use for the noise function specified by `hidden_noise`. This could be the
            standard deviation for gaussian noise, the interval for uniform noise, the dropout amount, etc.
        input_noise : str
            What type of noise to use for corrupting the input before computation (if `add_noise`).
            See opendeep.utils.noise for options. This should be appropriate for the input units, i.e. salt-and-pepper
            for binary units, etc.
        input_noise_level : float
            The amount of noise used to corrupt the input. This could be the masking probability for salt-and-pepper,
            standard deviation for Gaussian, interval for Uniform, etc.
        noise_decay : str or False
            Whether to use `input_noise` scheduling (decay `input_noise_level` during the course of training),
            and if so, the string input specifies what type of decay to use. See opendeep.utils.decay for options.
            Noise decay (known as noise scheduling) effectively helps the DAE learn larger variance features first,
            and then smaller ones later (almost as a kind of curriculum learning). May help it converge faster.
        noise_annealing : float
            The amount to reduce the `input_noise_level` after each training epoch based on the decay function specified
            in `noise_decay`.
        image_width : int
            If the input should be represented as an image, the width of the input image. If not specified, it will be
            close to the square factor of the `input_size`.
        image_height : int
            If the input should be represented as an image, the height of the input image. If not specified, it will be
            close to the square factor of the `input_size`.
        """
        # init Model to combine the defaults and config dictionaries with the initial parameters.
        initial_parameters = locals().copy()
        initial_parameters.pop('self')
        super(GSN, self).__init__(**initial_parameters)

        # when the input should be thought of as an image, either use the specified width and height,
        # or try to make as square as possible.
        if image_height is None and image_width is None:
            (_h, _w) = closest_to_square_factors(self.input_size)
            self.image_width  = _w
            self.image_height = _h
        else:
            self.image_height = image_height
            self.image_width = image_width

        ############################
        # Theano variables and RNG #
        ############################
        if self.inputs_hook is None:
            self.X = T.matrix('X')
        else:
            # inputs_hook is a (shape, input) tuple
            self.X = self.inputs_hook[1]
        
        ##########################
        # Network specifications #
        ##########################
        # generally, walkbacks should be at least 2*layers
        if layers % 2 == 0:
            if walkbacks < 2*layers:
                log.warning('Not enough walkbacks for the layers! Layers is %s and walkbacks is %s. '
                            'Generally want 2x walkbacks to layers',
                            str(layers), str(walkbacks))
        else:
            if walkbacks < 2*layers-1:
                log.warning('Not enough walkbacks for the layers! Layers is %s and walkbacks is %s. '
                            'Generally want 2x walkbacks to layers',
                            str(layers), str(walkbacks))

        self.add_noise = add_noise
        self.noise_annealing = as_floatX(noise_annealing)  # noise schedule parameter
        self.hidden_noise_level = sharedX(hidden_noise_level, dtype=theano.config.floatX)
        self.hidden_noise = get_noise(name=hidden_noise, noise_level=self.hidden_noise_level, mrg=mrg)
        self.input_noise_level = sharedX(input_noise_level, dtype=theano.config.floatX)
        self.input_noise = get_noise(name=input_noise, noise_level=self.input_noise_level, mrg=mrg)

        self.walkbacks = walkbacks
        self.tied_weights = tied_weights
        self.layers = layers
        self.noiseless_h1 = noiseless_h1
        self.input_sampling = input_sampling
        self.noise_decay = noise_decay

        # if there was a hiddens_hook, unpack the hidden layers in the tensor
        if self.hiddens_hook is not None:
            hidden_size = self.hiddens_hook[0]
            self.hiddens_flag = True
        else:
            self.hiddens_flag = False

        # determine the sizes of each layer in a list.
        #  layer sizes, from h0 to hK (h0 is the visible layer)
        hidden_size = list(raise_to_list(hidden_size))
        if len(hidden_size) == 1:
            self.layer_sizes = [self.input_size] + hidden_size * self.layers
        else:
            assert len(hidden_size) == self.layers, "Hidden sizes and number of hidden layers mismatch. " + \
                                                    "Hiddens %d and layers %d" % (len(hidden_size), self.layers)
            self.layer_sizes = [self.input_size] + hidden_size

        if self.hiddens_hook is not None:
            self.hiddens = self.unpack_hiddens(self.hiddens_hook[1])

        #########################
        # Activation functions! #
        #########################
        # hidden unit activation
        self.hidden_activation = get_activation_function(hidden_activation)
        # Visible layer activation
        self.visible_activation = get_activation_function(visible_activation)
        # make sure the sampling functions are appropriate for the activation functions.
        if is_binary(self.visible_activation):
            self.visible_sampling = mrg.binomial
        else:
            # TODO: implement non-binary activation
            log.error("Non-binary visible activation not supported yet!")
            raise NotImplementedError("Non-binary visible activation not supported yet!")

        # Cost function
        self.cost_function = get_cost_function(cost_function)
        self.cost_args = cost_args or dict()

        ###############
        # Parameters! #
        ###############
        # make sure to deal with params_hook!
        if self.params_hook is not None:
            # if tied weights, expect layers*2 + 1 params
            if self.tied_weights:
                assert len(self.params_hook) == 2*layers + 1, \
                    "Tied weights: expected {0!s} params, found {1!s}!".format(2*layers+1, len(self.params_hook))
                self.weights_list = self.params_hook[:layers]
                self.bias_list = self.params_hook[layers:]
            # if untied weights, expect layers*3 + 1 params
            else:
                assert len(self.params_hook) == 3*layers + 1, \
                    "Untied weights: expected {0!s} params, found {1!s}!".format(3*layers+1, len(self.params_hook))
                self.weights_list = self.params_hook[:2*layers]
                self.bias_list = self.params_hook[2*layers:]
        # otherwise, construct our params
        else:
            # initialize a list of weights and biases based on layer_sizes for the GSN
            self.weights_list = [get_weights(weights_init=weights_init,
                                             shape=(self.layer_sizes[i], self.layer_sizes[i+1]),
                                             name="W_{0!s}_{1!s}".format(i, i+1),
                                             rng=mrg,
                                             # if gaussian
                                             mean=weights_mean,
                                             std=weights_std,
                                             # if uniform
                                             interval=weights_interval)
                                 for i in range(layers)]
            # add more weights if we aren't tying weights between layers (need to add for higher-lower layers now)
            if not tied_weights:
                self.weights_list.extend(
                    [get_weights(weights_init=weights_init,
                                 shape=(self.layer_sizes[i+1], self.layer_sizes[i]),
                                 name="W_{0!s}_{1!s}".format(i+1, i),
                                 rng=mrg,
                                 # if gaussian
                                 mean=weights_mean,
                                 std=weights_std,
                                 # if uniform
                                 interval=weights_interval)
                     for i in reversed(range(layers))]
                )
            # initialize each layer bias to 0's.
            self.bias_list = [get_bias(shape=(self.layer_sizes[i],),
                                       name='b_' + str(i),
                                       init_values=bias_init)
                              for i in range(layers+1)]

        # build the params of the model into a list
        self.params = self.weights_list + self.bias_list
        log.debug("gsn params: %s", str(self.params))

        # using the properties, build the computational graph
        self.cost, self.monitors, self.output, self.hiddens = self.build_computation_graph()
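
The constructor above documents the main GSN hyperparameters (layer count, walkbacks, noise types and levels, tied weights, and so on). A minimal usage sketch under stated assumptions follows: the import locations for GSN, MNIST, and AdaDelta are never shown in this listing, so they are left as commented placeholders, the keyword arguments come only from the signature above or from the surrounding examples, and the create_reconstruction_image call assumes the method from Example 1 belongs to this class.

# Assumed imports -- the surrounding examples use these names but never show their modules:
# from opendeep... import GSN, MNIST, AdaDelta

mnist = MNIST(sequence_number=0, concat_train_valid=True)   # 28x28 inputs suit the sigmoid visible units
gsn = GSN(input_size=28 * 28,
          hidden_size=1000,
          layers=2,
          walkbacks=4,                      # at least 2 * layers, per the warning in __init__
          visible_activation='sigmoid',
          hidden_activation='tanh',
          input_noise='salt_and_pepper',
          input_noise_level=0.4,
          outdir='outputs/gsn/')
optimizer = AdaDelta(model=gsn, dataset=mnist, batch_size=100, learning_rate=1e-6)
optimizer.train()
# Visualize reconstructions the same way Example 1 does:
gsn.create_reconstruction_image(mnist.test_inputs[:25])
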
Example 6
def run_midi(dataset):
    log.info("Creating RNN-RBM for dataset %s!", dataset)

    outdir = "outputs/rnnrbm/%s/" % dataset

    # grab the MIDI dataset
    if dataset == 'nottingham':
        midi = Nottingham()
    elif dataset == 'jsb':
        midi = JSBChorales()
    elif dataset == 'muse':
        midi = MuseData()
    elif dataset == 'piano_de':
        midi = PianoMidiDe()
    else:
        raise AssertionError("dataset %s not recognized." % dataset)

    # create the RNN-RBM
    # rng = numpy.random
    # rng.seed(0xbeef)
    # mrg = RandomStreams(seed=rng.randint(1 << 30))
    rng = numpy.random.RandomState(1234)
    mrg = RandomStreams(rng.randint(2 ** 30))
    # rnnrbm = RNN_RBM(input_size=88,
    #                  hidden_size=150,
    #                  rnn_hidden_size=100,
    #                  k=15,
    #                  weights_init='gaussian',
    #                  weights_std=0.01,
    #                  rnn_weights_init='gaussian',
    #                  rnn_weights_std=0.0001,
    #                  rng=rng,
    #                  outdir=outdir)
    rnnrbm = RNN_RBM(input_size=88,
                     hidden_size=150,
                     rnn_hidden_size=100,
                     k=15,
                     weights_init='gaussian',
                     weights_std=0.01,
                     rnn_weights_init='identity',
                     rnn_hidden_activation='relu',
                     # rnn_weights_init='gaussian',
                     # rnn_hidden_activation='tanh',
                     rnn_weights_std=0.0001,
                     mrg=mrg,
                     outdir=outdir)

    # make an optimizer to train it
    optimizer = SGD(model=rnnrbm,
                    dataset=midi,
                    n_epoch=200,
                    batch_size=100,
                    minimum_batch_size=2,
                    learning_rate=.001,
                    save_frequency=10,
                    early_stop_length=200,
                    momentum=False,
                    momentum_decay=False,
                    nesterov_momentum=False)

    optimizer = AdaDelta(model=rnnrbm,
                         dataset=midi,
                         n_epoch=200,
                         batch_size=100,
                         minimum_batch_size=2,
                         # learning_rate=1e-4,
                         learning_rate=1e-6,
                         save_frequency=10,
                         early_stop_length=200)

    ll = Monitor('pseudo-log', rnnrbm.get_monitors()['pseudo-log'], test=True)
    mse = Monitor('frame-error', rnnrbm.get_monitors()['mse'], valid=True, test=True)

    plot = Plot(bokeh_doc_name='rnnrbm_midi_%s' % dataset, monitor_channels=[ll, mse], open_browser=True)

    # perform training!
    optimizer.train(plot=plot)
    # use the generate function!
    generated, _ = rnnrbm.generate(initial=None, n_steps=200)

    dt = 0.3
    r = (21, 109)
    midiwrite(outdir + 'rnnrbm_generated_midi.mid', generated, r=r, dt=dt)

    if has_pylab:
        extent = (0, dt * len(generated)) + r
        pylab.figure()
        pylab.imshow(generated.T, origin='lower', aspect='auto',
                     interpolation='nearest', cmap=pylab.cm.gray_r,
                     extent=extent)
        pylab.xlabel('time (s)')
        pylab.ylabel('MIDI note number')
        pylab.title('generated piano-roll')

    # Construct image from the weight matrix
    image = Image.fromarray(
        tile_raster_images(
            X=rnnrbm.W.get_value(borrow=True).T,
            img_shape=closest_to_square_factors(rnnrbm.input_size),
            tile_shape=closest_to_square_factors(rnnrbm.hidden_size),
            tile_spacing=(1, 1)
        )
    )
    image.save(outdir + 'rnnrbm_midi_weights.png')

    log.debug("done!")
    del midi
    del rnnrbm
    del optimizer
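
run_midi hands the sampled piano-roll to midiwrite with a note range r = (21, 109) and a time step dt = 0.3 s. Assuming the usual piano-roll convention behind these polyphonic-music datasets (column j maps to MIDI pitch r[0] + j and each row lasts dt seconds, which is what the 88-column input size suggests), a small numpy sketch that decodes such a binary roll into (pitch, onset, duration) events could look like this; pianoroll_to_notes is purely illustrative and not part of the library.

import numpy


def pianoroll_to_notes(roll, r=(21, 109), dt=0.3):
    # Convert a binary (n_steps, r[1]-r[0]) piano-roll into (pitch, onset_s, duration_s) events,
    # assuming column j is MIDI pitch r[0] + j and each row lasts dt seconds.
    notes = []
    n_steps, n_pitches = roll.shape
    for j in range(n_pitches):
        column = roll[:, j].astype(bool)
        # pad so onsets/offsets at the boundaries are detected too
        padded = numpy.concatenate(([False], column, [False]))
        changes = numpy.flatnonzero(padded[1:] != padded[:-1])
        onsets, offsets = changes[::2], changes[1::2]
        for on, off in zip(onsets, offsets):
            notes.append((r[0] + j, on * dt, (off - on) * dt))
    return notes


# Example: a tiny 4-step roll with one sustained note and one short note.
roll = numpy.zeros((4, 88), dtype=int)
roll[0:3, 39] = 1   # pitch 21 + 39 = 60 (middle C) held for three steps
roll[2, 43] = 1     # pitch 64 for a single step
print(pianoroll_to_notes(roll))   # -> [(60, 0.0, ~0.9), (64, ~0.6, ~0.3)]
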
Example 7
            tile_spacing=(1, 1)
        )
    )
    image.save('rbm_test.png')

    # Construct image from the preds matrix
    image = Image.fromarray(
        tile_raster_images(
            X=preds,
            img_shape=(28, 28),
            tile_shape=(5, 5),
            tile_spacing=(1, 1)
        )
    )
    image.save('rbm_preds.png')

    # Construct image from the weight matrix
    image = Image.fromarray(
        tile_raster_images(
            X=rbm.W.get_value(borrow=True).T,
            img_shape=(28, 28),
            tile_shape=closest_to_square_factors(rbm.hidden_size),
            tile_spacing=(1, 1)
        )
    )
    image.save('rbm_weights.png')


    del mnist
    del rbm
    del optimizer
Example 8
    # perform training!
    optimizer.train(monitor_channels=ll)
    # test it on some images!
    test_data = mnist.test_inputs[:25]
    # use the run function!
    preds = rbm.run(test_data)

    # Construct image from the test matrix
    image = Image.fromarray(tile_raster_images(X=test_data, img_shape=(28, 28), tile_shape=(5, 5), tile_spacing=(1, 1)))
    image.save("rbm_test.png")

    # Construct image from the preds matrix
    image = Image.fromarray(tile_raster_images(X=preds, img_shape=(28, 28), tile_shape=(5, 5), tile_spacing=(1, 1)))
    image.save("rbm_preds.png")

    # Construct image from the weight matrix
    image = Image.fromarray(
        tile_raster_images(
            X=rbm.W.get_value(borrow=True).T,
            img_shape=(28, 28),
            tile_shape=closest_to_square_factors(config_args["hidden_size"]),
            tile_spacing=(1, 1),
        )
    )
    image.save("rbm_weights.png")

    del mnist
    del rbm
    del optimizer
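
The save calls above rely on tile_raster_images to lay the 28x28 digits out on a spaced grid. The short sketch below is a simplified grayscale-only stand-in (the real opendeep / deeplearning.net helper also rescales each row and supports RGBA channel tuples, details assumed here) that shows where the (5, 5) tile_shape and (1, 1) tile_spacing end up in the output image size.

import numpy


def tile_grayscale_sketch(X, img_shape=(28, 28), tile_shape=(5, 5), tile_spacing=(1, 1)):
    # Simplified stand-in for tile_raster_images: place each row of X as one grayscale tile.
    H, W = img_shape
    rows, cols = tile_shape
    sh, sw = tile_spacing
    canvas = numpy.zeros((rows * H + (rows - 1) * sh, cols * W + (cols - 1) * sw))
    for idx in range(min(len(X), rows * cols)):
        r, c = divmod(idx, cols)
        canvas[r * (H + sh):r * (H + sh) + H, c * (W + sw):c * (W + sw) + W] = X[idx].reshape(H, W)
    return canvas


canvas = tile_grayscale_sketch(numpy.random.rand(25, 28 * 28))
print(canvas.shape)   # (144, 144): five 28-pixel tiles plus four 1-pixel gaps per side
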
Example 9
            tile_spacing=(1, 1)
        )
    )
    image.save('rbm_test.png')

    # Construct image from the preds matrix
    image = Image.fromarray(
        tile_raster_images(
            X=preds,
            img_shape=(28, 28),
            tile_shape=(5, 5),
            tile_spacing=(1, 1)
        )
    )
    image.save('rbm_preds.png')

    # Construct image from the weight matrix
    image = Image.fromarray(
        tile_raster_images(
            X=rbm.W.get_value(borrow=True).T,
            img_shape=(28, 28),
            tile_shape=closest_to_square_factors(config_args['hiddens']),
            tile_spacing=(1, 1)
        )
    )
    image.save('rbm_weights.png')

    del mnist
    del rbm
    del optimizer
Example 10
def run_midi(dataset):
    log.info("Creating RNN-RBM for dataset %s!", dataset)

    outdir = "outputs/rnnrbm/%s/" % dataset

    # grab the MIDI dataset
    if dataset == 'nottingham':
        midi = Nottingham()
    elif dataset == 'jsb':
        midi = JSBChorales()
    elif dataset == 'muse':
        midi = MuseData()
    elif dataset == 'piano_de':
        midi = PianoMidiDe()
    else:
        raise AssertionError("dataset %s not recognized." % dataset)

    # create the RNN-RBM
    # rng = numpy.random
    # rng.seed(0xbeef)
    # mrg = RandomStreams(seed=rng.randint(1 << 30))
    rng = numpy.random.RandomState(1234)
    mrg = RandomStreams(rng.randint(2**30))
    # rnnrbm = RNN_RBM(input_size=88,
    #                  hidden_size=150,
    #                  rnn_hidden_size=100,
    #                  k=15,
    #                  weights_init='gaussian',
    #                  weights_std=0.01,
    #                  rnn_weights_init='gaussian',
    #                  rnn_weights_std=0.0001,
    #                  rng=rng,
    #                  outdir=outdir)
    rnnrbm = RNN_RBM(
        input_size=88,
        hidden_size=150,
        rnn_hidden_size=100,
        k=15,
        weights_init='gaussian',
        weights_std=0.01,
        rnn_weights_init='identity',
        rnn_hidden_activation='relu',
        # rnn_weights_init='gaussian',
        # rnn_hidden_activation='tanh',
        rnn_weights_std=0.0001,
        mrg=mrg,
        outdir=outdir)

    # make an optimizer to train it
    optimizer = SGD(model=rnnrbm,
                    dataset=midi,
                    epochs=200,
                    batch_size=100,
                    min_batch_size=2,
                    learning_rate=.001,
                    save_freq=10,
                    stop_patience=200,
                    momentum=False,
                    momentum_decay=False,
                    nesterov_momentum=False)

    optimizer = AdaDelta(
        model=rnnrbm,
        dataset=midi,
        epochs=200,
        batch_size=100,
        min_batch_size=2,
        # learning_rate=1e-4,
        learning_rate=1e-6,
        save_freq=10,
        stop_patience=200)

    ll = Monitor('pseudo-log', rnnrbm.get_monitors()['pseudo-log'], test=True)
    mse = Monitor('frame-error',
                  rnnrbm.get_monitors()['mse'],
                  valid=True,
                  test=True)

    plot = Plot(bokeh_doc_name='rnnrbm_midi_%s' % dataset,
                monitor_channels=[ll, mse],
                open_browser=True)

    # perform training!
    optimizer.train(plot=plot)
    # use the generate function!
    generated, _ = rnnrbm.generate(initial=None, n_steps=200)

    dt = 0.3
    r = (21, 109)
    midiwrite(outdir + 'rnnrbm_generated_midi.mid', generated, r=r, dt=dt)

    if has_pylab:
        extent = (0, dt * len(generated)) + r
        pylab.figure()
        pylab.imshow(generated.T,
                     origin='lower',
                     aspect='auto',
                     interpolation='nearest',
                     cmap=pylab.cm.gray_r,
                     extent=extent)
        pylab.xlabel('time (s)')
        pylab.ylabel('MIDI note number')
        pylab.title('generated piano-roll')

    # Construct image from the weight matrix
    image = Image.fromarray(
        tile_raster_images(
            X=rnnrbm.W.get_value(borrow=True).T,
            img_shape=closest_to_square_factors(rnnrbm.input_size),
            tile_shape=closest_to_square_factors(rnnrbm.hidden_size),
            tile_spacing=(1, 1)))
    image.save(outdir + 'rnnrbm_midi_weights.png')

    log.debug("done!")
    del midi
    del rnnrbm
    del optimizer
Example 11
def run_sequence(sequence=0):
    log.info("Creating RNN-RBM for sequence %d!" % sequence)

    # grab the MNIST dataset
    mnist = MNIST(sequence_number=sequence, concat_train_valid=True)
    outdir = "outputs/rnnrbm/mnist_%d/" % sequence
    # create the RNN-RBM
    rng = numpy.random.RandomState(1234)
    mrg = RandomStreams(rng.randint(2 ** 30))
    rnnrbm = RNN_RBM(input_size=28 * 28,
                     hidden_size=1000,
                     rnn_hidden_size=100,
                     k=15,
                     weights_init='uniform',
                     weights_interval=4 * numpy.sqrt(6. / (28 * 28 + 500)),
                     rnn_weights_init='identity',
                     rnn_hidden_activation='relu',
                     rnn_weights_std=1e-4,
                     mrg=mrg,
                     outdir=outdir)
    # load pretrained rbm on mnist
    # rnnrbm.load_params(outdir + 'trained_epoch_200.pkl')
    # make an optimizer to train it (AdaDelta is a good default)
    optimizer = AdaDelta(model=rnnrbm,
                         dataset=mnist,
                         n_epoch=200,
                         batch_size=100,
                         minimum_batch_size=2,
                         learning_rate=1e-8,
                         save_frequency=10,
                         early_stop_length=200)

    crossentropy = Monitor('crossentropy', rnnrbm.get_monitors()['crossentropy'], test=True)
    error = Monitor('error', rnnrbm.get_monitors()['mse'], test=True)
    plot = Plot(bokeh_doc_name='rnnrbm_mnist_%d' % sequence, monitor_channels=[crossentropy, error], open_browser=True)

    # perform training!
    optimizer.train(plot=plot)
    # use the generate function!
    log.debug("generating images...")
    generated, ut = rnnrbm.generate(initial=None, n_steps=400)



    # Construct image
    image = Image.fromarray(
        tile_raster_images(
            X=generated,
            img_shape=(28, 28),
            tile_shape=(20, 20),
            tile_spacing=(1, 1)
        )
    )
    image.save(outdir + "rnnrbm_mnist_generated.png")
    log.debug('saved rnnrbm_mnist_generated.png')

    # Construct image from the weight matrix
    image = Image.fromarray(
        tile_raster_images(
            X=rnnrbm.W.get_value(borrow=True).T,
            img_shape=(28, 28),
            tile_shape=closest_to_square_factors(rnnrbm.hidden_size),
            tile_spacing=(1, 1)
        )
    )
    image.save(outdir + "rnnrbm_mnist_weights.png")

    log.debug("done!")

    del mnist
    del rnnrbm
    del optimizer
Example 12
def run_sequence(sequence=0):
    log.info("Creating RNN-RBM for sequence %d!" % sequence)

    # grab the MNIST dataset
    mnist = MNIST(sequence_number=sequence, concat_train_valid=True)
    outdir = "outputs/rnnrbm/mnist_%d/" % sequence
    # create the RNN-RBM
    rng = numpy.random.RandomState(1234)
    mrg = RandomStreams(rng.randint(2**30))
    rnnrbm = RNN_RBM(input_size=28 * 28,
                     hidden_size=1000,
                     rnn_hidden_size=100,
                     k=15,
                     weights_init='uniform',
                     weights_interval=4 * numpy.sqrt(6. / (28 * 28 + 500)),
                     rnn_weights_init='identity',
                     rnn_hidden_activation='relu',
                     rnn_weights_std=1e-4,
                     mrg=mrg,
                     outdir=outdir)
    # load pretrained rbm on mnist
    # rnnrbm.load_params(outdir + 'trained_epoch_200.pkl')
    # make an optimizer to train it (AdaDelta is a good default)
    optimizer = AdaDelta(model=rnnrbm,
                         dataset=mnist,
                         n_epoch=200,
                         batch_size=100,
                         minimum_batch_size=2,
                         learning_rate=1e-8,
                         save_frequency=10,
                         early_stop_length=200)

    crossentropy = Monitor('crossentropy',
                           rnnrbm.get_monitors()['crossentropy'],
                           test=True)
    error = Monitor('error', rnnrbm.get_monitors()['mse'], test=True)
    plot = Plot(bokeh_doc_name='rnnrbm_mnist_%d' % sequence,
                monitor_channels=[crossentropy, error],
                open_browser=True)

    # perform training!
    optimizer.train(plot=plot)
    # use the generate function!
    log.debug("generating images...")
    generated, ut = rnnrbm.generate(initial=None, n_steps=400)

    # Construct image
    image = Image.fromarray(
        tile_raster_images(X=generated,
                           img_shape=(28, 28),
                           tile_shape=(20, 20),
                           tile_spacing=(1, 1)))
    image.save(outdir + "rnnrbm_mnist_generated.png")
    log.debug('saved rnnrbm_mnist_generated.png')

    # Construct image from the weight matrix
    image = Image.fromarray(
        tile_raster_images(X=rnnrbm.W.get_value(borrow=True).T,
                           img_shape=(28, 28),
                           tile_shape=closest_to_square_factors(
                               rnnrbm.hidden_size),
                           tile_spacing=(1, 1)))
    image.save(outdir + "rnnrbm_mnist_weights.png")

    log.debug("done!")

    del mnist
    del rnnrbm
    del optimizer
Example 13
    preds = rbm.run(test_data)

    # Construct image from the test matrix
    image = Image.fromarray(
        tile_raster_images(X=test_data,
                           img_shape=(28, 28),
                           tile_shape=(5, 5),
                           tile_spacing=(1, 1)))
    image.save('rbm_test.png')

    # Construct image from the preds matrix
    image = Image.fromarray(
        tile_raster_images(X=preds,
                           img_shape=(28, 28),
                           tile_shape=(5, 5),
                           tile_spacing=(1, 1)))
    image.save('rbm_preds.png')

    # Construct image from the weight matrix
    image = Image.fromarray(
        tile_raster_images(X=rbm.W.get_value(borrow=True).T,
                           img_shape=(28, 28),
                           tile_shape=closest_to_square_factors(
                               config_args['hidden_size']),
                           tile_spacing=(1, 1)))
    image.save('rbm_weights.png')

    del mnist
    del rbm
    del optimizer
Example 14
def run_audio(dataset):
    log.info("Creating RNN-GSN for dataset %s!", dataset)

    outdir = "outputs/rnngsn/%s/" % dataset

    # grab the dataset (keep the name around, since `dataset` is rebound to the dataset object below)
    dataset_name = dataset
    if dataset_name == 'tedlium':
        dataset = tedlium.TEDLIUMDataset(max_speeches=3)
        extra_args = {}
    elif dataset_name == 'codegolf':
        dataset = codegolf.CodeGolfDataset()
        extra_args = {}
    else:
        raise ValueError("dataset %s not recognized." % dataset_name)

    rng = numpy.random.RandomState(1234)
    mrg = RandomStreams(rng.randint(2 ** 30))
    assert dataset.window_size == 256, dataset.window_size
    
    rnngsn = RNN_GSN(layers=2,
                     walkbacks=4,
                     input_size=dataset.window_size,
                     image_height=1,
                     image_width=256,
                     hidden_size=128,
                     rnn_hidden_size=128,
                     weights_init='gaussian',
                     weights_std=0.01,
                     rnn_weights_init='identity',
                     rnn_hidden_activation='relu',
                     rnn_weights_std=0.0001,
                     mrg=mrg,
                     outdir=outdir, **extra_args)

    # make an optimizer to train it
    optimizer = AdaDelta(model=rnngsn,
                         dataset=dataset,
                         epochs=200,
                         batch_size=128,
                         min_batch_size=2,
                         learning_rate=1e-6,
                         save_freq=1,
                         stop_patience=100)

    ll = Monitor('crossentropy', rnngsn.get_monitors()['noisy_recon_cost'], test=True)
    mse = Monitor('frame-error', rnngsn.get_monitors()['mse'], train=True, test=True, valid=True)
    plot = Plot(bokeh_doc_name='rnngsn_tedlium_%s' % dataset_name,
                monitor_channels=[ll, mse],
                open_browser=True)

    # perform training!
    optimizer.train(plot=plot)
    
    # use the generate function!
    generated, _ = rnngsn.generate(initial=None, n_steps=200)

    # Construct image from the weight matrix
    image = Image.fromarray(
        tile_raster_images(
            X=rnngsn.weights_list[0].get_value(borrow=True).T,
            img_shape=closest_to_square_factors(rnngsn.input_size),
            tile_shape=closest_to_square_factors(rnngsn.hidden_size),
            tile_spacing=(1, 1)
        )
    )
    image.save(outdir + 'rnngsn_%s_weights.png' % (dataset_name,))

    log.debug("done!")
    del rnngsn
    del optimizer

    if has_pylab:
        pylab.show()
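
run_audio treats each 256-sample analysis window as a 1 x 256 "image" (image_height=1, image_width=256, input_size=dataset.window_size). As a purely illustrative sketch (the actual TEDLIUM/CodeGolf dataset classes handle this internally, and frame_signal below is not part of the library), this is roughly how a raw 1-D signal would be chopped into frames of that shape:

import numpy


def frame_signal(signal, window_size=256, hop=256):
    # Chop a 1-D signal into consecutive frames of window_size samples,
    # one 1 x 256 row per frame, matching the model's input_size above.
    n_frames = 1 + (len(signal) - window_size) // hop
    return numpy.stack([signal[i * hop:i * hop + window_size] for i in range(n_frames)])


signal = numpy.sin(numpy.linspace(0, 100, 10000))
frames = frame_signal(signal)
print(frames.shape)   # (39, 256) -> one row per window
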
Example 15
    def __init__(self,
                 inputs_hook=None,
                 hiddens_hook=None,
                 params_hook=None,
                 outdir='outputs/gsn/',
                 input_size=None,
                 hidden_size=1000,
                 layers=2,
                 walkbacks=4,
                 visible_activation='sigmoid',
                 hidden_activation='tanh',
                 input_sampling=True,
                 mrg=RNG_MRG.MRG_RandomStreams(1),
                 tied_weights=True,
                 weights_init='uniform',
                 weights_interval='montreal',
                 weights_mean=0,
                 weights_std=5e-3,
                 bias_init=0.0,
                 cost_function='binary_crossentropy',
                 cost_args=None,
                 add_noise=True,
                 noiseless_h1=True,
                 hidden_noise='gaussian',
                 hidden_noise_level=2,
                 input_noise='salt_and_pepper',
                 input_noise_level=0.4,
                 noise_decay='exponential',
                 noise_annealing=1,
                 image_width=None,
                 image_height=None,
                 **kwargs):
        """
        Initialize a GSN.

        Parameters
        ----------
        inputs_hook : Tuple of (shape, variable)
            Routing information for the model to accept inputs from elsewhere. This is used for linking
            different models together (e.g. setting the Softmax model's input layer to the DAE's hidden layer gives a
            newly supervised classification model). For now, it needs to include the shape information (normally the
            dimensionality of the input i.e. n_in).
        hiddens_hook : Tuple of (shape, variable)
            Routing information for the model to accept its hidden representation from elsewhere.
            This is used for linking different models together (e.g. setting the DAE model's hidden layers to the RNN's
            output layer gives a generative recurrent model.) For now, it needs to include the shape
            information (normally the dimensionality of the hiddens i.e. n_hidden).
        params_hook : List(theano shared variable)
            A list of model parameters (shared theano variables) that you should use when constructing
            this model (instead of initializing your own shared variables). This parameter is useful when you want to
            have two versions of the model that use the same parameters - such as a training model with dropout applied
            to layers and one without for testing, where the parameters are shared between the two.
        outdir : str
            The directory you want outputs (parameters, images, etc.) to save to. If None, nothing will
            be saved.
        input_size : int
            The size (dimensionality) of the input to the DAE. If shape is provided in `inputs_hook`, this is optional.
            The :class:`Model` requires an `output_size`, which gets set to this value because the DAE is an
            unsupervised model. The output is a reconstruction of the input.
        hidden_size : int
            The size (dimensionality) of the hidden layer for the DAE. Generally, you want it to be larger than
            `input_size`, which is known as *overcomplete*.
        visible_activation : str or callable
            The nonlinear (or linear) visible activation to perform after the dot product from hiddens -> visible layer.
            This activation function should be appropriate for the input unit types, i.e. 'sigmoid' for binary inputs.
            See opendeep.utils.activation for a list of available activation functions. Alternatively, you can pass
            your own function to be used as long as it is callable.
        hidden_activation : str or callable
            The nonlinear (or linear) hidden activation to perform after the dot product from visible -> hiddens layer.
            See opendeep.utils.activation for a list of available activation functions. Alternatively, you can pass
            your own function to be used as long as it is callable.
        layers : int
            The number of hidden layers to use.
        walkbacks : int
            The number of walkbacks to perform (the variable K in Bengio's paper above). A walkback is a Gibbs sample
            from the DAE, which means the model generates inputs in sequence, where each generated input is compared
            to the original input to create the reconstruction cost for training. For running the model, the very last
            generated input in the Gibbs chain is used as the output.
        input_sampling : bool
            During walkbacks, whether to sample from the generated input to create a new starting point for the next
            walkback (next step in the Gibbs chain). This generally makes walkbacks more effective by making the
            process more stochastic - more likely to find spurious modes in the model's representation.
        mrg : random
            A random number generator that is used when adding noise into the network and for sampling from the input.
            I recommend using Theano's sandbox.rng_mrg.MRG_RandomStreams.
        tied_weights : bool
            DAE has two weight matrices - W from input -> hiddens and V from hiddens -> input. This boolean
            determines if V = W.T, which 'ties' V to W and reduces the number of parameters necessary during training.
        weights_init : str
            Determines the method for initializing model weights. See opendeep.utils.nnet for options.
        weights_interval : str or float
            If Uniform `weights_init`, the +- interval to use. See opendeep.utils.nnet for options.
        weights_mean : float
            If Gaussian `weights_init`, the mean value to use.
        weights_std : float
            If Gaussian `weights_init`, the standard deviation to use.
        bias_init : float
            The initial value to use for the bias parameter. Most often, the default of 0.0 is preferred.
        cost_function : str or callable
            The function to use when calculating the reconstruction cost of the model. This should be appropriate
            for the type of input, i.e. use 'binary_crossentropy' for binary inputs, or 'mse' for real-valued inputs.
            See opendeep.utils.cost for options. You can also specify your own function, which needs to be callable.
        cost_args : dict
            Any additional named keyword arguments to pass to the specified `cost_function`.
        add_noise : bool
            Whether to add noise to (corrupt) the input before passing it through the computation graph during training.
            This should most likely be set to the default of True, because this is a *denoising* autoencoder after all.
        noiseless_h1 : bool
            Whether to keep the first hidden layer noiseless (i.e. not corrupt it) during computation.
        hidden_noise : str
            What type of noise to use for corrupting the hidden layer (if not `noiseless_h1`). See opendeep.utils.noise
            for options. This should be appropriate for the hidden unit activation, i.e. Gaussian for tanh or other
            real-valued activations, etc.
        hidden_noise_level : float
            The amount of noise to use for the noise function specified by `hidden_noise`. This could be the
            standard deviation for gaussian noise, the interval for uniform noise, the dropout amount, etc.
        input_noise : str
            What type of noise to use for corrupting the input before computation (if `add_noise`).
            See opendeep.utils.noise for options. This should be appropriate for the input units, i.e. salt-and-pepper
            for binary units, etc.
        input_noise_level : float
            The amount of noise used to corrupt the input. This could be the masking probability for salt-and-pepper,
            standard deviation for Gaussian, interval for Uniform, etc.
        noise_decay : str or False
            Whether to use `input_noise` scheduling (decay `input_noise_level` during the course of training),
            and if so, the string input specifies what type of decay to use. See opendeep.utils.decay for options.
            Noise decay (known as noise scheduling) effectively helps the DAE learn larger variance features first,
            and then smaller ones later (almost as a kind of curriculum learning). May help it converge faster.
        noise_annealing : float
            The amount to reduce the `input_noise_level` after each training epoch based on the decay function specified
            in `noise_decay`.
        image_width : int
            If the input should be represented as an image, the width of the input image. If not specified, it will be
            close to the square factor of the `input_size`.
        image_height : int
            If the input should be represented as an image, the height of the input image. If not specified, it will be
            close to the square factor of the `input_size`.
        """
        # init Model to combine the defaults and config dictionaries with the initial parameters.
        initial_parameters = locals().copy()
        initial_parameters.pop('self')
        super(GSN, self).__init__(**initial_parameters)

        # when the input should be thought of as an image, either use the specified width and height,
        # or try to make as square as possible.
        if image_height is None and image_width is None:
            (_h, _w) = closest_to_square_factors(self.input_size)
            self.image_width = _w
            self.image_height = _h
        else:
            self.image_height = image_height
            self.image_width = image_width

        ############################
        # Theano variables and RNG #
        ############################
        if self.inputs_hook is None:
            self.X = T.matrix('X')
        else:
            # inputs_hook is a (shape, input) tuple
            self.X = self.inputs_hook[1]

        ##########################
        # Network specifications #
        ##########################
        # generally, walkbacks should be at least 2*layers
        if layers % 2 == 0:
            if walkbacks < 2 * layers:
                log.warning(
                    'Not enough walkbacks for the layers! Layers is %s and walkbacks is %s. '
                    'Generally want 2x walkbacks to layers', str(layers),
                    str(walkbacks))
        else:
            if walkbacks < 2 * layers - 1:
                log.warning(
                    'Not enough walkbacks for the layers! Layers is %s and walkbacks is %s. '
                    'Generally want 2x walkbacks to layers', str(layers),
                    str(walkbacks))

        self.add_noise = add_noise
        self.noise_annealing = as_floatX(
            noise_annealing)  # noise schedule parameter
        self.hidden_noise_level = sharedX(hidden_noise_level,
                                          dtype=theano.config.floatX)
        self.hidden_noise = get_noise(name=hidden_noise,
                                      noise_level=self.hidden_noise_level,
                                      mrg=mrg)
        self.input_noise_level = sharedX(input_noise_level,
                                         dtype=theano.config.floatX)
        self.input_noise = get_noise(name=input_noise,
                                     noise_level=self.input_noise_level,
                                     mrg=mrg)

        self.walkbacks = walkbacks
        self.tied_weights = tied_weights
        self.layers = layers
        self.noiseless_h1 = noiseless_h1
        self.input_sampling = input_sampling
        self.noise_decay = noise_decay

        # if there was a hiddens_hook, unpack the hidden layers in the tensor
        if self.hiddens_hook is not None:
            hidden_size = self.hiddens_hook[0]
            self.hiddens_flag = True
        else:
            self.hiddens_flag = False

        # determine the sizes of each layer in a list.
        #  layer sizes, from h0 to hK (h0 is the visible layer)
        hidden_size = list(raise_to_list(hidden_size))
        if len(hidden_size) == 1:
            self.layer_sizes = [self.input_size] + hidden_size * self.layers
        else:
            assert len(hidden_size) == self.layers, "Hidden sizes and number of hidden layers mismatch. " + \
                                                    "Hiddens %d and layers %d" % (len(hidden_size), self.layers)
            self.layer_sizes = [self.input_size] + hidden_size

        if self.hiddens_hook is not None:
            self.hiddens = self.unpack_hiddens(self.hiddens_hook[1])

        #########################
        # Activation functions! #
        #########################
        # hidden unit activation
        self.hidden_activation = get_activation_function(hidden_activation)
        # Visible layer activation
        self.visible_activation = get_activation_function(visible_activation)
        # make sure the sampling functions are appropriate for the activation functions.
        if is_binary(self.visible_activation):
            self.visible_sampling = mrg.binomial
        else:
            # TODO: implement non-binary activation
            log.error("Non-binary visible activation not supported yet!")
            raise NotImplementedError(
                "Non-binary visible activation not supported yet!")

        # Cost function
        self.cost_function = get_cost_function(cost_function)
        self.cost_args = cost_args or dict()

        ###############
        # Parameters! #
        ###############
        # make sure to deal with params_hook!
        if self.params_hook is not None:
            # if tied weights, expect layers*2 + 1 params
            if self.tied_weights:
                assert len(self.params_hook) == 2*layers + 1, \
                    "Tied weights: expected {0!s} params, found {1!s}!".format(2*layers+1, len(self.params_hook))
                self.weights_list = self.params_hook[:layers]
                self.bias_list = self.params_hook[layers:]
            # if untied weights, expect layers*3 + 1 params
            else:
                assert len(self.params_hook) == 3*layers + 1, \
                    "Untied weights: expected {0!s} params, found {1!s}!".format(3*layers+1, len(self.params_hook))
                self.weights_list = self.params_hook[:2 * layers]
                self.bias_list = self.params_hook[2 * layers:]
        # otherwise, construct our params
        else:
            # initialize a list of weights and biases based on layer_sizes for the GSN
            self.weights_list = [
                get_weights(
                    weights_init=weights_init,
                    shape=(self.layer_sizes[i], self.layer_sizes[i + 1]),
                    name="W_{0!s}_{1!s}".format(i, i + 1),
                    rng=mrg,
                    # if gaussian
                    mean=weights_mean,
                    std=weights_std,
                    # if uniform
                    interval=weights_interval) for i in range(layers)
            ]
            # add more weights if we aren't tying weights between layers (need to add for higher-lower layers now)
            if not tied_weights:
                self.weights_list.extend([
                    get_weights(
                        weights_init=weights_init,
                        shape=(self.layer_sizes[i + 1], self.layer_sizes[i]),
                        name="W_{0!s}_{1!s}".format(i + 1, i),
                        rng=mrg,
                        # if gaussian
                        mean=weights_mean,
                        std=weights_std,
                        # if uniform
                        interval=weights_interval)
                    for i in reversed(range(layers))
                ])
            # initialize each layer bias to 0's.
            self.bias_list = [
                get_bias(shape=(self.layer_sizes[i], ),
                         name='b_' + str(i),
                         init_values=bias_init) for i in range(layers + 1)
            ]

        # build the params of the model into a list
        self.params = self.weights_list + self.bias_list
        log.debug("gsn params: %s", str(self.params))
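        # With tied weights this is layers weight matrices + (layers + 1) biases (2*layers + 1 params total);
        # untied weights add the reverse-direction matrices, giving 2*layers weights + (layers + 1) biases (3*layers + 1).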

        # using the properties, build the computational graph
        self.cost, self.monitors, self.output, self.hiddens = self.build_computation_graph()
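
# As a quick illustration of the layer-size expansion in the constructor above, here is a
# minimal standalone sketch; the helper name build_layer_sizes and the example values are
# hypothetical and only mirror the logic, they are not part of the library.
def build_layer_sizes(input_size, hidden_size, layers):
    # Normalize hidden_size to a list, mirroring raise_to_list in the constructor.
    hidden_size = list(hidden_size) if isinstance(hidden_size, (list, tuple)) else [hidden_size]
    if len(hidden_size) == 1:
        # a single size given: repeat it for every hidden layer
        return [input_size] + hidden_size * layers
    assert len(hidden_size) == layers, "hidden sizes and number of hidden layers mismatch"
    return [input_size] + hidden_size

# e.g. a 2-layer GSN on flattened 28x28 MNIST inputs with 1000 units per hidden layer:
print(build_layer_sizes(28 * 28, 1000, 2))  # -> [784, 1000, 1000]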
Example #16
def main():
    ########################################
    # Initialization things with arguments #
    ########################################
    # use these arguments to get results from paper referenced above
    _train_args = {"epochs": 1000,  # maximum number of times to run through the dataset
                   "batch_size": 100,  # number of examples to process in parallel (minibatch)
                   "min_batch_size": 1,  # the minimum number of examples for a batch to be considered
                   "save_freq": 1,  # how many epochs between saving parameters
                   "stop_threshold": .9995,  # multiplier for how much the train cost to improve to not stop early
                   "stop_patience": 500,  # how many epochs to wait to see if the threshold has been reached
                   "learning_rate": .25,  # initial learning rate for SGD
                   "lr_decay": 'exponential',  # the decay function to use for the learning rate parameter
                   "lr_decay_factor": .995,  # by how much to decay the learning rate each epoch
                   "momentum": 0.5,  # the parameter momentum amount
                   'momentum_decay': False,  # whether to decay the momentum each epoch (False = no decay)
                   'momentum_factor': 0,  # by how much to decay the momentum (in this case not at all)
                   'nesterov_momentum': False,  # whether to use nesterov momentum update (accelerated momentum)
    }

    config_root_logger()
    log.info("Creating a new GSN")

    mnist = MNIST(concat_train_valid=True)
    gsn = GSN(layers=2,
              walkbacks=4,
              hidden_size=1500,
              visible_activation='sigmoid',
              hidden_activation='tanh',
              input_size=28*28,
              tied_weights=True,
              hidden_add_noise_sigma=2,
              input_salt_and_pepper=0.4,
              outdir='outputs/test_gsn/',
              vis_init=False,
              noiseless_h1=True,
              input_sampling=True,
              weights_init='uniform',
              weights_interval='montreal',
              bias_init=0,
              cost_function='binary_crossentropy')

    recon_cost_channel = MonitorsChannel(name='cost')
    recon_cost_channel.add(Monitor('recon_cost', gsn.get_monitors()['recon_cost'], test=True))
    recon_cost_channel.add(Monitor('noisy_recon_cost', gsn.get_monitors()['noisy_recon_cost'], test=True))

    # Load initial weights and biases from file
    # params_to_load = '../../../outputs/gsn/mnist/trained_epoch_395.pkl'
    # gsn.load_params(params_to_load)

    optimizer = SGD(model=gsn, dataset=mnist, **_train_args)
    # optimizer = AdaDelta(model=gsn, dataset=mnist, epochs=200, batch_size=100, learning_rate=1e-6)
    optimizer.train(monitor_channels=recon_cost_channel)

    # Save some reconstruction output images
    n_examples = 100
    xs_test = mnist.test_inputs[:n_examples]
    noisy_xs_test = gsn.f_noise(xs_test)
    reconstructed = gsn.run(noisy_xs_test)
    # Concatenate stuff
    stacked = numpy.vstack(
        [numpy.vstack([xs_test[i * 10: (i + 1) * 10],
                       noisy_xs_test[i * 10: (i + 1) * 10],
                       reconstructed[i * 10: (i + 1) * 10]])
         for i in range(10)])
    number_reconstruction = PIL.Image.fromarray(
        tile_raster_images(stacked, (gsn.image_height, gsn.image_width), (10, 30))
    )

    number_reconstruction.save(gsn.outdir + 'reconstruction.png')
    log.info("saved output image!")

    # Construct image from the weight matrix
    image = PIL.Image.fromarray(
        tile_raster_images(
            X=gsn.weights_list[0].get_value(borrow=True).T,
            img_shape=(28, 28),
            tile_shape=closest_to_square_factors(gsn.hidden_size),
            tile_spacing=(1, 1)
        )
    )
    image.save(gsn.outdir + "gsn_mnist_weights.png")
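
# For reference, the reconstruction image above interleaves rows of originals, noisy inputs,
# and reconstructions before tiling. A minimal sketch of that layout with dummy arrays
# (shapes only; no trained model, noise function, or image saving involved):
import numpy

# Dummy stand-ins for xs_test / noisy_xs_test / reconstructed: 100 flattened 28x28 images each.
xs = numpy.zeros((100, 784))
noisy = numpy.ones((100, 784))
recon = numpy.full((100, 784), 0.5)

# Interleave groups of 10: originals, then their noisy versions, then their reconstructions.
stacked = numpy.vstack(
    [numpy.vstack([xs[i * 10:(i + 1) * 10],
                   noisy[i * 10:(i + 1) * 10],
                   recon[i * 10:(i + 1) * 10]])
     for i in range(10)])
print(stacked.shape)  # (300, 784) -> laid out by tile_raster_images as a (10, 30) grid of 28x28 images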
Example #17
def main():
    ########################################
    # Initialization things with arguments #
    ########################################
    # use these arguments to get results from paper referenced above
    _train_args = {"n_epoch": 1000,  # maximum number of times to run through the dataset
                   "batch_size": 100,  # number of examples to process in parallel (minibatch)
                   "minimum_batch_size": 1,  # the minimum number of examples for a batch to be considered
                   "save_frequency": 1,  # how many epochs between saving parameters
                   "early_stop_threshold": .9995,  # multiplier for how much the train cost to improve to not stop early
                   "early_stop_length": 500,  # how many epochs to wait to see if the threshold has been reached
                   "learning_rate": .25,  # initial learning rate for SGD
                   "lr_decay": 'exponential',  # the decay function to use for the learning rate parameter
                   "lr_factor": .995,  # by how much to decay the learning rate each epoch
                   "momentum": 0.5,  # the parameter momentum amount
                   'momentum_decay': False,  # whether to decay the momentum each epoch (False = no decay)
                   'momentum_factor': 0,  # by how much to decay the momentum (in this case not at all)
                   'nesterov_momentum': False,  # whether to use nesterov momentum update (accelerated momentum)
    }

    config_root_logger()
    log.info("Creating a new GSN")

    mnist = MNIST(concat_train_valid=True)
    gsn = GSN(layers=2,
              walkbacks=4,
              hidden_size=1500,
              visible_activation='sigmoid',
              hidden_activation='tanh',
              input_size=28*28,
              tied_weights=True,
              hidden_add_noise_sigma=2,
              input_salt_and_pepper=0.4,
              outdir='outputs/test_gsn/',
              vis_init=False,
              noiseless_h1=True,
              input_sampling=True,
              weights_init='uniform',
              weights_interval='montreal',
              bias_init=0,
              cost_function='binary_crossentropy')

    recon_cost_channel = MonitorsChannel(name='cost')
    recon_cost_channel.add(Monitor('recon_cost', gsn.get_monitors()['recon_cost'], test=True))
    recon_cost_channel.add(Monitor('noisy_recon_cost', gsn.get_monitors()['noisy_recon_cost'], test=True))

    # Load initial weights and biases from file
    # params_to_load = '../../../outputs/gsn/mnist/trained_epoch_395.pkl'
    # gsn.load_params(params_to_load)

    optimizer = SGD(model=gsn, dataset=mnist, **_train_args)
    # optimizer = AdaDelta(model=gsn, dataset=mnist, n_epoch=200, batch_size=100, learning_rate=1e-6)
    optimizer.train(monitor_channels=recon_cost_channel)

    # Save some reconstruction output images
    import opendeep.data.dataset as datasets
    n_examples = 100
    xs_test, _ = mnist.getSubset(datasets.TEST)
    xs_test = xs_test[:n_examples].eval()
    noisy_xs_test = gsn.f_noise(xs_test)
    reconstructed = gsn.run(noisy_xs_test)
    # Concatenate stuff
    stacked = numpy.vstack(
        [numpy.vstack([xs_test[i * 10: (i + 1) * 10],
                       noisy_xs_test[i * 10: (i + 1) * 10],
                       reconstructed[i * 10: (i + 1) * 10]])
         for i in range(10)])
    number_reconstruction = PIL.Image.fromarray(
        tile_raster_images(stacked, (gsn.image_height, gsn.image_width), (10, 30))
    )

    number_reconstruction.save(gsn.outdir + 'reconstruction.png')
    log.info("saved output image!")

    # Construct image from the weight matrix
    image = PIL.Image.fromarray(
        tile_raster_images(
            X=gsn.weights_list[0].get_value(borrow=True).T,
            img_shape=(28, 28),
            tile_shape=closest_to_square_factors(gsn.hidden_size),
            tile_spacing=(1, 1)
        )
    )
    image.save(gsn.outdir + "gsn_mnist_weights.png")
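
# The weight-filter tile shape above comes from closest_to_square_factors(gsn.hidden_size).
# Below is a minimal sketch of what such a helper plausibly computes; the behavior is an
# assumption, and the function is named ..._sketch to avoid implying it is the library's code.
import math

def closest_to_square_factors_sketch(n):
    # Assumed behavior: return the factor pair (a, b) with a * b == n that is closest to square.
    a = int(math.floor(math.sqrt(n)))
    while n % a != 0:
        a -= 1
    return a, n // a

print(closest_to_square_factors_sketch(1500))  # -> (30, 50), a tile shape for 1500 hidden-unit filters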