Example #1
# Imports assumed from the OpenDeep examples (module paths are a best guess, not verified against this snippet)
import logging
import numpy
import PIL.Image
from opendeep.log.logger import config_root_logger
from opendeep.data.standard_datasets.image.mnist import MNIST
from opendeep.models.multi_layer.generative_stochastic_network import GSN
from opendeep.monitor.monitor import Monitor, MonitorsChannel
from opendeep.optimization.stochastic_gradient_descent import SGD
from opendeep.utils.image import tile_raster_images
from opendeep.utils.misc import closest_to_square_factors

log = logging.getLogger(__name__)

def main():
    ########################################
    # Initialization things with arguments #
    ########################################
    # use these arguments to reproduce the results from the paper referenced above
    _train_args = {"epochs": 1000,  # maximum number of times to run through the dataset
                   "batch_size": 100,  # number of examples to process in parallel (minibatch)
                   "min_batch_size": 1,  # the minimum number of examples for a batch to be considered
                   "save_freq": 1,  # how many epochs between saving parameters
                   "stop_threshold": .9995,  # multiplier for how much the train cost to improve to not stop early
                   "stop_patience": 500,  # how many epochs to wait to see if the threshold has been reached
                   "learning_rate": .25,  # initial learning rate for SGD
                   "lr_decay": 'exponential',  # the decay function to use for the learning rate parameter
                   "lr_decay_factor": .995,  # by how much to decay the learning rate each epoch
                   "momentum": 0.5,  # the parameter momentum amount
                   'momentum_decay': False,  # how to decay the momentum each epoch (if applicable)
                   'momentum_factor': 0,  # by how much to decay the momentum (in this case not at all)
                   'nesterov_momentum': False,  # whether to use nesterov momentum update (accelerated momentum)
    }

    config_root_logger()
    log.info("Creating a new GSN")

    mnist = MNIST(concat_train_valid=True)
    gsn = GSN(layers=2,
              walkbacks=4,
              hidden_size=1500,
              visible_activation='sigmoid',
              hidden_activation='tanh',
              input_size=28*28,
              tied_weights=True,
              hidden_add_noise_sigma=2,
              input_salt_and_pepper=0.4,
              outdir='outputs/test_gsn/',
              vis_init=False,
              noiseless_h1=True,
              input_sampling=True,
              weights_init='uniform',
              weights_interval='montreal',
              bias_init=0,
              cost_function='binary_crossentropy')

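    # Track the clean and noisy reconstruction costs on the test set during training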
    recon_cost_channel = MonitorsChannel(name='cost')
    recon_cost_channel.add(Monitor('recon_cost', gsn.get_monitors()['recon_cost'], test=True))
    recon_cost_channel.add(Monitor('noisy_recon_cost', gsn.get_monitors()['noisy_recon_cost'], test=True))

    # Load initial weights and biases from file
    # params_to_load = '../../../outputs/gsn/mnist/trained_epoch_395.pkl'
    # gsn.load_params(params_to_load)

    optimizer = SGD(model=gsn, dataset=mnist, **_train_args)
    # optimizer = AdaDelta(model=gsn, dataset=mnist, epochs=200, batch_size=100, learning_rate=1e-6)
    optimizer.train(monitor_channels=recon_cost_channel)

    # Save some reconstruction output images
    n_examples = 100
    xs_test = mnist.test_inputs[:n_examples]
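    # Corrupt the test inputs with the model's input noise, then reconstruct them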
    noisy_xs_test = gsn.f_noise(xs_test)
    reconstructed = gsn.run(noisy_xs_test)
    # Stack originals, noisy inputs, and reconstructions in interleaved groups of 10 rows
    stacked = numpy.vstack(
        [numpy.vstack([xs_test[i * 10: (i + 1) * 10],
                       noisy_xs_test[i * 10: (i + 1) * 10],
                       reconstructed[i * 10: (i + 1) * 10]])
         for i in range(10)])
    number_reconstruction = PIL.Image.fromarray(
        tile_raster_images(stacked, (gsn.image_height, gsn.image_width), (10, 30))
    )

    number_reconstruction.save(gsn.outdir + 'reconstruction.png')
    log.info("saved output image!")

    # Construct image from the weight matrix
    image = PIL.Image.fromarray(
        tile_raster_images(
            X=gsn.weights_list[0].get_value(borrow=True).T,
            img_shape=(28, 28),
            tile_shape=closest_to_square_factors(gsn.hidden_size),
            tile_spacing=(1, 1)
        )
    )
    image.save(gsn.outdir + "gsn_mnist_weights.png")
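
# Hypothetical entry point (not part of the original snippet) so the script runs when executed directly
if __name__ == '__main__':
    main()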
Example #2
    # Method from an RNN-GSN model class; assumes module-level imports of os, theano, and theano.tensor as T
    def _build_rnngsn(self):
        """
        Creates the updates and other return variables for the computation graph.

        Returns
        -------
        tuple
            the sample at the end of the computation graph, the train cost function, the train monitors,
            the training updates, the generated visible sequence, the generation updates, and the final
            recurrent state
        """
        # For training, the deterministic recurrence is used to compute all the
        # {h_t, 1 <= t <= T} given Xs. Conditional GSNs can then be trained
        # in batches using those parameters.
        (u, h_ts), updates_train = theano.scan(fn=lambda x_t, u_tm1: self.recurrent_step(x_t, u_tm1),
                                               sequences=self.input,
                                               outputs_info=[self.u0, None],
                                               name="rnngsn_computation_scan")

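        # Seed the GSN's hidden layers: alternating hidden layers take slices of the
        # recurrent states h_ts, and the layers in between start as zeros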
        h_list = [T.zeros_like(self.input)]
        for layer, w in enumerate(self.weights_list[:self.layers]):
            if layer % 2 != 0:
                h_list.append(T.zeros_like(T.dot(h_list[-1], w)))
            else:
                # note: integer division (//) keeps these slice indices ints under Python 3
                h_list.append((h_ts.T[(layer // 2) * self.hidden_size:(layer // 2 + 1) * self.hidden_size]).T)

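        # Build a GSN conditioned on those hiddens: hiddens_hook injects the recurrent
        # state and params_hook shares this model's GSN parameters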
        gsn = GSN(
            inputs_hook        = (self.input_size, self.input),
            hiddens_hook       = (self.hidden_size, GSN.pack_hiddens(h_list)),
            params_hook        = self.gsn_params,
            outdir             = os.path.join(self.outdir, 'gsn_noisy/'),
            layers             = self.layers,
            walkbacks          = self.walkbacks,
            visible_activation = self.visible_activation_func,
            hidden_activation  = self.hidden_activation_func,
            input_sampling     = self.input_sampling,
            mrg                = self.mrg,
            tied_weights       = self.tied_weights,
            cost_function      = self.cost_function,
            cost_args          = self.cost_args,
            add_noise          = self.add_noise,
            noiseless_h1       = self.noiseless_h1,
            hidden_noise       = self.hidden_noise,
            hidden_noise_level = self.hidden_noise_level,
            input_noise        = self.input_noise,
            input_noise_level  = self.input_noise_level,
            noise_decay        = self.noise_decay,
            noise_annealing    = self.noise_annealing,
            image_width        = self.image_width,
            image_height       = self.image_height
        )

        cost = gsn.get_train_cost()
        monitors = gsn.get_monitors()  # frame-level error would be the 'mse' monitor from GSN
        x_sample_recon = gsn.get_outputs()

        # symbolic loop for sequence generation
        (x_ts, u_ts), updates_generate = theano.scan(lambda u_tm1: self.recurrent_step(None, u_tm1),
                                                     outputs_info=[None, self.generate_u0],
                                                     n_steps=self.n_steps,
                                                     name="rnngsn_generate_scan")

        return x_sample_recon, cost, monitors, updates_train, x_ts, updates_generate, u_ts[-1]
Example #3
    # Variant of Example #2 that also passes input_size explicitly; same assumed module-level imports
    def _build_rnngsn(self):
        """
        Creates the updates and other return variables for the computation graph.

        Returns
        -------
        tuple
            the sample at the end of the computation graph, the train cost function, the train monitors,
            the training updates, the generated visible sequence, the generation updates, and the final
            recurrent state
        """
        # For training, the deterministic recurrence is used to compute all the
        # {h_t, 1 <= t <= T} given Xs. Conditional GSNs can then be trained
        # in batches using those parameters.
        (u, h_ts), updates_train = theano.scan(fn=lambda x_t, u_tm1: self.recurrent_step(x_t, u_tm1),
                                               sequences=self.input,
                                               outputs_info=[self.u0, None],
                                               name="rnngsn_computation_scan")

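        # Seed the GSN's hidden layers: alternating hidden layers take slices of the
        # recurrent states h_ts, and the layers in between start as zeros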
        h_list = [T.zeros_like(self.input)]
        for layer, w in enumerate(self.weights_list[:self.layers]):
            if layer % 2 != 0:
                h_list.append(T.zeros_like(T.dot(h_list[-1], w)))
            else:
                # note: integer division (//) keeps these slice indices ints under Python 3
                h_list.append((h_ts.T[(layer // 2) * self.hidden_size:(layer // 2 + 1) * self.hidden_size]).T)

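        # Build a GSN conditioned on those hiddens: hiddens_hook injects the recurrent
        # state and params_hook shares this model's GSN parameters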
        gsn = GSN(
            inputs_hook        = (self.input_size, self.input),
            input_size         = self.input_size,
            hiddens_hook       = (self.hidden_size, GSN.pack_hiddens(h_list)),
            params_hook        = self.gsn_params,
            outdir             = os.path.join(self.outdir, 'gsn_noisy/'),
            layers             = self.layers,
            walkbacks          = self.walkbacks,
            visible_activation = self.visible_activation_func,
            hidden_activation  = self.hidden_activation_func,
            input_sampling     = self.input_sampling,
            mrg                = self.mrg,
            tied_weights       = self.tied_weights,
            cost_function      = self.cost_function,
            cost_args          = self.cost_args,
            add_noise          = self.add_noise,
            noiseless_h1       = self.noiseless_h1,
            hidden_noise       = self.hidden_noise,
            hidden_noise_level = self.hidden_noise_level,
            input_noise        = self.input_noise,
            input_noise_level  = self.input_noise_level,
            noise_decay        = self.noise_decay,
            noise_annealing    = self.noise_annealing,
            image_width        = self.image_width,
            image_height       = self.image_height
        )

        cost = gsn.get_train_cost()
        monitors = gsn.get_monitors()  # frame-level error would be the 'mse' monitor from GSN
        x_sample_recon = gsn.get_outputs()

        # symbolic loop for sequence generation
        (x_ts, u_ts), updates_generate = theano.scan(lambda u_tm1: self.recurrent_step(None, u_tm1),
                                                     outputs_info=[None, self.generate_u0],
                                                     n_steps=self.n_steps,
                                                     name="rnngsn_generate_scan")

        return x_sample_recon, cost, monitors, updates_train, x_ts, updates_generate, u_ts[-1]
Example #4
# Variant of Example #1 with older SGD argument names; assumes the same imports as Example #1
def main():
    ########################################
    # Initialization things with arguments #
    ########################################
    # use these arguments to reproduce the results from the paper referenced above
    _train_args = {"n_epoch": 1000,  # maximum number of times to run through the dataset
                   "batch_size": 100,  # number of examples to process in parallel (minibatch)
                   "minimum_batch_size": 1,  # the minimum number of examples for a batch to be considered
                   "save_frequency": 1,  # how many epochs between saving parameters
                   "early_stop_threshold": .9995,  # multiplier for how much the train cost to improve to not stop early
                   "early_stop_length": 500,  # how many epochs to wait to see if the threshold has been reached
                   "learning_rate": .25,  # initial learning rate for SGD
                   "lr_decay": 'exponential',  # the decay function to use for the learning rate parameter
                   "lr_factor": .995,  # by how much to decay the learning rate each epoch
                   "momentum": 0.5,  # the parameter momentum amount
                   'momentum_decay': False,  # how to decay the momentum each epoch (if applicable)
                   'momentum_factor': 0,  # by how much to decay the momentum (in this case not at all)
                   'nesterov_momentum': False,  # whether to use nesterov momentum update (accelerated momentum)
    }

    config_root_logger()
    log.info("Creating a new GSN")

    mnist = MNIST(concat_train_valid=True)
    gsn = GSN(layers=2,
              walkbacks=4,
              hidden_size=1500,
              visible_activation='sigmoid',
              hidden_activation='tanh',
              input_size=28*28,
              tied_weights=True,
              hidden_add_noise_sigma=2,
              input_salt_and_pepper=0.4,
              outdir='outputs/test_gsn/',
              vis_init=False,
              noiseless_h1=True,
              input_sampling=True,
              weights_init='uniform',
              weights_interval='montreal',
              bias_init=0,
              cost_function='binary_crossentropy')

    recon_cost_channel = MonitorsChannel(name='cost')
    recon_cost_channel.add(Monitor('recon_cost', gsn.get_monitors()['recon_cost'], test=True))
    recon_cost_channel.add(Monitor('noisy_recon_cost', gsn.get_monitors()['noisy_recon_cost'], test=True))

    # Load initial weights and biases from file
    # params_to_load = '../../../outputs/gsn/mnist/trained_epoch_395.pkl'
    # gsn.load_params(params_to_load)

    optimizer = SGD(model=gsn, dataset=mnist, **_train_args)
    # optimizer = AdaDelta(model=gsn, dataset=mnist, n_epoch=200, batch_size=100, learning_rate=1e-6)
    optimizer.train(monitor_channels=recon_cost_channel)

    # Save some reconstruction output images
    import opendeep.data.dataset as datasets
    n_examples = 100
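    # getSubset returns symbolic/shared variables, so slice and .eval() to get a numpy array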
    xs_test, _ = mnist.getSubset(datasets.TEST)
    xs_test = xs_test[:n_examples].eval()
    noisy_xs_test = gsn.f_noise(xs_test)
    reconstructed = gsn.run(noisy_xs_test)
    # Stack originals, noisy inputs, and reconstructions in interleaved groups of 10 rows
    stacked = numpy.vstack(
        [numpy.vstack([xs_test[i * 10: (i + 1) * 10],
                       noisy_xs_test[i * 10: (i + 1) * 10],
                       reconstructed[i * 10: (i + 1) * 10]])
         for i in range(10)])
    number_reconstruction = PIL.Image.fromarray(
        tile_raster_images(stacked, (gsn.image_height, gsn.image_width), (10, 30))
    )

    number_reconstruction.save(gsn.outdir + 'reconstruction.png')
    log.info("saved output image!")

    # Construct image from the weight matrix
    image = PIL.Image.fromarray(
        tile_raster_images(
            X=gsn.weights_list[0].get_value(borrow=True).T,
            img_shape=(28, 28),
            tile_shape=closest_to_square_factors(gsn.hidden_size),
            tile_spacing=(1, 1)
        )
    )
    image.save(gsn.outdir + "gsn_mnist_weights.png")