Example #1
            def iter_update(self, epoch, nb_batches, iter_update_batch):
                status = super(MyBatchOptimizer, self).iter_update(epoch, nb_batches, iter_update_batch)
                #status["reconstruction_error"] = self.model.reconstruction_error_function(X)
                #status["lower_bound_train"] = self.model.get_likelihood_lower_bound(X_train)
                status["lower_bound_validation"] = self.model.get_likelihood_lower_bound(X_valid)
                status["log_likelihood_validation"] = self.model.log_likelihood_approximation_function(X_valid)
                for k, v in status.items():
                    light.append(k, float(v))
                
                if epoch % 10 == 0:
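                    # every 10 epochs, record diagnostic plots and activations through `light`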

                    if save_codes is True:
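                        # project the validation codes to 2D with PCA and store the scatter plot (colored by labels if available)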
                        plt.clf()

                        fig = plt.gcf()
                        code, code_sigma = self.model.encode(X_valid)
                        pca = PCA(n_components=2)
                        code_2d = pca.fit_transform(code)
                        if y is None:
                            plt.scatter(code_2d[:, 0], code_2d[:, 1])
                        else:
                            plt.scatter(code_2d[:, 0], code_2d[:, 1], c=y_valid)
                        fig.canvas.draw()
                        data = fig_to_list(fig)
                        light.append("codes", light.insert_blob(data))

                    if save_samples is True:
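                        # draw 100 samples from the model (means only) and store them as square images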
                        fig = plt.gcf()
                        X_ = self.model.sample(100, only_means=True)
                        X_ = get_2d_square_image_view(X_)
                        light.append("samples", light.insert_blob(X_.tolist()))

                    if learning_trajectory is True:
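                        # store the encoded positions of the first 200 examples to trace the learning trajectory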
                        points, _ = self.model.encode(X[0:200])
                        points = points.ravel().tolist()
                        light.append("trajectories", {"points": points, "epoch": epoch, "seed": state})
                    
                    if save_hidden_activations is True:
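                        # record each encoder layer's activations on the first 200 examples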
                        samples = range(0, 200)
                        hidden = []
                        for layer in self.model.encoder_layers:
                            h = layer(X[samples])
                            h = h.tolist()
                            hidden.append(h)
                        light.append("hidden_activations_encoder", light.insert_blob(hidden))

                return status
Example #2
                                       optimization_procedure=(updates.adadelta, {"learning_rate" : 1.}),
                                       batch_size=256,
                                       whole_dataset_in_device=True,
                                       verbose=1)
    rng = RandomStreams(seed=1000)
    #noise_function = lambda X_batch: X_batch * rng.binomial(size=X_batch.shape, p=0.7)
    noise_function = None
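    # build the autoencoder from the encoder/decoder networks (no input noise, walkback=1) and fit it on X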
    model = autoencoder.Autoencoder(nnet_x_to_z, nnet_z_to_x, batch_optimizer, noise_function=noise_function, walkback=1)
    model.fit(X)

    """
    conv_filters = h.W.get_value()
    conv_filters = conv_filters.reshape( (conv_filters.shape[0], conv_filters.shape[2], conv_filters.shape[3]))
    grid_plot(conv_filters, imshow_options={"cmap": "gray"})
    plt.savefig('out-filters-conv.png')
    plt.show()
    """

    # visualize the learned weights of layer `h` as a grid of image patches
    filters = h.W.get_value().T
    filters = get_2d_square_image_view(filters)
    grid_plot(filters, imshow_options={"cmap": "gray"})
    plt.savefig("out-filters.png")
    plt.show()

    # draw 100 samples from the trained model (10000 sampling iterations) and save them as an image grid
    plt.clf()
    samples = model.sample(nb=100, nb_iterations=10000)
    samples = get_2d_square_image_view(samples)
    grid_plot(samples, imshow_options={"cmap": "gray"})
    plt.savefig('out-samples.png')
    plt.show()
Example #3
def test():
    state = 10
    rng = rng_mrg.MRG_RandomStreams(seed=state)
    x_dim = 64
    h_dim = 100
    
    # three single-layer networks: encoder q(h|x), decoder p(x|h), and prior p(h)
    x_in = layers.InputLayer((None, x_dim))
    l_out = Layer(x_in, num_units=h_dim)
    model_encoder = LightweightModel([x_in], [l_out])

    h_in = layers.InputLayer((None, h_dim))
    l_out = Layer(h_in, num_units=x_dim)
    model_decoder = LightweightModel([h_in], [l_out])

    h_in = layers.InputLayer((None, 1))
    l_out = Layer(h_in, num_units=h_dim)
    model_prior = LightweightModel([h_in], [l_out])

    def loss_function(model, tensors):
        X = tensors["X"]
        (h, u), = model_encoder.get_output(X, mode="sample", rng=rng)
        #print(X.ndim, h.ndim)
        (q_h_given_x, _), = model_encoder.get_output(X, mode="evaluate", on=h)
        (p_x_given_h, _),  = model_decoder.get_output(h, mode="evaluate", on=X)
        ones = T.alloc(np.cast[theano.config.floatX](1.), *h.shape)
        zeros = T.alloc(np.cast[theano.config.floatX](0.), *h.shape)
        (p_h, _), = model_prior.get_output(zeros[:, 0:1], mode="evaluate", on=ones)
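        # single-sample estimate of the negative variational bound: -[log p(x|h) + log p(h) - log q(h|x)]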
        L = -((T.log(p_x_given_h).sum(axis=1) + T.log(p_h).sum(axis=1) - T.log(q_h_given_x).sum(axis=1)))
        return (L.mean()), u

    input_variables = OrderedDict(
            X=dict(tensor_type=T.matrix),
    )
    
    functions = dict(
    )

    # sample function
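    # draw h from the prior, then evaluate the decoder at an all-ones observation to get p(x=1 | h) per pixel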
    nb = T.iscalar()
    sample_input = T.alloc(np.cast[theano.config.floatX](0.), nb, 1) 
    from theano.updates import OrderedUpdates
    u = OrderedUpdates()
    (s_h, u_h), = model_prior.get_output(sample_input, mode="sample", rng=rng)
    ones = T.alloc(np.cast[theano.config.floatX](1.), s_h.shape[0], x_dim)
    (s_x, u_x), = model_decoder.get_output(s_h, mode="evaluate", on=ones)
    sample = theano.function([nb], s_x, updates=u_x)
    batch_optimizer = BatchOptimizer(verbose=1, max_nb_epochs=100,
            batch_size=256,
            optimization_procedure=(updates.rmsprop, {"learning_rate": 0.0001})
    )
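    # small wrapper so the Capsule sees the encoder and decoder as one model with a single parameter list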
    class Container(object):
        def __init__(self, models):
            self.models = models
        def get_all_params(self):
            return [p for m in self.models for p in m.get_all_params()]
    models = [model_encoder, model_decoder]
    models = Container(models)
    darn = Capsule(input_variables, 
                   models,
                   loss_function,
                   functions=functions,
                   batch_optimizer=batch_optimizer)

    from sklearn.datasets import load_digits
    def build_digits():
        digits = load_digits()
        X = digits.data
        X = X.astype(np.float32) / 16.
        return X, digits.images.shape[1:]

    X, imshape = build_digits()
    darn.fit(X=X)

    # draw 20 samples and save them as an image grid
    s = sample(20)
    s = get_2d_square_image_view(s)
    grid_plot(s, imshow_options={"cmap": "gray"})
    plt.savefig("out.png")
Example #4
    input_variables = OrderedDict(
            X=dict(tensor_type=T.matrix),
    )

    # expose a sampling function and a per-example log-likelihood through the Capsule
    functions = dict(
           sample=dict(
               get_output=lambda model, X: model.get_output(X, sampler=True)[0],
               params=["X"]
           ),
           log_likelihood=dict(
               get_output=lambda model, X: T.log(model.get_output(X)[0]).sum(axis=1),
               params=["X"]
           ),
    )

    batch_optimizer = BatchOptimizer(verbose=1, max_nb_epochs=2,
            batch_size=256,
            optimization_procedure=(updates.rmsprop, {"learning_rate": 0.001})
    )
    nade = Capsule(input_variables, model,
                   loss_function,
                   functions=functions,
                   batch_optimizer=batch_optimizer)
    nade.fit(X=X)
    # sample from the trained model starting from an all-ones input (reordered with `order`)
    X_ones = np.ones((100, x_dim)).astype(np.float32)
    X_ones = X_ones[:, order]
    s = get_2d_square_image_view(nade.sample(X_ones))
    grid_plot(s, imshow_options={"cmap": "gray"})
    plt.savefig("out.png")