コード例 #1
1
ファイル: mcgsm_test.py プロジェクト: jakirkham/cmt
	def test_mogsm(self):
		"""Train an input-free MCGSM on mixture-of-Gaussians data and check that
		it matches an equivalent (unconditional) MoGSM in likelihood, sample
		distribution, and posteriors."""
		mcgsm = MCGSM(
			dim_in=0,
			dim_out=3,
			num_components=2,
			num_scales=2,
			num_features=0)

		# mixture weights, sample count, component means and covariances
		p0 = 0.3
		p1 = 0.7
		N = 20000
		m0 = array([[2], [0], [0]])
		m1 = array([[0], [2], [1]])
		C0 = cov(randn(mcgsm.dim_out, mcgsm.dim_out**2))
		C1 = cov(randn(mcgsm.dim_out, mcgsm.dim_out**2))
		input = zeros([0, N])
		# int(round(...)) because round() returns a float, which newer numpy
		# versions reject as an array dimension
		output = hstack([
			dot(cholesky(C0), randn(mcgsm.dim_out, int(round(p0 * N)))) + m0,
			dot(cholesky(C1), randn(mcgsm.dim_out, int(round(p1 * N)))) + m1]) * (rand(1, N) + 0.5)

		mcgsm.train(input, output, parameters={
			'verbosity': 0,
			'max_iter': 10,
			'train_means': True})

		mogsm = MoGSM(3, 2, 2)

		# translate parameters from MCGSM to MoGSM
		mogsm.priors = sum(exp(mcgsm.priors), 1) / sum(exp(mcgsm.priors))

		for k in range(mogsm.num_components):
			mogsm[k].mean = mcgsm.means[:, k]
			mogsm[k].covariance = inv(dot(mcgsm.cholesky_factors[k], mcgsm.cholesky_factors[k].T))
			mogsm[k].scales = exp(mcgsm.scales[k, :])
			mogsm[k].priors = exp(mcgsm.priors[k, :]) / sum(exp(mcgsm.priors[k, :]))

		self.assertAlmostEqual(mcgsm.evaluate(input, output), mogsm.evaluate(output), 5)

		mogsm_samples = mogsm.sample(N)
		mcgsm_samples = mcgsm.sample(input)

		# generated samples should have the same distribution; ks_2samp returns
		# a (statistic, p-value) tuple -- the original compared the whole tuple
		# against 0.0001, which is vacuously true in Python 2 and a TypeError
		# in Python 3, so test the p-value explicitly
		for i in range(mogsm.dim):
			self.assertGreater(ks_2samp(mogsm_samples[i], mcgsm_samples[0])[1], 0.0001)
			self.assertGreater(ks_2samp(mogsm_samples[i], mcgsm_samples[1])[1], 0.0001)
			self.assertGreater(ks_2samp(mogsm_samples[i], mcgsm_samples[2])[1], 0.0001)

		posterior = mcgsm.posterior(input, mcgsm_samples)

		# average posterior over components should correspond to the priors
		for k in range(mogsm.num_components):
			self.assertLess(abs(1 - mean(posterior[k]) / mogsm.priors[k]), 0.1)
コード例 #2
0
ファイル: images.py プロジェクト: cajal/cmt
def main(argv):
	# load image and turn into grayscale
	img = rgb2gray(imread('media/newyork.png'))

	# generate data
	inputs, outputs = generate_data_from_image(
		img, input_mask, output_mask, 220000)

	# split data into training, test, and validation sets
	inputs  = split(inputs,  [100000, 200000], 1)
	outputs = split(outputs, [100000, 200000], 1)

	data_train = inputs[0], outputs[0]
	data_test  = inputs[1], outputs[1]
	data_valid = inputs[2], outputs[2]

	# compute normalizing transformation
	pre = WhiteningPreconditioner(*data_train)

	# intialize model
	model = MCGSM(
		dim_in=data_train[0].shape[0],
		dim_out=data_train[1].shape[0],
		num_components=8,
		num_scales=4,
		num_features=32)

	# fit parameters
	model.initialize(*pre(*data_train))
	model.train(*chain(pre(*data_train), pre(*data_valid)),
		parameters={
			'verbosity': 1,
			'max_iter': 1000,
			'threshold': 1e-7,
			'val_iter': 5,
			'val_look_ahead': 10,
			'num_grad': 20,
		})

	# evaluate model
	print 'Average log-likelihood: {0:.4f} [bit/px]'.format(
			-model.evaluate(data_test[0], data_test[1], pre))

	# synthesize a new image
	img_sample = sample_image(img, model, input_mask, output_mask, pre)

	imwrite('newyork_sample.png', img_sample,
		cmap='gray',
		vmin=min(img),
		vmax=max(img))

	# save model
	with open('image_model.pck', 'wb') as handle:
		dump({
			'model': model,
			'input_mask': input_mask,
			'output_mask': output_mask}, handle, 1)

	return 0
コード例 #3
0
ファイル: mcgsm_test.py プロジェクト: jakirkham/cmt
	def test_evaluate(self):
		"""evaluate() should equal the mean log-likelihood of the preconditioned
		data plus the mean log-Jacobian, in bits per output dimension."""
		mcgsm = MCGSM(5, 3, 4, 2, 10)

		inputs = randn(mcgsm.dim_in, 100)
		outputs = mcgsm.sample(inputs)

		pre = WhiteningPreconditioner(inputs, outputs)

		# negative sign: evaluate() reports negative log-likelihood
		loglik_direct = -mcgsm.evaluate(inputs, outputs, pre)

		# reconstruct the same quantity by hand
		data_term = mcgsm.loglikelihood(*pre(inputs, outputs)).mean()
		jacobian_term = pre.logjacobian(inputs, outputs).mean()
		loglik_manual = (data_term + jacobian_term) / log(2.) / mcgsm.dim_out

		self.assertAlmostEqual(loglik_direct, loglik_manual, 8)
コード例 #4
0
    def test_evaluate(self):
        """Consistency check: evaluate() must agree with the manually computed
        average log-likelihood plus Jacobian correction, in bits per dim."""
        mcgsm = MCGSM(5, 3, 4, 2, 10)

        inputs = randn(mcgsm.dim_in, 100)
        outputs = mcgsm.sample(inputs)

        pre = WhiteningPreconditioner(inputs, outputs)

        # evaluate() returns a negative log-likelihood, hence the sign flip
        direct = -mcgsm.evaluate(inputs, outputs, pre)

        # same quantity assembled from its two parts
        avg_loglik = mcgsm.loglikelihood(*pre(inputs, outputs)).mean()
        avg_logjac = pre.logjacobian(inputs, outputs).mean()
        manual = (avg_loglik + avg_logjac) / log(2.) / mcgsm.dim_out

        self.assertAlmostEqual(direct, manual, 8)
コード例 #5
0
    def test_mogsm(self):
        """Train an input-free MCGSM on mixture-of-Gaussians data and check
        that it matches an equivalent (unconditional) MoGSM in likelihood,
        sample distribution, and posteriors."""
        mcgsm = MCGSM(dim_in=0,
                      dim_out=3,
                      num_components=2,
                      num_scales=2,
                      num_features=0)

        # mixture weights, sample count, component means and covariances
        p0 = 0.3
        p1 = 0.7
        N = 20000
        m0 = array([[2], [0], [0]])
        m1 = array([[0], [2], [1]])
        C0 = cov(randn(mcgsm.dim_out, mcgsm.dim_out**2))
        C1 = cov(randn(mcgsm.dim_out, mcgsm.dim_out**2))
        input = zeros([0, N])
        # int(round(...)) because round() returns a float, which newer numpy
        # versions reject as an array dimension
        output = hstack([
            dot(cholesky(C0), randn(mcgsm.dim_out, int(round(p0 * N)))) + m0,
            dot(cholesky(C1), randn(mcgsm.dim_out, int(round(p1 * N)))) + m1
        ]) * (rand(1, N) + 0.5)

        mcgsm.train(input,
                    output,
                    parameters={
                        'verbosity': 0,
                        'max_iter': 10,
                        'train_means': True
                    })

        mogsm = MoGSM(3, 2, 2)

        # translate parameters from MCGSM to MoGSM
        mogsm.priors = sum(exp(mcgsm.priors), 1) / sum(exp(mcgsm.priors))

        for k in range(mogsm.num_components):
            mogsm[k].mean = mcgsm.means[:, k]
            mogsm[k].covariance = inv(
                dot(mcgsm.cholesky_factors[k], mcgsm.cholesky_factors[k].T))
            mogsm[k].scales = exp(mcgsm.scales[k, :])
            mogsm[k].priors = exp(mcgsm.priors[k, :]) / sum(
                exp(mcgsm.priors[k, :]))

        self.assertAlmostEqual(mcgsm.evaluate(input, output),
                               mogsm.evaluate(output), 5)

        mogsm_samples = mogsm.sample(N)
        mcgsm_samples = mcgsm.sample(input)

        # generated samples should have the same distribution; ks_2samp
        # returns a (statistic, p-value) tuple -- the original compared the
        # whole tuple against 0.0001, which is vacuously true in Python 2 and
        # a TypeError in Python 3, so test the p-value explicitly
        for i in range(mogsm.dim):
            self.assertGreater(
                ks_2samp(mogsm_samples[i], mcgsm_samples[0])[1], 0.0001)
            self.assertGreater(
                ks_2samp(mogsm_samples[i], mcgsm_samples[1])[1], 0.0001)
            self.assertGreater(
                ks_2samp(mogsm_samples[i], mcgsm_samples[2])[1], 0.0001)

        posterior = mcgsm.posterior(input, mcgsm_samples)

        # average posterior over components should correspond to the priors
        for k in range(mogsm.num_components):
            self.assertLess(abs(1 - mean(posterior[k]) / mogsm.priors[k]), 0.1)
コード例 #6
0
ファイル: images.py プロジェクト: ominux/cmt
def main(argv):
    # load image and turn into grayscale
    img = rgb2gray(imread('media/newyork.png'))

    # generate data
    inputs, outputs = generate_data_from_image(img, input_mask, output_mask,
                                               220000)

    # split data into training, test, and validation sets
    inputs = split(inputs, [100000, 200000], 1)
    outputs = split(outputs, [100000, 200000], 1)

    data_train = inputs[0], outputs[0]
    data_test = inputs[1], outputs[1]
    data_valid = inputs[2], outputs[2]

    # compute normalizing transformation
    pre = WhiteningPreconditioner(*data_train)

    # intialize model
    model = MCGSM(dim_in=data_train[0].shape[0],
                  dim_out=data_train[1].shape[0],
                  num_components=8,
                  num_scales=4,
                  num_features=32)

    # fit parameters
    model.initialize(*pre(*data_train))
    model.train(*chain(pre(*data_train), pre(*data_valid)),
                parameters={
                    'verbosity': 1,
                    'max_iter': 1000,
                    'threshold': 1e-7,
                    'val_iter': 5,
                    'val_look_ahead': 10,
                    'num_grad': 20,
                })

    # evaluate model
    print 'Average log-likelihood: {0:.4f} [bit/px]'.format(
        -model.evaluate(data_test[0], data_test[1], pre))

    # synthesize a new image
    img_sample = sample_image(img, model, input_mask, output_mask, pre)

    imwrite('newyork_sample.png',
            img_sample,
            cmap='gray',
            vmin=min(img),
            vmax=max(img))

    # save model
    with open('image_model.pck', 'wb') as handle:
        dump(
            {
                'model': model,
                'input_mask': input_mask,
                'output_mask': output_mask
            }, handle, 1)

    return 0