Example no. 1
0
	def test_gradient(self):
		"""Numerically verify analytic gradients of the MCBM parameters."""
		model = MCBM(5, 2, 10)

		# start from random parameters so gradients are non-trivial
		model._set_parameters(randn(*model._parameters().shape))

		# gradient with every parameter group trained jointly
		err = model._check_gradient(
			randint(2, size=[model.dim_in, 1000]),
			randint(2, size=[model.dim_out, 1000]), 1e-5)
		self.assertLess(err, 1e-8)

		param_groups = ['priors', 'weights', 'features', 'pred', 'input_bias', 'output_bias']

		def train_flags(active):
			# training flags with exactly one parameter group enabled
			return {
				'train_prior': active == 'priors',
				'train_weights': active == 'weights',
				'train_features': active == 'features',
				'train_predictors': active == 'pred',
				'train_input_bias': active == 'input_bias',
				'train_output_bias': active == 'output_bias',
			}

		# each parameter group on its own, regularization disabled
		for group in param_groups:
			err = model._check_gradient(
				randint(2, size=[model.dim_in, 1000]),
				randint(2, size=[model.dim_out, 1000]),
				1e-5,
				parameters=train_flags(group))
			self.assertLess(err, 1e-7)

		# each parameter group on its own, regularization enabled
		for norm in ['L1', 'L2']:
			for group in param_groups:
				settings = train_flags(group)
				settings.update({
					'regularize_features': {'strength': 0.5, 'norm': norm},
					'regularize_predictors': {'strength': 0.5, 'norm': norm},
					'regularize_weights': {'strength': 0.5, 'norm': norm},
				})
				err = model._check_gradient(
					randint(2, size=[model.dim_in, 1000]),
					randint(2, size=[model.dim_out, 1000]),
					1e-7,
					parameters=settings)
				self.assertLess(err, 1e-6)

		# the analytic gradient should never contain NaNs
		self.assertFalse(any(isnan(
			model._parameter_gradient(
				randint(2, size=[model.dim_in, 1000]),
				randint(2, size=[model.dim_out, 1000]),
				model._parameters()))))
Example no. 2
0
	def test_train(self):
		"""Training with zero iterations is a no-op; mini-batch training runs."""
		model = MCBM(8, 4, 20)

		# snapshot parameters before a zero-iteration training run
		before = model._parameters()

		model.train(
			randint(2, size=[model.dim_in, 2000]),
			randint(2, size=[model.dim_out, 2000]),
			parameters={'verbosity': 0, 'max_iter': 0})

		# with max_iter set to zero, parameters must be unchanged
		self.assertLess(max(abs(model._parameters() - before)), 1e-20)

		def callback(i, model):
			return

		# train for a few iterations using mini-batches and a callback
		model.train(
			randint(2, size=[model.dim_in, 10000]),
			randint(2, size=[model.dim_out, 10000]),
			parameters={
				'verbosity': 0,
				'max_iter': 10,
				'threshold': 0.,
				'batch_size': 1999,
				'callback': callback,
				'cb_iter': 1,
			})
Example no. 3
0
	def test_patchmcbm_train(self):
		"""Train a 2x2 PatchMCBM on a noisy checkerboard and check its samples."""
		xmask = ones([2, 2], dtype='bool')
		ymask = zeros([2, 2], dtype='bool')
		xmask[-1, -1] = False
		ymask[-1, -1] = True

		model = PatchMCBM(2, 2, xmask, ymask, model=MCBM(sum(xmask), 1, 1))

		# noisy checkerboard: columns are flattened 2x2 patches, each randomly inverted
		data = array([[0, 1], [1, 0]], dtype='bool').reshape(-1, 1)
		data = tile(data, (1, 1000)) ^ (randn(1, 1000) > .5)

		model.initialize(data)

		# training should converge in much less than 2000 iterations
		self.assertTrue(model.train(data, parameters={'max_iter': 2000}))

		samples = model.sample(1000) > .5
		# normalize each sample relative to its first pixel
		samples ^= samples[0]

		# less than 1 percent should have the wrong pattern (0, 1, 1, 0);
		# note: for boolean arrays `0 - samples[i]` is never positive, so the
		# previous `mean(0 - samples[i]) < 0.01` checks were vacuous — compare
		# the fraction of wrongly-set pixels directly instead
		self.assertLess(mean(samples[0]), 0.01)
		self.assertLess(mean(1 - samples[1]), 0.01)
		self.assertLess(mean(1 - samples[2]), 0.01)
		self.assertLess(mean(samples[3]), 0.01)
Example no. 4
0
    def test_gradient(self):
        """Numerically verify analytic gradients of the MCBM parameters."""
        model = MCBM(5, 2, 10)

        # randomize parameters so gradients are non-trivial
        model._set_parameters(randn(*model._parameters().shape))

        # gradient with all parameter groups trained jointly
        err = model._check_gradient(
            randint(2, size=[model.dim_in, 1000]),
            randint(2, size=[model.dim_out, 1000]),
            1e-5)
        self.assertLess(err, 1e-8)

        groups = ["priors", "weights", "features", "pred", "input_bias", "output_bias"]

        def only(group):
            # training flags with exactly one parameter group enabled
            return {
                "train_prior": group == "priors",
                "train_weights": group == "weights",
                "train_features": group == "features",
                "train_predictors": group == "pred",
                "train_input_bias": group == "input_bias",
                "train_output_bias": group == "output_bias",
            }

        # one parameter group at a time, without regularization
        for group in groups:
            err = model._check_gradient(
                randint(2, size=[model.dim_in, 1000]),
                randint(2, size=[model.dim_out, 1000]),
                1e-5,
                parameters=only(group),
            )
            self.assertLess(err, 1e-7)

        # one parameter group at a time, with regularization
        for norm in ["L1", "L2"]:
            for group in groups:
                params = only(group)
                params["regularize_features"] = {"strength": 0.5, "norm": norm}
                params["regularize_predictors"] = {"strength": 0.5, "norm": norm}
                params["regularize_weights"] = {"strength": 0.5, "norm": norm}
                err = model._check_gradient(
                    randint(2, size=[model.dim_in, 1000]),
                    randint(2, size=[model.dim_out, 1000]),
                    1e-7,
                    parameters=params,
                )
                self.assertLess(err, 1e-6)

        # the analytic gradient should be free of NaNs
        grad = model._parameter_gradient(
            randint(2, size=[model.dim_in, 1000]),
            randint(2, size=[model.dim_out, 1000]),
            model._parameters())
        self.assertFalse(any(isnan(grad)))
Example no. 5
0
	def test_patchmcbm(self):
		"""Test construction, masks, and pixel orderings of PatchMCBM."""
		# causal neighborhood masks: every preceding pixel is input,
		# the current (bottom-right) pixel is the output
		xmask = ones([8, 8], dtype='bool')
		ymask = zeros([8, 8], dtype='bool')
		xmask[-1, -1] = False
		ymask[-1, -1] = True

		model = PatchMCBM(8, 8, xmask, ymask, model=MCBM(sum(xmask), 1))

		# masks should round-trip through the constructor
		self.assertLess(max(abs(model.input_mask() - xmask)), 1e-8)
		self.assertLess(max(abs(model.output_mask() - ymask)), 1e-8)

		# with the default (row-major) ordering, pixel (i, j) conditions on
		# all (i + 1) * (j + 1) - 1 pixels preceding it in its neighborhood
		for i in range(8):
			for j in range(8):
				self.assertEqual(model[i, j].dim_in, (i + 1) * (j + 1) - 1)
				self.assertTrue(isinstance(model[i, j], MCBM))

		# random pixel ordering
		# NOTE(review): the masks above are 8x8 while the patch here is 7x5 —
		# presumably PatchMCBM crops or clips the masks; confirm against its docs
		rows, cols = 7, 5
		order = [(i // cols, i % cols) for i in permutation(rows * cols)]

		model = PatchMCBM(rows, cols, xmask, ymask, order, MCBM(sum(xmask), 1))

		self.assertLess(max(abs(model.input_mask() - xmask)), 1e-8)
		self.assertLess(max(abs(model.output_mask() - ymask)), 1e-8)

		# each conditional model's input dimension matches its per-pixel mask
		for i in range(rows):
			for j in range(cols):
				self.assertEqual(model.input_mask(i, j).sum(), model[i, j].dim_in)

		# test constructors: rebuilding from extracted masks/order reproduces them
		model0 = PatchMCBM(rows, cols, max_pcs=3)
		model1 = PatchMCBM(rows, cols, model0.input_mask(), model0.output_mask(), model0.order)

		self.assertLess(max(abs(model0.input_mask() - model1.input_mask())), 1e-8)
		self.assertLess(max(abs(model0.output_mask() - model1.output_mask())), 1e-8)
		self.assertLess(max(abs(asarray(model0.order) - asarray(model1.order))), 1e-8)

		# test computation of input masks: walking the ordering, each pixel's
		# input mask should grow by exactly one pixel over its predecessor's
		model = PatchMCBM(rows, cols, order, max_pcs=3)

		# NOTE(review): this seeds from model0.order[0] but then iterates
		# model.order — looks like it should be model.order[0]; verify intent
		i, j = model0.order[0]
		input_mask = model.input_mask(i, j)
		for i, j in model.order[1:]:
			self.assertEqual(sum(model.input_mask(i, j) - input_mask), 1)
			input_mask = model.input_mask(i, j)
Example no. 6
0
    def test_train(self):
        """Training with zero iterations is a no-op; mini-batch training runs."""
        model = MCBM(8, 4, 20)

        # snapshot parameters before a zero-iteration "training" run
        before = model._parameters()

        model.train(
            randint(2, size=[model.dim_in, 2000]),
            randint(2, size=[model.dim_out, 2000]),
            parameters={"verbosity": 0, "max_iter": 0},
        )

        # with max_iter = 0 the parameters must not have moved
        self.assertLess(max(abs(model._parameters() - before)), 1e-20)

        def callback(i, model):
            return

        # train for a few iterations using mini-batches and a callback
        model.train(
            randint(2, size=[model.dim_in, 10000]),
            randint(2, size=[model.dim_out, 10000]),
            parameters={
                "verbosity": 0,
                "max_iter": 10,
                "threshold": 0.0,
                "batch_size": 1999,
                "callback": callback,
                "cb_iter": 1,
            },
        )
Example no. 7
0
	def test_basics(self):
		"""Check hyperparameters and the shapes of parameters and outputs."""
		dim_in = 10
		num_components = 7
		num_features = 50
		num_samples = 100

		# create model
		model = MCBM(dim_in, num_components, num_features)

		# generate outputs, likelihoods, and posterior samples
		inputs = randint(2, size=[dim_in, num_samples])
		outputs = model.sample(inputs)
		loglik = model.loglikelihood(inputs, outputs)
		posterior = model.sample_posterior(inputs, outputs)

		# hyperparameters round-trip through the constructor
		self.assertEqual(model.dim_in, dim_in)
		self.assertEqual(model.num_components, num_components)
		self.assertEqual(model.num_features, num_features)

		# parameter matrices have the expected shapes
		expected_shapes = {
			'priors': (num_components, 1),
			'weights': (num_components, num_features),
			'features': (dim_in, num_features),
			'predictors': (num_components, dim_in),
			'input_bias': (dim_in, num_components),
			'output_bias': (num_components, 1),
		}
		for name, (n_rows, n_cols) in expected_shapes.items():
			param = getattr(model, name)
			self.assertEqual(param.shape[0], n_rows)
			self.assertEqual(param.shape[1], n_cols)

		# outputs and log-likelihoods are row vectors over the samples
		self.assertEqual(outputs.shape[0], 1)
		self.assertEqual(outputs.shape[1], num_samples)
		self.assertEqual(loglik.shape[0], 1)
		self.assertEqual(loglik.shape[1], num_samples)

		# posterior samples are valid component indices
		self.assertLess(max(posterior), model.num_components)
		self.assertGreaterEqual(min(posterior), 0)
		self.assertEqual(posterior.shape[0], 1)
		self.assertEqual(posterior.shape[1], num_samples)
Example no. 8
0
    def test_basics(self):
        """Check hyperparameters and the shapes of parameters and outputs."""
        dim_in = 10
        num_components = 7
        num_features = 50
        num_samples = 100

        # create model and sample from it
        model = MCBM(dim_in, num_components, num_features)

        inputs = randint(2, size=[dim_in, num_samples])
        outputs = model.sample(inputs)
        loglik = model.loglikelihood(inputs, outputs)
        posterior = model.sample_posterior(inputs, outputs)

        # hyperparameters round-trip through the constructor
        self.assertEqual(model.dim_in, dim_in)
        self.assertEqual(model.num_components, num_components)
        self.assertEqual(model.num_features, num_features)

        # parameter matrices have the expected shapes
        self.assertEqual(model.priors.shape[0], num_components)
        self.assertEqual(model.priors.shape[1], 1)
        self.assertEqual(model.weights.shape[0], num_components)
        self.assertEqual(model.weights.shape[1], num_features)
        self.assertEqual(model.features.shape[0], dim_in)
        self.assertEqual(model.features.shape[1], num_features)
        self.assertEqual(model.predictors.shape[0], num_components)
        self.assertEqual(model.predictors.shape[1], dim_in)
        self.assertEqual(model.input_bias.shape[0], dim_in)
        self.assertEqual(model.input_bias.shape[1], num_components)
        self.assertEqual(model.output_bias.shape[0], num_components)
        self.assertEqual(model.output_bias.shape[1], 1)

        # outputs and log-likelihoods are row vectors over the samples
        self.assertEqual(outputs.shape[0], 1)
        self.assertEqual(outputs.shape[1], num_samples)
        self.assertEqual(loglik.shape[0], 1)
        self.assertEqual(loglik.shape[1], num_samples)

        # posterior samples are valid component indices
        self.assertLess(max(posterior), model.num_components)
        self.assertGreaterEqual(min(posterior), 0)
        self.assertEqual(posterior.shape[0], 1)
        self.assertEqual(posterior.shape[1], num_samples)
Example no. 9
0
	def test_patchmcbm_subscript(self):
		"""Pixel models can be replaced via subscript; wrong types raise."""
		xmask = ones([2, 2], dtype='bool')
		ymask = zeros([2, 2], dtype='bool')
		xmask[-1, -1] = False
		ymask[-1, -1] = True

		model = PatchMCBM(2, 2, xmask, ymask)

		# assigning a compatible MCBM should be reflected by later reads
		replacement = MCBM(model[0, 1].dim_in, 12, 47)
		model[0, 1] = replacement

		self.assertEqual(model[0, 1].num_components, replacement.num_components)
		self.assertEqual(model[0, 1].num_features, replacement.num_features)

		# assigning anything but an MCBM should raise a TypeError
		def wrong_assign():
			model[1, 1] = 'string'

		self.assertRaises(TypeError, wrong_assign)
Example no. 10
0
	def test_pickle(self):
		"""Hyperparameters and parameters should survive a pickle round-trip."""
		mcbm0 = MCBM(11, 4, 21)

		# NOTE(review): mkstemp also returns an open file descriptor which is
		# never closed here; left as-is since `os` may not be imported in this file
		tmp_file = mkstemp()[1]

		# store model; pickle requires binary mode ('wb'), text mode fails on Python 3
		with open(tmp_file, 'wb') as handle:
			dump({'mcbm': mcbm0}, handle)

		# load model; likewise binary mode ('rb') is required
		with open(tmp_file, 'rb') as handle:
			mcbm1 = load(handle)['mcbm']

		# make sure parameters haven't changed
		self.assertEqual(mcbm0.dim_in, mcbm1.dim_in)
		self.assertEqual(mcbm0.num_components, mcbm1.num_components)
		self.assertEqual(mcbm0.num_features, mcbm1.num_features)

		self.assertLess(max(abs(mcbm0.priors - mcbm1.priors)), 1e-20)
		self.assertLess(max(abs(mcbm0.weights - mcbm1.weights)), 1e-20)
		self.assertLess(max(abs(mcbm0.features - mcbm1.features)), 1e-20)
		self.assertLess(max(abs(mcbm0.predictors - mcbm1.predictors)), 1e-20)
		self.assertLess(max(abs(mcbm0.input_bias - mcbm1.input_bias)), 1e-20)
		self.assertLess(max(abs(mcbm0.output_bias - mcbm1.output_bias)), 1e-20)