Example #1
File: isa.py Project: afcarl/isa
		def compute_map(X):
			"""
			Computes the MAP for Laplacian prior and Gaussian additive noise.
			"""

			AA = dot(A.T, A)
			Ax = dot(A.T, X)

			def f(y, i):
				y = y.reshape(-1, 1)
				return sum(square(X[:, [i]] - dot(A, y))) / (2. * noise_var) + beta * sum(log(1. + square(y / sigma)))

			def df(y, i):
				y = y.reshape(-1, 1)
				grad = (dot(AA, y) - Ax[:, [i]]) / noise_var + (2. * beta / sigma**2) * y / (1. + square(y / sigma))
				return grad.ravel()

			# initial hidden states
			Y = asshmarray(dot(A.T, X) / sum(square(A), 0).reshape(-1, 1))

			def parfor(i):
				Y[:, i] = fmin_cg(f, Y[:, i], df, (i,), disp=False, maxiter=100, gtol=tol)
			mapp(parfor, range(X.shape[1]))

			return Y
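For reference, a minimal self-contained sketch of the same per-column MAP optimization, using only numpy and scipy. In the original, A, noise_var, beta, sigma and tol come from the enclosing ISA object; the values below are toy placeholders.

import numpy as np
from scipy.optimize import fmin_cg

A = np.random.randn(8, 16)        # toy basis: visible dim x hidden dim
x = np.random.randn(8, 1)         # one data column
noise_var, beta, sigma = 0.1, 1.0, 1.0

def f(y):
    # negative log-posterior: Gaussian reconstruction term plus the sparsity penalty used above
    y = y.reshape(-1, 1)
    recon = np.sum(np.square(x - A.dot(y))) / (2. * noise_var)
    penalty = beta * np.sum(np.log(1. + np.square(y / sigma)))
    return recon + penalty

def df(y):
    # gradient of f with respect to y
    y = y.reshape(-1, 1)
    grad = A.T.dot(A.dot(y) - x) / noise_var \
        + (2. * beta / sigma**2) * y / (1. + np.square(y / sigma))
    return grad.ravel()

# same initialization as above: per-row least-squares projection
y0 = A.T.dot(x) / np.sum(np.square(A), 0).reshape(-1, 1)
y_map = fmin_cg(f, y0.ravel(), df, disp=False, maxiter=100)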
Example #2
File: isa.py Project: afcarl/isa
	def _sample_posterior_cond(self, Y, X, S, W, WX, Q):
		"""
		Samples posterior conditioned on scales.

		B{References:}
			- Doucet, A. (2010). I{A Note on Efficient Conditional Simulation of
			Gaussian Distributions.}
		"""

		# sample hidden states conditioned on scales
		Y_ = multiply(randn(self.num_hiddens, X.shape[1]), S)

		X_ = X - dot(self.A, Y_)

		# variances and incomplete covariance matrices
		v = square(S).reshape(-1, 1, X.shape[1])
		C = multiply(v, self.A.T.reshape(self.num_hiddens, -1, 1)).transpose([2, 0, 1]) # TODO: FIX MEMORY ISSUES

		# update hidden states
		Y = asshmarray(Y)
		def parfor(i):
			Y[:, i] = dot(C[i], solve(dot(self.A, C[i]), X_[:, i], sym_pos=True))
		mapp(parfor, range(X.shape[1]))

		return WX + dot(Q, Y + Y_)
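A hedged sketch of the conditional-simulation identity from the Doucet (2010) note that the code above relies on: to sample y from N(0, D) conditioned on A y = x, draw an unconstrained sample and correct it toward the constraint. Dimensions and values below are toy placeholders; in the code above D = diag(S**2) for one column of scales.

import numpy as np
from numpy.linalg import solve

num_visibles, num_hiddens = 4, 8
A = np.random.randn(num_visibles, num_hiddens)
s = np.abs(np.random.randn(num_hiddens))        # per-unit scales (one column of S)
x = np.random.randn(num_visibles)               # one visible data point

y_ = s * np.random.randn(num_hiddens)           # unconstrained sample from N(0, D)
C = (s**2)[:, None] * A.T                       # D A^T with D = diag(s**2)
y = y_ + C.dot(solve(A.dot(C), x - A.dot(y_)))  # corrected sample satisfying A y == x

assert np.allclose(A.dot(y), x)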
Example #3
	def loglikelihood(self, data):
		# allocate memory
		logjoint = shmarray.zeros([len(self), data.shape[1]])

		# compute joint density over components and data points
		def loglikelihood_(i):
			logjoint[i, :] = self[i].loglikelihood(data) + log(self.priors[i])
		mapp(loglikelihood_, range(len(self)))

		# marginalize
		return asarray(logsumexp(logjoint, 0)).flatten()
Example #4
    def loglikelihood(self, data):
        # allocate memory
        logjoint = shmarray.zeros([len(self), data.shape[1]])

        # compute joint density over components and data points
        def loglikelihood_(i):
            logjoint[i, :] = self[i].loglikelihood(data) + log(self.priors[i])

        mapp(loglikelihood_, range(len(self)))

        # marginalize
        return asarray(logsumexp(logjoint, 0)).flatten()
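The marginalization step in the two examples above amounts to a logsumexp over the component axis. A small sketch with toy numbers; scipy's logsumexp stands in for the project's own helper, which appears to return a matrix, hence the asarray(...).flatten() above.

import numpy as np
from scipy.special import logsumexp

# rows: log p(x_n | component k) + log prior_k, columns: data points
logjoint = np.array([[-1.2, -3.4, -0.5],
                     [-0.8, -2.1, -4.0]])
loglik = logsumexp(logjoint, axis=0)    # log p(x_n), one value per data point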
Example #5
def _preprocess(images, input_mask, output_mask):
    """
    Extract causal neighborhoods from images.

    @type  images: C{ndarray}/C{list}
    @param images: array or list of images to process

    @rtype: C{tuple}
    @return: one array storing inputs (neighborhoods) and one array storing outputs (pixels)
    """
    def process(image):
        inputs, outputs = generate_data_from_image(image, input_mask,
                                                   output_mask)
        inputs = asarray(inputs.T.reshape(
            image.shape[0] - input_mask.shape[0] + 1,
            image.shape[1] - input_mask.shape[1] + 1, -1),
                         dtype='float32')
        outputs = asarray(outputs.T.reshape(
            image.shape[0] - input_mask.shape[0] + 1,
            image.shape[1] - input_mask.shape[1] + 1, -1),
                          dtype='float32')
        return inputs, outputs

    inputs, outputs = zip(*mapp(process, images))

    return asarray(inputs), asarray(outputs)
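The reshape above arranges the extracted neighborhoods back into an image-shaped grid: an H x W image with an h x w input mask yields one valid neighborhood per position, i.e. an (H - h + 1) x (W - w + 1) grid. A toy check of that arithmetic:

H, W, h, w = 64, 64, 5, 5                  # toy image and mask sizes
grid_shape = (H - h + 1, W - w + 1)        # (60, 60) neighborhoods per image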
Example #6
def getDistance(labelMap):
    '''
    Return an N*N matrix with the spatial distance (infinity norm) between any
    two superpixels; see paper Eq. (2).
    
    '''
    m, n = labelMap.shape
    maxLabel = labelMap.max() + 1
    pos = []
    for label in range(maxLabel):
        mask = labelMap == label
        x = (mask.sum(axis=0) * np.array(range(n))).sum() / float(mask.sum())
        y = (mask.sum(axis=1) * np.array(range(m))).sum() / float(mask.sum())
        pos += [(x, y)]

    def f_distance(_, i, j):
        if i == j:
            return 0.0
#        dis = ((pos[i][0]-pos[j][0])**2+(pos[i][1]-pos[j][1])**2)**0.5
        # see paper Eq. (2)
        dis = max([
            abs(pos[i][0] - pos[j][0]) / float(n),
            abs(pos[i][1] - pos[j][1]) / float(m)
        ])
        # infinity norm distance
        return dis

    distanceMa = mapp(f_distance,
                      np.zeros((maxLabel, maxLabel)),
                      need_i_j=True)
    #io.imshow(distanceMa[:,:])
    return distanceMa
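An equivalent vectorized way to build the same matrix (infinity-norm distance between superpixel centroids, each axis normalized by the image size), shown only as a cross-check against the mapp-based version above; the centroids and image size below are toy placeholders.

import numpy as np

m, n = 100, 150                                     # toy image height and width
pos = [(10.0, 20.0), (40.0, 80.0), (120.0, 5.0)]    # toy centroids as (x, y)

pos_arr = np.asarray(pos, dtype=float)
dx = np.abs(pos_arr[:, 0:1] - pos_arr[None, :, 0]) / float(n)
dy = np.abs(pos_arr[:, 1:2] - pos_arr[None, :, 1]) / float(m)
distanceMa = np.maximum(dx, dy)                     # infinity norm, zero diagonal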
Example #7
	def logposterior(self, data):
		"""
		Computes the log-posterior distribution over components.

		@type  data: array_like
		@param data: data points stored in columns
		"""

		# allocate memory
		logpost = shmarray.zeros([len(self), data.shape[1]])

		# compute log-joint
		def logposterior_(i):
			logpost[i, :] = self[i].loglikelihood(data) + log(self.priors[i])
		mapp(logposterior_, range(len(self)))

		# normalize to get log-posterior
		logpost -= logsumexp(logpost, 0)

		return asarray(logpost)
Example #8
    def logposterior(self, data):
        """
		Computes the log-posterior distribution over components.

		@type  data: array_like
		@param data: data points stored in columns
		"""

        # allocate memory
        logpost = shmarray.zeros([len(self), data.shape[1]])

        # compute log-joint
        def logposterior_(i):
            logpost[i, :] = self[i].loglikelihood(data) + log(self.priors[i])

        mapp(logposterior_, range(len(self)))

        # normalize to get log-posterior
        logpost -= logsumexp(logpost, 0)

        return asarray(logpost)
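The normalization in the two examples above is done in the log domain, which keeps the posterior well defined even when the joint log-densities are very negative. A toy sketch, again using scipy's logsumexp in place of the project helper:

import numpy as np
from scipy.special import logsumexp

logjoint = np.array([[-1001.0, -3.4],
                     [-1000.0, -2.1]])
logpost = logjoint - logsumexp(logjoint, axis=0)
post = np.exp(logpost)          # each column sums to 1, with no underflow to 0/0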
Example #9
    def f(refinedImg):
        h, w = refinedImg.shape[:2]
        a, b, c, d = int(0.25 * h), int(0.75 * h), int(0.25 * w), int(0.75 * w)
        center = refinedImg[a:b, c:d].sum()
        ratio = float(center) / (refinedImg.sum() - center)

        m, n = refinedImg.shape
        distribut = 1. / mapp(
            lambda x, i, j: float(x) * ((i - m / 2)**2 + (j - n / 2)**2),
            refinedImg, True).sum()

        var = np.var(refinedImg)
        return ratio, distribut, var
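The 'distribut' score above is the reciprocal of the intensity-weighted squared distance to the image centre (the per-element mapp call passes each value together with its indices). A vectorized equivalent on a toy image:

import numpy as np

img = np.random.rand(32, 32)                 # toy stand-in for refinedImg
m, n = img.shape
i, j = np.indices(img.shape)
distribut = 1. / (img * ((i - m / 2)**2 + (j - n / 2)**2)).sum()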
Example #10
def dc_component(images, patch_size):
    input_mask = ones([patch_size, patch_size], dtype=bool)
    output_mask = zeros([patch_size, patch_size], dtype=bool)

    num_samples_per_image = int(1000000. / images.shape[0] + 1.)

    def extract(image):
        patches = generate_data_from_image(image, input_mask, output_mask,
                                           num_samples_per_image)[0]
        return patches

    patches = vstack(mapp(extract, images))
    patches = patches.reshape(patches.shape[0], -1)

    return mean(patches, 1)[None, :]
Example #11
def dc_component(images, patch_size):
	input_mask = ones([patch_size, patch_size], dtype=bool)
	output_mask = zeros([patch_size, patch_size], dtype=bool)

	num_samples_per_image = int(1000000. / images.shape[0] + 1.)

	def extract(image):
		patches = generate_data_from_image(image,
			input_mask,
			output_mask,
			num_samples_per_image)[0]
		return patches
	patches = vstack(mapp(extract, images))
	patches = patches.reshape(patches.shape[0], -1)

	return mean(patches, 1)[None, :]
Example #12
File: isa.py Project: afcarl/isa
	def train_prior(self, Y, **kwargs):
		"""
		Optimize parameters of the marginal distribution over the hidden variables.
		The parameters are fit to maximize the average log-likelihood of the
		columns in `Y`.

		@type  Y: array_like
		@param Y: hidden states
		"""

		max_iter = kwargs.get('max_iter', 10)
		tol = kwargs.get('tol', 1e-7)

		offset = [0]
		for model in self.subspaces:
			offset.append(offset[-1] + model.dim)

		def parfor(i):
			model = self.subspaces[i]
			model.train(Y[offset[i]:offset[i] + model.dim], max_iter=max_iter, tol=tol)
			return model
		self.subspaces = mapp(parfor, range(len(self.subspaces)))
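The offset list above is just a cumulative sum of subspace dimensions, so each subspace trains on the rows of Y it owns. A toy illustration:

dims = [2, 3, 1]                 # toy subspace dimensions
offset = [0]
for dim in dims:
    offset.append(offset[-1] + dim)
# offset == [0, 2, 5, 6]; subspace i trains on rows offset[i]:offset[i] + dims[i]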
Example #13
    def train(self, data, weights=None, num_epochs=100, threshold=1e-5):
        """
		Adapt the parameters of the model using expectation maximization (EM).

		@type  data: array_like
		@param data: data stored in columns

		@type  weights: array_like
		@param weights: an optional weight for every data point

		@type  num_epochs: integer
		@param num_epochs: maximum number of training epochs

		@type  threshold: float
		@param threshold: training stops if performance gain is below threshold
		"""

        if not self.initialized:
            # initialize components
            def initialize_(i):
                self.components[i].initialize(data)
                return self.components[i]

            self.components = mapp(initialize_,
                                   range(len(self)),
                                   max_processes=1)
            self.initialized = True

        # current performance
        value = self.evaluate(data)

        if Distribution.VERBOSITY >= 2:
            print 'Epoch 0\t', value

        for epoch in range(num_epochs):
            # compute posterior over components (E)
            post = exp(self.logposterior(data))
            post /= sum(post, 0)

            # incorporate conditional prior
            if weights is not None:
                post *= weights

            # adjust priors over components (M)
            self.priors = sum(post, 1)

            if self.alpha is not None:
                # regularization with Dirichlet prior
                self.priors += self.alpha - 1.
            self.priors /= sum(self.priors)

            # adjust components (M)
            def train_(i):
                self.components[i].train(data, weights=post[i, :])
                return self.components[i]

            self.components = mapp(train_, range(len(self)))

            # check for convergence
            new_value = self.evaluate(data)

            if Distribution.VERBOSITY >= 2:
                print 'Epoch ', epoch, '\t', new_value

            if value - new_value < threshold:
                if Distribution.VERBOSITY >= 1:
                    print 'Training converged...'
                return
            value = new_value

        if Distribution.VERBOSITY >= 1:
            print 'Training finished...'
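The prior update in the M-step above is, with the optional Dirichlet term, a MAP estimate of the mixing weights: each prior is proportional to the summed responsibilities plus (alpha - 1). A toy sketch of just that step:

import numpy as np

post = np.array([[0.9, 0.2, 0.4],        # responsibilities; columns sum to 1
                 [0.1, 0.8, 0.6]])
alpha = 2.0                              # symmetric Dirichlet hyperparameter
priors = post.sum(axis=1) + (alpha - 1.)
priors /= priors.sum()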
Example #14
def main(argv):
    experiment = Experiment()

    parser = ArgumentParser(argv[0], description=__doc__)
    parser.add_argument('--data',
                        '-d',
                        type=str,
                        default='data/vanhateren_deq2_train.mat')
    parser.add_argument('--num_data', '-N', type=int, default=1000000)
    parser.add_argument('--num_valid', '-V', type=int, default=200000)
    parser.add_argument('--input_size', '-i', type=int, default=9)
    parser.add_argument('--max_iter', '-I', type=int, default=3000)
    parser.add_argument('--num_components', '-c', type=int, default=128)
    parser.add_argument('--num_features', '-f', type=int, default=48)
    parser.add_argument('--num_scales', '-s', type=int, default=4)
    parser.add_argument('--verbosity', '-v', type=int, default=1)
    parser.add_argument('--output',
                        '-o',
                        type=str,
                        default='results/vanhateren_deq2/mcgsm.{0}.{1}.xpck')

    args = parser.parse_args(argv[1:])

    ### DATA HANDLING

    if args.verbosity > 0:
        print 'Loading data...'

    # load data
    images = loadmat(args.data)['data']

    # define causal neighborhood
    input_mask, output_mask = generate_masks(input_size=args.input_size,
                                             output_size=1)

    # extract causal neighborhoods
    num_samples = int((args.num_data + args.num_valid) / images.shape[0] + .9)

    def extract(image):
        return generate_data_from_image(image, input_mask, output_mask,
                                        num_samples)

    inputs, outputs = zip(*mapp(extract, images))
    inputs, outputs = hstack(inputs), hstack(outputs)

    inputs_train = inputs[:, :args.num_data]
    outputs_train = outputs[:, :args.num_data]
    inputs_valid = inputs[:, args.num_data:]
    outputs_valid = outputs[:, args.num_data:]

    if inputs_valid.size < 100:
        print 'Not enough data for validation.'
        inputs_valid = None
        outputs_valid = None

    ### MODEL TRAINING

    if args.verbosity > 0:
        print 'Preconditioning...'

    preconditioner = WhiteningPreconditioner(inputs_train, outputs_train)

    inputs_train, outputs_train = preconditioner(inputs_train, outputs_train)
    if inputs_valid is not None:
        inputs_valid, outputs_valid = preconditioner(inputs_valid,
                                                     outputs_valid)

    # free memory
    del inputs
    del outputs

    if args.verbosity > 0:
        print 'Training model...'

    model = MCGSM(dim_in=inputs_train.shape[0],
                  dim_out=outputs_train.shape[0],
                  num_components=args.num_components,
                  num_features=args.num_features,
                  num_scales=args.num_scales)

    def callback(i, mcgsm):
        experiment['args'] = args
        experiment['model'] = mcgsm
        experiment['preconditioner'] = preconditioner
        experiment['input_mask'] = input_mask
        experiment['output_mask'] = output_mask
        experiment.save(args.output)

    model.train(inputs_train,
                outputs_train,
                inputs_valid,
                outputs_valid,
                parameters={
                    'verbosity': args.verbosity,
                    'cb_iter': 500,
                    'callback': callback,
                    'max_iter': args.max_iter
                })

    ### SAVE RESULTS

    experiment['args'] = args
    experiment['model'] = model
    experiment['preconditioner'] = preconditioner
    experiment['input_mask'] = input_mask
    experiment['output_mask'] = output_mask
    experiment.save(args.output)

    return 0
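The main(argv) function above is presumably wired up with the usual entry-point boilerplate, which is not part of this excerpt; something along these lines:

import sys

if __name__ == '__main__':
    sys.exit(main(sys.argv))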
Example #15
    def train_prior(self, Y, **kwargs):
        def parfor(i):
            self.marginals[i].train(Y[[i]], **kwargs)
            self.marginals[i].normalize()

        mapp(parfor, range(self.dim))
Example #16
File: isa.py Project: afcarl/isa
	def loglikelihood(self, X, num_samples=10, method='biased', sampling_method=('ais', {'num_steps': 10}), **kwargs):
		"""
		Computes the log-likelihood (in nats) for a set of data samples. If the model is overcomplete,
		the log-likelihood is estimated using one of two importance sampling methods. The biased method
		tends to underestimate the log-likelihood. To get rid of the bias, use more samples.
		The unbiased method oftentimes suffers from extremely high variance and should be used with
		caution.

		@type  X: array_like
		@param X: a number of visible states stored in columns

		@type  method: string
		@param method: whether to use the 'biased' or 'unbiased' method

		@type  num_samples: integer
		@param num_samples: number of generated importance weights

		@type  sampling_method: tuple
		@param sampling_method: method and parameters to generate importance weights

		@type  return_all: boolean
		@param return_all: if true, return all importance weights and don't average (default: False)

		@rtype: ndarray
		@return: the log-probability of each data point
		"""

		return_all = kwargs.get('return_all', False)

		if self.num_hiddens == self.num_visibles:
			return self.prior_loglikelihood(dot(inv(self.A), X)) - slogdet(self.A)[1]

		else:
			if method == 'biased':
				# sample importance weights
				log_is_weights = asshmarray(empty([num_samples, X.shape[1]]))
				def parfor(i):
					log_is_weights[i] = self.sample_posterior_ais(X, **sampling_method[1])[1]
				mapp(parfor, range(num_samples))

				if return_all:
					return asarray(log_is_weights)
				else:
					# average importance weights to get log-likelihoods
					return logmeanexp(log_is_weights, 0)

			elif method == 'unbiased':
				loglik = empty(X.shape[1])

				# sample importance weights
				log_is_weights = asshmarray(empty([num_samples, X.shape[1]]))
				def parfor(i):
					log_is_weights[i] = self.sample_posterior_ais(X, **sampling_method[1])[1]
				mapp(parfor, range(num_samples))

				# obtain an initial estimate using the biased method
				is_weights = exp(log_is_weights)
				is_mean = mean(is_weights, 0)
				is_var = var(is_weights, 0, ddof=1)

				# Taylor series expansion points
				c = (is_var + square(is_mean)) / is_mean

				# logarithmic series distribution parameters
				p = sqrt(is_var / (is_var + square(is_mean)))

				# sample "number of importance samples" for each data point
				num_samples = array([logseries(p_) for p_ in p], dtype='uint32')

				for k in range(1, max(num_samples) + 1):
					# data points for which to generate k importance weights
					indices = where(num_samples == k)[0]

					# sample importance weights
					if len(indices) > 0:
						log_is_weights = asshmarray(empty([k, len(indices)]))

						def parfor(i):
							log_is_weights[i] = self.sample_posterior_ais(X[:, indices], **sampling_method[1])[1]
						mapp(parfor, range(k))

						# hyperparameter used for selected datapoints
						c_ = c[indices]
						p_ = p[indices]

						# unbiased estimate of log-likelihood
						loglik[indices] = log(c_) + log(1. - p_) * prod((c_ - exp(log_is_weights)) / (c_ * p_), 0)

				if return_all:
					return loglik
				else:
					return mean(loglik, 0).reshape(1, -1)

			else:
				raise NotImplementedError('Unknown method \'{0}\'.'.format(method))
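The biased estimate in the branch above is a log-mean-exp of the importance weights. A toy sketch of that reduction, using scipy's logsumexp in place of the project's logmeanexp:

import numpy as np
from scipy.special import logsumexp

log_is_weights = np.random.randn(10, 5)   # num_samples x number of data points (toy)
loglik = logsumexp(log_is_weights, axis=0) - np.log(log_is_weights.shape[0])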
Example #17
	def train(self, data, weights=None, num_epochs=100, threshold=1e-5):
		"""
		Adapt the parameters of the model using expectation maximization (EM).

		@type  data: array_like
		@param data: data stored in columns

		@type  weights: array_like
		@param weights: an optional weight for every data point

		@type  num_epochs: integer
		@param num_epochs: maximum number of training epochs

		@type  threshold: float
		@param threshold: training stops if performance gain is below threshold
		"""

		if not self.initialized:
			# initialize components
			def initialize_(i):
				self.components[i].initialize(data)
				return self.components[i]
			self.components = mapp(initialize_, range(len(self)), max_processes=1)
			self.initialized = True

		# current performance
		value = self.evaluate(data)

		if Distribution.VERBOSITY >= 2:
			print 'Epoch 0\t', value

		for epoch in range(num_epochs):
			# compute posterior over components (E)
			post = exp(self.logposterior(data))
			post /= sum(post, 0)

			# incorporate conditional prior
			if weights is not None:
				post *= weights

			# adjust priors over components (M)
			self.priors = sum(post, 1)

			if self.alpha is not None:
				# regularization with Dirichlet prior
				self.priors += self.alpha - 1.
			self.priors /= sum(self.priors)

			# adjust components (M)
			def train_(i):
				self.components[i].train(data, weights=post[i, :])
				return self.components[i]
			self.components = mapp(train_, range(len(self)))

			# check for convergence
			new_value = self.evaluate(data)

			if Distribution.VERBOSITY >= 2:
				print 'Epoch ', epoch, '\t', new_value

			if value - new_value < threshold:
				if Distribution.VERBOSITY >= 1:
					print 'Training converged...'
				return
			value = new_value

		if Distribution.VERBOSITY >= 1:
			print 'Training finished...'
Example #18
def main(argv):
	experiment = Experiment()

	parser = ArgumentParser(argv[0], description=__doc__)
	parser.add_argument('--data',           '-d', type=str, default='data/vanhateren_deq2_train.mat')
	parser.add_argument('--num_data',       '-N', type=int, default=1000000)
	parser.add_argument('--num_valid',      '-V', type=int, default=200000)
	parser.add_argument('--input_size',     '-i', type=int, default=9)
	parser.add_argument('--max_iter',       '-I', type=int, default=3000)
	parser.add_argument('--num_components', '-c', type=int, default=128)
	parser.add_argument('--num_features',   '-f', type=int, default=48)
	parser.add_argument('--num_scales',     '-s', type=int, default=4)
	parser.add_argument('--verbosity',      '-v', type=int, default=1)
	parser.add_argument('--output',         '-o', type=str, default='results/vanhateren_deq2/mcgsm.{0}.{1}.xpck')

	args = parser.parse_args(argv[1:])


	### DATA HANDLING

	if args.verbosity > 0:
		print 'Loading data...'

	# load data
	images = loadmat(args.data)['data']

	# define causal neighborhood
	input_mask, output_mask = generate_masks(input_size=args.input_size, output_size=1)

	# extract causal neighborhoods
	num_samples = int((args.num_data + args.num_valid) / images.shape[0] + .9)

	def extract(image):
		return generate_data_from_image(
			image, input_mask, output_mask, num_samples)

	inputs, outputs = zip(*mapp(extract, images))
	inputs, outputs = hstack(inputs), hstack(outputs)

	inputs_train = inputs[:, :args.num_data]
	outputs_train = outputs[:, :args.num_data]
	inputs_valid = inputs[:, args.num_data:]
	outputs_valid = outputs[:, args.num_data:]

	if inputs_valid.size < 100:
		print 'Not enough data for validation.'
		inputs_valid = None
		outputs_valid = None


	### MODEL TRAINING

	if args.verbosity > 0:
		print 'Preconditioning...'

	preconditioner = WhiteningPreconditioner(inputs_train, outputs_train)

	inputs_train, outputs_train = preconditioner(inputs_train, outputs_train)
	if inputs_valid is not None:
		inputs_valid, outputs_valid = preconditioner(inputs_valid, outputs_valid)

	# free memory
	del inputs
	del outputs

	if args.verbosity > 0:
		print 'Training model...'

	model = MCGSM(
		dim_in=inputs_train.shape[0],
		dim_out=outputs_train.shape[0],
		num_components=args.num_components,
		num_features=args.num_features,
		num_scales=args.num_scales)

	def callback(i, mcgsm):
		experiment['args'] = args
		experiment['model'] = mcgsm
		experiment['preconditioner'] = preconditioner
		experiment['input_mask'] = input_mask
		experiment['output_mask'] = output_mask
		experiment.save(args.output)

	model.train(
		inputs_train, outputs_train,
		inputs_valid, outputs_valid,
		parameters={
			'verbosity': args.verbosity,
			'cb_iter': 500,
			'callback': callback,
			'max_iter': args.max_iter})


	### SAVE RESULTS

	experiment['args'] = args
	experiment['model'] = model
	experiment['preconditioner'] = preconditioner
	experiment['input_mask'] = input_mask
	experiment['output_mask'] = output_mask
	experiment.save(args.output)

	return 0
Example #19
	def train_prior(self, Y, **kwargs):
		def parfor(i):
			self.marginals[i].train(Y[[i]], **kwargs)
			self.marginals[i].normalize()
		mapp(parfor, range(self.dim))