Example #1
def train_model(img, input_mask, output_mask):
    # generate data
    inputs, outputs = generate_data_from_image(img, input_mask, output_mask,
                                               120000)

    # split data into training and validation sets
    data_train = inputs[:, :100000], outputs[:, :100000]
    data_valid = inputs[:, 100000:], outputs[:, 100000:]

    # compute normalizing transformation
    pre = WhiteningPreconditioner(*data_train)

    # initialize model
    model = MCGSM(dim_in=data_train[0].shape[0],
                  dim_out=data_train[1].shape[0],
                  num_components=8,
                  num_scales=4,
                  num_features=30)

    # fit parameters
    model.initialize(*pre(*data_train))
    model.train(*chain(pre(*data_train), pre(*data_valid)),
                parameters={
                    'verbosity': 1,
                    'max_iter': 1000,
                    'threshold': 1e-7,
                    'val_iter': 5,
                    'val_look_ahead': 10,
                    'num_grad': 20,
                })

    return model, pre
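train_model above relies on several names that are not defined in the snippet itself: generate_data_from_image, WhiteningPreconditioner and MCGSM from the cmt package, chain from itertools, and the two boolean masks supplied by the caller. The sketch below shows one way the function might be invoked; the exact module paths and the 5x5 causal-neighbourhood mask layout are assumptions for illustration, not part of the original example.

from itertools import chain

from numpy import zeros
from numpy.random import rand

# assumed module paths for the cmt names used by train_model; adjust them to
# wherever MCGSM, WhiteningPreconditioner and generate_data_from_image are
# exposed in your installation
from cmt.models import MCGSM
from cmt.transforms import WhiteningPreconditioner
from cmt.tools import generate_data_from_image

# hypothetical masks: a 5x5 window with a causal neighbourhood as input and a
# single pixel as output; the layout is illustrative only
input_mask = zeros([5, 5], dtype=bool)
input_mask[:2] = True        # the two rows above the target pixel
input_mask[2, :2] = True     # the two pixels to its left
output_mask = zeros([5, 5], dtype=bool)
output_mask[2, 2] = True     # the pixel being predicted

# any grayscale image as a 2D float array will do for a quick test
img = rand(512, 512)

model, pre = train_model(img, input_mask, output_mask)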
Example #2
def train_model(img, input_mask, output_mask):
	# generate data
	inputs, outputs = generate_data_from_image(
		img, input_mask, output_mask, 120000)

	# split data into training and validation sets
	data_train = inputs[:, :100000], outputs[:, :100000]
	data_valid = inputs[:, 100000:], outputs[:, 100000:]

	# compute normalizing transformation
	pre = WhiteningPreconditioner(*data_train)

	# initialize model
	model = MCGSM(
		dim_in=data_train[0].shape[0],
		dim_out=data_train[1].shape[0],
		num_components=8,
		num_scales=4,
		num_features=30)

	# fit parameters
	model.initialize(*pre(*data_train))
	model.train(*chain(pre(*data_train), pre(*data_valid)),
		parameters={
			'verbosity': 1,
			'max_iter': 1000,
			'threshold': 1e-7,
			'val_iter': 5,
			'val_look_ahead': 10,
			'num_grad': 20,
		})

	return model, pre
Example #3
def main(argv):
	# load image and turn into grayscale
	img = rgb2gray(imread('media/newyork.png'))

	# generate data
	inputs, outputs = generate_data_from_image(
		img, input_mask, output_mask, 220000)

	# split data into training, test, and validation sets
	inputs  = split(inputs,  [100000, 200000], 1)
	outputs = split(outputs, [100000, 200000], 1)

	data_train = inputs[0], outputs[0]
	data_test  = inputs[1], outputs[1]
	data_valid = inputs[2], outputs[2]

	# compute normalizing transformation
	pre = WhiteningPreconditioner(*data_train)

	# initialize model
	model = MCGSM(
		dim_in=data_train[0].shape[0],
		dim_out=data_train[1].shape[0],
		num_components=8,
		num_scales=4,
		num_features=32)

	# fit parameters
	model.initialize(*pre(*data_train))
	model.train(*chain(pre(*data_train), pre(*data_valid)),
		parameters={
			'verbosity': 1,
			'max_iter': 1000,
			'threshold': 1e-7,
			'val_iter': 5,
			'val_look_ahead': 10,
			'num_grad': 20,
		})

	# evaluate model
	print('Average log-likelihood: {0:.4f} [bit/px]'.format(
		-model.evaluate(data_test[0], data_test[1], pre)))

	# synthesize a new image
	img_sample = sample_image(img, model, input_mask, output_mask, pre)

	imwrite('newyork_sample.png', img_sample,
		cmap='gray',
		vmin=img.min(),
		vmax=img.max())

	# save model
	with open('image_model.pck', 'wb') as handle:
		dump({
			'model': model,
			'input_mask': input_mask,
			'output_mask': output_mask}, handle, 1)

	return 0
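Since main() pickles the trained model together with its masks, a later script can restore all three. A minimal sketch of reading the file back, assuming the same 'image_model.pck' path and that cmt is importable (unpickling an MCGSM requires it):

from pickle import load

with open('image_model.pck', 'rb') as handle:
    saved = load(handle)

# the keys mirror the dictionary passed to dump() above
model = saved['model']
input_mask = saved['input_mask']
output_mask = saved['output_mask']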
Example #4
def robust_linear_regression(x, y, num_scales=3, max_iter=1000):
    """
	Performs linear regression with Gaussian scale mixture residuals.
	$$y = ax + b + \\varepsilon,$$
	where $\\varepsilon$ is assumed to be Gaussian scale mixture distributed.
	@type  x: array_like
	@param x: list of one-dimensional inputs
	@type  y: array_like
	@param y: list of one-dimensional outputs
	@type  num_scales: int
	@param num_scales: number of Gaussian scale mixture components
	@type  max_iter: int
	@param max_iter: number of optimization steps in parameter search
	@rtype: tuple
	@return: slope and y-intercept
	"""

    x = asarray(x).reshape(1, -1)
    y = asarray(y).reshape(1, -1)

    # preprocess inputs
    m = mean(x)
    s = std(x)

    x = (x - m) / s

    # preprocess outputs using simple linear regression
    C = cov(x, y)
    a = C[0, 1] / C[0, 0]
    b = mean(y) - a * mean(x)

    y = y - (a * x + b)

    # robust linear regression
    model = MCGSM(dim_in=1,
                  dim_out=1,
                  num_components=1,
                  num_scales=num_scales,
                  num_features=0)

    model.initialize(x, y)
    model.train(x, y, parameters={'train_means': True, 'max_iter': max_iter})

    a = (a + float(model.predictors[0])) / s
    b = (b + float(model.means)) - a * m

    return a, b
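The docstring describes the residual model y = ax + b + ε with a Gaussian scale mixture for ε, which is what makes the fit robust to outliers. A small usage sketch on synthetic data follows; all numbers are made up for illustration, and the function itself additionally needs asarray, mean, std, cov from numpy and MCGSM from cmt in scope.

from numpy import linspace
from numpy.random import randn, seed

seed(0)

# synthetic data: true slope 2.0, intercept -1.0, light Gaussian noise, plus
# a handful of large outliers that would throw off ordinary least squares
x = linspace(-5., 5., 200)
y = 2. * x - 1. + 0.3 * randn(200)
y[::20] += 15. * randn(10)

a, b = robust_linear_regression(x, y, num_scales=4, max_iter=500)

print('estimated slope: {0:.3f}, intercept: {1:.3f}'.format(a, b))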
Example #5
def main(argv):
    # load image and turn into grayscale
    img = rgb2gray(imread('media/newyork.png'))

    # generate data
    inputs, outputs = generate_data_from_image(img, input_mask, output_mask,
                                               220000)

    # split data into training, test, and validation sets
    inputs = split(inputs, [100000, 200000], 1)
    outputs = split(outputs, [100000, 200000], 1)

    data_train = inputs[0], outputs[0]
    data_test = inputs[1], outputs[1]
    data_valid = inputs[2], outputs[2]

    # compute normalizing transformation
    pre = WhiteningPreconditioner(*data_train)

    # initialize model
    model = MCGSM(dim_in=data_train[0].shape[0],
                  dim_out=data_train[1].shape[0],
                  num_components=8,
                  num_scales=4,
                  num_features=32)

    # fit parameters
    model.initialize(*pre(*data_train))
    model.train(*chain(pre(*data_train), pre(*data_valid)),
                parameters={
                    'verbosity': 1,
                    'max_iter': 1000,
                    'threshold': 1e-7,
                    'val_iter': 5,
                    'val_look_ahead': 10,
                    'num_grad': 20,
                })

    # evaluate model
    print('Average log-likelihood: {0:.4f} [bit/px]'.format(
        -model.evaluate(data_test[0], data_test[1], pre)))

    # synthesize a new image
    img_sample = sample_image(img, model, input_mask, output_mask, pre)

    imwrite('newyork_sample.png',
            img_sample,
            cmap='gray',
            vmin=img.min(),
            vmax=img.max())

    # save model
    with open('image_model.pck', 'wb') as handle:
        dump(
            {
                'model': model,
                'input_mask': input_mask,
                'output_mask': output_mask
            }, handle, 1)

    return 0