def test_sample_image(self):
	"""Verify that sample_image() replaces only output-mask pixels and
	rejects invalid preconditioner arguments with a TypeError."""
	xmask = asarray([
		[1, 1],
		[1, 0]], dtype='bool')
	ymask = asarray([
		[0, 0],
		[0, 1]], dtype='bool')
	img_init = asarray([
		[1., 2.],
		[3., 4.]])

	model = MCGSM(3, 1)

	img_sample = sample_image(img_init, model, xmask, ymask)

	# only the bottom-right pixel should have been replaced
	self.assertLess(max(abs((img_init - img_sample).ravel()[:3])), 1e-10)

	# test using preconditioner
	wt = WhiteningPreconditioner(randn(3, 1000), randn(1, 1000))
	sample_image(img_init, model, xmask, ymask, wt)

	# test what happens if invalid preconditioner is given
	# BUG FIX: extra call arguments must be passed to assertRaises
	# individually, not wrapped in a tuple -- a tuple is forwarded as a
	# single positional argument, so sample_image raised TypeError for
	# the wrong reason (wrong arity instead of invalid preconditioner)
	self.assertRaises(TypeError, sample_image, img_init, model, xmask, ymask, 10.)
	self.assertRaises(TypeError, sample_image, img_init, model, xmask, ymask, model)
def main(argv): # load image and turn into grayscale img = rgb2gray(imread('media/newyork.png')) # generate data inputs, outputs = generate_data_from_image( img, input_mask, output_mask, 220000) # split data into training, test, and validation sets inputs = split(inputs, [100000, 200000], 1) outputs = split(outputs, [100000, 200000], 1) data_train = inputs[0], outputs[0] data_test = inputs[1], outputs[1] data_valid = inputs[2], outputs[2] # compute normalizing transformation pre = WhiteningPreconditioner(*data_train) # intialize model model = MCGSM( dim_in=data_train[0].shape[0], dim_out=data_train[1].shape[0], num_components=8, num_scales=4, num_features=32) # fit parameters model.initialize(*pre(*data_train)) model.train(*chain(pre(*data_train), pre(*data_valid)), parameters={ 'verbosity': 1, 'max_iter': 1000, 'threshold': 1e-7, 'val_iter': 5, 'val_look_ahead': 10, 'num_grad': 20, }) # evaluate model print 'Average log-likelihood: {0:.4f} [bit/px]'.format( -model.evaluate(data_test[0], data_test[1], pre)) # synthesize a new image img_sample = sample_image(img, model, input_mask, output_mask, pre) imwrite('newyork_sample.png', img_sample, cmap='gray', vmin=min(img), vmax=max(img)) # save model with open('image_model.pck', 'wb') as handle: dump({ 'model': model, 'input_mask': input_mask, 'output_mask': output_mask}, handle, 1) return 0
def main(argv):
	"""Train grayscale and color MCGSMs on an image and synthesize a sample."""

	# load image and convert to YCbCr color space
	img = rgb2ycc(imread('media/newyork.png'))

	# generate masks for grayscale and color model, respectively
	input_mask0, output_mask0 = generate_masks(7, 1)
	input_mask1, output_mask1 = generate_masks([5, 7, 7], 1, [1, 0, 0])

	# train one model per stage
	model0, pre0 = train_model(img[:, :, 0], input_mask0, output_mask0)
	model1, pre1 = train_model(img, input_mask1, output_mask1)

	# synthesize a new image
	img_sample = img.copy()

	# first sample intensities, then sample color conditioned on them
	img_sample[:, :, 0] = sample_image(
		img_sample[:, :, 0], model0, input_mask0, output_mask0, pre0)
	img_sample = sample_image(
		img_sample, model1, input_mask1, output_mask1, pre1)

	# convert back to RGB and enforce constraints
	img_sample = ycc2rgb(img_sample)

	imwrite('newyork_sample.png', img_sample, vmin=0, vmax=255)

	# save both models and their masks
	with open('image_model.pck', 'wb') as handle:
		contents = {
			'model0': model0,
			'model1': model1,
			'input_mask0': input_mask0,
			'input_mask1': input_mask1,
			'output_mask0': output_mask0,
			'output_mask1': output_mask1}
		dump(contents, handle, 1)

	return 0
def test_sample_image(self):
	"""Verify that sample_image() replaces only output-mask pixels and
	rejects invalid preconditioner arguments with a TypeError."""
	xmask = asarray([[1, 1], [1, 0]], dtype='bool')
	ymask = asarray([[0, 0], [0, 1]], dtype='bool')
	img_init = asarray([[1., 2.], [3., 4.]])

	model = MCGSM(3, 1)

	img_sample = sample_image(img_init, model, xmask, ymask)

	# only the bottom-right pixel should have been replaced
	self.assertLess(max(abs((img_init - img_sample).ravel()[:3])), 1e-10)

	# test using preconditioner
	wt = WhiteningPreconditioner(randn(3, 1000), randn(1, 1000))
	sample_image(img_init, model, xmask, ymask, wt)

	# test what happens if invalid preconditioner is given
	# BUG FIX: extra call arguments must be passed to assertRaises
	# individually, not wrapped in a tuple -- a tuple is forwarded as a
	# single positional argument, so sample_image raised TypeError for
	# the wrong reason (wrong arity instead of invalid preconditioner)
	self.assertRaises(TypeError, sample_image, img_init, model, xmask, ymask, 10.)
	self.assertRaises(TypeError, sample_image, img_init, model, xmask, ymask, model)
def main(argv):
	"""Train grayscale and color MCGSMs on an image and synthesize a sample."""

	# load image and convert to YCbCr color space
	img = rgb2ycc(imread('media/newyork.png'))

	# generate masks for grayscale and color model, respectively
	input_mask0, output_mask0 = generate_masks(7, 1)
	input_mask1, output_mask1 = generate_masks([5, 7, 7], 1, [1, 0, 0])

	# train one model per stage
	model0, pre0 = train_model(img[:, :, 0], input_mask0, output_mask0)
	model1, pre1 = train_model(img, input_mask1, output_mask1)

	# synthesize a new image
	img_sample = img.copy()

	# first sample intensities, then sample color conditioned on them
	img_sample[:, :, 0] = sample_image(
		img_sample[:, :, 0], model0, input_mask0, output_mask0, pre0)
	img_sample = sample_image(
		img_sample, model1, input_mask1, output_mask1, pre1)

	# convert back to RGB and enforce constraints
	img_sample = ycc2rgb(img_sample)

	imwrite('newyork_sample.png', img_sample, vmin=0, vmax=255)

	# save both models and their masks
	with open('image_model.pck', 'wb') as handle:
		contents = {
			'model0': model0,
			'model1': model1,
			'input_mask0': input_mask0,
			'input_mask1': input_mask1,
			'output_mask0': output_mask0,
			'output_mask1': output_mask1}
		dump(contents, handle, 1)

	return 0
def main(argv):
	"""Sample an image from a trained model stored in an Experiment file and
	write it to disk."""

	# BUG FIX: removed dead `experiment = Experiment()` here -- the object
	# was constructed and immediately discarded when the real experiment is
	# loaded from args.model below

	parser = ArgumentParser(argv[0], description=__doc__)
	parser.add_argument('--model', '-m', type=str, required=True)
	parser.add_argument('--data', '-d', type=str, default='data/deadleaves_train.mat')
	parser.add_argument('--width', '-W', type=int, default=512)
	parser.add_argument('--height', '-H', type=int, default=512)
	parser.add_argument('--crop', '-C', type=int, default=16)
	parser.add_argument('--log', '-L', type=int, default=0)
	parser.add_argument('--output', '-o', type=str, default='results/sample.png')

	args = parser.parse_args(argv[1:])

	images = loadmat(args.data)['data']

	# display range estimated from the training data
	vmin = percentile(images, 0.02)
	vmax = percentile(images, 98.)

	# load trained model and preconditioner
	experiment = Experiment(args.model)

	# initialize boundary pixels with random pixels from the training data
	img = empty([args.height + args.crop, args.width + 2 * args.crop])
	img.ravel()[:] = images.ravel()[random_select(img.size, images.size)]

	img = sample_image(
		img,
		experiment['model'],
		experiment['input_mask'],
		experiment['output_mask'],
		experiment['preconditioner'],
		min_value=vmin,
		max_value=vmax)

	if args.log:
		# linearize and gamma-correct
		img = power(exp(img), .45)
		vmin = power(exp(vmin), .45)
		vmax = power(exp(vmax), .45)

	# discard the randomly initialized boundary before saving
	imwrite(
		args.output,
		imformat(img[args.crop:, args.crop:-args.crop],
			vmin=vmin, vmax=vmax, symmetric=False))

	savez('sample.npz', sample=img)

	return 0
def main(argv):
	"""Sample an image from a trained model stored in an Experiment file and
	write it to disk."""

	# BUG FIX: removed dead `experiment = Experiment()` here -- the object
	# was constructed and immediately discarded when the real experiment is
	# loaded from args.model below

	parser = ArgumentParser(argv[0], description=__doc__)
	parser.add_argument('--model', '-m', type=str, required=True)
	parser.add_argument('--data', '-d', type=str, default='data/deadleaves_train.mat')
	parser.add_argument('--width', '-W', type=int, default=512)
	parser.add_argument('--height', '-H', type=int, default=512)
	parser.add_argument('--crop', '-C', type=int, default=16)
	parser.add_argument('--log', '-L', type=int, default=0)
	parser.add_argument('--output', '-o', type=str, default='results/sample.png')

	args = parser.parse_args(argv[1:])

	images = loadmat(args.data)['data']

	# display range estimated from the training data
	vmin = percentile(images, 0.02)
	vmax = percentile(images, 98.)

	# load trained model and preconditioner
	experiment = Experiment(args.model)

	# initialize boundary pixels with random pixels from the training data
	img = empty([args.height + args.crop, args.width + 2 * args.crop])
	img.ravel()[:] = images.ravel()[random_select(img.size, images.size)]

	img = sample_image(
		img,
		experiment['model'],
		experiment['input_mask'],
		experiment['output_mask'],
		experiment['preconditioner'],
		min_value=vmin,
		max_value=vmax)

	if args.log:
		# linearize and gamma-correct
		img = power(exp(img), .45)
		vmin = power(exp(vmin), .45)
		vmax = power(exp(vmax), .45)

	# discard the randomly initialized boundary before saving
	imwrite(
		args.output,
		imformat(img[args.crop:, args.crop:-args.crop],
			vmin=vmin, vmax=vmax, symmetric=False))

	savez('sample.npz', sample=img)

	return 0
def main(argv): # load image and turn into grayscale img = rgb2gray(imread('media/newyork.png')) # generate data inputs, outputs = generate_data_from_image(img, input_mask, output_mask, 220000) # split data into training, test, and validation sets inputs = split(inputs, [100000, 200000], 1) outputs = split(outputs, [100000, 200000], 1) data_train = inputs[0], outputs[0] data_test = inputs[1], outputs[1] data_valid = inputs[2], outputs[2] # compute normalizing transformation pre = WhiteningPreconditioner(*data_train) # intialize model model = MCGSM(dim_in=data_train[0].shape[0], dim_out=data_train[1].shape[0], num_components=8, num_scales=4, num_features=32) # fit parameters model.initialize(*pre(*data_train)) model.train(*chain(pre(*data_train), pre(*data_valid)), parameters={ 'verbosity': 1, 'max_iter': 1000, 'threshold': 1e-7, 'val_iter': 5, 'val_look_ahead': 10, 'num_grad': 20, }) # evaluate model print 'Average log-likelihood: {0:.4f} [bit/px]'.format( -model.evaluate(data_test[0], data_test[1], pre)) # synthesize a new image img_sample = sample_image(img, model, input_mask, output_mask, pre) imwrite('newyork_sample.png', img_sample, cmap='gray', vmin=min(img), vmax=max(img)) # save model with open('image_model.pck', 'wb') as handle: dump( { 'model': model, 'input_mask': input_mask, 'output_mask': output_mask }, handle, 1) return 0