import numpy as np
import photomosaic as pm
from skimage import draw


def test_depth(pool):
    "using greater depth should trace out the mask edge more closely"
    image = np.zeros((1000, 1000))
    rr, cc = draw.circle(300, 500, 150)  # draw.disk((300, 500), 150) in newer scikit-image
    image[rr, cc] = 1
    image = pm.rescale_commensurate(image, (5, 5), depth=2)
    mask = image.astype(bool)
    tiles0 = pm.partition(image, (5, 5), mask=mask, depth=0)
    tiles1 = pm.partition(image, (5, 5), mask=mask, depth=1)
    tiles2 = pm.partition(image, (5, 5), mask=mask, depth=2)
    assert len(tiles0) < len(tiles1) < len(tiles2)
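A minimal sketch of how the `pool` argument might be supplied as a pytest fixture built with `pm.make_pool`; the real fixture lives elsewhere in the test suite, and the directory glob here is hypothetical.

import pytest
import photomosaic as pm


@pytest.fixture(scope='module')
def pool():
    # Hypothetical directory of candidate tile images used by the tests.
    return pm.make_pool('pool_images/*.jpg')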
def detail_mosaic(filename, dirname):
    import photomosaic as pm
    from skimage import img_as_float
    from skimage.io import imread, imsave

    image = img_as_float(imread(filename))
    # Use perceptually uniform colorspace for all analysis.
    converted_img = pm.perceptual(image)
    # Analyze the collection (the "pool") of candidate tile images.
    pool = pm.make_pool(dirname + '/*.jpg')
    # Adapt the color palette of the image to resemble the palette of the pool.
    adapted_img = pm.adapt_to_pool(converted_img, pool)
    # Size the image to be evenly divisible by the tiles.
    scaled_img = pm.rescale_commensurate(adapted_img, grid_dims=(30, 30), depth=1)
    tiles = pm.partition(scaled_img, grid_dims=(30, 30), depth=1)
    # Draw the tile layout over the RGB image for inspection.
    annotated_img = pm.draw_tile_layout(pm.rgb(scaled_img), tiles)
    # Insert '_detail_mosaic' before the (three-letter) file extension.
    imsave(filename[:-4] + '_detail_mosaic' + filename[-4:], annotated_img)
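A sketch of calling this helper; the file and directory names are placeholders, not part of the original.

# Hypothetical invocation: annotates 'cat.jpg' with the 30x30 tile layout
# and writes 'cat_detail_mosaic.jpg' alongside it.
detail_mosaic('cat.jpg', 'pool_images')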
def create_mosaic(input_img, images_dataset_path, grid_dims):
    # Load the input image; ensure it is a float image ranging from 0 to 1.
    # Note: cv2.imread returns channels in BGR order.
    image = cv2.imread(input_img)
    image = img_as_float(image)
    # Analyze the collection (the "pool") of images.
    pool = pm.make_pool(images_dataset_path)
    # Use perceptually uniform colorspace for all analysis.
    converted_img = pm.perceptual(image)
    # Adapt the color palette of the image to resemble the palette of the pool.
    # adapted_img = pm.adapt_to_pool(converted_img, pool)
    adapted_img = converted_img
    # scale = 1
    # scaled_img = Image.new('RGB', (adapted_img.shape[0] * scale, adapted_img.shape[1] * scale))
    # scaled_img = Image.new('RGB', (5040, 5040))
    scaled_img = pm.rescale_commensurate(adapted_img, grid_dims=grid_dims, depth=0)
    tiles = pm.partition(scaled_img, grid_dims=grid_dims, depth=0)
    # Reshape the 3D array (height, width, color_channels) into
    # a 2D array (num_pixels, color_channels) and average over the pixels.
    tile_colors = [np.mean(scaled_img[tile].reshape(-1, 3), 0) for tile in tiles]
    # Match a pool image to each tile.
    match = pm.simple_matcher(pool)
    matches = [match(tc) for tc in tile_colors]
    matches_list = [x[0] for x in matches]
    # Perform neural network object detection to see which classes appear in which images.
    detect_images(matches_list)
    # Concatenate the list of matched images into a single mosaic image.
    concatenateImages(matches_list, grid_dims)
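A sketch of how `create_mosaic` might be driven, assuming `detect_images` and `concatenateImages` are defined elsewhere in the script; the paths and grid size are placeholders.

if __name__ == '__main__':
    # Hypothetical inputs: a source photo, a glob of pool images, a 30x30 grid.
    create_mosaic('input.jpg', 'dataset/*.jpg', (30, 30))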
import os

import numpy as np
import photomosaic as pm
from skimage.io import imsave
from skimage.data import chelsea
from skimage import img_as_float

here = os.path.dirname(__file__)
POOL_PATH = '/tmp/photomosaic-docs-pool/pool.json'

pool = pm.import_pool(POOL_PATH)
image = img_as_float(chelsea())
converted_img = pm.perceptual(image)
scaled_img = pm.rescale_commensurate(converted_img, grid_dims=(30, 30), depth=0)
tiles = pm.partition(scaled_img, grid_dims=(30, 30), depth=0)
tile_colors = [np.mean(scaled_img[tile].reshape(-1, 3), 0) for tile in tiles]
match = pm.simple_matcher(pool)
matches = [match(tc) for tc in tile_colors]
canvas = np.ones_like(scaled_img)  # white canvas
mos = pm.draw_mosaic(canvas, tiles, matches)
imsave(os.path.join(here, '..', '_static', 'generated_images',
                    'no-palette-adjustment.png'), mos)

adapted_img = pm.adapt_to_pool(converted_img, pool)
imsave(os.path.join(here, '..', '_static', 'generated_images',
                    'adapted-chelsea.png'), pm.rgb(adapted_img))
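The script above saves the un-adapted mosaic and the palette-adapted source image, but not the mosaic built from the adapted image. A sketch of that remaining step, reusing only calls already present above; the output file name is an assumption following the existing naming pattern.

# Sketch: repeat the tiling/matching steps on the palette-adapted image.
scaled_adapted = pm.rescale_commensurate(adapted_img, grid_dims=(30, 30), depth=0)
adapted_tiles = pm.partition(scaled_adapted, grid_dims=(30, 30), depth=0)
adapted_colors = [np.mean(scaled_adapted[tile].reshape(-1, 3), 0)
                  for tile in adapted_tiles]
adapted_matches = [match(tc) for tc in adapted_colors]
adapted_mos = pm.draw_mosaic(np.ones_like(scaled_adapted), adapted_tiles,
                             adapted_matches)
# Hypothetical output name, mirroring 'no-palette-adjustment.png' above.
imsave(os.path.join(here, '..', '_static', 'generated_images',
                    'with-palette-adjustment.png'), adapted_mos)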
orig_img = pm.open(infile)
orig_img.load()
W, H = orig_img.size
Hx = 0
Wx = W
screen = pygame.display.set_mode((W + Wx, H + Hx))
draw(orig_img, (0, 0))
pygame.display.flip()
if tune:
    # Adjust color levels to what's available in the pool.
    img = pm.tune(orig_img, database)
else:
    img = orig_img
tiles = pm.partition(img, (10, 10), depth=DEPTH)
for tile in sorted(tiles, key=analyze_sort):
    pm.analyze_one(tile)
    tx, ty = pm.tile_position(tile)
    w, h = tile.size
    for (x, y), color in zip(locs, tile.rgb):
        rect = (Wx + tx + w * x / 2, Hx + ty + h * y / 2, w / 2, h / 2)
        pygame.draw.rect(screen, color, rect)
        pygame.draw.rect(screen, (0, 0, 0), rect, 1)
    pygame.display.flip()
db = pm.connect(database)
try:
    pm.reset_usage(db)
    for tile in sorted(tiles, key=match_sort):
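This snippet relies on several names defined elsewhere in the script (`infile`, `database`, `DEPTH`, `tune`, `draw`, `locs`, and the two sort keys). A hedged sketch of plausible stand-ins, inferred only from how they are used above; these are assumptions, not the original definitions.

import random

import pygame

DEPTH = 1    # assumed partition depth
tune = True  # assumed flag controlling whether pm.tune() runs

# `locs` enumerates the four quadrants of a tile: the rect math above uses
# offsets of w*x/2 and h*y/2, so x and y should each be 0 or 1.
locs = [(0, 0), (1, 0), (0, 1), (1, 1)]


def draw(img, topleft):
    # Assumed helper: blit a PIL image onto the pygame screen.
    surface = pygame.image.fromstring(img.tobytes(), img.size, img.mode)
    screen.blit(surface, topleft)


def analyze_sort(tile):
    # Assumed: visit tiles in random order so the preview fills in evenly.
    return random.random()


def match_sort(tile):
    return random.random()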