def render_example(model_id):
    # select the car as object
    obj = bpy.data.objects[model_id]
    obj.select = True

    # scale the DimsPlane to illustrate dimensions
    plane = bpy.data.objects['DimsPlane']
    plane.location = [0, 0, 0]

    roi = bounds(obj)
    dims = [roi.x.max - roi.x.min, roi.y.max - roi.y.min, roi.z.max - roi.z.min]
    dims = dict(zip(['x', 'y', 'z'], dims))
    plane.scale.x = dims['x'] * 0.5
    plane.scale.y = dims['y'] * 0.5

    # save a rendered example
    example_file = model['example_file']
    if not op.exists(atcadillac(op.dirname(example_file))):
        os.makedirs(atcadillac(op.dirname(example_file)))
    logging.info('writing example to %s' % example_file)
    render_scene(atcadillac(example_file))
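# Hypothetical driver, not part of the original script: render one example per
# model id. The ids in the usage note are made up; this assumes it runs inside
# Blender's Python interpreter with the scene and the helpers above loaded.
def render_all_examples(model_ids):
    for model_id in model_ids:
        render_example(model_id)

# e.g. render_all_examples(['sedan-01', 'taxi-02'])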
def alias(img, amount):  # cheating with a mask
    length, width = bounds(img)
    channel = channels(img)
    post = avail(img)
    mask = [[0, 0, 0], [255, 255, 255], [0, 0, 0],
            [255, 255, 255], [255, 255, 255], [255, 255, 255],
            [0, 0, 0], [255, 255, 255], [0, 0, 0]]
    result = find(img, mask, (3, 3))
    return result
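# alias() above only pattern-matches a 3x3 mask. A more conventional way to
# soften aliasing, sketched with Pillow only (not the project's utils), is to
# downscale and upscale with a high-quality resampling filter. Mapping
# 'amount' to the scale factor is an assumption about the unused parameter.
from PIL import Image

def alias_resample(img, amount=0.5):
    w, h = img.size
    small = img.resize((max(1, int(w * amount)), max(1, int(h * amount))),
                       Image.LANCZOS)
    return small.resize((w, h), Image.LANCZOS)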
def find(img, mask, dims):  # this isn't actually blobbing anything
    length, width = bounds(img)
    height, breadth = dims
    result = like(img)
    img = img.load()
    height = int(height / 2)
    breadth = int(breadth / 2)
    # walks the interior pixels but never applies the mask, so 'result'
    # comes back blank
    for l in range(height, length - height):
        for w in range(breadth, width - breadth):
            pixel = img[l, w]
    return result
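# Sketch of what find() presumably intends: mark pixels whose neighbourhood
# matches the mask pattern. Pillow-only and self-contained; it assumes 'mask'
# is a flat row-major list of RGB triples as built in alias(), and that 'img'
# is a PIL Image in RGB or RGBA mode.
from PIL import Image

def find_matches(img, mask, dims):
    mh, mw = dims                      # mask height and width
    half_h, half_w = mh // 2, mw // 2
    result = Image.new(img.mode, img.size)
    src, dst = img.load(), result.load()
    width, height = img.size
    for x in range(half_w, width - half_w):
        for y in range(half_h, height - half_h):
            hit = True
            for j in range(-half_h, half_h + 1):      # mask row offset
                for i in range(-half_w, half_w + 1):  # mask column offset
                    expected = tuple(mask[(j + half_h) * mw + (i + half_w)])
                    if src[x + i, y + j][:3] != expected:
                        hit = False
                        break
                if not hit:
                    break
            if hit:
                dst[x, y] = src[x, y]
    return result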
def edge(img):
    avails = avail(img)
    result = like(img).load()
    length, width = bounds(img)
    img = img.load()
    # copies every pixel whose colour appears in the palette; this is a
    # palette copy rather than edge detection
    for a in range(0, len(avails)):
        av = avails[a]
        for x in range(0, length):
            for y in range(0, width):
                if av == img[x, y]:
                    result[x, y] = av  # was '==', a no-op comparison
    return result
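# For an actual edge map, the stock Pillow kernel is enough; a minimal sketch
# that does not rely on the project's utils:
from PIL import ImageFilter

def edge_pillow(img):
    # grayscale first so the edge response is a single channel
    return img.convert('L').filter(ImageFilter.FIND_EDGES)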
def sharpen(img, amount):  # not actually sharpening
    result = like(img)
    length, width = bounds(img)
    img = img.load()
    result = result.load()
    for left in range(1, length - 1):
        for top in range(1, width - 1):
            pixel = [0, 0, 0, 0]
            # range(-1, 1) only covers offsets -1 and 0, so this sums a 2x2
            # window rather than the full 3x3 neighbourhood
            for l in range(-1, 1):
                for w in range(-1, 1):
                    pixel[0] += img[left + l, top + w][0]
                    pixel[1] += img[left + l, top + w][1]
                    pixel[2] += img[left + l, top + w][2]
            result[left, top] = tuple(pixel)
    return result
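# A conventional sharpen convolves with a kernel whose centre weight outweighs
# its negative neighbours. Sketch using Pillow's ImageFilter.Kernel;
# interpreting 'amount' as the neighbour weight is an assumption. Kernel
# filters only support 'L' and 'RGB' images.
from PIL import ImageFilter

def sharpen_kernel(img, amount=1.0):
    kernel = [0, -amount, 0,
              -amount, 1 + 4 * amount, -amount,
              0, -amount, 0]
    return img.filter(ImageFilter.Kernel((3, 3), kernel, scale=1))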
def get_dims(model_id):
    # select the car as object
    obj = bpy.data.objects[model_id]
    obj.select = True

    # scale the DimsPlane to illustrate dimensions
    #plane = bpy.data.objects['DimsPlane']
    #plane.location = [0, 0, 0]

    roi = bounds(obj)
    dims = [roi.x.max - roi.x.min, roi.y.max - roi.y.min, roi.z.max - roi.z.min]
    dims = dict(zip(['x', 'y', 'z'], dims))

    x_wheels = get_x_wheels(obj)
    return {'dims': dims, 'x_wheels': x_wheels}
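# Hypothetical usage, not in the original: gather dimensions for several model
# ids (the ids in the usage note are made up) so they can be logged or written
# out elsewhere.
def collect_dims(model_ids):
    return {model_id: get_dims(model_id) for model_id in model_ids}

# e.g. collect_dims(['sedan-01', 'taxi-02'])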
def group(img):  # automatically
    length, width = bounds(img)
    result = like(img).load()
    img = img.load()
    color = [255, 255, 255, 255]
    tolerance = [2, 2, 2, 2]
    for inc in range(1, 127):  # this is really really slow / iterative
        for x in range(length):
            for y in range(width):
                matches = 0
                if abs(img[x, y][0] - color[0]) < tolerance[0]:
                    matches += 1
                if abs(img[x, y][1] - color[1]) < tolerance[1]:
                    matches += 1
                if abs(img[x, y][2] - color[2]) < tolerance[2]:
                    matches += 1
                if matches == 3:  # all three channels within tolerance
                    result[x, y] = tuple(color)
    return result
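# group() thresholds every near-white pixel in a single global pass. Another
# reading of "grouping" is a region grow from a seed pixel; a minimal
# Pillow-only sketch (the seed and tolerance parameters are not in the
# original, and an RGB/RGBA image is assumed).
from collections import deque

def grow_region(img, seed, tolerance=2):
    px = img.load()
    width, height = img.size
    base = px[seed][:3]
    seen, queue, region = {seed}, deque([seed]), []
    while queue:
        x, y = queue.popleft()
        region.append((x, y))
        for nx, ny in ((x + 1, y), (x - 1, y), (x, y + 1), (x, y - 1)):
            if 0 <= nx < width and 0 <= ny < height and (nx, ny) not in seen:
                if all(abs(px[nx, ny][c] - base[c]) < tolerance for c in range(3)):
                    seen.add((nx, ny))
                    queue.append((nx, ny))
    return region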
#!/usr/bin/env python
from utils import (load, take, show, bgr, image, like, bounds, channels,
                   crop, scale, color, avail, colorPicker)
from proto import alias, sharpen, group, find, edge, center, distance
from PIL import Image

print("# fast stuff")
img = load('samples/abstract/colors.png')
#b = take()
show(img)
b, g, r = bgr(img)
img = image(b, b, b)
test = like(img)
bound = bounds(b)
channel = channels(b)
coord = (0, 0, 50, 50)
closer = crop(img, coord)
bigger = scale(closer, 2.0)
eyedrop = color(img, 0, 30)
pallet = avail(img)
colorPicker(img, 0, 30)

print("# slow stuff")
res1 = alias(img, .3)
res2 = sharpen(img, .3)
blob1 = group(img)
mask = Image.new("RGB", (50, 10), "white")
blob3 = find(img, mask, (3, 3))
coords1 = edge(img)
coords2 = center(blob1)
dist = distance(0, 3)