def binpack_2d(self, t, *args, **kwargs):
    """Pack all BOM parts of type `t` into bins sized by the two kwargs.

    `t` may be a class or a type-name string; classes are converted to their
    name before matching against each part's "type" field.  The first two
    keyword-argument names select which part dimensions to pack on (and the
    corresponding values give the bin size) — e.g. ``binpack_2d(Board,
    width=100, length=200)``.  NOTE(review): relies on kwargs preserving
    insertion order (guaranteed from Python 3.7); extra kwargs are ignored,
    and *args is unused.

    Returns the packed rectpack packer object.
    """
    if isinstance(t, type):
        t = t.__name__
    rects = []
    # The first two keyword names double as both dimension keys and bin size.
    a = list(kwargs.keys())[0]
    b = list(kwargs.keys())[1]
    parts = self.get_bom_parts()
    for v in parts:
        if v["type"] == t:
            # float2dec: rectpack packs Decimals; 3 decimal places of precision.
            rects += [(rectpack.float2dec(v[a], 3), rectpack.float2dec(v[b], 3))]
    packer = rectpack.newPacker()
    for r in rects:
        packer.add_rect(*r)
    # Unlimited identical bins: open new stock sheets as needed.
    packer.add_bin(kwargs[a], kwargs[b], count=float("inf"))
    packer.pack()
    return packer
def wood_pack_2d(type, *args, **kwargs):
    """Pack all entries of `wood_bom_table` matching `type` into 2D bins.

    As in ``binpack_2d``, the first two keyword-argument names pick the part
    dimensions to pack on and their values give the bin size.  Each part is
    added `v["count"]` times.  NOTE(review): the parameter name `type` shadows
    the builtin; kept because callers may pass it by keyword.  *args is unused.

    Returns the packed rectpack packer object.
    """
    rects = []
    a = list(kwargs.keys())[0]
    b = list(kwargs.keys())[1]
    for k, v in wood_bom_table.items():
        if v["type"] == type:
            # Duplicate the rectangle once per required copy of the part.
            rects += [(rectpack.float2dec(v[a], 3), rectpack.float2dec(v[b], 3))] * v[
                "count"
            ]
    packer = rectpack.newPacker()
    for r in rects:
        packer.add_rect(*r)
    # Unlimited identical bins (stock sheets).
    packer.add_bin(kwargs[a], kwargs[b], count=float("inf"))
    packer.pack()
    return packer
def pack(allRect, canvasSize):
    """Pack the given rectangles into a single canvas-sized bin.

    Each input object supplies `height` and `width` attributes; the result is
    a list of new `Rectangle` objects carrying the packed width/height and
    position assigned by the packer.
    """
    packer = newPacker()
    # Queue every input rectangle as (height, width).
    for item in allRect:
        packer.add_rect(item.height, item.width)
    # Everything must fit inside one bin of the requested canvas size.
    packer.add_bin(*canvasSize)
    packer.pack()
    # rect_list() yields (bin, x, y, w, h, rid); rebuild as Rectangle(h, w, x, y).
    return [
        Rectangle(entry[4], entry[3], entry[1], entry[2])
        for entry in packer.rect_list()
    ]
def test_offline_modes(self):
    """Exercise every (bin_algo, pack_algo) combination in Offline mode.

    Packs the shared bin/rectangle fixture, optionally logs timing and the
    wasted-space percentage of the first bin, asserts the waste stays under
    50%, and validates each resulting bin's packing.
    """
    for bin_algo, algo_name in self.bin_algos:
        for algo in self.algos:
            packer = rectpack.newPacker(pack_algo=algo,
                                        mode=PackingMode.Offline,
                                        bin_algo=bin_algo,
                                        sort_algo=self.sort_algo)
            self.setup_packer(packer, self.bins, self.rectangles)
            time = self.packing_time(packer)
            # Fix: compute wasted space once — the original called
            # first_bin_wasted_space twice and discarded the first result,
            # doing redundant work on every iteration.
            wasted = self.first_bin_wasted_space(packer)

            if self.log:
                print(
                    "Offline {0} {1:<20s} {2:>10.3f}s {3:>10.3f}% {4:>10} bins"
                    .format(algo_name, algo.__name__, time, wasted,
                            len(packer)))

            # Test wasted space threshold
            self.assertTrue(wasted < 50)

            # Validate rectangle packing
            for b in packer:
                b.validate_packing()
def arrange(self):
    """Tile all viewable top-level X windows into the root window's geometry.

    Queries the root window's children, adds each mapped window's current
    size to a (non-rotating) rectangle packer, packs them all into one bin
    the size of the root window, then moves/resizes each window to its packed
    position.  Records results in `self.window_rectangles`.
    """
    packer = newPacker(rotation=False)
    windows = self.root.query_tree().children
    self.window_rectangles = {}
    self.window_objects = {}
    mapped_windows = []
    if len(windows) > 0:
        for w in windows:
            try:
                self.log(f'window id={w.id:x} map_state = {w.get_attributes().map_state}')
                # map_state 2 — presumably Xlib's IsViewable; only visible
                # windows are tiled.  TODO confirm against Xlib constants.
                if w.get_attributes().map_state == 2:
                    self.window_objects[w.id] = w
                    mapped_windows.append(w)
                    g = w.get_geometry()
                    width = g.width
                    height = g.height
                    self.log(f"Adding window size: {width}x{height}")
                    # The window id is carried through the packer as the rid.
                    packer.add_rect(g.width, g.height, w.id)
                # else:
                #     raise Exception()
            except BadWindow:
                # Window vanished between query_tree and inspection; skip it.
                pass
            except Exception as e:
                print(e)
        g = self.root.get_geometry()
        self.log(f'Packing into root size {g.width}x{g.height}')
        packer.add_bin(g.width, g.height)
        packer.pack()
        rects = packer.rect_list()
        # rect_list() yields (bin, x, y, w, h, rid) with rid == window id.
        for _, x, y, w, h, wid in rects:
            if wid in self.window_objects:
                win = self.window_objects.get(wid)
                win.configure(x=x, y=y, width=w, height=h)
                self.window_rectangles[wid] = (x, y, w, h)
    else:
        self.log('Packer has no bins')
def layoutParts(parts, viewport_size, margin=0, outermargin=0):
    """Lay out parts onto viewport-sized pages using rectangle packing.

    Each part's bbox is grown by `margin` (mutated in place), packed with
    rotation allowed into bins shrunk by `outermargin` on every side, and
    assigned `x`/`y` attributes (offset by half the margin).  A part whose
    packed aspect ratio differs from its bbox aspect ratio is flagged
    `rotated = True`.

    Returns a list of pages, each a list of the parts placed on it.
    """
    #packer = rectpack.newPacker(mode=rectpack.PackingMode.Offline, bin_algo=rectpack.PackingBin.Global, rotation=True)
    packer = rectpack.newPacker(mode=rectpack.PackingMode.Offline, rotation=True)
    #packer = rectpack.newPacker(mode=rectpack.PackingMode.Offline, bin_algo=rectpack.PackingBin.Global, rotation=False)
    #packer = rectpack.newPacker(mode=rectpack.PackingMode.Offline, rotation=False)
    for n,part in enumerate(parts):
        # NOTE: mutates the caller's part.bbox — margin is baked in.
        part.bbox = (part.bbox[0]+margin, part.bbox[1]+margin)
        #part.bbox = (part.bsize[0]+margin, part.bsize[1]+margin)
        # The enumerate index is the rid used to find the part again below.
        rid = packer.add_rect(part.bbox[0], part.bbox[1], n)
    # Unlimited pages, each inset by the outer margin on all sides.
    packer.add_bin(viewport_size[0]-outermargin*2, viewport_size[1]-outermargin*2, count=float("inf"))
    packer.pack()
    pages = []
    for abin in packer:
        logging.debug("packed %d of %d parts" % (len(abin), len(parts)))
        page = []
        pages.append(page)
        for rect in abin:
            part = parts[rect.rid]
            page.append(part)
            part.x = rect.x+margin//2
            part.y = rect.y+margin//2
            # Detect rotation by comparing aspect ratios: if the packed rect's
            # ratio differs from the part's bbox ratio beyond tolerance, the
            # packer must have rotated it 90 degrees.
            rratio = rect.width / rect.height
            pratio = part.bbox[0] / part.bbox[1]
            dr = abs(rratio - pratio)
            if dr > 0.01:
                part.rotated = True
            #print ("%s %d %.2f %.2f %.2f %s %s" % (part.rotated, rect.rid, dr, rratio, pratio, rect, part.bbox))
    return pages
def _find_100(self, rects, min_rects=1):
    """Shrink a bin of aspect ratio `self.ratio` until `rects` fill it ~100%.

    Starts from a bin whose area equals the total rectangle area and whose
    sides respect `self.ratio`, then shrinks both sides by 1 per iteration
    until utilization is within 1% of full.  If the bin becomes too small
    (smaller than the smallest rectangle, or fewer than `min_rects` fit),
    the best utilization seen so far is returned instead.

    Returns a (width, height) tuple.
    """
    r = self.ratio
    lenrec = len(rects)
    minarea = min([x * y for x, y in rects])
    minh = min([n[1] for n in rects])
    minw = min([n[0] for n in rects])
    totarea = sum([a * b for a, b in rects])
    # Solve x*y = totarea with x/y = r for the starting bin dimensions.
    y = sqrt(totarea * (1 / r))
    x = y * r
    wstart = int(x)
    hstart = int(y)
    sub = 0
    best_tot = 0
    while True:
        packer = newPacker(rotation=self.rotation)
        cw = wstart - sub
        ch = hstart - sub
        # NOTE(review): this loop variable clobbers the earlier ratio `r`;
        # harmless here since `r` isn't read again, but fragile.
        for r in rects:
            packer.add_rect(*r)
        packer.add_bin(cw, ch)
        packer.pack()
        total_area_used = sum([box.used_area() for box in packer])
        if (total_area_used / (cw * ch)) > best_tot:
            best_tot = total_area_used / (cw * ch)
            champ = (cw, ch)
        # Give up when the bin can no longer hold the smallest rectangle or
        # enough rectangles; fall back to the best candidate seen.
        # NOTE(review): if this triggers on the very first iteration with
        # zero packed area, `champ` is unbound — TODO confirm callers can't
        # hit that.
        if (cw * ch < minarea or cw < minw or ch < minh
                or len(packer[0]) < min_rects):
            print("no solution possible for current min_pics")
            print(f"returning best possible solution {100*best_tot:.2f}")
            return champ
        if abs((total_area_used / (cw * ch)) - 1) > 1e-2:
            sub = sub + 1
        else:
            break
    return wstart - sub, hstart - sub
def maxrects_multiple_layers(superitems_pool, pallet_dims, add_single=True):
    """
    Given a superitems pool and the maximum dimensions to pack them into,
    return a layer pool with warm start placements.

    Tries every maxrects strategy in MAXRECTS_PACKING_STRATEGIES, builds one
    candidate layer pool per strategy, then keeps the best candidate by
    (uncovered superitems, number of layers, first-layer density).
    """
    logger.debug("MR-ML-Offline starting")
    logger.debug(
        f"MR-ML-Offline {'used' if add_single else 'not_used'} as warm_start")
    logger.debug(f"MR-ML-Offline {len(superitems_pool)} superitems to place")

    # Return a layer with a single item if only one is present in the superitems pool
    # NOTE(review): this branch hard-codes add_single=True, ignoring the
    # function argument — TODO confirm that is intentional.
    if len(superitems_pool) == 1:
        layer_pool = layers.LayerPool(superitems_pool, pallet_dims, add_single=True)
        uncovered = 0
    else:
        generated_pools = []
        for strategy in MAXRECTS_PACKING_STRATEGIES:
            # Build initial layer pool
            layer_pool = layers.LayerPool(superitems_pool, pallet_dims, add_single=add_single)

            # Create the maxrects packing algorithm
            packer = newPacker(
                mode=PackingMode.Offline,
                bin_algo=PackingBin.Global,
                pack_algo=strategy,
                sort_algo=SORT_AREA,
                rotation=False,
            )

            # Add an infinite number of layers (no upper bound)
            packer.add_bin(pallet_dims.width, pallet_dims.depth, count=float("inf"))

            # Add superitems to be packed; the pool index doubles as the rid
            ws, ds, _ = superitems_pool.get_superitems_dims()
            for i, (w, d) in enumerate(zip(ws, ds)):
                packer.add_rect(w, d, rid=i)

            # Start the packing procedure
            packer.pack()

            # Build a layer pool: each packer bin becomes one Layer
            for layer in packer:
                spool, scoords = [], []
                for superitem in layer:
                    spool += [superitems_pool[superitem.rid]]
                    scoords += [utils.Coordinate(superitem.x, superitem.y)]
                spool = superitems.SuperitemPool(superitems=spool)
                layer_pool.add(layers.Layer(spool, scoords, pallet_dims))
            layer_pool.sort_by_densities(two_dims=False)

            # Add the layer pool to the list of generated pools
            generated_pools += [layer_pool]

        # Find the best layer pool by considering the number of placed superitems,
        # the number of generated layers and the density of the first layer.
        # NOTE(review): reverse=True picks the lexicographically largest
        # (uncovered, n_layers, density) tuple first — TODO confirm
        # utils.argsort's semantics match the intended "best" ordering.
        uncovered = [
            len(pool.not_covered_superitems()) for pool in generated_pools
        ]
        n_layers = [len(pool) for pool in generated_pools]
        densities = [
            pool[0].get_density(two_dims=False) for pool in generated_pools
        ]
        pool_indexes = utils.argsort(list(zip(uncovered, n_layers, densities)),
                                     reverse=True)
        layer_pool = generated_pools[pool_indexes[0]]
        uncovered = uncovered[pool_indexes[0]]

    logger.debug(
        f"MR-ML-Offline generated {len(layer_pool)} layers with 3D densities {layer_pool.get_densities(two_dims=False)}"
    )
    logger.debug(
        f"MR-ML-Offline placed {len(superitems_pool) - uncovered}/{len(superitems_pool)} superitems"
    )
    return layer_pool
def pack(
    pictures: List[Picture], page_width: float, page_height: float
) -> List[List[PictureLocation]]:
    """
    Pack a series of pictures into pages of the specified size.

    Returns a series of lists, one per page, containing
    :py:class:`PictureLocation` objects for each picture placed on that page.

    Raises :py:class:`PicturesDoNotFitError` listing any picture the packer
    could not place.
    """
    packer = rectpack.newPacker(
        # Offline packing mode
        mode=rectpack.PackingMode.Offline,
        # Try each bin in turn, most promising first
        bin_algo=rectpack.PackingBin.Global,
        # Use the guillotine algorithm with Best-Area-First ('Baf'; meaning picking
        # the smallest rectangle which can fit the picture) and Minimum-Area-Split
        # ('Minas'; attempt to split free space to make the largest rectangle
        # possible).
        pack_algo=rectpack.GuillotineBafMinas,
        # Pack starting with the largest pictures first
        sort_algo=rectpack.SORT_AREA,
        # Allow pictures to be rotated.
        rotation=True,
    )
    packer.add_bin(
        width=to_decimal(page_width),
        height=to_decimal(page_height),
        # Use as many pages as necessary
        count=float("inf"),
    )
    for picture in pictures:
        # The Picture object itself rides along as the rid.
        packer.add_rect(
            to_decimal(picture.width),
            to_decimal(picture.height),
            picture,
        )
    packer.pack()
    out = [
        [
            PictureLocation(
                picture,
                float(x),
                float(y),
                float(width),
                float(height),
                # A packed width differing from the picture's own width means
                # the packer rotated it 90 degrees.
                width != to_decimal(picture.width),
            )
            for x, y, width, height, picture in page.rect_list()
        ]
        for page in packer
    ]
    # Check for missing pictures
    missing_pictures = list(pictures)
    for page in out:
        for picture_location in page:
            missing_pictures.remove(picture_location.picture)
    if missing_pictures:
        raise PicturesDoNotFitError(missing_pictures)
    return out
def __init__(self):
    """Build one SVG sprite sheet per prop set from sprite-data.json.

    For each prop set: construct an SVG document, add one <g> element per
    sprite (bounding rect, title, PNG <image> reference, and outline path),
    pack all sprites into BIN_SIZE bins, arrange the bins in a near-square
    grid, position each sprite via a translate transform, and write the
    result to OUTPUT_FOLDER/<prop_set>.svg.
    """
    with (config.COMMON / 'sprite-data.json').open('r') as fp:
        self.data = json.load(fp)
    OUTPUT_FOLDER.mkdir(parents=True, exist_ok=True)
    for prop_set in self.data:
        # Fresh SVG document per prop set.
        self.soup = BeautifulSoup('<svg></svg>', 'xml')
        self.root = self.soup.svg
        self.root['xmlns'] = 'http://www.w3.org/2000/svg'
        self.root['xmlns:xlink'] = 'http://www.w3.org/1999/xlink'
        prop_elements = []
        packer = newPacker(rotation=False)
        packer.add_bin(BIN_SIZE, BIN_SIZE, count=float('inf'))
        # `count` indexes prop_elements and doubles as the packer rid.
        count = 0
        prop_set_data = self.data[prop_set]
        # set_element = self.soup.new_tag('g', id='prop_set_' + prop_set)
        # self.root.append(set_element)
        for prop_group in prop_set_data:
            prop_group_data = prop_set_data[prop_group]
            prop_group_name = prop_group_data['name']
            # group_element = self.soup.new_tag('g', id='prop_group_' + prop_group)
            # set_element.append(group_element)
            for prop_index in prop_group_data['sprites']:
                prop_data = prop_group_data['sprites'][prop_index]
                prop_element = self.soup.new_tag('g', id=f'prop_{prop_set}_{prop_group}_{prop_index}')
                # Invisible rect records the sprite's source bounds.
                rect = prop_data['palettes'][0][0]['rect']
                prop_element.append(self.soup.new_tag('rect', stroke='none', fill='none', style='display:none',
                                                      x=rect[0], y=rect[1], width=rect[2], height=rect[3]))
                title_element = self.soup.new_tag('title', style='display:none')
                title_element.string = prop_data['name_nice']
                prop_element.append(title_element)
                prop_filename = f"{prop_group_name}_{prop_set}_{prop_group}_{prop_index}_{prop_data['name']}"
                # Relative href so the SVG works wherever OUTPUT_FOLDER lives;
                # forward slashes for portability on Windows.
                image_filename = str(os.path.relpath(config.PROP_SPRITES / f'{prop_filename}.png', OUTPUT_FOLDER)).replace('\\', '/')
                image_element = self.soup.new_tag('image', width=rect[2], height=rect[3])
                image_element['xlink:href'] = image_filename
                image_element['xmlns:xlink'] = 'http://www.w3.org/1999/xlink'
                prop_element.append(image_element)
                # Pre-generated outline path data for this sprite.
                with (OUTLINES_INPUT / f'{prop_filename}.d').open('r') as file:
                    path = self.soup.new_tag('path', d=file.read(), stroke='#ff0000', fill='none')
                path['stroke-width'] = 0.5
                prop_element.append(path)
                # if count < 20:
                # Pad each packed rect so sprites don't touch.
                packer.add_rect(rect[2] + PADDING_2, rect[3] + PADDING_2, count)
                self.root.append(prop_element)
                prop_elements.append(prop_element)
                count += 1
        packer.pack()
        all_rects = packer.rect_list()
        # Arrange the bins in a near-square grid inside the viewBox.
        num_bins = len(packer)
        num_columns = int(math.ceil(math.sqrt(num_bins)))
        num_rows = int(math.ceil(num_bins / num_columns))
        self.root['viewBox'] = f'0 0 {num_columns * BIN_SIZE} {num_rows * BIN_SIZE}'
        for rect in all_rects:
            b, x, y, w, h, rid = rect
            bin_row = int(b / num_columns)
            bin_column = b - bin_row * num_columns
            bin_x = bin_column * BIN_SIZE
            bin_y = bin_row * BIN_SIZE
            prop_element = prop_elements[rid]
            prop_element['transform'] = f'translate({bin_x + x + PADDING}, {bin_y + y + PADDING})'
        template_file = OUTPUT_FOLDER / f'{prop_set}.svg'
        with template_file.open('w') as fp:
            fp.write(self.soup.prettify())
    pass
def pack(allRect, canvasSize):
    """Pack rectangle objects into a single bin and return positioned copies.

    Parameters:
        allRect: iterable of objects exposing getHeight()/getWidth().
        canvasSize: (height, width) of the single bin to pack into.

    Returns a list of new Rectangle objects carrying the packed height,
    width, x and y of every rectangle the packer placed.

    Fixes over the original: removed the `[] * len(...)` expressions (they
    always produce an empty list, so pre-sizing never happened), removed the
    unconditional `packer[0]` / `packer[0][0]` probes (which raise IndexError
    when nothing packs), and stripped the debug print statements.
    """
    packer = newPacker()
    # Queue every input rectangle as (height, width).
    for aR in allRect:
        packer.add_rect(aR.getHeight(), aR.getWidth())

    # Single bin sized to the canvas.
    height = canvasSize[0]
    width = canvasSize[1]
    packer.add_bin(height, width)
    packer.pack()

    # rect_list() yields (bin, x, y, w, h, rid); rebuild Rectangle(h, w, x, y)
    # and set the position explicitly, as the original did.
    returnRectObjects = []
    for b, x, y, w, h, rid in packer.rect_list():
        packed = Rectangle(h, w, x, y)
        packed.setX(x)
        packed.setY(y)
        returnRectObjects.append(packed)
    return returnRectObjects
def generateOne(iterationId, imageArrayAllClasses, baseImgName, baseImgObj):
    """Compose one synthetic training image by packing random sprites.

    Picks one random image per class, optionally rescales it, packs all
    picks (plus random padding) into a bin of cfgWidth x cfgHeight, blends
    each packed sprite onto a copy of `baseImgObj`, and writes an annotation
    per sprite in the configured output format (yolo/kitti/pascalvoc).

    Parameters `iterationId` and `baseImgName` are unused but kept for
    interface compatibility with callers.

    Returns (finalImage, writeObj, bad) where `bad` is True when a packed
    rectangle would exceed the base image bounds (composition aborted).
    """
    imageId = 0
    writeObj = io.StringIO()
    objectBoundary = [5, 5]
    doRandomScale = True
    doRandomAlpha = True
    packer = newPacker(rotation=False)

    # Candidate scale factors, per module configuration.
    scales = [1.0]
    if enableScaleDown is True:
        for zm in [0.5, 0.6, 0.8, 0.9]:
            scales.append(zm)
    if enableScaleUp is True:
        for zm in [1.1, 1.3, 1.5, 1.7, 1.8]:
            scales.append(zm)

    # imageArrayAllClasses[numClasses][imagesPerClass] - for all classes,
    # choose a random image in each class.
    scaledImageArray = []
    for classId in range(0, numClasses):
        perClassCount = len(imageArrayAllClasses[classId])
        selectedInClass = random.randrange(0, perClassCount)
        img = imageArrayAllClasses[classId][selectedInClass]
        deltaW = random.randrange(10, 20)
        deltaH = random.randrange(10, 20)
        scaleW = 1
        scaleH = 1
        if doRandomScale:
            # Fix: random.choice samples the whole list.  The original
            # randrange(0, len(scales) - 1) excluded the last scale, so the
            # largest configured scale was never used.
            scaleW = scaleH = random.choice(scales)
        img = img.resize(
            (int(img.size[0] * scaleW), int(img.size[1] * scaleH)),
            Image.BICUBIC)
        scaledImageArray.append(img)
        # Random padding keeps blended sprites from touching each other.
        packer.add_rect(img.size[0] + deltaW, img.size[1] + deltaH, imageId)
        imageId = imageId + 1

    # Add the bins where the rectangles will be placed
    for b in [(cfgWidth, cfgHeight)]:
        packer.add_bin(*b)

    # Start packing
    packer.pack()

    # Compose onto a copy of the target background image.
    finalImage = baseImgObj.copy()
    all_rects = packer.rect_list()
    bad = False
    for rect in all_rects:
        # rectpack coordinate is 0,0 at bot left
        # b - Bin index; x, y - bottom-left corner; w, h - size; rid - id
        b, x, y, w, h, rid = rect
        # leftx, lefty, rightx, righty
        area1 = [
            x + objectBoundary[0], y + objectBoundary[1],
            x + objectBoundary[0] + scaledImageArray[rid].size[0],
            y + objectBoundary[1] + scaledImageArray[rid].size[1]
        ]
        # Dont write image if exceeding base image - TODO - rectpack debug
        if area1[2] > cfgWidth or area1[3] > cfgHeight:
            bad = True
            break
        area2 = (area1[0], area1[1], area1[2], area1[3])
        # crop original for blend
        # PIL crop requires {topleft.x,topleft.y, botright.x,botright.y} - 0,0 is in top-left corner
        cropped = finalImage.crop(area2)
        alphas = [0.7, 0.73, 0.75, 0.78, 0.8]
        alpha = 0.8
        if doRandomAlpha:
            alpha = random.choice(alphas)
            if alpha < minAlpha:
                alpha = minAlpha
        blended = Image.blend(cropped, scaledImageArray[rid], alpha)
        finalImage.paste(blended, area2)
        # Emit the annotation in the configured format.
        if writeOutFormat == "yolo":
            write2Yolo([cfgWidth, cfgHeight], area1, writeObj, rid)
        elif writeOutFormat == "kitti":
            write2Kitti([cfgWidth, cfgHeight], area1, writeObj, rid)
        elif writeOutFormat == "pascalvoc":
            writeObject2VOC(str(rid), area1[0], area1[1], area1[2], area1[3],
                            writeObj)
    return finalImage, writeObj, bad
# check for duplicate if frame["name"] in loadedFrameTags: print("duplicated: {}".format(frame["name"])) sys.exit(0) loadedFrameTags[frame["name"]] = { "conf": conf, "img": v["img"], "frame": frame } # HACK: need to change once we need to pack into multiple images size = (1024, 1024) packed_image = Image.new('RGBA', size, 0x00000000) # prepare the packer packer = rectpack.newPacker(rotation=False) packer.add_bin(size[0], size[1]) # for each of the loadedFrameTags, add each of the frame into the packer for name, frameTag in loadedFrameTags.items(): conf = frameTag["conf"] for frame in range(frameTag["frame"]["from"], frameTag["frame"]["to"] + 1): f = conf["frames"][frame] packer.add_rect(f["spriteSourceSize"]["w"], f["spriteSourceSize"]["h"], (name, frame)) # pack packer.pack() # unpack all the rect and store them in the original conf so we can pack them properly
def gen_atlas(overrides, src, dst, binsize, atlasname, border=1, force_single=False, crop=True, leanify=True):
    """Pack all PNG sprites under `src` into atlas texture page(s) in `dst`.

    Each sprite is padded by `border` on every side before packing.  With
    `force_single`, the bin is doubled (alternating sides) until everything
    fits on one page; otherwise a single fixed-size bin is used and a
    TaiseiError is raised if any sprite doesn't fit.  Pages are rendered
    with PIL, written (with .tex/.spr metadata) to a temp directory,
    optionally compressed with the external `leanify` tool, and finally
    copied over the destination atomically-ish (old pages deleted first).
    """
    overrides = Path(overrides).resolve()
    src = Path(src).resolve()
    dst = Path(dst).resolve()

    try:
        texture_local_overrides = (src / 'atlas.tex').read_text()
    except FileNotFoundError:
        texture_local_overrides = None

    try:
        texture_global_overrides = (overrides / 'atlas.tex').read_text()
    except FileNotFoundError:
        texture_global_overrides = None

    total_images = 0
    packed_images = 0
    rects = []

    # Collect every sprite, padded by the border on all sides; the open PIL
    # image and sprite name ride along as the rid.
    for path in src.glob('**/*.png'):
        img = Image.open(path)
        sprite_name = path.relative_to(src).with_suffix('').as_posix()
        rects.append((img.size[0] + border * 2, img.size[1] + border * 2,
                      (img, sprite_name)))

    total_images = len(rects)

    make_packer = lambda: rectpack.newPacker(
        # No rotation support in Taisei yet
        rotation=False,

        # Fine-tuned for least area used after crop
        sort_algo=rectpack.SORT_SSIDE,
        bin_algo=rectpack.PackingBin.BFF,
        pack_algo=rectpack.MaxRectsBl,
    )

    binsize = list(binsize)

    if force_single:
        # Grow the single bin (doubling the smaller side) until all fit.
        while True:
            packer = make_packer()
            packer.add_bin(*binsize)

            for rect in rects:
                packer.add_rect(*rect)

            packer.pack()

            if sum(len(bin) for bin in packer) == total_images:
                break

            if binsize[1] < binsize[0]:
                binsize[1] *= 2
            else:
                binsize[0] *= 2
    else:
        packer = make_packer()

        for rect in rects:
            packer.add_rect(*rect)

        packer.add_bin(*binsize)
        packer.pack()

    packed_images = sum(len(bin) for bin in packer)

    if total_images != packed_images:
        missing = total_images - packed_images
        raise TaiseiError(
            "{} sprite{} not packed (bin size is too small?)".format(
                missing, "s were" if missing > 1 else " was"))

    with ExitStack() as stack:
        # Do everything in a temporary directory first
        temp_dst = Path(
            stack.enter_context(
                TemporaryDirectory(
                    prefix='taisei-atlas-{}'.format(atlasname))))

        # Run multiple leanify processes in parallel, in case we end up with multiple pages
        # Yeah I'm too lazy to use Popen properly
        executor = stack.enter_context(ThreadPoolExecutor())

        for i, bin in enumerate(packer):
            textureid = 'atlas_{}_{}'.format(atlasname, i)
            dstfile = temp_dst / '{}.png'.format(textureid)
            print(dstfile)
            dstfile_meta = temp_dst / '{}.tex'.format(textureid)
            write_texture_def(dstfile_meta, textureid,
                              texture_global_overrides,
                              texture_local_overrides)

            # With crop, shrink the page to the tight bounding box of its
            # packed rects; otherwise use the full bin size.
            actual_size = [0, 0]
            if crop:
                for rect in bin:
                    if rect.x + rect.width > actual_size[0]:
                        actual_size[0] = rect.x + rect.width
                    if rect.y + rect.height > actual_size[1]:
                        actual_size[1] = rect.y + rect.height
            else:
                actual_size = (bin.width, bin.height)

            rootimg = Image.new('RGBA', tuple(actual_size), (0, 0, 0, 0))

            for rect in bin:
                rotated = False
                img, name = rect.rid

                # Detect rotation by size mismatch (defensive: rotation is
                # disabled above, so this should never trigger).
                if tuple(img.size) != (rect.width - border*2, rect.height - border*2) and \
                   tuple(img.size) == (rect.height - border*2, rect.width - border*2):
                    rotated = True

                # Paste region excludes the border padding.
                region = (rect.x + border, rect.y + border,
                          rect.x + rect.width - border,
                          rect.y + rect.height - border)
                print(rect, region, name)

                if rotated:
                    rimg = img.transpose(Image.ROTATE_90)
                    rootimg.paste(rimg, region)
                    rimg.close()
                else:
                    rootimg.paste(img, region)

                img.close()
                # random_fill(rootimg, region)

                override_path = overrides / get_override_file_name(name)

                if override_path.exists():
                    override_contents = override_path.read_text()
                else:
                    override_contents = None
                    write_override_template(override_path, img.size)

                write_sprite_def(
                    temp_dst / '{}.spr'.format(name),
                    textureid,
                    (region[0], region[1],
                     region[2] - region[0], region[3] - region[1]),
                    img.size,
                    overrides=override_contents)

            print('Atlas texture area: ', rootimg.size[0] * rootimg.size[1])
            rootimg.save(dstfile)

            if leanify:
                # NOTE(review): the lambda closes over `dstfile`, which is
                # rebound on every loop iteration — with multiple pages a
                # late-running task may see a later page's path.  TODO confirm.
                executor.submit(lambda: subprocess.check_call(
                    ["leanify", '-v', str(dstfile)]))

        # Wait for leanify to complete
        executor.shutdown(wait=True)

        # Only now, if everything is ok so far, copy everything to the
        # destination, possibly overwriting previous results.
        pattern = re.compile('^atlas_{}_\d+.png$'.format(re.escape(atlasname)))

        for path in dst.iterdir():
            if pattern.match(path.name):
                path.unlink()

        targets = list(temp_dst.glob('**/*'))

        for dir in (p.relative_to(temp_dst) for p in targets if p.is_dir()):
            (dst / dir).mkdir(parents=True, exist_ok=True)

        for file in (p.relative_to(temp_dst) for p in targets if not p.is_dir()):
            shutil.copyfile(str(temp_dst / file), str(dst / file))
def Submit(self, l, b, gsm, num, u_id, s_id, lam, job_card):
    """Validate, record and lay out a printing request.

    Runs self.Check; on success computes the cost, inserts the request into
    the MySQL `printing_request` table, re-packs the outstanding rectangles
    for this (job_card, gsm) and renders one preview JPEG per bin, labelling
    each placed rectangle with its id and dimensions.

    Returns 0 if validation fails, 1 on full success, 4 if a preview image
    could not be saved (missing output folder), 3 on any other error.
    NOTE(review): returns None when t_val is neither 0 nor 1 — confirm
    Check() can only return those two values.
    """
    t_val = self.Check(l, b, job_card, gsm, num)
    if t_val == 0:
        return 0
    elif t_val == 1:
        try:
            quo = cost_cal.cost(l, b, num, job_card, gsm=gsm, lam=lam)
            connection = mysql.connector.connect(host="localhost",
                                                 user="******",
                                                 passwd='',
                                                 database="loginsystem")
            cursor = connection.cursor()
            query = "INSERT INTO printing_request (user_id, store_id,lamination,length,width,jobcard_type,master_printer_job,gsm,num_of_copies,order_cost) VALUES (%s, %s, %s, %s, %s,%s,%s,%s,%s,%s)"  # Insert Querry
            val = (u_id, s_id, lam, l, b, job_card, 1, gsm, num, quo
                   )  # Setting insert value
            cursor.execute(query, val)  # Executing the query
            connection.commit()  # Committing the database to change
            recs_details = recs_list.Update_list(job_card, gsm)
            # Packing the rectangles
            recs = []
            # NOTE: rebinds parameters l and b as loop variables below.
            for idnt, l, b in recs_details:
                recs.append((l, b))
            fit_packer = newPacker()
            # Add the rectangles to packing queue
            for r in recs:
                fit_packer.add_rect(*r)
            # initializing self.bin attribute
            new_bins = recs_list.Update_bin(job_card, gsm)
            # Add the bins where the rectangles will be placed
            for b in new_bins:
                fit_packer.add_bin(*b)
            # Start packing
            fit_packer.pack()
            success = 1
            for x in range(len(new_bins)):
                # Making the display image (scaled 10 px per unit)
                background = Image.open("Solid_White_Futon_Cover.jpg")
                background = background.resize(
                    (int(new_bins[x][0]) * 10, int(new_bins[x][1]) * 10))
                blue = Image.open("new_img.jpg")
                count = 0
                # Adding the images of covered rectagles from the packer list
                # NOTE(review): bare except — any failure (including an
                # IndexError once bins run out) silently stops the loop.
                try:
                    for lp in range(len(fit_packer[x])):
                        rect = fit_packer[x][count]
                        wid = rect.width
                        hei = rect.height
                        idnt = -1
                        ind = -1
                        # Match the packed rect back to its order id by size
                        # (either orientation).
                        for ord in recs_details:
                            if ord[1] == hei and ord[2] == wid or ord[
                                    2] == hei and ord[1] == wid:
                                idnt = ord[0]
                                ind = recs_details.index(ord)
                                break
                        temp = blue.resize(
                            (int(rect.width) * 10, int(rect.height) * 10))
                        draw = ImageDraw.Draw(temp)
                        msg = " " + str(idnt) + "\n" + str(
                            hei) + 'x' + str(wid)
                        w, h = draw.textsize(msg)
                        # Center the label inside the rectangle tile.
                        draw.text((((int(rect.width) * 10 - w)) / 2,
                                   ((int(rect.height) * 10 - h)) / 2),
                                  msg,
                                  fill="black")
                        background.paste(im=temp,
                                         box=(int(rect.x) * 10,
                                              int(rect.y) * 10))
                        # Consume the matched order so duplicates map uniquely.
                        recs_details.pop(ind)
                        count += 1
                except:
                    break
                try:
                    name = "Master/" + job_card + "_" + str(
                        gsm) + "_(" + str(new_bins[x][0]) + "x" + str(
                            new_bins[x][1]) + ")" + "/" + job_card + str(
                                x + 1) + ".jpg"
                    background.save(name, 'JPEG')
                except FileNotFoundError:
                    # Output folder missing — report partial failure.
                    success = 4
                    recs.pop()
            return success
        except:
            # NOTE(review): bare except maps every error (DB, cost calc,
            # packing) to return code 3 and hides the cause.
            return 3
def _pack_single_bin(
    rect_dict: Dict[int, Tuple[int, int]],
    aspect_ratio: Tuple[int, int],
    max_size: ndarray,
    sort_by_area: bool,
    density: float,
    precision: float,
    verbose: bool,
) -> Tuple[Dict[int, Tuple[int, int, int, int]], Dict[Any, Any]]:
    """
    Takes a `rect_dict` argument of the form {id:(w,h)} and tries to pack it
    into a bin as small as possible with aspect ratio `aspect_ratio`
    Will iteratively grow the bin size until everything fits or the bin size
    reaches `max_size`.
    Returns: a dictionary of of the packed
    rectangles in the form {id:(x,y,w,h)}, and a dictionary of remaining
    unpacked rects
    """
    # Compute total area and use it for an initial estimate of the bin size
    total_area = 0
    for r in rect_dict.values():
        total_area += r[0] * r[1]

    aspect_ratio = np.asarray(aspect_ratio) / np.linalg.norm(
        aspect_ratio)  # Normalize

    # Setup variables: start from a bin whose area matches the rectangles'
    # total area, distributed along the requested aspect ratio.
    box_size = np.asarray(aspect_ratio * np.sqrt(total_area),
                          dtype=np.float64)
    box_size = np.clip(box_size, None, max_size)
    if sort_by_area:
        rp_sort = rectpack.SORT_AREA
    else:
        rp_sort = rectpack.SORT_NONE

    # Repeatedly run the rectangle-packing algorithm with increasingly larger
    # areas until everything fits or we've reached the maximum size
    while True:
        # Create the pack object
        rect_packer = rectpack.newPacker(
            mode=rectpack.PackingMode.Offline,
            pack_algo=rectpack.MaxRectsBlsf,
            sort_algo=rp_sort,
            bin_algo=rectpack.PackingBin.BBF,
            rotation=False,
        )

        # Add each rectangle to the pack, create a single bin, and pack
        for rid, r in rect_dict.items():
            rect_packer.add_rect(width=r[0], height=r[1], rid=rid)
        rect_packer.add_bin(width=box_size[0], height=box_size[1])
        rect_packer.pack()

        # Adjust the box size for next time
        box_size *= density  # Increase area to try to fit
        box_size = np.clip(box_size, None, max_size)
        if verbose:
            print("Trying to pack in bin size (%0.2f, %0.2f)" %
                  tuple(box_size * precision))

        # Quit the loop if we've packed all the rectangles or reached the max size
        if len(rect_packer.rect_list()) == len(rect_dict):
            if verbose:
                print("Success!")
            break
        elif all(box_size >= max_size):
            if verbose:
                print("Reached max_size, creating an additional bin")
            break

    # Separate packed from unpacked rectangles, make dicts of form {id:(x,y,w,h)}
    # (rect_list entries are (x, y, w, h, rid) for a single bin).
    packed_rect_dict = {r[-1]: r[:-1] for r in rect_packer[0].rect_list()}
    unpacked_rect_dict = {}
    for k, v in rect_dict.items():
        if k not in packed_rect_dict:
            unpacked_rect_dict[k] = v

    return (packed_rect_dict, unpacked_rect_dict)
def _pack_single_bin(
    rect_dict: Dict[int, Tuple[Number, Number]],
    aspect_ratio: Tuple[Number, Number],
    max_size: Tuple[float, float],
    sort_by_area: bool,
    density: float,
    precision: float,
) -> Tuple[Dict[int, Tuple[Number, Number, Number, Number]], Dict[Any, Any]]:
    """Packs a dict of rectangles {id:(w,h)} and tries to pack it into a bin
    as small as possible with aspect ratio `aspect_ratio`

    Will iteratively grow the bin size until everything fits or the bin size
    reaches `max_size`.

    Args:
        rect_dict: dict of rectangles {id: (w, h)} to pack
        aspect_ratio:
        max_size: tuple of max X, Y size
        sort_by_area: sorts components by area
        density: of packing. Values closer to 1 require more computation to
            pack tighter
        precision: Desired precision for rounding vertex coordinates.

    Returns:
        packed rectangles dict {id:(x,y,w,h)}
        dict of remaining unpacked rectangles
    """
    # Initial bin estimate: total rectangle area spread along the (normalized)
    # requested aspect ratio, capped at max_size.
    total_area = sum(dims[0] * dims[1] for dims in rect_dict.values())
    aspect_ratio = np.asarray(aspect_ratio) / np.linalg.norm(aspect_ratio)
    box_size = np.asarray(aspect_ratio * np.sqrt(total_area), dtype=np.float64)
    box_size = np.clip(box_size, None, max_size)

    rp_sort = rectpack.SORT_AREA if sort_by_area else rectpack.SORT_NONE

    # Retry with an ever-larger bin until everything fits or we hit max_size.
    while True:
        rect_packer = rectpack.newPacker(
            mode=rectpack.PackingMode.Offline,
            pack_algo=rectpack.MaxRectsBlsf,
            sort_algo=rp_sort,
            bin_algo=rectpack.PackingBin.BBF,
            rotation=False,
        )
        for rid, dims in rect_dict.items():
            rect_packer.add_rect(width=dims[0], height=dims[1], rid=rid)
        rect_packer.add_bin(width=box_size[0], height=box_size[1])
        rect_packer.pack()

        # Grow the candidate bin for the next attempt (capped at max_size).
        box_size *= density
        box_size = np.clip(box_size, None, max_size)

        # Stop once every rectangle is placed, or the bin can't grow anymore.
        if len(rect_packer.rect_list()) == len(rect_dict):
            break
        if all(box_size >= max_size):
            break

    # Single-bin rect_list entries are (x, y, w, h, rid): key on rid,
    # keep the geometry; whatever didn't fit stays in the leftover dict.
    packed_rect_dict = {entry[-1]: entry[:-1]
                        for entry in rect_packer[0].rect_list()}
    unpacked_rect_dict = {rid: dims for rid, dims in rect_dict.items()
                          if rid not in packed_rect_dict}

    return (packed_rect_dict, unpacked_rect_dict)
# NOTE: Python 2 code (print statements, PyPDF2 legacy API).
def main():
    """Merge the pages of several input PDFs onto fewer, larger pages.

    Each source page (plus spacing) is packed as a rectangle into up to
    --maxpages bins sized to the chosen paper minus margins, then every
    packed page is merged translated into its slot and the result written
    to --output.
    """
    parser = argparse.ArgumentParser(description='Merge PDFs')
    parser.add_argument('--page', default="A3", help="A3 or A4")
    parser.add_argument('--maxpages', default=20, type=int)
    parser.add_argument('--landscape', default=True)
    parser.add_argument('file', nargs="+")
    parser.add_argument('--output', required=True)
    parser.add_argument('--allsame', action="store_true")
    parser.add_argument('--margin', default=20, help="mm", type=int)
    parser.add_argument('--spacing', default=10, help="mm", type=int)
    args = parser.parse_args()
    # PDF points per millimetre (72 / 25.4).
    mm2pts = 2.83464567
    if args.page == "A3":
        page = (297, 420)
    elif args.page == "A4":
        page = (210, 297)
    elif args.page == "Aq":
        page = (120, 120)
    else:
        print "unknown pagetype A4,A3"
        return
    if args.landscape:
        page = (page[1], page[0])
    pagept = (page[0] * mm2pts, page[1] * mm2pts)
    # Packable area: page minus the margin on every side (mm).
    pagebin = (page[0] - 2 * args.margin, page[1] - 2 * args.margin)
    if args.allsame:
        packer = samePacker()
    else:
        packer = newPacker()
    for i in range(0, args.maxpages):  # 10 pages
        packer.add_bin(*pagebin)
    contents = {}
    for x in sorted(args.file):
        a = PyPDF2.PdfFileReader(open(x, "rb"))
        # all pages
        for j in range(0, a.getNumPages()):
            print "adding", x, j
            mb = a.getPage(j).mediaBox
            ll = mb.lowerLeft
            ur = mb.upperRight
            # Source page size in mm; (file, page) tuple is the rid.
            w = float(ur[0] - ll[0]) / mm2pts
            h = float(ur[1] - ll[1]) / mm2pts
            print ll, ur
            contents[(x, j)] = (a, j)
            # add spacing in mm
            packer.add_rect(w + args.spacing * 2, h + args.spacing * 2, (x, j))
    packer.pack()
    # Obtain number of bins used for packing
    nbins = len(packer)
    print "pages", nbins
    outfile = PyPDF2.PdfFileWriter()
    for i in range(0, nbins):
        page = outfile.addBlankPage(*pagept)
        # Index first bin
        abin = packer[i]
        # Bin dimmensions (bins can be reordered during packing)
        width, height = abin.width, abin.height
        nrect = len(packer[i])
        for j in range(0, nrect):
            rect = packer[i][j]
            print "result", i, j, rect, rect.rid
            a, pagenum = contents[rect.rid]
            # Translate the source page into its packed slot (back to points).
            page.mergeTranslatedPage(
                a.getPage(pagenum),
                (args.margin + args.spacing + rect.x) * mm2pts,
                (args.margin + args.spacing + rect.y) * mm2pts)
    outfile.write(open(args.output, "wb"))
def generateOne(iterationId, imageArrayAllClasses, baseImgName, baseImgObj):
    """Compose one synthetic training image (variant that reloads the
    background from baseImgName on every call).

    One image is chosen at random from each class, scaled by a random
    factor, packed onto a cfgWidth x cfgHeight canvas with rectpack and
    alpha-blended onto the background. A YOLO annotation line is written
    for every placed crop.

    Returns:
        (finalImage, writeObj) -- the composited PIL image and a StringIO
        holding the YOLO label lines.
    """
    imageId = 0
    deltaW = 0
    deltaH = 0
    writeObj = io.StringIO()  # accumulates YOLO annotation text
    objectBoundary = [5, 5]  # padding (px) between packed origin and pasted crop
    doRandomScale = True
    doRandomAlpha = True
    packer = newPacker(rotation=False)  # keep crops upright
    format = 'RGBA'
    # create a list of PIL Image objects
    scaledImageArray = []
    scales = [1.1, 1.3, 1.5, 1.7, 1.8]
    # imageArrayAllClasses[numClasses][imagesPerClass] - for all classes,
    # choose a random image in each class
    for classId in range(0, numClasses):
        perClassCount = len(imageArrayAllClasses[classId])
        selectedInClass = random.randrange(0, perClassCount)
        img = imageArrayAllClasses[classId][selectedInClass]
        deltaW = random.randrange(10, 20)
        deltaH = random.randrange(10, 20)
        scaleW = 1
        scaleH = 1
        if (True == doRandomScale):
            scaleW = scaleH = scales[random.randrange(0, 5)]
            img = img.resize(
                (int(img.size[0] * scaleW), int(img.size[1] * scaleH)),
                Image.BICUBIC)
        scaledImageArray.append(img)
        # the extra delta keeps packed crops from touching each other
        packer.add_rect(img.size[0] + deltaW, img.size[1] + deltaH, imageId)
        imageId = imageId + 1
    # Add the bins where the rectangles will be placed
    for b in [(cfgWidth, cfgHeight)]:
        packer.add_bin(*b)
    # Start packing
    packer.pack()
    # Open the target background image
    finalImage = Image.open(baseImgName).convert(format)
    all_rects = packer.rect_list()
    for rect in all_rects:
        # (b, x, y, w, h, rid) -- rectpack origin is the bin's bottom-left
        b, x, y, w, h, rid = rect
        # left, top, right, bottom
        area1 = [
            x + objectBoundary[0], y + objectBoundary[1],
            x + objectBoundary[0] + scaledImageArray[rid].size[0],
            y + objectBoundary[1] + scaledImageArray[rid].size[1]
        ]
        area2 = (area1[0], area1[1], area1[2], area1[3])
        # crop original for blend
        cropped = finalImage.crop(area2)
        alphas = [0.7, 0.73, 0.75, 0.78, 0.8]
        alpha = 0.8
        if (True == doRandomAlpha):
            alpha = alphas[random.randrange(0, 5)]
        blended = Image.blend(cropped, scaledImageArray[rid], alpha)
        finalImage.paste(blended, area2)
        # Generate yolo notation
        write2Yolo([cfgWidth, cfgHeight], area1, writeObj, rid)
        # Generate kitti notation
        # write2Kitti([cfgWidth, cfgHeight], area1,writeObj, rid)
    return finalImage, writeObj
def generateOne(iterationId, imageArrayAllClasses, baseImgName, baseImgObj):
    """Compose one synthetic training image from random per-class crops.

    One image is chosen at random from each class, optionally rescaled,
    packed onto a cfgWidth x cfgHeight canvas with rectpack, and alpha
    blended onto a copy of baseImgObj. YOLO annotations are written into
    an in-memory text buffer.

    Returns:
        (finalImage, writeObj, bad) -- the composited PIL image, a
        StringIO with YOLO label lines, and a flag that is True when a
        packed rectangle exceeded the base image bounds (result unusable).
    """
    imageId = 0
    deltaW = 0
    deltaH = 0
    writeObj = io.StringIO()  # accumulates YOLO annotation text
    objectBoundary = [5, 5]  # padding (px) between packed origin and pasted crop
    doRandomScale = True
    doRandomAlpha = True
    packer = newPacker(rotation=False)  # keep crops upright
    format = 'RGBA'
    # create a list of PIL Image objects
    scaledImageArray = []
    scales = [1.0]
    if enableScaleDown is True:
        for zm in [0.5, 0.6, 0.8, 0.9]:
            scales.append(zm)
    if enableScaleUp is True:
        for zm in [1.1, 1.3, 1.5, 1.7, 1.8]:
            scales.append(zm)
    # imageArrayAllClasses[numClasses][imagesPerClass] - for all classes,
    # choose a random image in each class
    for classId in range(0, numClasses):
        perClassCount = len(imageArrayAllClasses[classId])
        selectedInClass = random.randrange(0, perClassCount)
        img = imageArrayAllClasses[classId][selectedInClass]
        deltaW = random.randrange(10, 20)
        deltaH = random.randrange(10, 20)
        scaleW = 1
        scaleH = 1
        if (True == doRandomScale):
            # BUGFIX: randrange's stop bound is exclusive, so the upper bound
            # must be len(scales). The old len(scales)-1 could never pick the
            # last appended scale, and raised ValueError ("empty range") when
            # scales == [1.0] (both enableScaleDown and enableScaleUp False).
            scaleW = scaleH = scales[random.randrange(0, len(scales))]
            img = img.resize(
                (int(img.size[0] * scaleW), int(img.size[1] * scaleH)),
                Image.BICUBIC)
        scaledImageArray.append(img)
        # the extra delta keeps packed crops from touching each other
        packer.add_rect(img.size[0] + deltaW, img.size[1] + deltaH, imageId)
        imageId = imageId + 1
    # Add the bins where the rectangles will be placed
    for b in [(cfgWidth, cfgHeight)]:
        packer.add_bin(*b)
    # Start packing
    packer.pack()
    # Open the target background image as copy
    finalImage = baseImgObj.copy()
    all_rects = packer.rect_list()
    bad = False
    for rect in all_rects:
        # rectpack coordinate is 0,0 at bot left
        # b - Bin index
        # x - Rectangle bottom-left corner x coordinate
        # y - Rectangle bottom-left corner y coordinate
        # w - Rectangle width
        # h - Rectangle height
        # rid - User asigned rectangle id or None
        b, x, y, w, h, rid = rect
        # leftx, lefty, rightx, righty
        area1 = [
            x + objectBoundary[0], y + objectBoundary[1],
            x + objectBoundary[0] + scaledImageArray[rid].size[0],
            y + objectBoundary[1] + scaledImageArray[rid].size[1]]
        # Dont write image if exceeding base image - TODO - rectpack debug
        if area1[2] > cfgWidth or area1[3] > cfgHeight:
            bad = True
            break
        area2 = (area1[0], area1[1], area1[2], area1[3])
        # crop original for blend
        # PIL crop requires {topleft.x,topleft.y, botright.x,botright.y} - 0,0 is in top-left corner
        cropped = finalImage.crop(area2)
        alphas = [0.7, 0.73, 0.75, 0.78, 0.8]
        alpha = 0.8
        if (True == doRandomAlpha):
            alpha = alphas[random.randrange(0, 5)]
        if (alpha < minAlpha):
            alpha = minAlpha
        blended = Image.blend(cropped, scaledImageArray[rid], alpha)
        finalImage.paste(blended, area2)
        # Generate yolo notation
        write2Yolo([cfgWidth, cfgHeight], area1, writeObj, rid)
        # Generate kitti notation
        # write2Kitti([cfgWidth, cfgHeight], area1,writeObj, rid)
    return finalImage, writeObj, bad
def maxrects_single_layer_online(superitems_pool, pallet_dims, superitems_duals=None):
    """
    Given a superitems pool and the maximum dimensions to pack them into,
    try to fit the greatest number of superitems in a single layer
    following the given order
    """
    logger.debug("MR-SL-Online starting")
    # If no duals are given use superitems' heights as a fallback
    ws, ds, hs = superitems_pool.get_superitems_dims()
    if superitems_duals is None:
        superitems_duals = np.array(hs)
    # Sort rectangles by duals (height as tie-breaker), descending
    indexes = utils.argsort(list(zip(superitems_duals, hs)), reverse=True)
    logger.debug(
        f"MR-SL-Online {sum(superitems_duals[i] > 0 for i in indexes)} non-zero duals to place"
    )
    # Iterate over each placement strategy
    generated_layers, num_duals = [], []
    for strategy in MAXRECTS_PACKING_STRATEGIES:
        # Create the maxrects packing algorithm
        packer = newPacker(
            mode=PackingMode.Online,
            pack_algo=strategy,
            rotation=False,
        )
        # Add one bin representing one layer
        packer.add_bin(pallet_dims.width, pallet_dims.depth, count=1)
        # Online packing procedure
        n_packed, non_zero_packed, layer_height = 0, 0, 0
        for i in indexes:
            # Only try superitems with a positive dual, or ones that fit
            # under the current layer height (so the layer doesn't grow)
            if superitems_duals[i] > 0 or hs[i] <= layer_height:
                packer.add_rect(ws[i], ds[i], i)
                # The bin grew, so this rectangle was actually placed
                if len(packer[0]) > n_packed:
                    n_packed = len(packer[0])
                    if superitems_duals[i] > 0:
                        non_zero_packed += 1
                    if hs[i] > layer_height:
                        layer_height = hs[i]
        num_duals += [non_zero_packed]
        # Build layer after packing
        spool, coords = [], []
        for s in packer[0]:
            spool += [superitems_pool[s.rid]]
            coords += [utils.Coordinate(s.x, s.y)]
        layer = layers.Layer(superitems.SuperitemPool(spool), coords, pallet_dims)
        generated_layers += [layer]
    # Find the best layer by taking into account the number of
    # placed superitems with non-zero duals and density
    layer_indexes = utils.argsort(
        [(duals, layer.get_density(two_dims=False))
         for duals, layer in zip(num_duals, generated_layers)],
        reverse=True,
    )
    layer = generated_layers[layer_indexes[0]]
    logger.debug(
        f"MR-SL-Online generated a new layer with {len(layer)} superitems "
        f"(of which {num_duals[layer_indexes[0]]} with non-zero dual) "
        f"and {layer.get_density(two_dims=False)} 3D density")
    return layer
def nest(output, files, wbin, hbin, enclosing_rectangle=False):
    """Nest SVG paths from the given files onto wbin x hbin sheets.

    `files` maps an input SVG filename to the number of copies wanted.
    The packed layout is written to `output`; each additional sheet gets
    a numeric suffix (out.svg -> out.1.svg, ...). When
    enclosing_rectangle is True, a light-gray sheet outline is drawn.
    """
    packer = newPacker()

    def float2dec(x):
        # Fixed 4-decimal precision for rectpack-compatible coordinates.
        return _float2dec(x, 4)

    def bbox_paths(paths):
        # Union of the bounding boxes of all paths, converted to Decimal.
        bbox = None
        for p in paths:
            p_bbox = p.bbox()
            if bbox is None:
                bbox = p_bbox
            else:
                bbox = (min(p_bbox[0], bbox[0]), max(p_bbox[1], bbox[1]),
                        min(p_bbox[2], bbox[2]), max(p_bbox[3], bbox[3]))
        return tuple(float2dec(x) for x in bbox)

    all_paths = {}
    # BUGFIX: the loop header contained a stray line-continuation backslash
    # ("for svg\ in files:"), which made this function a SyntaxError.
    for svg in files:
        paths, attributes = svg2paths(svg)
        bbox = bbox_paths(paths)
        for i in range(files[svg]):
            rid = svg + str(i)
            all_paths[rid] = {'paths': paths, 'bbox': bbox}
            print(rid)
            packer.add_rect(bbox[1] - bbox[0], bbox[3] - bbox[2], rid=rid)
    print('Rectangle packing...')
    # Keep adding sheets until every rectangle has been placed.
    while True:
        packer.add_bin(wbin, hbin)
        packer.pack()
        rectangles = {r[5]: r for r in packer.rect_list()}
        if len(rectangles) == len(all_paths):
            break
        else:
            print('not enough space in the bin, adding ')
    combineds = {}
    print('packing into SVGs...')
    for rid, obj in all_paths.items():
        paths = obj['paths']
        bbox = obj['bbox']
        group = Group()
        width, height = (float2dec(bbox[1] - bbox[0]),
                         float2dec(bbox[3] - bbox[2]))
        bin, x, y, w, h, _ = rectangles[rid]
        if bin not in combineds:
            # Lazily create one output Drawing per sheet.
            svg_file = output
            if bin != 0:
                splitext = os.path.splitext(svg_file)
                svg_file = splitext[0] + '.%s' % bin + splitext[1]
            dwg = Drawing(svg_file, profile='tiny',
                          size=('%smm' % wbin, '%smm' % hbin),
                          viewBox="0 0 %s %s" % (wbin, hbin))
            combineds[bin] = dwg
        combined = combineds[bin]
        # Detect whether the packer rotated this part by comparing the
        # original aspect against the packed aspect.
        if (width > height and w > h) or \
           (width < height and w < h) or \
           (width == height and w == h):
            rotate = 0
            dx = -bbox[0]
            dy = -bbox[2]
        else:
            rotate = 90
            dx = -bbox[2]
            dy = -bbox[0]
        for p in paths:
            path = Path(d=p.d())
            path.stroke(color='red', width='1')
            path.fill(opacity=0)
            group.add(path)
        group.translate(x + dx, y + dy)
        group.rotate(rotate)
        combined.add(group)
    for combined in combineds.values():
        if enclosing_rectangle:
            r = Rect(size=(wbin, hbin))
            r.fill(opacity=0)
            r.stroke(color='lightgray')
            combined.add(r)
        print('SVG saving...')
        combined.save(pretty=True)
def CreateSVGDocument(imagedatalist, size: str):
    """Lay the given images out on an A3 or A4 SVG sheet and open it.

    Scales every image to the page's longest-dimension budget, packs the
    scaled rectangles with rectpack, renders the SVG template and opens
    the resulting temp file. Returns "200" on success, "500" on failure
    while generating image tags.
    """
    # Curate the images by filling up SVG data
    global savepath, a4h, a4w, a3h, a3w, dpi, padding, longest_dimension, offsetx, offsety, srtalgo
    global widthredfactor
    global heightredfactor
    global bincount
    binw = 0
    binh = 0
    svgImageContent = ""
    packer = newPacker(mode=PackingMode.Offline,
                       bin_algo=PackingBin.Global,
                       pack_algo=pckalgo,
                       sort_algo=srtalgo)
    if size == "A3":
        documentTemplate = Template(svgA3template)
        binw = a3wmm
        binh = a3hmm
        longest_dimension = 350
        packer.add_bin(width=binw * widthredfactor,
                       height=binh * heightredfactor,
                       count=bincount)
    else:
        documentTemplate = Template(svgA4template)
        binw = a4wmm
        binh = a4hmm
        longest_dimension = 300
        # BUGFIX: the A4 bin height was scaled by widthredfactor; use
        # heightredfactor as the A3 branch does.
        packer.add_bin(width=binw * widthredfactor,
                       height=binh * heightredfactor,
                       count=bincount)
    # Set the positions of the images before hand.
    # iterate through the images and set sizes as per dpi
    for image in imagedatalist:
        # Load image from disk.
        loc = os.path.join(savepath, image.src)
        img = Image.open(loc)
        (image.width, image.height) = img.size
        # scale the width and heights to match document dpi
        ih = float(image.height)
        iw = float(image.width)
        # Calculate and scale the image based on longest dimension factor.
        if ih > iw:
            scalefactor = ih / float(longest_dimension)
        else:
            scalefactor = iw / float(longest_dimension)
        print("Scale Factor is : " + str(scalefactor))
        # This preserves the aspect ratio
        if scalefactor == 0.0:
            scalefactor = 1
        ih = ih / scalefactor
        iw = iw / scalefactor
        # calculate dimension of image in mm in the document.
        ih = int(ih / pxpermm)
        iw = int(iw / pxpermm)
        # ih = int((25.4 * ih) / dpi)
        # iw = int((25.4 * iw) / dpi)
        image.height = ih
        image.width = iw
        packer.add_rect(iw, ih, rid=image.id)
    packer.pack()
    # packer.rect_list() returns the packed rectangles as tuples
    # (b, x, y, w, h, rid): bin index, bottom-left corner, size, user id.
    results = {}
    rlist = packer.rect_list()
    if len(rlist) == len(imagedatalist):
        for rect in rlist:
            results[rect[5]] = rect
        for image in imagedatalist:
            r = results[image.id]
            # x and y co-ordinate are given to bottom left of image.
            # we are converting co-ordinates to topleft.
            xpos = r[1] + offsetx
            ypos = r[2] + offsety
            rw = r[3]
            rh = r[4]
            # Translation to screen co-ordinates
            sx = xpos  # xpos + binw/2
            sy = ypos  # binh/2 - ypos
            image.xpos = sx
            image.ypos = sy
            image.width -= padding
            image.height -= padding
    else:
        print("Unable to pack images.. Going for a random packing.")
        # Set random locations for the images
        for image in imagedatalist:
            image.xpos = randint(0, 100)
            image.ypos = randint(0, 100)
    # start setting image positions based on obtained packing
    # Initialize the bin packing algo.
    try:
        for image in imagedatalist:
            # Download the image
            # TODO : Make a multi-threaded downloader
            svgImageContent += GenerateSVGImageTag(image, image.src)
    except Exception as e:
        print(e)
        return "500"
    doc = {}
    doc["images"] = svgImageContent
    content = documentTemplate.render(doc)
    file = NamedTemporaryFile(mode='w', dir=savepath, delete=False, suffix=".svg")
    fn = file.name
    file.write(content)
    file.close()
    print(fn)
    os.startfile(fn)
    return "200"
def _detect_textures(coreLibrary, modLibrary, mod):
    """Discover a mod's texture files, allocate region ids for new ones and
    remap all references in the mod's animation/texture XML.

    Returns a dict keyed by core region id with the mapped-from id, the
    filename and the on-disk path of every modded texture. New (non-core)
    textures are packed into a single custom texture atlas that is
    exported next to the mod.
    """
    textures_path = os.path.join(mod, 'textures')
    if not os.path.isdir(textures_path):
        # mod ships no textures at all
        return {}
    mapping_n_region = {}   # mod-local filename -> newly allocated core region id
    modded_textures = {}    # core region id -> descriptor dict (return value)
    seen_textures = set()

    def _add_texture(filename):
        # Register one texture file; allocates a fresh region id when the
        # filename is not a known core region.
        region_id = str.join(".", filename.split('.')[:-1])
        isCoreRegion = region_id.isdecimal() and int(region_id) <= coreLibrary['_last_core_region_id']
        # Early exit if this texture exists
        if (region_id in modded_textures) or (region_id in mapping_n_region):
            return
        path = os.path.join(textures_path, filename)
        # core region file without an associated file, return early
        if isCoreRegion and not os.path.exists(path):
            return
        if not isCoreRegion:
            # adding a new texture, this gets tricky as they have to have consecutive numbers.
            core_region_id = str(coreLibrary['_next_region_id'])
            mapping_n_region[filename] = core_region_id
            coreLibrary['_next_region_id'] += 1
            ui.log.log(f" Allocated new core region idx {core_region_id:>5} to file (unknown)")
        else:
            core_region_id = region_id
            ui.log.log(f" Mod updated texture region {core_region_id}")
        seen_textures.add(filename)
        modded_textures[core_region_id] = {
            'mapped_from_id': region_id,
            'filename': filename,
            'path': path,
        }

    # A mod uses "autoAnimations" when any animation node references a
    # texture by filename instead of by numeric region id.
    autoAnimations = False
    for animation_chunk in modLibrary['library/animations']:
        filenameAssetPos = animation_chunk.find("//assetPos[@filename]")
        if filenameAssetPos is not None:
            autoAnimations = True
    # no textures.xml file and no autoAnimations, we're done
    if 'library/textures' not in modLibrary and not autoAnimations:
        return modded_textures
    # Create a textures xml tree if there was no manually-defined file
    if 'library/textures' not in modLibrary and autoAnimations:
        texRoot = lxml.etree.Element("AllTexturesAndRegions")
        lxml.etree.SubElement(texRoot, "textures")
        lxml.etree.SubElement(texRoot, "regions")
        modLibrary['library/textures'] = [lxml.etree.ElementTree(texRoot)]
    # FIXME verify that there's only one file
    # TODO Maybe don't require only a single file?
    textures_count = len(modLibrary['library/textures'])
    if len(modLibrary['library/textures']) != 1:
        ui.log.log(f" Expected 1 library/textures but found {textures_count}")
    textures_mod = modLibrary['library/textures'][0]
    # Allocate any manually defined texture regions into the CTC lib
    for texture_pack in textures_mod.xpath("//t[@i]"):
        cim_id = texture_pack.get('i')
        coreLibrary['_custom_textures_cim'][cim_id] = texture_pack.attrib
    # Map manually defined regions in textures file to autoIDs
    for region in textures_mod.xpath("//re[@n]"):
        region_id = region.get('n')
        _add_texture(region_id)
    # no custom mod textures, no need to remap ids
    if not mapping_n_region and not autoAnimations:
        return modded_textures
    ##########################################################################
    # Custom Mod Textures processing starts here
    ##########################################################################
    needs_autogeneration = set()
    for animation_chunk in modLibrary['library/animations']:
        # iterate on autogeneration nodes
        for asset in animation_chunk.xpath("//assetPos[@filename]"):
            # asset.get will never return null here
            mod_local_id = asset.get("filename").lstrip("/")
            if ".png" not in mod_local_id:
                mod_local_id += ".png"
            if mod_local_id not in needs_autogeneration:
                needs_autogeneration.add(mod_local_id)
                _add_texture(mod_local_id)
            if mod_local_id not in mapping_n_region:
                continue
            new_id = mapping_n_region[mod_local_id]
            asset.set('a', new_id)
        # iterate on manually defined nodes
        for asset in animation_chunk.xpath("//assetPos[@a and not(@filename)]"):
            mod_local_id = asset.get('a')
            if not str.isdecimal(mod_local_id):
                raise ValueError(f"Cannot specify a non-numerical 'a' attribute {mod_local_id}. " +
                                 "Specify in 'filename' attribute instead.")
            _add_texture(mod_local_id + ".png")
            if mod_local_id not in mapping_n_region:
                continue
            new_id = mapping_n_region[mod_local_id]
            asset.set('a', new_id)
    if len(needs_autogeneration):
        # Pack all auto-generated textures into one new atlas texture.
        image_count = len(needs_autogeneration)
        regionsNode = textures_mod.find("//regions")
        texturesNode = textures_mod.find("//textures")
        textureID: int = ui.database.ModDatabase.getMod(mod).prefix
        # Catch missing Modder ID. Still try to process and move forward.
        if not textureID or textureID <= 0:
            ui.log.log("ERROR: info.xml is missing <modid>. Mod Author should set this to their Discord ID for all mods they make.")
            textureID = 9999
        packer = rectpack.newPacker(rotation=False)
        sumA: int = 0  # Total Area
        sumW: int = 0  # Total Width
        sumH: int = 0  # Total Height
        minRequiredDimension = 2048
        # First get all the files and pack them into a new texture square
        for regionName in needs_autogeneration:
            (w, h, rows, info) = png.Reader(textures_path + "/" + regionName).asRGBA()
            packer.add_rect(w, h, regionName)
            minRequiredDimension = max(minRequiredDimension, w, h)
            sumA += (w * h)
            sumW += w
            sumH += h
        # The absolute largest size that can be needed to fit everything.
        maxRequiredDimension = int(math.ceil(math.sqrt(sumH * sumW)))
        # Increase size estimate until it is large enough.
        size: int = 0
        sizeEstimate = 1.2
        basearea = max(int(math.sqrt(sumA)), minRequiredDimension)
        # Grow the square bin until every image fits (or the hard cap is hit).
        while size < maxRequiredDimension and image_count != len(packer.rect_list() or []):
            size = int(basearea * sizeEstimate)
            packer.reset()
            packer.add_bin(size, size)
            packer.pack()
            sizeEstimate += 0.1
        newTex = lxml.etree.SubElement(texturesNode, "t")
        newTex.set("i", str(textureID))
        newTex.set("w", str(size))
        newTex.set("h", str(size))
        coreLibrary['_custom_textures_cim'][str(textureID)] = newTex.attrib
        # prepare to export packed PNG to mod directory.
        kwargs = {}
        kwargs['create'] = True
        kwargs['width'] = size
        kwargs['height'] = size
        export_path = os.path.join(mod, f"custom_texture_{textureID}.png")
        custom_png: Texture = Texture(export_path, **kwargs)
        packedRectsSorted = {}
        for rect in packer.rect_list():
            b, x, y, w, h, rid = rect
            remappedID = mapping_n_region[rid]
            packedRectsSorted[remappedID] = (str(x), str(y), str(w), str(h), str(rid))
            custom_png.pack_png(os.path.join(textures_path, rid), x, y, w, h)
        # write back the cim file as png for debugging
        # this only includes textures from this mod, not the final generated cim.
        custom_png.export_png(export_path)
        # NOT YET SORTED
        packedRectsSorted = {k: v for k, v in sorted(packedRectsSorted.items())}
        # NOW SORTED: We need this to make sure the IDs are added to the
        # textures file in the correct order
        for remappedID, data in packedRectsSorted.items():
            x, y, w, h, regionFileName = data
            remapData = modded_textures[remappedID]
            newNode = lxml.etree.SubElement(regionsNode, "re")
            newNode.set("n", remappedID)
            newNode.set("t", str(textureID))
            newNode.set("x", x)
            newNode.set("y", y)
            newNode.set("w", w)
            newNode.set("h", h)
            newNode.set("file", regionFileName)
    # Remap manually defined 're' nodes to the freshly allocated ids.
    for asset in textures_mod.xpath("//re[@n]"):
        mod_local_id = asset.get('n')
        if mod_local_id not in mapping_n_region:
            continue
        new_id = mapping_n_region[mod_local_id]
        ui.log.log(" Mapping texture 're' {} to {}...".format(mod_local_id, new_id))
        asset.set('n', new_id)
    # write the new textures XML if changed.
    if autoAnimations:
        modLibrary['library/textures'][0].write(
            os.path.join(mod, "library", "generated_textures.xml"),
            pretty_print=True)
    return modded_textures
def packwood(whatToFit, binList, kerf):
    """Pack the parts described in whatToFit onto the sheets in binList
    and render one matplotlib figure per sheet (saved as rect-<n>.png).

    whatToFit maps a part name to {'size': (w, h), 'count': n}; kerf is
    the cut allowance added to both dimensions of every part.
    """
    # Expand every part into `count` labelled rectangles, kerf included.
    rectangles = [
        (spec.get('size')[0] + kerf,
         spec.get('size')[1] + kerf,
         "%s # %d\n%dx%d" % (name, copy + 1,
                             spec.get('size')[0], spec.get('size')[1]))
        for name, spec in whatToFit.items()
        for copy in range(0, spec.get('count', 1))
    ]
    packer = rectpack.newPacker(rectpack.PackingMode.Offline,
                                rectpack.PackingBin.Global)
    # Queue the rectangles, then the available sheets, then pack.
    for rect in rectangles:
        packer.add_rect(*rect)
    for sheet in binList:
        packer.add_bin(*sheet)
    packer.pack()
    # Full rectangle list
    all_rects = packer.rect_list()
    row = 0
    for sheet in packer:
        row += 1
        print("Sheet ", sheet.width, "x", sheet.height, "subplot", (row, 1, row))
        # One figure (single subplot) per sheet.
        fig = plot.figure()
        axes = fig.add_subplot(111)
        for r in sheet:
            axes.add_patch(
                patches.Rectangle(
                    (r.x, r.y),      # bottom-left corner
                    r.width,
                    r.height,
                    edgecolor="red"))
            # Rotate the label when the part is taller than it is wide.
            label_rotation = 90 if r.height > r.width else 0
            axes.annotate(r.rid, (r.x + r.width / 2, r.y + r.height / 2),
                          color='w',
                          weight='bold',
                          fontsize=6,
                          ha='center',
                          va='center',
                          rotation=label_rotation)
            print(" Rect ", r.x, r.y, r.width, r.height, r.rid)
        axes.set_xlim((0, sheet.width))
        axes.set_ylim((0, sheet.height))
        axes.set_aspect('equal')
        axes.set_title('Sheet %dx%d ' % (sheet.width, sheet.height), fontsize=8)
        fig.savefig('rect-%d.png' % (row), dpi=300, bbox_inches='tight')
    print(os.path.abspath(os.curdir))
def gen_atlas(overrides, src, dst, binsize, atlasname, tex_format=texture_formats[0],
              border=1, force_single=False, crop=True, leanify=True):
    """Build a sprite atlas from every texture under `src`.

    Packs all sprites into one or more bins of `binsize` (doubling the bin
    when force_single is set), composites each bin into a PNG with
    ImageMagick `convert`, writes .tex/.spr metadata, optionally converts
    to `tex_format` and runs leanify, then replaces the previous atlas
    files under `dst`.
    """
    overrides = Path(overrides).resolve()
    src = Path(src).resolve()
    dst = Path(dst).resolve()
    sprite_configs = {}

    def get_border(sprite, default_border=border):
        # Per-sprite border override from its .conf, never below the default.
        return max(default_border, int(sprite_configs[sprite].get('border', default_border)))

    try:
        texture_local_overrides = (src / 'atlas.tex').read_text()
    except FileNotFoundError:
        texture_local_overrides = None
    try:
        texture_global_overrides = (overrides / 'atlas.tex').read_text()
    except FileNotFoundError:
        texture_global_overrides = None
    total_images = 0
    packed_images = 0
    rects = []
    # Scan sources: one rect per sprite, grown by its border on each side.
    for path in src.glob('**/*.*'):
        if path.is_file() and path.suffix[1:].lower() in texture_formats:
            img = Image.open(path)
            sprite_name = path.relative_to(src).with_suffix('').as_posix()
            sprite_config_path = overrides / (get_override_file_name(sprite_name) + '.conf')
            sprite_configs[sprite_name] = parse_sprite_conf(sprite_config_path)
            border = get_border(sprite_name)
            rects.append((img.size[0] + border * 2, img.size[1] + border * 2,
                          (path, sprite_name)))
            img.close()
    total_images = len(rects)
    make_packer = lambda: rectpack.newPacker(
        # No rotation support in Taisei yet
        rotation=False,
        # Fine-tuned for least area used after crop
        sort_algo=rectpack.SORT_SSIDE,
        bin_algo=rectpack.PackingBin.BFF,
        pack_algo=rectpack.MaxRectsBl,
    )
    binsize = list(binsize)
    if force_single:
        # Single-bin mode: double the smaller bin dimension until all fit.
        while True:
            packer = make_packer()
            packer.add_bin(*binsize)
            for rect in rects:
                packer.add_rect(*rect)
            packer.pack()
            if sum(len(bin) for bin in packer) == total_images:
                break
            if binsize[1] < binsize[0]:
                binsize[1] *= 2
            else:
                binsize[0] *= 2
    else:
        packer = make_packer()
        for rect in rects:
            packer.add_rect(*rect)
        packer.add_bin(*binsize)
        packer.pack()
        packed_images = sum(len(bin) for bin in packer)
        if total_images != packed_images:
            missing = total_images - packed_images
            raise TaiseiError(
                f'{missing} sprite{"s were" if missing > 1 else " was"} not packed (bin size is too small?)'
            )
    futures = []
    with ExitStack() as stack:
        # Do everything in a temporary directory first
        temp_dst = Path(
            stack.enter_context(
                TemporaryDirectory(prefix=f'taisei-atlas-{atlasname}')))
        # Run multiple leanify processes in parallel, in case we end up with multiple pages
        # Yeah I'm too lazy to use Popen properly
        executor = stack.enter_context(ThreadPoolExecutor())
        for i, bin in enumerate(packer):
            textureid = f'atlas_{atlasname}_{i}'
            # dstfile = temp_dst / f'{textureid}.{tex_format}'
            # NOTE: we always save PNG first and convert with an external tool later if needed.
            dstfile = temp_dst / f'{textureid}.png'
            print(dstfile)
            dstfile_meta = temp_dst / f'{textureid}.tex'
            write_texture_def(dstfile_meta, textureid, tex_format,
                              texture_global_overrides, texture_local_overrides)
            actual_size = [0, 0]
            if crop:
                # Shrink the page to the bounding box of its sprites.
                for rect in bin:
                    if rect.x + rect.width > actual_size[0]:
                        actual_size[0] = rect.x + rect.width
                    if rect.y + rect.height > actual_size[1]:
                        actual_size[1] = rect.y + rect.height
            else:
                actual_size = (bin.width, bin.height)
            # Build one ImageMagick command compositing every sprite in place.
            composite_cmd = [
                'convert',
                '-verbose',
                '-size',
                f'{actual_size[0]}x{actual_size[1]}',
                'xc:none',
            ]
            for rect in bin:
                img_path, name = rect.rid
                border = get_border(name)
                composite_cmd += [
                    str(img_path), '-geometry',
                    '{:+}{:+}'.format(rect.x + border, rect.y + border),
                    '-composite'
                ]
                override_path = overrides / get_override_file_name(name)
                if override_path.exists():
                    override_contents = override_path.read_text()
                else:
                    override_contents = None
                    # NOTE(review): 'img' here is the last image opened in the
                    # scan loop above (and already closed) — this looks like it
                    # was meant to be the current sprite's size; confirm.
                    write_override_template(override_path, img.size)
                write_sprite_def(
                    temp_dst / f'{name}.spr',
                    textureid,
                    (rect.x + border, rect.y + border,
                     rect.width - border * 2, rect.height - border * 2),
                    img.size,
                    overrides=override_contents)
            composite_cmd += [str(dstfile)]

            @executor.submit
            def process(dstfile=dstfile):
                # Composite the page, convert format if requested, then leanify.
                subprocess.check_call(composite_cmd)
                oldfmt = dstfile.suffix[1:].lower()
                if oldfmt != tex_format:
                    new_dstfile = dstfile.with_suffix(f'.{tex_format}')
                    if tex_format == 'webp':
                        subprocess.check_call([
                            'cwebp',
                            '-progress',
                            '-preset', 'drawing',
                            '-z', '9',
                            '-lossless',
                            '-q', '100',
                            str(dstfile),
                            '-o', str(new_dstfile),
                        ])
                    else:
                        raise TaiseiError(
                            f'Unhandled conversion {oldfmt} -> {tex_format}')
                    dstfile.unlink()
                    dstfile = new_dstfile
                if leanify:
                    subprocess.check_call(['leanify', '-v', str(dstfile)])
            futures.append(process)
        # Wait for subprocesses to complete.
        wait_for_futures(futures)
        executor.shutdown(wait=True)
        # Only now, if everything is ok so far, copy everything to the
        # destination, possibly overwriting previous results
        pattern = re.compile(
            rf'^atlas_{re.escape(atlasname)}_\d+.({"|".join(texture_formats + ["tex"])})$'
        )
        for path in dst.iterdir():
            if pattern.match(path.name):
                path.unlink()
        targets = list(temp_dst.glob('**/*'))
        for dir in (p.relative_to(temp_dst) for p in targets if p.is_dir()):
            (dst / dir).mkdir(parents=True, exist_ok=True)
        for file in (p.relative_to(temp_dst) for p in targets if not p.is_dir()):
            shutil.copyfile(str(temp_dst / file), str(dst / file))
def draw_packed_reid_image(image_folder, reid_dataset_folder, output_path,
                           without_distractors, take_count,
                           pack_image_dims=(1024, 512)):
    """Pack a reproducible random sample of re-id crops into one image.

    Samples take_count images from image_folder (fixed seed 574),
    optionally excluding distractors derived from reid_dataset_folder,
    packs their bordered bounding boxes into a single pack_image_dims
    bin, pastes them onto a white canvas, shows it and saves it to
    output_path.
    """
    def add_border(image, border_in_px=3):
        # Center the image on a slightly larger white canvas so every crop
        # gets a visible white border once pasted.
        width, height = image.size
        new_width, new_height = width + border_in_px, height + border_in_px
        new_image = Image.new('RGB', (new_width, new_height), color='white')
        center_x = int((new_width - width) / 2)
        center_y = int((new_height - height) / 2)
        new_image.paste(image, (center_x, center_y))
        return new_image

    image_names = sorted(os.listdir(image_folder))
    if without_distractors:
        distractors = getAllDistractors(reid_dataset_folder)
        image_names = set(image_names)
        image_names = list(image_names - distractors)
        # re-sort so the sampling below stays deterministic
        image_names = sorted(image_names)
    random.seed(574)  # fixed seed: the same sample is drawn on every run
    # NOTE: random.choices samples with replacement, so duplicates can occur
    image_names = random.choices(image_names, k=take_count)
    images = []
    packer = newPacker(rotation=False)
    for image_id, image_name in enumerate(image_names):
        image_path = os.path.join(image_folder, image_name)
        img = Image.open(image_path)
        img = add_border(image=img, border_in_px=3)
        images.append(img)
        width, height = img.size
        packer.add_rect(width=width, height=height, rid=image_id)
    # Add the bins where the rectangles will be placed
    packer.add_bin(width=pack_image_dims[0], height=pack_image_dims[1], count=1)
    # Start packing
    packer.pack()
    bin = packer[0]
    print("bin len {}".format(len(bin)))
    img = Image.new('RGB', (pack_image_dims[0], pack_image_dims[1]), color='white')
    for rect in bin:
        w = rect.width
        h = rect.height
        # rect is a Rectangle object
        x = rect.x  # rectangle bottom-left x coordinate
        # flip from rectpack's bottom-left origin to PIL's top-left origin
        y = pack_image_dims[1] - rect.y - h  # rectangle bottom-left y coordinate
        rid = rect.rid
        image = images[rid]
        img.paste(image, (x, y))
    img.show()
    img.save(output_path)
def genSVG(binsize, partDict, rectList, filename):
    """Union the packed parts at their bin positions and export one SVG.

    binsize is the (w, h) of the bin; partDict maps a part name to its
    part object; rectList holds rectpack tuples (b, x, y, w, h, rid)
    where rid is the part name. The y axis is flipped from rectpack's
    bottom-left origin to the drawing's top-left origin.
    """
    wp = cq.Workplane("XY")
    for i in rectList:
        print(i)
        # Centre of the packed rectangle (rectpack reports bottom-left).
        cx = i[1] + (i[3] / float2dec(2.0, 3))
        cy = binsize[1] - (i[2] + (i[4] / float2dec(2.0, 3)))
        name = i[5]
        print(cx, cy, name)
        wp = wp.union(partDict[name].local_obj.translate((cx, cy, 0)))
    SVGexport.exportSVG(wp, filename)


# --- script: extract parts from a TurnTable model, pack them, export SVG ---
fb = TurnTable(width=90, length=90)
ex = Extractor(fb)
ex.scan(fb, "")
parts = ex.get_parts()
rects = getRects(parts, gap=3)  # bounding rectangles with 3 units of clearance
p = newPacker(rotation=False)
print("RECTS")
for r in rects:
    print(r)
    p.add_rect(*r)
bins = [(1024, 1024)]
for b in bins:
    p.add_bin(*b, count=10)
p.pack()
rects = p.rect_list()
print(rects)
print("LAYOUT")
genSVG(bins[0], parts, rects, "box.svg")
def Submit(self, l, b, u_id, s_id, lam, job_card):
    """Validate and store a printing request, then render a preview sheet.

    l/b are the requested length and width, u_id/s_id identify the user
    and the store, lam is the lamination choice and job_card the job
    type. On success the current packing for the job card is rendered
    onto a 500x500 preview saved under Master1/<job_card>.jpg.

    Returns 0 when Check() rejects the request, 1 on success, 3 on any
    database or image failure.
    """
    t_val = self.Check(l, b, job_card)
    if t_val == 0:
        return 0
    elif t_val == 1:
        try:
            connection = mysql.connector.connect(host="localhost",
                                                 user="******",
                                                 passwd='',
                                                 database="hello")
            cursor = connection.cursor()
            # Insert query (parameterized)
            query = "INSERT INTO printing_request (user_id, store_id,lamination,length,width,jobcard_type,master_printer_job) VALUES (%s, %s, %s, %s, %s,%s,%s)"
            # Setting insert value
            val = (u_id, s_id, lam, l, b, job_card, 1)
            cursor.execute(query, val)  # Executing the query
            connection.commit()  # Committing the change to the database
            # Packing the rectangles
            recs_details = recs_list.Update_list(job_card)
            recs = []
            for idnt, l, b in recs_details:
                recs.append((l, b))
            fit_packer = newPacker()
            # Add the rectangles to packing queue
            for r in recs:
                fit_packer.add_rect(*r)
            # Add the bins where the rectangles will be placed
            for b in self.bins:
                fit_packer.add_bin(*b)
            # Start packing
            fit_packer.pack()
            # Making the display image
            background = Image.open("Solid_White_Futon_Cover.jpg")
            background = background.resize((500, 500))
            blue = Image.open("new_img.jpg")
            count = 0
            for idnt, h, w in recs_details:
                try:
                    rect = fit_packer[0][count]
                    temp = blue.resize((rect.width, rect.height))
                    draw = ImageDraw.Draw(temp)
                    msg = str(idnt) + "\n" + str(h) + 'x' + str(w)
                    # NOTE(review): w/h are rebound here to the text size,
                    # shadowing the loop variables from recs_details —
                    # confirm this is intended.
                    w, h = draw.textsize(msg)
                    # NOTE(review): ImageFont.truetype is called without a
                    # font file; verify this works on the deployment host.
                    font = ImageFont.truetype(size=50)
                    draw.text(
                        ((rect.width - w) / 2, (rect.height - h) / 2),
                        msg,
                        fill="black",
                        font=font)
                    background.paste(im=temp, box=(rect.x, rect.y))
                    count += 1
                except IndexError:
                    # fewer packed rectangles than requested entries
                    break
            try:
                name = "Master1/" + job_card + ".jpg"
                background.save(name, 'JPEG')
                return 1
            except FileNotFoundError:
                return 3
            # NOTE(review): unreachable — both branches above return.
            recs.pop()
        except:
            # NOTE(review): bare except hides DB/image errors; consider
            # narrowing to the expected exception types.
            return 3
def main(): parse = argparse.ArgumentParser(description='collage maker') parse.add_argument('-f', '--folder', dest='folder', help='folder with images (*.jpg)', default='.') parse.add_argument('-o', '--output', dest='output', help='output collage image filename', default='collage.jpg') parse.add_argument('-n', '--normalize', dest='normalizeHeight', type=bool, help='set True to normalize image heights', default=False) parse.add_argument('-H', '--height', dest='height', type=int, help='height of normalized image', default=100) parse.add_argument('-m', '--multiplier', dest='multiplier', type=float, help='Scale final image by a factor of', default=3) parse.add_argument('-s', '--scaleFactor', dest='scaleFactor', type=float, help='Scale each image by a factor of', default=1) parse.add_argument('-r', '--resolution', dest='resolution', type=int, help='Final image resolution', default=2000) parse.add_argument( '-p', '--productivityHexagon', dest='hexagonal', type=bool, help='set to make hexagonal (other flags will not effect this output)', default=False) args = parse.parse_args() packer = newPacker(pack_algo=GuillotineBssfSas, rotation=False) multiplier = args.multiplier if not args.hexagonal: if args.normalizeHeight: multiplier = args.resolution / args.height sizes = getSizes(args.folder, args.normalizeHeight, args.scaleFactor, args.height, packer) x, y = makeBins(sizes, multiplier, packer) print("Packing (this could take some time)") packer.pack() rectangles = makeRectangles(packer) background = makeCanvas(x, y) pasteImages(args.folder, args.normalizeHeight, args.scaleFactor, args.height, rectangles, x, y, background, args.output) else: directory = args.folder count = len( [file for file in os.listdir(directory) if file.endswith(".jpg")]) print(count) for file in tqdm(os.listdir(directory)): if file.endswith(".jpg"): im = Image.open("%s/%s" % (directory, file)) else: print("%s is not a .jpg") makeHexagonal(directory, side_length=count)
(110, 100), (130, 240), (130, 220), (90, 160), (40, 100), (50, 140), (150, 250), (70, 200), (160, 120), (120, 120),
    (100, 190), (190, 240), (120, 270), (60, 130), (160, 230), (170, 170), (200, 170), (90, 210), (60, 190),
    (120, 180), (110, 190), (180, 270), (160, 120), (160, 100),
    # order 4
    (90, 220), (110, 260), (80, 120), (80, 280), (50, 280), (80, 270), (160, 190), (40, 190), (90, 250), (180, 210),
    (180, 250), (110, 160), (170, 270), (110, 270), (80, 140), (100, 270), (140, 210), (120, 200), (120, 150)]
# NOTE(review): the opening of this (w, h) rectangle list is above this
# chunk; the tuples continue a `rectangles = [...]` literal.

# Eight identical 500x600 bins available to the packer.
bins = [(500, 600), (500, 600), (500, 600), (500, 600), (500, 600),
        (500, 600), (500, 600), (500, 600)]

packer = newPacker()

# Add the rectangles to packing queue
for r in rectangles:
    packer.add_rect(*r)

# Add the bins where the rectangles will be placed
for b in bins:
    packer.add_bin(*b)

# Start packing
packer.pack()

# Obtain number of bins used for packing (len(packer) counts opened bins).
nbins = len(packer)
def _detect_textures(coreLibrary, modLibrary, mod):
    """Discover a mod's texture files and map them into the core library.

    Walks ``<mod>/textures``, allocates new core region ids for textures the
    core does not know, rewrites animation ``assetPos`` references to the new
    ids, and -- when textures need auto-generation -- packs them into a new
    texture square via rectpack and records the layout in the textures XML.

    Args:
        coreLibrary: mutable dict holding core texture state; this function
            reads/advances ``'_next_region_id'``, reads
            ``'_last_core_region_id'`` and writes ``'_custom_textures_cim'``.
        modLibrary: dict of parsed lxml trees keyed by virtual path
            ('library/animations', 'library/textures').
        mod: filesystem path to the mod root.

    Returns:
        dict mapping core region id -> {'mapped_from_id', 'filename', 'path'}
        for every texture this mod touches (empty if no textures dir).
    """
    textures_path = os.path.join(mod, 'textures')
    if not os.path.isdir(textures_path):
        return {}

    mapping_n_region = {}   # mod filename -> newly allocated core region id
    modded_textures = {}    # core region id -> texture record (returned)
    seen_textures = set()

    def _add_texture(filename):
        """Register one texture file, allocating a core region id if new.

        Mutates the enclosing mapping_n_region / modded_textures /
        seen_textures and coreLibrary's id counter.
        """
        # Region id = filename without its extension.
        region_id = str.join(".", filename.split('.')[:-1])
        # A purely-numeric id at or below the last core id refers to an
        # existing core region rather than a brand new texture.
        isCoreRegion = region_id.isdecimal(
        ) and int(region_id) <= coreLibrary['_last_core_region_id']

        # Early exit if this texture exists
        if (region_id in modded_textures) or (region_id in mapping_n_region):
            return

        path = os.path.join(textures_path, filename)
        # core region file without an associated file, return early
        if isCoreRegion and not os.path.exists(path):
            return

        if not isCoreRegion:
            # adding a new texture, this gets tricky as they have to have
            # consecutive numbers.
            core_region_id = str(coreLibrary['_next_region_id'])
            mapping_n_region[filename] = core_region_id
            coreLibrary['_next_region_id'] += 1
            # NOTE(review): "(unknown)" looks like a redacted {filename}
            # placeholder in this log line -- confirm against upstream.
            ui.log.log(
                f" Allocated new core region idx {core_region_id:>5} to file (unknown)"
            )
        else:
            core_region_id = region_id
            ui.log.log(f" Mod updated texture region {core_region_id}")

        seen_textures.add(filename)
        modded_textures[core_region_id] = {
            'mapped_from_id': region_id,
            'filename': filename,
            'path': path,
        }

    # Any animation chunk with a filename-based assetPos triggers the
    # auto-generation path below.
    autoAnimations = False
    for animation_chunk in modLibrary['library/animations']:
        filenameAssetPos = animation_chunk.find("//assetPos[@filename]")
        if filenameAssetPos is not None:
            autoAnimations = True

    # no textures.xml file and no autoAnimations, we're done
    if 'library/textures' not in modLibrary and not autoAnimations:
        return modded_textures

    # Create a textures xml tree if there was no manually-defined file
    if 'library/textures' not in modLibrary and autoAnimations:
        texRoot = lxml.etree.Element("AllTexturesAndRegions")
        lxml.etree.SubElement(texRoot, "textures")
        lxml.etree.SubElement(texRoot, "regions")
        modLibrary['library/textures'] = [lxml.etree.ElementTree(texRoot)]

    # FIXME verify that there's only one file
    # TODO Maybe don't require only a single file?
    textures_mod = modLibrary['library/textures'][0]

    # Allocate any manually defined texture regions into the CTC lib
    for texture_pack in textures_mod.xpath("//t[@i]"):
        cim_id = texture_pack.get('i')
        coreLibrary['_custom_textures_cim'][cim_id] = texture_pack.attrib

    # Map manually defined regions in textures file to autoIDs
    for region in textures_mod.xpath("//re[@n]"):
        region_id = region.get('n')
        _add_texture(region_id)

    # no custom mod textures, no need to remap ids
    if not mapping_n_region and not autoAnimations:
        return modded_textures

    needs_autogeneration = set()
    for animation_chunk in modLibrary['library/animations']:
        # iterate on autogeneration nodes
        for asset in animation_chunk.xpath("//assetPos[@filename]"):
            # asset.get will never return null here
            mod_local_id = asset.get("filename").lstrip("/")
            if ".png" not in mod_local_id:
                mod_local_id += ".png"
            if mod_local_id not in needs_autogeneration:
                needs_autogeneration.add(mod_local_id)
                _add_texture(mod_local_id)
            if mod_local_id not in mapping_n_region:
                continue
            # Point the animation at the newly allocated region id.
            new_id = mapping_n_region[mod_local_id]
            asset.set('a', new_id)
        # iterate on manually defined nodes
        for asset in animation_chunk.xpath(
                "//assetPos[@a and not(@filename)]"):
            mod_local_id = asset.get('a')
            if not str.isdecimal(mod_local_id):
                raise ValueError(
                    f"Cannot specify a non-numerical 'a' attribute {mod_local_id}. "
                    + "Specify in 'filename' attribute instead.")
            _add_texture(mod_local_id + ".png")
            if mod_local_id not in mapping_n_region:
                continue
            new_id = mapping_n_region[mod_local_id]
            asset.set('a', new_id)

    if len(needs_autogeneration):
        regionsNode = textures_mod.find("//regions")
        texturesNode = textures_mod.find("//textures")
        # The mod's numeric prefix doubles as the generated texture's id.
        textureID = ui.database.ModDatabase.getMod(mod).prefix

        packer = rectpack.newPacker(rotation=False)
        # NOTE(review): `sum` shadows the builtin within this branch.
        sum = 0
        minRequiredDimension = 0
        # First get all the files and pack them into a new texture square
        for regionName in needs_autogeneration:
            (w, h, rows, info) = png.Reader(textures_path + "/" +
                                            regionName).asRGBA()
            packer.add_rect(w, h, regionName)
            minRequiredDimension = max(minRequiredDimension, w, h)
            sum += (w * h)
        # Square bin sized from total area (+20% slack), never smaller than
        # the largest single texture dimension.
        sizeEstimate = 1.2
        size = max(int(math.sqrt(sum) * sizeEstimate), minRequiredDimension)
        packer.add_bin(size, size)
        packer.pack()
        if len(needs_autogeneration) != len(packer.rect_list()):
            raise IndexError(
                f"Unable to pack all {len(needs_autogeneration)} regions with size estimate {sizeEstimate}"
                + f", was able to pack {len(packer.rect_list())} rectangles. Please file a bug report."
            )

        # Record the generated texture square in the XML and core library.
        newTex = lxml.etree.SubElement(texturesNode, "t")
        newTex.set("i", str(textureID))
        newTex.set("w", str(size))
        newTex.set("h", str(size))
        coreLibrary['_custom_textures_cim'][str(textureID)] = newTex.attrib

        packedRectsSorted = {}
        for rect in packer.rect_list():
            # rectpack rect_list tuples are (bin, x, y, w, h, rid).
            b, x, y, w, h, rid = rect
            remappedID = mapping_n_region[rid]
            packedRectsSorted[remappedID] = (str(x), str(y), str(w), str(h),
                                             str(rid))
        # NOT YET SORTED
        packedRectsSorted = {
            k: v
            for k, v in sorted(packedRectsSorted.items())
        }
        # NOW SORTED: We need this to make sure the IDs are added to the
        # textures file in the correct order
        for remappedID, data in packedRectsSorted.items():
            x, y, w, h, regionFileName = data
            remapData = modded_textures[remappedID]
            newNode = lxml.etree.SubElement(regionsNode, "re")
            newNode.set("n", remappedID)
            newNode.set("t", str(textureID))
            newNode.set("x", x)
            newNode.set("y", y)
            newNode.set("w", w)
            newNode.set("h", h)
            newNode.set("file", regionFileName)

    # Remap any manually defined <re> nodes to their allocated core ids.
    for asset in textures_mod.xpath("//re[@n]"):
        mod_local_id = asset.get('n')
        if mod_local_id not in mapping_n_region:
            continue
        new_id = mapping_n_region[mod_local_id]
        ui.log.log(" Mapping texture 're' {} to {}...".format(
            mod_local_id, new_id))
        asset.set('n', new_id)

    # write the new textures XML if changed.
    if autoAnimations:
        modLibrary['library/textures'][0].write(os.path.join(
            mod, "library", "generated_textures.xml"),
                                                pretty_print=True)

    return modded_textures