def circles(
    vector_data: Document,
    count,
    delta,
    quantization,
    layer_count,
    random_layer,
    layer,
    offset,
):
    start_layer_id = single_to_layer_id(layer, vector_data)
    for i in range(count):
        if random_layer:
            lid = start_layer_id + random.randint(0, layer_count - 1)
        else:
            lid = start_layer_id + (i % layer_count)
        vector_data.add(
            LineCollection(
                [circle((i + 1) * delta, quantization) + offset[0] + 1j * offset[1]]
            ),
            lid,
        )
    return vector_data
def test_document_bounds_empty_layer():
    doc = Document()
    doc.add(LineCollection([(0, 10 + 10j)]), 1)
    doc.add(LineCollection())
    assert doc.bounds() == (0, 0, 10, 10)
def dread(
    document: vp.Document,
    file,
    quantization: float,
    simplify: bool,
    parallel: bool,
    query: str,
    groupby: str,
) -> vp.Document:
    """Extract geometries from a DXF file."""
    dxf = ezdxf.readfile(file)
    elements = []
    unit = dxf.header.get("$INSUNITS")

    # TODO: Load this into correct units.
    if unit is not None and unit != 0:
        du = units.DrawingUnits(96.0, unit="in")
        scale = du.factor(decode(unit))
    else:
        scale = 1

    all_entities_by_attribute = dxf.query(query=query).groupby(groupby)
    for group in all_entities_by_attribute.values():
        for entity in group:
            entity_to_svg(elements, dxf, entity, scale)
        lc = i_trample_your_api(elements, quantization, simplify, parallel)
        document.add(lc)
        elements.clear()

    return document
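# For reference, $INSUNITS is the DXF header variable that encodes the drawing's
# insertion units as an integer code (0 = unitless, 1 = inches, 4 = millimeters,
# 5 = centimeters). The sketch below shows the kind of conversion the TODO above
# refers to, mapping a few common codes to a scale factor in CSS pixels (96 px per
# inch); this is an illustrative assumption, not the ezdxf API used by `dread`.
_INSUNITS_TO_PIXEL_SCALE = {
    0: 1.0,          # unitless: leave coordinates untouched
    1: 96.0,         # inches
    4: 96.0 / 25.4,  # millimeters
    5: 96.0 / 2.54,  # centimeters
}

assert _INSUNITS_TO_PIXEL_SCALE[1] == 96.0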
def efill(document: vp.Document, tolerance: float, distance: float):
    """Fill closed shapes using the Eulerian fill algorithm, which produces as few
    paths as there are contiguous regions. Scanlines are used to fill any shape,
    including shapes with holes, with an even-odd fill rule and direct pathing.
    """
    for layer in list(document.layers.values()):
        # add all the closed paths of the layer to the Eulerian fill
        efill = EulerianFill(distance)
        for p in layer:
            if np.abs(p[0] - p[-1]) <= tolerance:
                efill += vp.as_vector(p)

        # get the resulting fill and convert it back to a line collection
        fill = efill.get_fill()
        lc = vp.LineCollection()
        cur_line = []
        for pt in fill:
            if pt is None:
                if cur_line:
                    lc.append(cur_line)
                cur_line = []
            else:
                cur_line.append(complex(pt[0], pt[1]))
        if cur_line:
            lc.append(cur_line)
        document.add(lc)
    return document
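# A minimal usage sketch for `efill`, assuming it is called directly as a plain
# function on a document that already contains a closed path. Closedness is
# detected by comparing a path's first and last points against `tolerance`, so
# open polylines are skipped. The parameter values below are made up.
import numpy as np
import vpype as vp

# a closed 100x100 unit square (first point equals last point)
square = np.array([0j, 100 + 0j, 100 + 100j, 0 + 100j, 0j])

doc = vp.Document()
doc.add(vp.LineCollection([square]), 1)

# anything closing within 0.1 unit counts as closed; fill lines spaced 5 units apart
doc = efill(doc, tolerance=0.1, distance=5.0)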
def test_document_empty_copy():
    doc = Document()
    doc.add(LineCollection([(0, 1)]), 1)
    doc.page_size = 3, 4

    new_doc = doc.empty_copy()

    assert len(new_doc.layers) == 0
    assert new_doc.page_size == (3, 4)
def test_document_lid_iteration():
    lc = LineCollection([(0, 1 + 1j)])
    doc = Document()
    doc.add(lc, 1)

    for lc in doc.layers_from_ids([1, 2, 3, 4]):
        lc.append([3, 3 + 3j])

    assert doc.count() == 1
    assert len(doc.layers[1]) == 2
def iread(document: vp.Document, input_file: str, color, distance: float):
    """Image read and vectorization.

    This is a pure-Python polygon producer that vector-traces images according to the
    given criteria. The default mode traces black vs. white. Alternatively, multiple
    colors can be specified along with a color distance; each of these colors is then
    extracted and traced.
    """
    image = Image.open(input_file)
    width, height = image.size

    if len(color) == 0:
        # black vs. white mode: threshold a greyscale version of the image
        if image.mode != "L":
            image = image.convert("L")
        image = image.point(lambda e: int(e > 127) * 255)
        lc = vp.LineCollection()
        document.add(lc)
        for points in _vectrace(image.load(), width, height):
            lc.append(points)
        return document

    distance_sq = distance * distance

    def dist(c, pixel):
        r = c.red - pixel[0]
        g = c.green - pixel[1]
        b = c.blue - pixel[2]
        return r * r + g * g + b * b <= distance_sq

    if image.mode != "RGBA":
        image = image.convert("RGBA")

    for c in color:
        v = Image.new("L", image.size, 255)
        v_data = v.load()
        new_data = image.load()
        for y in range(height):
            for x in range(width):
                pixel = new_data[x, y]
                if pixel[3] == 0:
                    continue
                if dist(c, pixel):
                    new_data[x, y] = (255, 255, 255, 0)
                    v_data[x, y] = 0

        lc = vp.LineCollection()
        document.add(lc)
        for points in _vectrace(v_data, width, height):
            lc.append(points)

    return document
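# The color match in `iread` avoids a square root by comparing the squared RGB
# distance against the squared threshold. A standalone illustration of that test,
# with made-up color values, follows.
def within_distance(target_rgb, pixel_rgb, distance):
    # squared Euclidean distance in RGB space vs. squared threshold
    r = target_rgb[0] - pixel_rgb[0]
    g = target_rgb[1] - pixel_rgb[1]
    b = target_rgb[2] - pixel_rgb[2]
    return r * r + g * g + b * b <= distance * distance


# a pixel of (250, 10, 5) matches pure red for a tolerance of 16...
assert within_distance((255, 0, 0), (250, 10, 5), 16)
# ...but not for a tolerance of 8
assert not within_distance((255, 0, 0), (250, 10, 5), 8)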
def eread(document: vp.Document, filename: str):
    # populate the vp_source[s] properties
    document.set_property(vp.METADATA_FIELD_SOURCE, pathlib.Path(filename).absolute())
    document.add_to_sources(filename)

    pattern = EmbPattern(filename)
    for stitches, color in pattern.get_as_stitchblock():
        if len(stitches) == 0:
            continue

        # convert the stitch block to complex coordinates
        lc = vp.LineCollection()
        stitch_block = np.asarray(stitches, dtype="float")
        stitch_block = stitch_block[..., 0] + 1j * stitch_block[..., 1]
        lc.append(stitch_block)
        # scale after appending, otherwise scaling the empty collection is a no-op
        lc.scale(1.0 / _EMB_SCALE_FACTOR)
        lc.set_property(vp.METADATA_FIELD_COLOR, vp.Color(color.hex_color()))
        document.add(lc, with_metadata=True)

    return document
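# The stitch-block conversion in `eread` relies on numpy broadcasting to turn an
# array of stitches (each carrying at least an x and a y coordinate) into a 1-D
# array of complex points. A small standalone illustration with made-up stitch
# values follows.
import numpy as np

# hypothetical stitch block: each row is (x, y, command)
stitches = np.array(
    [
        [0.0, 0.0, 0],
        [10.0, 0.0, 0],
        [10.0, 5.0, 0],
    ]
)

# columns 0 and 1 become the real and imaginary parts of a complex polyline
line = stitches[..., 0] + 1j * stitches[..., 1]
assert np.array_equal(line, np.array([0 + 0j, 10 + 0j, 10 + 5j]))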
def test_ops_on_document_with_empty_layer():
    doc = Document()
    lc = LineCollection()
    doc.add(lc, 1)
    _all_document_ops(doc)
def test_document_bounds():
    doc = Document()
    doc.add(LineCollection([(-10, 10), (0, 0)]), 1)
    doc.add(LineCollection([(0, 0), (-10j, 10j)]), 2)
    assert doc.bounds() == (-10, -10, 10, 10)
def mdgrid(
    document: vp.Document,
    seed: Optional[int],
    size,
    count,
    pen_width,
    fat_grid,
    global_rate,
    rate_fill,
    rate_gradient,
    rate_bigdot,
    rate_star,
    rate_hatch,
):
    """Create nice random grids with stuff in them."""
    if len(rate_fill) == 0 and global_rate is not None:
        rate_fill = [global_rate]
    rate_gradient = check_default(rate_gradient, global_rate)
    rate_bigdot = check_default(rate_bigdot, global_rate)
    rate_star = check_default(rate_star, global_rate)
    rate_hatch = check_default(rate_hatch, global_rate)

    logging.info(
        f"mdgrid: rates: fill = {rate_fill}, gradient = {rate_gradient}, "
        f"bigdot = {rate_bigdot}, star = {rate_star}, hatch = {rate_hatch}"
    )

    # handle seed
    if seed is None:
        seed = random.randint(0, int(1e9))
        logging.info(f"mdgrid: no seed provided, generating one ({seed})")
    np.random.seed(seed)
    random.seed(seed)

    grid_lc = vp.LineCollection()

    # build the grid
    col_widths = distribute_widths(count[0], size[0])
    row_widths = distribute_widths(count[1], size[1])
    col_seps = np.hstack([0, np.cumsum(col_widths)])
    row_seps = np.hstack([0, np.cumsum(row_widths)])

    # outer boundaries must be a single loop (for fat grid to work nicely)
    grid_lc.append(
        [
            col_seps[0] + row_seps[0] * 1j,
            col_seps[0] + row_seps[-1] * 1j,
            col_seps[-1] + row_seps[-1] * 1j,
            col_seps[-1] + row_seps[0] * 1j,
            col_seps[0] + row_seps[0] * 1j,
        ]
    )
    grid_lc.extend([x + row_seps[0] * 1j, x + row_seps[-1] * 1j] for x in col_seps)
    grid_lc.extend([y * 1j + col_seps[0], y * 1j + col_seps[-1]] for y in row_seps)

    # implement fat grid
    fat_grid_lc = vp.LineCollection()
    if fat_grid:
        mls = grid_lc.as_mls()
        fat_grid_lc.extend(
            unary_union(
                [
                    mls_parallel_offset(mls, pen_width, "left"),
                    mls_parallel_offset(mls, pen_width, "right"),
                ]
            )
        )

    # generate content in each cell
    fill_lcs = [vp.LineCollection() for _ in range(len(rate_fill))]
    grad_lc = vp.LineCollection()
    bigdot_lc = vp.LineCollection()
    star_lc = vp.LineCollection()
    hatch_lc = vp.LineCollection()
    for (x, y) in itertools.product(range(count[0]), range(count[1])):
        rect = (
            col_seps[x],
            row_seps[y],
            col_seps[x + 1] - col_seps[x],
            row_seps[y + 1] - row_seps[y],
        )

        filled = False
        for i, r in enumerate(rate_fill):
            if random.random() < r:
                fill_lcs[i].extend(generate_fill(rect, pen_width))
                filled = True
                break

        if not filled:
            if random.random() < rate_gradient:
                grad_lc.extend(generate_dot_gradient(rect, pen_width, density=0.3))
            elif random.random() < rate_bigdot:
                bigdot_lc.extend(generate_big_dot_gradient(rect, pen_width, 3, density=0.01))
            elif random.random() < rate_star:
                star_lc.extend(generate_star(rect, line_count=20))
            elif random.random() < rate_hatch:
                hatch_lc.extend(generate_hatch(rect))

    # populate vector data with layer content
    document.add(grid_lc, 1)
    document.add(fat_grid_lc, 2)
    document.add(star_lc, 3)
    document.add(hatch_lc, 4)
    document.add(grad_lc, 5)
    document.add(bigdot_lc, 6)
    for i, lc in enumerate(fill_lcs):
        document.add(lc, 7 + i)

    return document
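# The grid layout in `mdgrid` is driven by the cumulative sums of the per-column
# and per-row widths: `col_seps`/`row_seps` start at 0 and end at the grid's total
# size, so cell (x, y) spans col_seps[x]..col_seps[x + 1] horizontally. A small
# standalone illustration with made-up widths (in place of the `distribute_widths`
# helper) follows.
import numpy as np

# hypothetical column widths for a 3-column grid totalling 10 units
col_widths = np.array([2.0, 5.0, 3.0])
col_seps = np.hstack([0, np.cumsum(col_widths)])
assert np.array_equal(col_seps, np.array([0.0, 2.0, 7.0, 10.0]))

# cell x=1 spans from col_seps[1] to col_seps[2], i.e. a width of 5 units
assert col_seps[2] - col_seps[1] == 5.0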
def read(
    document: Document,
    file,
    single_layer: bool,
    layer: Optional[int],
    quantization: float,
    simplify: bool,
    parallel: bool,
    no_crop: bool,
    display_size: Tuple[float, float],
    display_landscape: bool,
) -> Document:
    """Extract geometries from an SVG file.

    By default, the `read` command attempts to preserve the layer structure of the SVG.
    In this context, top-level groups (<svg:g>) are each considered a layer. Any
    non-group, top-level SVG elements are imported into layer 1.

    The following logic is used to determine in which layer each SVG top-level group is
    imported:

    - If an `inkscape:label` attribute is present and contains digit characters, it is \
    stripped of non-digit characters and the resulting number is used as the target \
    layer. If the resulting number is 0, layer 1 is used instead.
    - If the previous step fails, the same logic is applied to the `id` attribute.
    - If both previous steps fail, the target layer matches the top-level group's order \
    of appearance.

    Using `--single-layer`, the `read` command operates in single-layer mode. In this \
    mode, all geometries are imported into a single layer regardless of the group \
    structure. The current target layer is used by default and can be overridden with \
    the `--layer` option.

    This command extracts only path elements and primitives (rectangles, ellipses,
    lines, polylines, polygons). Other elements such as text and bitmap images are
    discarded, and so is all formatting.

    All curved primitives (e.g. Bézier paths, ellipses, etc.) are linearized and
    approximated by polylines. The quantization length controls the maximum length of
    individual segments.

    Optionally, a line simplification with tolerance set to quantization can be applied
    to the SVG's curved elements (e.g. circles, ellipses, arcs, Bézier curves, etc.).
    This is enabled with the `--simplify` flag. This process significantly reduces the
    number of segments used to approximate the curves while still guaranteeing an
    accurate conversion, but may increase the execution time of this command.

    The `--parallel` option enables multiprocessing for the SVG conversion. This is
    recommended ONLY when using `--simplify` on large SVG files with many curved
    elements.

    By default, the geometries are cropped to the SVG boundaries defined by its width
    and height attributes. The crop operation can be disabled with the `--no-crop`
    option.

    In general, SVG boundaries are determined by the `width` and `height` attributes of
    the top-level <svg> tag. However, some SVGs have their width and/or height specified
    as a percent value, or omit them altogether (in which case they are assumed to be
    set to 100%). In these cases, vpype considers by default that 100% corresponds to an
    A4 page in portrait orientation. The options `--display-size FORMAT` and
    `--display-landscape` can be used to specify a different format.

    Examples:

        Multi-layer import:

            vpype read input_file.svg [...]

        Single-layer import:

            vpype read --single-layer input_file.svg [...]

        Single-layer import with target layer:

            vpype read --single-layer --layer 3 input_file.svg [...]

        Multi-layer import with specified quantization and line simplification enabled:

            vpype read --quantization 0.01mm --simplify input_file.svg [...]

        Multi-layer import with cropping disabled:

            vpype read --no-crop input_file.svg [...]
""" width, height = display_size if display_landscape: width, height = height, width if single_layer: lc, width, height = read_svg( file, quantization=quantization, crop=not no_crop, simplify=simplify, parallel=parallel, default_width=width, default_height=height, ) document.add(lc, single_to_layer_id(layer, document)) document.extend_page_size((width, height)) else: if layer is not None: logging.warning("read: target layer is ignored in multi-layer mode") document.extend( read_multilayer_svg( file, quantization=quantization, crop=not no_crop, simplify=simplify, parallel=parallel, default_width=width, default_height=height, ) ) return document
def pixelart(document: vp.Document, image, mode, pen_width: float):
    """Plot pixel art.

    Two modes are available:

    - "big" creates a square spiral for each pixel
    - "line" creates single horizontal lines for contiguous pixels of the same color
    """
    # this should be dealt with by add_to_source() in a future release
    document.set_property(vp.METADATA_FIELD_SOURCE, pathlib.Path(image).absolute())
    document.add_to_sources(image)

    img = imageio.imread(image, pilmode="RGBA")
    colors = np.unique(img[:, :, 0:3][img[:, :, 3] == 255], axis=0)

    if mode == "big":
        for col_idx, color in enumerate(colors, start=1):
            indice_i, indice_j = np.nonzero(
                np.all(img[:, :, 0:3] == color, axis=2) & (img[:, :, 3] == 255)
            )

            lines = []
            for i, j in zip(indice_j, indice_i):
                line = np.array(PIXEL_TRAJECTORY) + i * PIXEL_OFFSET + j * PIXEL_OFFSET * 1j
                line *= pen_width
                lines.append(line)
            document.add(vp.LineCollection(lines), col_idx)
    elif mode == "line":
        for row_idx, line in enumerate(img):
            start = 0
            while True:
                # skip transparent pixels
                while start < len(line) and line[start, 3] != 255:
                    start += 1

                # loop ending condition
                if start == len(line):
                    break

                # find the end of the current pixel run
                end = start
                while (
                    end < len(line)
                    and np.all(line[end, 0:3] == line[start, 0:3])
                    and line[end, 3] == 255
                ):
                    end += 1

                # add a single segment for the run, in the layer matching its color
                layer_id = np.where(np.all(colors == line[start, 0:3], axis=1))[0][0] + 1
                segment = np.array([row_idx * 1j + (start - 0.1), row_idx * 1j + (end - 0.9)])
                segment *= pen_width
                document.add(vp.LineCollection([segment]), layer_id)

                # move to the next line
                start = end

    for col_idx, color in enumerate(colors, start=1):
        document.layers[col_idx].set_property(vp.METADATA_FIELD_COLOR, vp.Color(*color))
        document.layers[col_idx].set_property(vp.METADATA_FIELD_PEN_WIDTH, pen_width)

    return document
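# The "line" mode of `pixelart` boils down to run-length detection along each image
# row: consecutive opaque pixels of the same color become a single horizontal
# segment. A standalone illustration of that scan on a made-up RGBA row follows
# (the pixel values are hypothetical, not taken from a real image).
import numpy as np

row = np.array(
    [
        [255, 0, 0, 255],  # red, opaque
        [255, 0, 0, 255],  # red, opaque -> same run as the previous pixel
        [0, 0, 255, 255],  # blue, opaque -> new run
        [0, 0, 0, 0],      # transparent -> skipped
        [0, 0, 255, 255],  # blue, opaque -> new run (not contiguous with the first blue run)
    ]
)

runs = []
start = 0
while start < len(row):
    if row[start, 3] != 255:
        start += 1
        continue
    end = start
    while end < len(row) and row[end, 3] == 255 and np.all(row[end, 0:3] == row[start, 0:3]):
        end += 1
    runs.append((start, end))
    start = end

assert runs == [(0, 2), (2, 3), (4, 5)]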