def process(self):
    """Assign a custom attribute to every incoming Blender object.

    First removes attributes created on a previous update whenever the
    object or the attribute name has changed, then (re)creates and fills
    the attribute on each object and passes the objects through.
    """
    objects = self.inputs['Object'].sv_get(deepcopy=False, default=[])
    attr_name = self.inputs['Attr name'].sv_get(deepcopy=False, default=[])

    # Pick the value socket matching the attribute data type.
    if self.value_type in ('FLOAT', 'INT', 'BOOLEAN'):
        sock_name = 'Value'
    elif self.value_type in ('FLOAT_VECTOR', 'FLOAT2'):
        sock_name = 'Vector'
    else:
        sock_name = 'Color'
    values = self.inputs[sock_name].sv_get(deepcopy=False, default=[])

    # First step: drop attributes created by the previous update when the
    # object or the attribute name no longer matches.
    names = chain(flat_iter(attr_name), cycle([None]))
    padded_objects = chain(objects, cycle([None]))
    for prev, obj, name in zip(self.last_objects, padded_objects, names):
        if prev.obj != obj or prev.attr != name:
            prev.remove_attr()
    correct_collection_length(self.last_objects, len(objects))

    # Assign the new attribute to each object.
    obj_num = len(objects)
    names = fixed_iter(flat_iter(attr_name), obj_num)
    vals = fixed_iter(values, obj_num)
    for prev, obj, name, val in zip(self.last_objects, objects, names, vals):
        prev.add_attr(obj, name)
        set_attribute_node(obj=obj, values=val, attr_name=name,
                           domain=self.domain, value_type=self.value_type)

    self.outputs['Object'].sv_set(objects)
def process(self):
    """Search items inside (nested) data lists and output their indexes.

    Missing items are reported as ``-1``.  When ``self.use_range`` is on
    the search is limited to the [start, end) interval per item.
    """
    data = self.inputs['data'].sv_get(deepcopy=False, default=[])
    items = self.inputs['Item'].sv_get(deepcopy=False, default=[])
    if not (data and items):
        self.outputs['Index'].sv_set([])
        return
    starts = self.inputs['Start index'].sv_get(deepcopy=False)
    ends = self.inputs['End index'].sv_get(deepcopy=False)

    # Flatten data to the requested nesting level and match list lengths.
    unpacked = list(list_level_iter(data, self.level))
    obj_num = max(len(unpacked), len(items), len(starts), len(ends))

    out = []
    zipped = zip(repeat_last(unpacked),
                 fixed_iter(items, obj_num),
                 fixed_iter(starts, obj_num),
                 fixed_iter(ends, obj_num))
    for seq, obj_items, obj_starts, obj_ends in zipped:
        found = []
        for item, start, end in zip(obj_items,
                                    repeat_last(obj_starts),
                                    repeat_last(obj_ends)):
            range_args = (start, end) if self.use_range else ()
            try:
                found.append(seq.index(item, *range_args))
            except ValueError:  # item is absent from the sequence
                found.append(-1)
        out.append(found)
    self.outputs['Index'].sv_set(out)
def process(self):
    """For each input value find the closest value(s) in the Data list.

    Two modes (``self.mode``):
      * ``'range'`` -- for every value output all data entries lying within
        ``value +/- range`` together with their indexes.
      * otherwise   -- output the single nearest data entry per value.

    Results go to the 'Closest values' and 'Closest indexes' sockets.
    """
    vals = self.inputs['Values'].sv_get(deepcopy=False)
    data = self.inputs['Data'].sv_get(deepcopy=False, default=[])
    _range = self.inputs['Range'].sv_get(deepcopy=False)
    # Iterate over the longest input; shorter ones are padded by fixed_iter.
    obj_n = max(len(vals), len(data), len(_range))
    out = []
    ind_out = []
    for v, d, r in zip(fixed_iter(vals, obj_n, []),
                       fixed_iter(data, obj_n, []),
                       fixed_iter(_range, obj_n, [])):
        if not all((v, d, r)):
            break  # stop at the first empty object -- nothing to search in
        # +/-inf sentinels keep searchsorted results away from the array
        # bounds, so the left/right neighbor indexing below stays in range.
        extended_data = np.array(d + [-np.inf, np.inf])
        sorting_indexes = np.argsort(extended_data)
        if self.mode == 'range':
            len_input = max([len(v), len(r)])
            values = np.fromiter(repeat_last(v), float, count=len_input)
            range_values = np.fromiter(repeat_last(r), float, count=len_input)
            # Window [value - range, value + range]: l_indexes/r_indexes
            # bound the slice of sorted data inside the window.
            l_values = values - range_values
            l_indexes = np.searchsorted(extended_data, l_values, side='right', sorter=sorting_indexes)
            r_values = values + range_values
            r_indexes = np.searchsorted(extended_data, r_values, side='right', sorter=sorting_indexes)
            # Map sorted positions back to original data indexes.
            closest_indexes = [[sorting_indexes[i] for i in range(l, r)]
                               for l, r in zip(l_indexes, r_indexes)]
            ind_out.append(closest_indexes)
            out.append([extended_data[ci].tolist() for ci in closest_indexes])
        else:
            # Nearest-neighbor mode: compare distance to the sorted
            # neighbors on both sides and keep the closer one.
            right_indexes = np.searchsorted(extended_data, v, sorter=sorting_indexes)
            left_indexes = right_indexes - 1
            left_distance = v - extended_data[sorting_indexes[left_indexes]]
            left_distance = np.where(left_distance < 0, -left_distance, left_distance)
            right_distance = extended_data[sorting_indexes[right_indexes]] - v
            right_distance = np.where(right_distance < 0, -right_distance, right_distance)
            result_indexes = np.where(left_distance < right_distance, left_indexes, right_indexes)
            ind_out.append(sorting_indexes[result_indexes].tolist())
            out.append(extended_data[ind_out[-1]].tolist())
    self.outputs['Closest values'].sv_set(out)
    self.outputs['Closest indexes'].sv_set(ind_out)
def join_data(self, other: MeshElements):
    """Merge ``other``'s element data into this one, attributes included.

    Each attribute list is padded/cycled to its owner's element count
    before concatenation so every key covers the joined data.
    """
    if self._attrs or other._attrs:
        own_len, other_len = len(self.data), len(other.data)
        for name in self._attrs.keys() | other._attrs.keys():
            merged = chain(fixed_iter(self._attrs.get(name), own_len),
                           fixed_iter(other._attrs.get(name), other_len))
            self._attrs[name] = list(merged)

    if isinstance(self.data, list):
        self.data = self.data + other.data
    elif isinstance(self.data, np.ndarray):
        self.data = np.concatenate([self.data, other.data])
    else:
        raise TypeError(f'Type "{type(self.data).__name__}" of "data" attribute does not supported')
def process(self):
    """Link incoming objects into the given collections.

    Previous links made by the node are undone first, then the current
    object/collection pairs are linked and the objects passed through.
    """
    objects = self.inputs['Object'].sv_get(deepcopy=False, default=[])
    collections = self.inputs['Collection'].sv_get(deepcopy=False, default=[])

    # First step is to undo previous changes made by the node if necessary.
    # NOTE(review): the object/collection values are unused here, but the
    # zip still bounds how many unlink calls happen -- confirm fixed_iter's
    # behavior on empty input before simplifying this loop.
    last_num = len(self.last_objects)
    padded_objects = chain(objects, cycle([None]))
    for prev, _obj, _col in zip(self.last_objects, padded_objects,
                                fixed_iter(collections, last_num)):
        prev.unlink()
    correct_collection_length(self.last_objects, len(objects))

    # Save and assign new collections.
    for prev, obj, col in zip(self.last_objects, objects,
                              fixed_iter(collections, len(objects))):
        prev.link(obj, col)

    self.outputs['Object'].sv_set(objects)
def set_attribute_node(*, obj=None, values=None, attr_name=None, domain='POINT', value_type='FLOAT'):
    """Create (or recreate) and fill a mesh attribute on a Blender object.

    :param obj: Blender object with mesh data; falsy obj is a no-op
    :param values: flat list of values / vectors / colors to assign
    :param attr_name: name of the attribute to create or update
    :param domain: 'POINT', 'EDGE', 'CORNER' or 'FACE'
    :param value_type: one of FLOAT, INT, BOOLEAN, FLOAT_VECTOR,
        FLOAT_COLOR, FLOAT2
    :return: ``{'obj': obj}`` (``{'obj': None}`` when no object is given)
    :raises TypeError: on unsupported ``domain`` or ``value_type``
    """
    if not obj:
        return {'obj': None}
    out = {'obj': obj}
    if not all([values, attr_name]):
        return out

    # Recreate the attribute when its type or domain changed.
    attributes = obj.data.attributes
    attr = attributes.get(attr_name)
    if attr is not None and (attr.data_type != value_type or attr.domain != domain):
        attributes.remove(attr)
        attr = None
    if attr is None:
        attr = attributes.new(attr_name, value_type, domain)

    # Number of attribute elements is dictated by the domain.
    if domain == 'POINT':
        amount = len(obj.data.vertices)
    elif domain == 'EDGE':
        amount = len(obj.data.edges)
    elif domain == 'CORNER':
        amount = len(obj.data.loops)
    elif domain == 'FACE':
        amount = len(obj.data.polygons)
    else:
        raise TypeError(f'Unsupported domain {domain}')

    # Flatten values into the shape foreach_set expects and pick the
    # attribute field name for the data type.
    if value_type in ('FLOAT', 'INT', 'BOOLEAN'):
        data = list(fixed_iter(values, amount))
        field = 'value'
    elif value_type in ('FLOAT_VECTOR', 'FLOAT_COLOR'):
        data = [co for vec in fixed_iter(values, amount) for co in vec]
        field = 'vector' if value_type == 'FLOAT_VECTOR' else 'color'
    elif value_type == 'FLOAT2':
        # 2D attribute: only the first two components are used.
        data = [co for vec in fixed_iter(values, amount) for co in vec[:2]]
        field = 'vector'
    else:
        raise TypeError(f'Unsupported type {value_type}')

    attr.data.foreach_set(field, data)
    attr.data.update()
    return out
def process(self):
    """Convert masks between vertex/edge/face domains per object."""
    if not any(sock.is_linked for sock in self.outputs):
        return

    verts = self.inputs['Vertices'].sv_get(deepcopy=False, default=[[]])
    edges = self.inputs['Edges'].sv_get(deepcopy=False, default=[[]])
    faces = self.inputs['Faces'].sv_get(deepcopy=False, default=[[]])
    v_mask = self.inputs['VerticesMask'].sv_get(deepcopy=False, default=[[True]])
    e_mask = self.inputs['EdgesMask'].sv_get(deepcopy=False, default=[[True]])
    f_mask = self.inputs['FacesMask'].sv_get(deepcopy=False, default=[[True]])

    # Match all inputs to the longest one and convert object by object.
    data = [verts, edges, faces, v_mask, e_mask, f_mask]
    obj_n = max(map(len, data))
    results = [
        mask_converter_node(v, e, f, vm, em, fm, self.mode, self.include_partial)
        for v, e, f, vm, em, fm in zip(*(fixed_iter(d, obj_n, None) for d in data))
    ]

    vm, em, fm = list(zip(*results))
    self.outputs['VerticesMask'].sv_set(vm)
    self.outputs['EdgesMask'].sv_set(em)
    self.outputs['FacesMask'].sv_set(fm)
def split_by_vertices(verts, edges=None, faces=None, selected_verts: List[bool] = None, face_data=None):
    """Split faces apart at selected vertices.

    Selected vertices are duplicated for every face that uses them so
    those faces no longer share them; unselected vertices remain shared.
    Input edges are ignored for now -- output edges are rebuilt from the
    output faces.
    """
    edges = edges or []
    faces = faces or []
    if not selected_verts:
        selected_verts = [True] * len(verts)
    elif len(selected_verts) != len(verts):
        # Pad/cycle the mask to the vertex count.
        selected_verts = list(fixed_iter(selected_verts, len(verts)))

    new_verts = []
    new_faces = []
    reused: Dict[int, int] = {}  # old vertex index -> shared new index
    for face in faces:
        face_out = []
        for vi in face:
            if selected_verts[vi]:
                # Duplicate the vertex for each face occurrence.
                new_verts.append(verts[vi])
                face_out.append(len(new_verts) - 1)
            elif vi in reused:
                face_out.append(reused[vi])
            else:
                new_verts.append(verts[vi])
                reused[vi] = len(new_verts) - 1
                face_out.append(reused[vi])
        new_faces.append(face_out)

    new_edges = polygons_to_edges_np([new_faces], unique_edges=True)[0]
    return new_verts, new_edges, new_faces, face_data
def mask_converter_node(vertices=None, edges=None, faces=None, vertices_mask=None, edges_mask=None, faces_mask=None, mode='BY_VERTEX', include_partial=False): vertices = vertices or [] edges = edges or [] faces = faces or [] if mode == 'BY_VERTEX': len_verts = len(vertices) len_edges_verts = (max(i for e in edges for i in e) + 1) if edges else 0 len_faces_verts = (max(i for f in faces for i in f) + 1) if faces else 0 len_verts = max(len_verts, len_edges_verts, len_faces_verts) vertices_mask = list(fixed_iter(vertices_mask, len_verts)) out_edges_mask, out_faces_mask = by_vertex(vertices_mask, edges, faces, include_partial) out_verts_mask = vertices_mask elif mode == 'BY_EDGE': edges_mask = list(fixed_iter(edges_mask, len(edges))) out_verts_mask, out_faces_mask = by_edge(edges_mask, vertices, edges, faces, include_partial) out_edges_mask = edges_mask elif mode == 'BY_FACE': faces_mask = list(fixed_iter(faces_mask, len(faces))) out_verts_mask, out_edges_mask = by_face(faces_mask, vertices, edges, faces, include_partial) out_faces_mask = faces_mask else: raise ValueError("Unknown mode: " + mode) return out_verts_mask, out_edges_mask, out_faces_mask
def process(self):
    """Build boolean masks from index lists.

    Mask length comes from 'Mask size', from the 'Data masking' input, or
    from its full shape, depending on the node settings.
    """
    # Upgrade sockets of old node versions to the new names.
    if 'mask size' in self.inputs:
        self.inputs['mask size'].name = 'Mask size'
    if 'data to mask' in self.inputs:
        self.inputs['data to mask'].name = 'Data masking'
    if 'mask' in self.outputs:
        self.outputs['mask'].name = 'Mask'

    index = self.inputs["Index"].sv_get(deepcopy=False, default=[])
    mask_size = self.inputs['Mask size'].sv_get(deepcopy=False, default=[None])
    data_to_mask = self.inputs['Data masking'].sv_get(
        deepcopy=False, default=[] if self.data_to_mask else [None])

    obj_num = max(map(len, (index, mask_size, data_to_mask)))
    masks = []
    for ind, size, data in zip(fixed_iter(index, obj_num),
                               fixed_iter(mask_size, obj_num),
                               fixed_iter(data_to_mask, obj_num)):
        if not self.data_to_mask:
            length = size[0] if size is not None else 0
            mask = np.zeros(int(length), dtype=bool)
        elif self.is_topo_mask:
            mask = np.zeros(len(data), dtype=bool)
        else:
            # Inconsistent mode with Sverchok data structure,
            # should be reconsidered in MK2 version.
            mask = np.zeros_like(data, dtype=bool)
        mask[ind] = True
        masks.append(mask)

    if self.output_numpy:
        self.outputs['Mask'].sv_set(masks)
    else:
        self.outputs['Mask'].sv_set([m.tolist() for m in masks])
def split_mesh_elements_node(vertices=None, edges=None, faces=None,
                             face_data=None, mask=None,
                             mask_mode='BY_VERTEX', split_type='VERTS'):
    """Split a mesh into separate parts according to a mask.

    :param vertices: mesh vertices; empty/None short-circuits to empty output
    :param edges: mesh edges
    :param faces: mesh faces
    :param face_data: optional per-face data, padded/cycled to the face count
    :param mask: selection mask given in the ``mask_mode`` domain
    :param mask_mode: 'BY_VERTEX', 'BY_EDGE' or 'BY_FACE' -- domain of ``mask``
    :param split_type: 'VERTS' or 'EDGES' -- which elements the split duplicates
    :return: tuple (vertices, edges, faces, face_data) of the split mesh
    :raises TypeError: when ``split_type`` is unknown
    """
    if not vertices:
        return [], [], [], []
    edges = edges or []
    faces = faces or []
    face_data = list(fixed_iter(face_data, len(faces))) if face_data else None
    mask = mask or []

    if split_type == 'VERTS':
        if mask_mode != 'BY_VERTEX':
            # Convert the mask into the vertex domain first.
            mask, _, _ = mask_converter_node(
                vertices, edges, faces,
                edges_mask=mask if mask_mode == 'BY_EDGE' else None,
                faces_mask=mask if mask_mode == 'BY_FACE' else None,
                mode=mask_mode)
        vs, es, fs, fds = split_by_vertices(vertices, edges, faces, mask, face_data)
    elif split_type == 'EDGES':
        if mask_mode != 'BY_EDGE':
            # Convert the mask into the edge domain first.
            _, mask, _ = mask_converter_node(
                vertices, edges, faces,
                vertices_mask=mask if mask_mode == 'BY_VERTEX' else None,
                faces_mask=mask if mask_mode == 'BY_FACE' else None,
                mode=mask_mode)
        vs, es, fs, fds = split_by_edges(vertices, edges, faces, face_data, mask)
    else:
        # Bug fix: the error message previously misspelled the parameter
        # name as "split_typ".
        raise TypeError(f'Unknown "split_type" mode = {split_type}')
    return vs, es, fs, fds
def process(self):
    """Split each incoming mesh by its mask and output the pieces."""
    verts = self.inputs['Vertices'].sv_get(deepcopy=False, default=[])
    edges = self.inputs['Edges'].sv_get(deepcopy=False, default=[])
    faces = self.inputs['Faces'].sv_get(deepcopy=False, default=[])
    face_data = self.inputs['FaceData'].sv_get(deepcopy=False, default=[])
    mask = self.inputs['Mask'].sv_get(deepcopy=False, default=[])

    # Match all inputs to the longest one and split object by object.
    data = [verts, edges, faces, face_data, mask]
    obj_n = max(map(len, data))
    results = [
        split_mesh_elements_node(v, e, f, fd, m, self.mask_mode, self.split_type)
        for v, e, f, fd, m in zip(*(fixed_iter(d, obj_n, None) for d in data))
    ]

    vs, es, fs, fds = list(zip(*results)) if results else ([], [], [], [])
    self.outputs['Vertices'].sv_set(vs)
    self.outputs['Edges'].sv_set(es)
    self.outputs['Faces'].sv_set(fs)
    self.outputs['FaceData'].sv_set(fds)
def vec(arr):
    # Pad/cycle `arr` to `obj_n` elements, using an empty list as filler.
    # NOTE(review): relies on `obj_n` and `fixed_iter` from the enclosing
    # scope (not visible in this chunk) -- this helper only makes sense
    # inside its defining function.
    return fixed_iter(arr, obj_n, [])