def _updateModel(self):
    """Populate the model with one item per settings file that defines an "id".

    Walks every settings resource directory, parses each JSON file and appends
    an item holding its id, name, wizard pages and file name. Files without an
    "id" are treated as abstract definitions and skipped. When a file lacks
    "add_pages", the "inherits" chain is followed until pages are found.
    """
    for directory in Resources.getAllPathsForType(Resources.Settings):
        if not os.path.isdir(directory):
            continue
        for file_name in os.listdir(directory):
            path = os.path.join(directory, file_name)
            if os.path.isdir(path):
                continue
            with open(path, "rt", -1, "utf-8") as f:
                try:
                    data = json.load(f)
                except ValueError as e:
                    Logger.log("e", "Error when loading file {0}: {1}".format(file_name, e))
                    continue
            if not data.get("id"):
                continue  # Any model without an ID is seen as an 'abstract'
            _id = data.get("id")
            _name = data.get("name")
            _pages = data.get("add_pages")
            # Follow the "inherits" chain until a definition provides pages.
            # NOTE(review): assumes the chain terminates and every "inherits"
            # target exists in the same directory — TODO confirm.
            while _pages is None:
                search_path = os.path.join(directory, data.get("inherits"))
                # Use a context manager so the inherited file handle is not leaked.
                with open(search_path, "rt", -1, "utf-8") as inherited_file:
                    data = json.load(inherited_file)
                _pages = data.get("add_pages")
            _pages = self._createPagesModel(_pages)
            self.appendItem({"id": _id, "name": _name, "pages": _pages, "file": file_name})
def findNodePlacement(self, node, offset_shape_arr, hull_shape_arr, step = 1):
    """Find a spot on the build plate for a copy of *node*.

    :param node: the scene node to place (a deep copy is positioned, not the original).
    :param offset_shape_arr: shape array used to search for a free spot.
    :param hull_shape_arr: shape array that is marked as occupied once placed.
    :param step: search step size passed on to bestSpot.
    :return: tuple (new_node, found_spot) — the positioned copy and whether a spot was found.
    """
    new_node = copy.deepcopy(node)
    best_spot = self.bestSpot(offset_shape_arr, start_prio = self._last_priority, step = step)
    x, y = best_spot.x, best_spot.y

    # Save the last priority so the next search resumes from here.
    self._last_priority = best_spot.priority

    # Ensure that the object is above the build platform.
    new_node.removeDecorator(ZOffsetDecorator.ZOffsetDecorator)
    if new_node.getBoundingBox():
        center_y = new_node.getWorldPosition().y - new_node.getBoundingBox().bottom
    else:
        center_y = 0

    if x is not None:  # We could find a place.
        new_node.setPosition(Vector(x, center_y, y))
        found_spot = True
        self.place(x, y, hull_shape_arr)  # Place the object in the arranger.
    else:
        # Fixed: a stray trailing comma previously turned this statement into a tuple expression.
        Logger.log("d", "Could not find spot!")
        found_spot = False
        new_node.setPosition(Vector(200, center_y, 100))
    return new_node, found_spot
def _materialWarningMessageAction(self, message, button):
    """Handle a button press on the material-diameter warning message.

    "Undo" restores the previous diameter in the material's metadata and hides
    the message; any other action is logged as unknown.
    """
    if button != "Undo":
        Logger.log("w", "Unknown button action for material diameter warning message: {action}".format(action = button))
        return
    manager = ContainerManager.getInstance()
    warning = self._material_diameter_warning_message
    manager.setContainerMetaDataEntry(warning.material_id, "properties/diameter", warning.previous_diameter)
    message.hide()
def restore(self) -> bool:
    """Restore this backup archive into the current data storage directory.

    :return: True when the archive was extracted successfully, False when a
        precondition fails (missing data/metadata or version mismatch).
    """
    if not self.zip_file or not self.meta_data or not self.meta_data.get("cura_release", None):
        # We cannot restore without the minimum required information.
        Logger.log("w", "Tried to restore a Cura backup without having proper data or meta data.")
        self._showMessage(
            self.catalog.i18nc("@info:backup_failed",
                               "Tried to restore a Cura backup without having proper data or meta data."))
        return False

    current_version = CuraApplication.getInstance().getVersion()
    version_to_restore = self.meta_data.get("cura_release", "master")
    if current_version != version_to_restore:
        # Cannot restore version older or newer than current because settings might have changed.
        # Restoring this will cause a lot of issues so we don't allow this for now.
        self._showMessage(
            self.catalog.i18nc("@info:backup_failed",
                               "Tried to restore a Cura backup that does not match your current version."))
        return False

    version_data_dir = Resources.getDataStoragePath()
    archive = ZipFile(io.BytesIO(self.zip_file), "r")
    extracted = self._extractArchive(archive, version_data_dir)

    # Under Linux, preferences are stored elsewhere, so we copy the file to there.
    if Platform.isLinux():
        preferences_file_name = CuraApplication.getInstance().getApplicationName()
        preferences_file = Resources.getPath(Resources.Preferences, "{}.cfg".format(preferences_file_name))
        backup_preferences_file = os.path.join(version_data_dir, "{}.cfg".format(preferences_file_name))
        Logger.log("d", "Moving preferences file from %s to %s", backup_preferences_file, preferences_file)
        shutil.move(backup_preferences_file, preferences_file)

    return extracted
def read(self, file_name):
    """Read quality profiles serialized into g-code comment lines.

    :param file_name: path of a ``.gcode`` file; other extensions return None.
    :return: a list of quality profiles, or None when the file cannot be read.
    """
    if file_name.split(".")[-1] != "gcode":
        return None

    prefix = ";SETTING_" + str(GCodeProfileReader.version) + " "
    prefix_length = len(prefix)

    # Loading all settings from the file.
    # They are all at the end, but Python has no reverse seek any more since Python3.
    # TODO: Consider moving settings to the start?
    # Collect fragments in a list and join once — avoids quadratic string concatenation.
    fragments = []
    try:
        with open(file_name) as f:
            for line in f:
                if line.startswith(prefix):
                    # Remove the prefix and the trailing newline from the line.
                    fragments.append(line[prefix_length:-1])
    except IOError as e:
        Logger.log("e", "Unable to open file %s for reading: %s", file_name, str(e))
        return None

    serialized = unescapeGcodeComment("".join(fragments))
    Logger.log("i", "Serialized the following from %s: %s" % (file_name, repr(serialized)))

    json_data = json.loads(serialized)
    profile_strings = [json_data["global_quality"]]
    profile_strings.extend(json_data.get("extruder_quality", []))
    return [readQualityProfileFromString(profile_string) for profile_string in profile_strings]
def addInputDevice(self, device):
    """Register an input device under its plugin id; duplicates are rejected with a warning."""
    name = device.getPluginId()
    if name in self._input_devices:
        Logger.log("w", "%s was already added to input device list. Unable to add it again." % name)
        return
    self._input_devices[name] = device
    device.event.connect(self.event)
def read(self, file_name, **kwargs):
    """Try each registered mesh reader on *file_name* and return the first result.

    :param file_name: path of the file to read.
    :param kwargs: ``center=True`` (default) re-centers the result on the build plate.
    :return: the scene node produced by a reader, or None when no reader succeeds.
    """
    try:
        # The reader id was never used; iterate the values directly.
        for reader in self._mesh_readers.values():
            result = reader.read(file_name)
            if result is not None:
                if kwargs.get("center", True):
                    # If the result has a mesh and no children it needs to be centered
                    if result.getMeshData() and len(result.getChildren()) == 0:
                        extents = result.getMeshData().getExtents()
                        move_vector = Vector()
                        move_vector.setX(extents.center.x)
                        move_vector.setY(extents.center.y)
                        # Ensure that bottom is on 0 (above plate)
                        move_vector.setZ(extents.center.z)
                        result.setCenterPosition(move_vector)
                        if result.getMeshData().getExtents().bottom != 0:
                            result.translate(Vector(0, -result.getMeshData().getExtents().bottom, 0))
                    # Move all the meshes of children so that toolhandles are shown in the correct place.
                    for node in result.getChildren():
                        if node.getMeshData():
                            extents = node.getMeshData().getExtents()
                            m = Matrix()
                            m.translate(-extents.center)
                            node.setMeshData(node.getMeshData().getTransformed(m))
                            node.translate(extents.center)
                return result
    except OSError as e:
        Logger.log("e", str(e))
    Logger.log("w", "Unable to read file %s", file_name)
    return None  # unable to read
def _sendNextGcodeLine(self):
    """Send the next line of the loaded g-code to the printer with a Marlin line number and checksum.

    Strips comments, substitutes M105 for empty/pause lines, tracks the current
    Z height, and updates the send progress.
    """
    if self._gcode_position >= len(self._gcode):
        return
    if self._gcode_position == 100:
        # Remember when line 100 went out; presumably used for time estimation elsewhere.
        self._print_start_time_100 = time.time()
    line = self._gcode[self._gcode_position]

    if ";" in line:
        line = line[:line.find(";")]  # Strip the g-code comment.
    line = line.strip()

    # Don't send empty lines. But we do have to send something, so send
    # m105 instead.
    # Don't send the M0 or M1 to the machine, as M0 and M1 are handled as
    # an LCD menu pause.
    if line == "" or line == "M0" or line == "M1":
        line = "M105"
    try:
        if ("G0" in line or "G1" in line) and "Z" in line:
            # Raw string: "\." in a plain literal is an invalid escape sequence in modern Python.
            z = float(re.search(r"Z([0-9\.]*)", line).group(1))
            if self._current_z != z:
                self._current_z = z
    except Exception as e:
        Logger.log("e", "Unexpected error with printer connection, could not parse current Z: %s: %s" % (e, line))
        self._setErrorState("Unexpected error: %s" % e)
    # Marlin checksum: XOR of every byte of "N<line number><command>".
    checksum = functools.reduce(lambda x, y: x ^ y, map(ord, "N%d%s" % (self._gcode_position, line)))

    self._sendCommand("N%d%s*%d" % (self._gcode_position, line, checksum))
    self._gcode_position += 1
    self.setProgress((self._gcode_position / len(self._gcode)) * 100)
    self.progressChanged.emit()
def addTool(self, tool):
    """Register a tool under its plugin id and emit toolsChanged; duplicates are rejected."""
    name = tool.getPluginId()
    if name in self._tools:
        Logger.log("w", "%s was already added to tool list. Unable to add it again.", name)
        return
    self._tools[name] = tool
    self.toolsChanged.emit()
def findDefaultVariant(self) -> Optional[ContainerInterface]:
    """Return the default variant container for this stack, or None.

    Machines without variants never get one. Otherwise any variant is chosen
    first and then overridden by the definition's "preferred_variant" when
    that preference resolves to an existing variant.
    """
    definition = self._getMachineDefinition()
    # has_variants can be overridden in other containers and stacks.
    # In the case of UM2, it is overridden in the GlobalStack.
    if not self.getMetaDataEntry("has_variants"):
        # If the machine does not use variants, we should never set a variant.
        return None

    registry = ContainerRegistry.getInstance()
    definition_id = self._findInstanceContainerDefinitionId(definition)

    # First add any variant. Later, overwrite with preference if the preference is valid.
    candidates = registry.findInstanceContainers(definition = definition_id, type = "variant")
    variant = candidates[0] if candidates else None

    preferred_variant_id = definition.getMetaDataEntry("preferred_variant")
    if preferred_variant_id:
        preferred = registry.findInstanceContainers(id = preferred_variant_id, definition = definition_id, type = "variant")
        if preferred:
            variant = preferred[0]
        else:
            Logger.log("w", "The preferred variant \"{variant}\" of stack {stack} does not exist or is not a variant.", variant = preferred_variant_id, stack = self.id)
            # And leave it at the default variant.

    if variant:
        return variant

    Logger.log("w", "Could not find a valid default variant for stack {stack}", stack = self.id)
    return None
def getProperty(self, key: str, property_name: str, context: Optional[PropertyEvaluationContext] = None) -> Any:
    """Get a setting property, handling "resolve" and "limit_to_extruder".

    :param key: the setting key to look up.
    :param property_name: the property of the setting (e.g. "value").
    :param context: evaluation context; created here when not supplied.
    :return: the property value, or None when the setting is unknown.
    """
    if not self.definition.findDefinitions(key = key):
        return None

    # Handle the "resolve" property.
    if self._shouldResolve(key, property_name):
        # Mark the key as being resolved so _shouldResolve does not recurse on it.
        self._resolving_settings.add(key)
        resolve = super().getProperty(key, "resolve", context)
        self._resolving_settings.remove(key)
        if resolve is not None:
            return resolve

    if context is None:
        context = PropertyEvaluationContext()
    # Push this stack on the context; every return below must pop it again.
    context.pushContainer(self)

    # Handle the "limit_to_extruder" property.
    limit_to_extruder = super().getProperty(key, "limit_to_extruder", context)
    if limit_to_extruder is not None:
        limit_to_extruder = str(limit_to_extruder)
    # "-1" means not limited to a specific extruder.
    if limit_to_extruder is not None and limit_to_extruder != "-1" and limit_to_extruder in self._extruders:
        if super().getProperty(key, "settable_per_extruder", context):
            result = self._extruders[str(limit_to_extruder)].getProperty(key, property_name, context)
            if result is not None:
                context.popContainer()
                return result
        else:
            Logger.log("e", "Setting {setting} has limit_to_extruder but is not settable per extruder!", setting = key)

    result = super().getProperty(key, property_name, context)
    context.popContainer()
    return result
def _onSocketStateChanged(self, state):
    """React to backend socket state changes: start the engine when listening, announce when connected."""
    if state == SignalSocket.ConnectedState:
        Logger.log("d", "Backend connected on port %s", self._port)
        self.backendConnected.emit()
        return
    if state == SignalSocket.ListeningState:
        # An externally started backend (for debugging) must not be launched twice.
        if not Application.getInstance().getCommandLineOption("external-backend", False):
            self.startEngine()
def importProfile(self, file_name):
    """Import one or more quality profiles from *file_name* via the profile-reader plugins.

    :param file_name: path to the profile file; falsy values are rejected.
    :return: a dict with "status" ("ok"/"error") and a localized "message".
    """
    if not file_name:
        return {"status": "error",
                "message": catalog.i18nc("@info:status", "Failed to import profile from <filename>{0}</filename>: <message>{1}</message>", file_name, "Invalid path")}

    plugin_registry = PluginRegistry.getInstance()
    for plugin_id, meta_data in self._getIOPlugins("profile_reader"):
        profile_reader = plugin_registry.getPluginObject(plugin_id)
        try:
            profile_or_list = profile_reader.read(file_name)  # Try to open the file with the profile reader.
        except Exception as e:
            # Note that this will fail quickly. That is, if any profile reader throws an exception,
            # it will stop reading. It will only continue reading if the reader returned None.
            Logger.log("e", "Failed to import profile from %s: %s", file_name, str(e))
            return {"status": "error",
                    "message": catalog.i18nc("@info:status", "Failed to import profile from <filename>{0}</filename>: <message>{1}</message>", file_name, str(e))}

        if profile_or_list:  # Success!
            name_seed = os.path.splitext(os.path.basename(file_name))[0]
            # isinstance instead of an exact type() comparison — also accepts list subclasses.
            if not isinstance(profile_or_list, list):
                profile = profile_or_list
                self._configureProfile(profile, name_seed)
                return {"status": "ok",
                        "message": catalog.i18nc("@info:status", "Successfully imported profile {0}", profile.getName())}

            for profile in profile_or_list:
                self._configureProfile(profile, name_seed)

            if len(profile_or_list) == 1:
                return {"status": "ok",
                        "message": catalog.i18nc("@info:status", "Successfully imported profile {0}", profile_or_list[0].getName())}
            profile_names = ", ".join([profile.getName() for profile in profile_or_list])
            return {"status": "ok",
                    "message": catalog.i18nc("@info:status", "Successfully imported profiles {0}", profile_names)}

    # If it hasn't returned by now, none of the plugins loaded the profile successfully.
    return {"status": "error",
            "message": catalog.i18nc("@info:status", "Profile {0} has an unknown file type.", file_name)}
def serialize(self):
    """Serialize this instance container to an INI-formatted string.

    :return: the serialized text, or "" when the container has no definition.
    """
    if not self._definition:
        Logger.log("e", "Tried to serialize an instance container without definition, this is not supported")
        return ""

    parser = configparser.ConfigParser(interpolation = None, empty_lines_in_values = False)
    parser["general"] = {
        "version": str(self.Version),
        "name": str(self._name),
        "definition": str(self._definition.getId()),
    }
    parser["metadata"] = {key: str(value) for key, value in self._metadata.items()}

    values = {}
    for key, instance in self._instances.items():
        try:
            values[key] = str(instance.value)
        except AttributeError:
            pass  # Instances without a value attribute are simply skipped.
    parser["values"] = values

    stream = io.StringIO()
    parser.write(stream)
    return stream.getvalue()
def __init__(self, extruder: int, line_types: numpy.ndarray, data: numpy.ndarray,
             line_widths: numpy.ndarray, line_thicknesses: numpy.ndarray,
             line_feedrates: numpy.ndarray) -> None:
    """Build a layer polygon from per-line engine data.

    :param extruder: index of the extruder that printed these lines.
    :param line_types: per-line type codes; unknown codes are replaced by NoneType.
    :param data: per-line vertex data.
    :param line_widths: per-line widths.
    :param line_thicknesses: per-line thicknesses.
    :param line_feedrates: per-line feedrates.
    """
    self._extruder = extruder
    self._types = line_types
    for i in range(len(self._types)):
        if self._types[i] >= self.__number_of_types:  # Got faulty line data from the engine.
            # NOTE(review): this logs the index, not the faulty type value — confirm intent.
            Logger.log("w", "Found an unknown line type: %s", i)
            self._types[i] = self.NoneType
    self._data = data
    self._line_widths = line_widths
    self._line_thicknesses = line_thicknesses
    self._line_feedrates = line_feedrates

    self._vertex_begin = 0
    self._vertex_end = 0
    self._index_begin = 0
    self._index_end = 0

    self._jump_mask = self.__jump_map[self._types]
    self._jump_count = numpy.sum(self._jump_mask)
    self._mesh_line_count = len(self._types) - self._jump_count
    self._vertex_count = self._mesh_line_count + numpy.sum(self._types[1:] == self._types[:-1])

    # Buffering the colors shouldn't be necessary as it is not
    # re-used and can save alot of memory usage.
    self._color_map = LayerPolygon.getColorMap()
    self._colors = self._color_map[self._types]  # type: numpy.ndarray

    # When type is used as index returns true if type == LayerPolygon.InfillType
    # or type == LayerPolygon.SkinType or type == LayerPolygon.SupportInfillType
    # Should be generated in better way, not hardcoded.
    # dtype=bool: the numpy.bool alias was removed in NumPy 1.24.
    self._isInfillOrSkinTypeMap = numpy.array([0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 1], dtype = bool)

    self._build_cache_line_mesh_mask = None  # type: Optional[numpy.ndarray]
    self._build_cache_needed_points = None  # type: Optional[numpy.ndarray]
def addMachineExtruders(self, machine_definition: DefinitionContainerInterface, machine_id: str) -> None:
    """Register the extruder trains for a machine, creating missing ones.

    :param machine_definition: definition of the machine whose extruders to add.
    :param machine_id: id of the machine (stack) the extruders belong to.

    Emits extrudersChanged(machine_id) when anything was added or re-linked.
    """
    changed = False
    machine_definition_id = machine_definition.getId()
    if machine_id not in self._extruder_trains:
        self._extruder_trains[machine_id] = { }
        changed = True

    container_registry = ContainerRegistry.getInstance()
    if container_registry:
        # Add the extruder trains that don't exist yet.
        for extruder_definition in container_registry.findDefinitionContainers(machine = machine_definition_id):
            position = extruder_definition.getMetaDataEntry("position", None)
            if not position:
                Logger.log("w", "Extruder definition %s specifies no position metadata entry.", extruder_definition.getId())
            if not container_registry.findContainerStacks(machine = machine_id, position = position):
                # Doesn't exist yet.
                self.createExtruderTrain(extruder_definition, machine_definition, position, machine_id)
                changed = True

        # Gets the extruder trains that we just created as well as any that still existed.
        extruder_trains = container_registry.findContainerStacks(type = "extruder_train", machine = machine_id)
        for extruder_train in extruder_trains:
            self._extruder_trains[machine_id][extruder_train.getMetaDataEntry("position")] = extruder_train

            # Regardless of what the next stack is, we have to set it again, because of signal routing.
            extruder_train.setNextStack(Application.getInstance().getGlobalContainerStack())
            changed = True
    if changed:
        self.extrudersChanged.emit(machine_id)
def deleteAll(self, only_selectable = True) -> None:
    """Remove all (selectable) mesh nodes from the scene in one grouped operation.

    :param only_selectable: when True, nodes that are not selectable are kept.
    """
    Logger.log("i", "Clearing scene")
    if not self.getController().getToolsEnabled():
        return

    nodes = []
    for node in DepthFirstIterator(self.getController().getScene().getRoot()):  # type: ignore #Ignore type error because iter() should get called automatically by Python syntax.
        if not isinstance(node, SceneNode):
            continue
        if (not node.getMeshData() and not node.callDecoration("getLayerData")) and not node.callDecoration("isGroup"):
            continue  # Node that doesnt have a mesh and is not a group.
        if only_selectable and not node.isSelectable():
            continue
        if not node.callDecoration("isSliceable") and not node.callDecoration("getLayerData") and not node.callDecoration("isGroup"):
            continue  # Only remove nodes that are selectable.
        if node.getParent() and cast(SceneNode, node.getParent()).callDecoration("isGroup"):
            continue  # Grouped nodes don't need resetting as their parent (the group) is resetted)
        nodes.append(node)
    if nodes:
        op = GroupedOperation()
        for node in nodes:
            op.addOperation(RemoveSceneNodeOperation(node))
            # Reset the print information
            self.getController().getScene().sceneChanged.emit(node)
        op.push()
        Selection.clear()
def _updateMaterialContainer(self, definition, variant_container = None, preferred_material_name = None):
    """Find the best-matching material container for a machine definition.

    :param definition: the machine definition to match against.
    :param variant_container: optional variant used to narrow the search.
    :param preferred_material_name: optional material name that takes precedence
        over the definition's "preferred_material" metadata.
    :return: a matching material container, or the empty material container.
    """
    if not definition.getMetaDataEntry("has_materials"):
        return self._empty_material_container

    search_criteria = { "type": "material" }

    if definition.getMetaDataEntry("has_machine_materials"):
        search_criteria["definition"] = self.getQualityDefinitionId(definition)
        if definition.getMetaDataEntry("has_variants") and variant_container:
            search_criteria["variant"] = self.getQualityVariantId(definition, variant_container)
    else:
        search_criteria["definition"] = "fdmprinter"

    if preferred_material_name:
        search_criteria["name"] = preferred_material_name
    else:
        preferred_material = definition.getMetaDataEntry("preferred_material")
        if preferred_material:
            search_criteria["id"] = preferred_material

    # A second, identical registry query used to follow this one; it was redundant and is removed.
    containers = UM.Settings.ContainerRegistry.getInstance().findInstanceContainers(**search_criteria)
    if containers:
        return containers[0]

    if "variant" in search_criteria or "id" in search_criteria:
        # If a material by this name can not be found, try a wider set of search criteria.
        search_criteria.pop("variant", None)
        search_criteria.pop("id", None)
        containers = UM.Settings.ContainerRegistry.getInstance().findInstanceContainers(**search_criteria)
        if containers:
            return containers[0]

    Logger.log("w", "Unable to find a material container with provided criteria, returning an empty one instead.")
    return self._empty_material_container
def _onSocketError(self, error):
    """Terminate the backend on any socket error and log the unexpected ones."""
    super()._onSocketError(error)
    self._terminate()
    # These error codes are part of normal operation and do not warrant a log entry.
    expected_errors = (Arcus.ErrorCode.BindFailedError, Arcus.ErrorCode.ConnectionResetError, Arcus.ErrorCode.Debug)
    if error.getErrorCode() not in expected_errors:
        Logger.log("e", "A socket error caused the connection to be reset")
def createMaterial(self) -> str:
    """Create a new custom material based on the machine's preferred material.

    :return: the id of the new material container, or "" when the base
        material group cannot be found.
    """
    from UM.i18n import i18nCatalog
    catalog = i18nCatalog("cura")
    # Ensure all settings are saved.
    self._application.saveSettings()

    machine_manager = self._application.getMachineManager()
    extruder_stack = machine_manager.activeStack
    machine_definition = self._application.getGlobalContainerStack().definition

    approximate_diameter = str(extruder_stack.approximateMaterialDiameter)
    preferred_material = machine_definition.getMetaDataEntry("preferred_material")
    base_material_id = preferred_material if preferred_material else "generic_pla"
    root_material_id = self.getRootMaterialIDForDiameter(base_material_id, approximate_diameter)

    material_group = self.getMaterialGroup(root_material_id)
    if not material_group:  # This should never happen.
        Logger.log("w", "Cannot get the material group of %s.", root_material_id)
        return ""

    # Create a new ID & container to hold the data.
    new_id = self._container_registry.uniqueName("custom_material")
    new_metadata = {
        "name": catalog.i18nc("@label", "Custom Material"),
        "brand": catalog.i18nc("@label", "Custom"),
        "GUID": str(uuid.uuid4()),
    }
    self.duplicateMaterial(material_group.root_material_node, new_base_id = new_id, new_metadata = new_metadata)
    return new_id
def setActivePreset(self, preset_id: str):
    """Switch the active setting-visibility preset.

    Leaving the "custom" preset stashes the current visibility so it can be
    restored when "custom" is selected again. Unknown ids are ignored.
    """
    if preset_id == self._active_preset_item["id"]:
        Logger.log("d", "Same setting visibility preset [%s] selected, do nothing.", preset_id)
        return

    preset_item = next((item for item in self.items if item["id"] == preset_id), None)
    if preset_item is None:
        Logger.log("w", "Tried to set active preset to unknown id [%s]", preset_id)
        return

    # Save the current visibility settings to custom when leaving the custom preset.
    if self._active_preset_item["id"] == "custom" and preset_id != "custom":
        current_visibility_string = self._preferences.getValue("general/visible_settings")
        if current_visibility_string:
            self._preferences.setValue("cura/custom_visible_settings", current_visibility_string)

    if preset_id == "custom":
        # Get settings from the stored custom data; fall back to the current visibility.
        new_visibility_string = self._preferences.getValue("cura/custom_visible_settings")
        if new_visibility_string is None:
            new_visibility_string = self._preferences.getValue("general/visible_settings")
    else:
        new_visibility_string = ";".join(preset_item["settings"])

    self._preferences.setValue("general/visible_settings", new_visibility_string)
    self._preferences.setValue("cura/active_setting_visibility_preset", preset_id)
    self._active_preset_item = preset_item
    self.activePresetChanged.emit()
def checkValidGetReply(reply):
    """Return True when *reply* carries an HTTP 200 status; log and return False otherwise."""
    status_code = reply.attribute(QNetworkRequest.HttpStatusCodeAttribute)
    if status_code == 200:
        return True
    Logger.log("w", "Got status code {status_code} while trying to get data".format(status_code = status_code))
    return False
def _onTimeout(self):
    """Autosave timer callback: persist preferences, instances and profiles."""
    # Guard flag so the save process does not trigger another autosave.
    self._saving = True
    Logger.log("d", "Autosaving preferences, instances and profiles")
    self._application.saveSettings()
    self._saving = False
def qmlPath(self) -> "QUrl":
    """Return the QUrl of this plugin's QML file, or an empty QUrl when the plugin path is unknown."""
    plugin_path = PluginRegistry.getInstance().getPluginPath(self.getPluginId())
    if plugin_path is None:
        Logger.log("e", "Cannot create QML view: cannot find plugin path for plugin [%s]", self.getPluginId())
        return QUrl("")
    return QUrl.fromLocalFile(os.path.join(plugin_path, self._qml_url))
def importProfile(self, url):
    """Import a profile from a local file URL via the registered profile readers.

    :param url: a QUrl pointing at a local file.
    :return: a dict with "status" ("ok"/"duplicate"/"error") and a localized
        "message", or None when the URL has no local path.
    """
    path = url.toLocalFile()
    if not path:
        return

    for profile_reader_id, profile_reader in self._manager.getProfileReaders():
        try:
            profile = profile_reader.read(path)  # Try to open the file with the profile reader.
        except Exception as e:
            # Note that this will fail quickly. That is, if any profile reader throws an exception,
            # it will stop reading. It will only continue reading if the reader returned None.
            Logger.log("e", "Failed to import profile from %s: %s", path, str(e))
            return { "status": "error", "message": catalog.i18nc("@info:status", "Failed to import profile from <filename>{0}</filename>: <message>{1}</message>", path, str(e)) }
        if profile:  # Success!
            profile.setReadOnly(False)
            try:
                self._manager.addProfile(profile)  # Add the new profile to the list of profiles.
            except SettingsError.DuplicateProfileError as e:
                count = 2
                name = "{0} {1}".format(profile.getName(), count)  # Try alternative profile names with a number appended to them.
                # Identity comparison with None instead of "!= None".
                while self._manager.findProfile(name) is not None:
                    count += 1
                    name = "{0} {1}".format(profile.getName(), count)
                profile.setName(name)
                self._manager.addProfile(profile)
                return { "status": "duplicate", "message": catalog.i18nc("@info:status", "Profile was imported as {0}", name) }
            else:
                return { "status": "ok", "message": catalog.i18nc("@info:status", "Successfully imported profile {0}", profile.getName()) }

    # If it hasn't returned by now, none of the plugins loaded the profile successfully.
    return { "status": "error", "message": catalog.i18nc("@info:status", "Profile {0} has an unknown file type.", path) }
def createOutputModel(self) -> MaterialOutputModel:
    """Build a MaterialOutputModel for this material's GUID.

    Read-only (built-in) material groups are preferred over writable ones;
    within each category the alphabetically first name wins. When no group is
    found, the cluster-provided data is used as-is.
    """
    material_manager = CuraApplication.getInstance().getMaterialManager()
    groups = material_manager.getMaterialGroupListByGUID(self.guid) or []

    # Sort the material groups by "is_read_only = True" first, and then the name alphabetically.
    read_only_groups = sorted((g for g in groups if g.is_read_only), key = lambda g: g.name)
    writable_groups = sorted((g for g in groups if not g.is_read_only), key = lambda g: g.name)
    material_group = read_only_groups[0] if read_only_groups else (writable_groups[0] if writable_groups else None)

    if material_group:
        container = material_group.root_material_node.getContainer()
        color = container.getMetaDataEntry("color_code")
        brand = container.getMetaDataEntry("brand")
        material_type = container.getMetaDataEntry("material")
        name = container.getName()
    else:
        Logger.log("w", "Unable to find material with guid {guid}. Using data as provided by cluster".format(guid = self.guid))
        color = self.color
        brand = self.brand
        material_type = self.material
        name = "Empty" if self.material == "empty" else "Unknown"

    return MaterialOutputModel(guid = self.guid, type = material_type, brand = brand, color = color, name = name)
def fixSingleExtrusionMachineExtruderDefinition(self, global_stack: "GlobalStack") -> None:
    """Ensure a single-extrusion machine has extruder stack 0 with the expected definition.

    Creates the extruder stack when it is missing entirely, and swaps in the
    expected extruder definition when the existing stack has the wrong one.

    :param global_stack: the machine's global container stack to check/repair.
    """
    container_registry = ContainerRegistry.getInstance()
    expected_extruder_definition_0_id = global_stack.getMetaDataEntry("machine_extruder_trains")["0"]
    extruder_stack_0 = global_stack.extruders.get("0")
    # At this point, extruder stacks for this machine may not have been loaded yet. In this case, need to look in
    # the container registry as well.
    if not global_stack.extruders:
        extruder_trains = container_registry.findContainerStacks(type = "extruder_train",
                                                                 machine = global_stack.getId())
        if extruder_trains:
            for extruder in extruder_trains:
                if extruder.getMetaDataEntry("position") == "0":
                    extruder_stack_0 = extruder
                    break

    if extruder_stack_0 is None:
        Logger.log("i", "No extruder stack for global stack [%s], create one", global_stack.getId())
        # Single extrusion machine without an ExtruderStack, create it
        from cura.Settings.CuraStackBuilder import CuraStackBuilder
        CuraStackBuilder.createExtruderStackWithDefaultSetup(global_stack, 0)

    elif extruder_stack_0.definition.getId() != expected_extruder_definition_0_id:
        Logger.log("e", "Single extruder printer [{printer}] expected extruder [{expected}], but got [{got}]. I'm making it [{expected}].".format(
            printer = global_stack.getId(), expected = expected_extruder_definition_0_id,
            got = extruder_stack_0.definition.getId()))
        extruder_definition = container_registry.findDefinitionContainers(id = expected_extruder_definition_0_id)[0]
        extruder_stack_0.definition = extruder_definition
def _sendPrintJob(self, writer: FileWriter, preferred_format: Dict, nodes: List[SceneNode]):
    """Generator that streams a print job to the printer.

    The caller drives this generator: it is primed, sent the target printer,
    and resumed as the write job progresses. The yield order is part of the
    protocol and must not change.

    :param writer: the file writer that serializes the scene nodes.
    :param preferred_format: dict describing the output format; "mode" selects
        text or binary streaming.
    :param nodes: the scene nodes to write.
    """
    Logger.log("i", "Sending print job to printer.")
    if self._sending_gcode:
        # A previous job is still being sent; consume the protocol's yields and report failure.
        self._error_message = Message(
            i18n_catalog.i18nc("@info:status",
                               "Sending new jobs (temporarily) blocked, still sending the previous print job."))
        self._error_message.show()
        yield #Wait on the user to select a target printer.
        yield #Wait for the write job to be finished.
        yield False #Return whether this was a success or not.
        yield #Prevent StopIteration.

    self._sending_gcode = True

    target_printer = yield #Potentially wait on the user to select a target printer.

    # Using buffering greatly reduces the write time for many lines of gcode
    if preferred_format["mode"] == FileWriter.OutputMode.TextMode:
        stream = io.StringIO()
    else: #Binary mode.
        stream = io.BytesIO()

    job = WriteFileJob(writer, stream, nodes, preferred_format["mode"])

    self._write_job_progress_message = Message(i18n_catalog.i18nc("@info:status", "Sending data to printer"),
                                               lifetime = 0, dismissable = False, progress = -1,
                                               title = i18n_catalog.i18nc("@info:title", "Sending Data"),
                                               use_inactivity_timer = False)
    self._write_job_progress_message.show()

    # Kept alive on self so the finished-callback can retrieve them.
    self._dummy_lambdas = (target_printer, preferred_format, stream)
    job.finished.connect(self._sendPrintJobWaitOnWriteJobFinished)
    job.start()

    yield True #Return that we had success!
    yield #To prevent having to catch the StopIteration exception.
def _onGlobalContainerStackChanged(self):
    """Reload the build-platform mesh and offset when the active machine changes.

    Clears the current mesh, then starts a background job to load the new
    machine's platform mesh (if any) and applies the platform offset.
    """
    if self._global_container_stack:
        self.setMeshData(None)

    self._global_container_stack = Application.getInstance().getGlobalContainerStack()
    if self._global_container_stack:
        container = self._global_container_stack.findContainer({ "platform": "*" })
        if container:
            mesh_file = container.getMetaDataEntry("platform")
            path = Resources.getPath(Resources.Meshes, mesh_file)

            if self._load_platform_job:
                # This prevents a previous load job from triggering texture loads.
                self._load_platform_job.finished.disconnect(self._onPlatformLoaded)

            # Perform platform mesh loading in the background
            self._load_platform_job = _LoadPlatformJob(path)
            self._load_platform_job.finished.connect(self._onPlatformLoaded)
            self._load_platform_job.start()

            offset = container.getMetaDataEntry("platform_offset")
            if offset:
                if len(offset) == 3:
                    self.setPosition(Vector(offset[0], offset[1], offset[2]))
                else:
                    Logger.log("w", "Platform offset is invalid: %s", str(offset))
                    self.setPosition(Vector(0.0, 0.0, 0.0))
            else:
                self.setPosition(Vector(0.0, 0.0, 0.0))
def _onEngineCreated(self) -> None:
    """Register the simulation view's QML display components once the QML engine exists."""
    plugin_path = PluginRegistry.getInstance().getPluginPath(self.getPluginId())
    if not plugin_path:
        Logger.log("e", "Unable to find the path for %s", self.getPluginId())
        return
    for component_name, qml_file in (("main", "SimulationViewMainComponent.qml"),
                                     ("menu", "SimulationViewMenuComponent.qml")):
        self.addDisplayComponent(component_name, os.path.join(plugin_path, qml_file))
def _checkAuthentication(self):
    """Ask the printer whether the stored authentication id/key pair is still valid."""
    Logger.log("d", "Checking if authentication is correct for id %s and key %s",
               self._authentication_id, self._getSafeAuthKey())
    url = "auth/check/" + str(self._authentication_id)
    self.get(url, on_finished = self._onCheckAuthenticationFinished)
def _onNetworkRequestFinished(self, reply):
    """Handle replies to printer-discovery HTTP requests.

    "system" replies register/update a discovered device and, for
    cluster-capable firmware, trigger a follow-up "printers" request.
    "printers" replies complete a cluster device with its cluster size.
    """
    reply_url = reply.url().toString()

    if "system" in reply_url:
        if reply.attribute(QNetworkRequest.HttpStatusCodeAttribute) != 200:
            # Something went wrong with checking the firmware version!
            return
        try:
            system_info = json.loads(bytes(reply.readAll()).decode("utf-8"))
        except ValueError:  # Narrowed from a bare except; json.loads raises ValueError (JSONDecodeError).
            Logger.log("e", "Something went wrong converting the JSON.")
            return

        address = reply.url().host()
        has_cluster_capable_firmware = Version(system_info["firmware"]) > self._min_cluster_version
        instance_name = "manual:%s" % address
        properties = {
            b"name": system_info["name"].encode("utf-8"),
            b"address": address.encode("utf-8"),
            b"firmware_version": system_info["firmware"].encode("utf-8"),
            b"manual": b"true",
            b"machine": str(system_info['hardware']["typeid"]).encode("utf-8")
        }

        if has_cluster_capable_firmware:
            # Cluster needs an additional request, before it's completed.
            properties[b"incomplete"] = b"true"

        # Check if the device is still in the list & re-add it with the updated
        # information.
        if instance_name in self._discovered_devices:
            self._onRemoveDevice(instance_name)
            self._onAddDevice(instance_name, address, properties)

        if has_cluster_capable_firmware:
            # We need to request more info in order to figure out the size of the cluster.
            cluster_url = QUrl("http://" + address + self._cluster_api_prefix + "printers/")
            cluster_request = QNetworkRequest(cluster_url)
            self._network_manager.get(cluster_request)

    elif "printers" in reply_url:
        if reply.attribute(QNetworkRequest.HttpStatusCodeAttribute) != 200:
            # Something went wrong with checking the amount of printers the cluster has!
            return
        # So we confirmed that the device is in fact a cluster printer, and we should now know how big it is.
        try:
            cluster_printers_list = json.loads(bytes(reply.readAll()).decode("utf-8"))
        except ValueError:  # Narrowed from a bare except; json.loads raises ValueError (JSONDecodeError).
            Logger.log("e", "Something went wrong converting the JSON.")
            return

        address = reply.url().host()
        instance_name = "manual:%s" % address
        if instance_name in self._discovered_devices:
            device = self._discovered_devices[instance_name]
            properties = device.getProperties().copy()
            if b"incomplete" in properties:
                del properties[b"incomplete"]
            properties[b'cluster_size'] = len(cluster_printers_list)
            self._onRemoveDevice(instance_name)
            self._onAddDevice(instance_name, address, properties)
def stop(self): if self._zero_conf is not None: Logger.log("d", "zeroconf close...") self._zero_conf.close()
def _update(self):
    """Worker loop: pump queued g-code commands to the printer and parse replies.

    Runs forever on its own thread. Each iteration (only while connected):

    1. Moves at most one command from the thread-safe queue into the numbered
       outgoing list and sends the next unsent command (max 6 in flight).
    2. Reads one line from the serial port and translates it into a
       PrinterResponse pushed onto self._responses_queue for the GUI.
    """
    while True:
        # this is a locking function
        connected = self.getConnectionState()
        if connected == ConnectionState.connected:
            handled = False
            # properties of the printer response to report back to the server
            response_props = {
                'command': self._last_command,
                'line': self._commands_current_line
            }

            ###
            ### process any new commands to send, starting with resends
            ###
            if not self.getPaused():
                if self._command_queue.unfinished_tasks > 0:
                    # get a command from the threaded queue
                    command = self._command_queue.get(block=True)
                    # add it to outgoing, numbered commands queue
                    if command is not None:
                        self._commands_list.append(command)
                        try:
                            self._command_queue.task_done()  # mark task as finished in queue
                        except ValueError as ve:
                            Logger.log("i", "_commandHandled called too many times: {}".format(ve))

                # if we're not at the top of the queue of commands, send the next one
                if (self._commands_current_line < self.countAllCommands()) and (self._commands_on_printer < 6):
                    cmd = self.getCommand(self._commands_current_line)
                    self._serialSendCommand(cmd, self._commands_current_line)

            # necessary to allow other threads to interact, otherwise events/lines are lost! (locking didn't fix that)
            sleep(0.01)

            ###
            ### Process printer responses
            ###
            try:
                line = self._serial.readline()
            except Exception:
                # Fix: was a bare "except:". A failed read is treated as "no data".
                line = b""

            # only process if there's something to process
            if line:
                # process (parse) response from printer
                if line.startswith(b'!!'):
                    response_props["type"] = "error"
                    response_props["message"] = "Printer signals fatal error!"
                    Logger.log('e', "Printer signals fatal error. Pausing print. {}".format(line))
                    self.pausePrint()  # pause for error
                    # done, don't process anything else
                # TODO: handle this better - just resend last command! No need for the magic bits
                elif b'resend' in line.lower() or line.startswith(b'rs'):
                    # A resend can be requested either by Resend, resend or rs.
                    Logger.log('e', "Printer signals resend. {}".format(line))
                    response_props["type"] = "resend"
                    response_props["message"] = "Printer signals resend. {}".format(line.decode('utf-8'))
                    # command was never received - mark as not on printer
                    self._commands_on_printer = max(0, self._commands_on_printer - 1)
                    try:
                        self._commands_current_line = int(
                            line.replace(b"N:", b" ").replace(b"N", b" ").replace(b":", b" ").split()[-1])
                    except (ValueError, IndexError):
                        # Fix: was a bare "except:"; only parse failures are expected.
                        if b"rs" in line:
                            # In some cases of the RS command it needs to be handled differently.
                            self._commands_current_line = int(line.split()[1])
                elif line.startswith(b"echo:"):
                    # Not handled because this is just if it's turned on
                    response_props['type'] = 'info'
                    printer_info = re.findall(r"echo:(.+)?", line.decode('utf-8'))
                    for info in printer_info:
                        response_props['message'] = info
                elif line.startswith(b"Compiled:"):
                    # Not handled because this is just if it's turned on
                    response_props['type'] = 'info'
                    response_props['message'] = line.decode('utf-8')
                elif line.startswith(b"start"):
                    # Not handled because this is just if it's turned on
                    response_props['type'] = 'start'
                    response_props['message'] = line.decode('utf-8')
                elif b"ok T:" in line or line.startswith(b"T:") or b"ok B:" in line or line.startswith(b"B:"):
                    # Temperature message. 'T:' for extruder and 'B:' for bed.
                    response_props["type"] = "temperature"
                    lineString = line.decode('utf-8')
                    # line looks like: b'ok T:24.7 /0.0 B:23.4 /0.0 @:0 B@:0\n'
                    # OR b'T:176.1 E:0 W:?\n'
                    extruder_temperature_matches = re.findall(r"T(\d*): ?([\d\.]+) ?\/?([\d\.]+)?", lineString)
                    if len(extruder_temperature_matches) > 0:
                        match = extruder_temperature_matches[0]
                        # NOTE: hot end (tool number) is the first match (0)
                        response_props["hotend"] = match[1]
                        if match[2]:
                            response_props["hotend_target"] = match[2]
                    bed_temperature_matches = re.findall(r"B: ?([\d\.]+) ?\/?([\d\.]+)?", lineString)
                    if len(bed_temperature_matches) > 0:
                        match = bed_temperature_matches[0]
                        response_props["bed"] = match[0]
                        response_props["bed_target"] = match[1]
                    handled = self._commandHandled()
                elif b"FIRMWARE_NAME:" in line:
                    # TODO: possibly pre-parse this instead of sending whole line
                    response_props["type"] = "firmware"
                    response_props["message"] = line.decode('utf-8')
                    handled = self._commandHandled()
                # position response for position update
                # b'X:0.00Y:0.00Z:0.00E:0.00 Count X: 0.00Y:0.00Z:0.00\n'
                elif line.startswith(b'X:'):
                    stringline = line.decode('utf-8')
                    matches = re.findall(r'([x|y|z|e]):([0-9\.\-]+)+', stringline.lower())
                    response_props["type"] = "position"
                    # first 4 matches are position, other 2 are steps: (axis, value)
                    for i in range(4):
                        response_props[str(matches[i][0])] = str(matches[i][1])
                    handled = self._commandHandled()
                # handle any basic ok's
                elif line.lower().startswith(b'ok'):
                    response_props["type"] = "ok"
                    response_props["message"] = "ok"
                    handled = self._commandHandled()
                # ERROR! BAD.
                elif line.lower().startswith(b'error'):
                    response_props["type"] = "error"
                    response_props["message"] = line.decode('utf-8')
                # now, really not handled
                else:
                    Logger.log('w', "WARN: Printer response not handled: {}".format(line))
                    response_props["type"] = "info"
                    response_props["message"] = line.decode('utf-8')
                    # TODO: do something else here??

                # send response to queue so GUI can pick it up later
                response = PrinterResponse(**response_props)
                self._responses_queue.put(response, block=True)

        sleep(0.01)  # yield to others
def _autoDetectFinished(self, job: AutoDetectBaudJob): result = job.getResult() if result is not None: self.setBaudRate(result) Logger.log("d", "Set baudrate to {result}") self.connect() # Try to connect (actually create serial, etc)
#Copyright (c) 2018 Ultimaker B.V. #Cura is released under the terms of the LGPLv3 or higher. import sys from UM.Logger import Logger try: from . import UFPWriter except ImportError: Logger.log("w", "Could not import UFPWriter; libCharon may be missing") from UM.i18n import i18nCatalog #To translate the file format description. from UM.Mesh.MeshWriter import MeshWriter #For the binary mode flag. i18n_catalog = i18nCatalog("cura") ''' def getMetaData(): if "UFPWriter.UFPWriter" not in sys.modules: return {} return { "mesh_writer": { "output": [ { "mime_type": "application/x-ufp", "mode": MeshWriter.OutputMode.BinaryMode, "extension": "ufp", "description": i18n_catalog.i18nc("@item:inlistbox", "Ultimaker Format Package") } ] }
def _onGetPrinterDataFinished(self, reply):
    """Handle the reply of a printer-status request (legacy UM3 API).

    Parses the JSON status, lazily creates the single PrinterOutputModel on
    first use, then pushes bed/extruder temperatures, state, head position,
    active materials and hotend IDs into that model.
    """
    status_code = reply.attribute(QNetworkRequest.HttpStatusCodeAttribute)
    if status_code == 200:
        try:
            result = json.loads(bytes(reply.readAll()).decode("utf-8"))
        except json.decoder.JSONDecodeError:
            Logger.log("w", "Received an invalid printer state message: Not valid JSON.")
            return

        if not self._printers:
            # Quickest way to get the firmware version is to grab it from the zeroconf.
            firmware_version = self._properties.get(b"firmware_version", b"").decode("utf-8")
            self._printers = [PrinterOutputModel(output_controller=self._output_controller, number_of_extruders=self._number_of_extruders, firmware_version=firmware_version)]
            # NOTE(review): camera stream URL/port appear hard-coded for this printer family — confirm.
            self._printers[0].setCameraUrl(QUrl("http://" + self._address + ":8080/?action=stream"))
            for extruder in self._printers[0].extruders:
                extruder.activeMaterialChanged.connect(self.materialIdChanged)
                extruder.hotendIDChanged.connect(self.hotendIdChanged)
            self.printersChanged.emit()

        # LegacyUM3 always has a single printer.
        printer = self._printers[0]
        printer.updateBedTemperature(result["bed"]["temperature"]["current"])
        printer.updateTargetBedTemperature(result["bed"]["temperature"]["target"])
        printer.updateState(result["status"])

        try:
            # If we're still handling the request, we should ignore remote for a bit.
            if not printer.getController().isPreheatRequestInProgress():
                printer.updateIsPreheating(result["bed"]["pre_heat"]["active"])
        except KeyError:
            # Older firmwares don't support preheating, so we need to fake it.
            pass

        head_position = result["heads"][0]["position"]
        printer.updateHeadPosition(head_position["x"], head_position["y"], head_position["z"])

        for index in range(0, self._number_of_extruders):
            temperatures = result["heads"][0]["extruders"][index]["hotend"]["temperature"]
            extruder = printer.extruders[index]
            extruder.updateTargetHotendTemperature(temperatures["target"])
            extruder.updateHotendTemperature(temperatures["current"])

            material_guid = result["heads"][0]["extruders"][index]["active_material"]["guid"]

            # Only rebuild the material model when the GUID actually changed.
            if extruder.activeMaterial is None or extruder.activeMaterial.guid != material_guid:
                # Find matching material (as we need to set brand, type & color)
                containers = ContainerRegistry.getInstance().findInstanceContainers(type="material", GUID=material_guid)
                if containers:
                    color = containers[0].getMetaDataEntry("color_code")
                    brand = containers[0].getMetaDataEntry("brand")
                    material_type = containers[0].getMetaDataEntry("material")
                    name = containers[0].getName()
                else:
                    # Unknown material.
                    color = "#00000000"
                    brand = "Unknown"
                    material_type = "Unknown"
                    name = "Unknown"
                material = MaterialOutputModel(guid=material_guid, type=material_type, brand=brand, color=color, name=name)
                extruder.updateActiveMaterial(material)

            try:
                hotend_id = result["heads"][0]["extruders"][index]["hotend"]["id"]
            except KeyError:
                # Hotend ID is optional in older firmware replies.
                hotend_id = ""
            printer.extruders[index].updateHotendID(hotend_id)
    else:
        Logger.log("w", "Got status code {status_code} while trying to get printer data".format(status_code=status_code))
def _onRequestFinished(self, reply: QNetworkReply) -> None:
    """Dispatch a finished Toolbox/Marketplace network reply to the right model.

    Timeouts and unreachable hosts switch the view to the "errored" page.
    Successful GET replies are matched against self._request_urls and their
    JSON payload is stored and applied as metadata on the matching model.
    """
    if reply.error() == QNetworkReply.TimeoutError:
        Logger.log("w", "Got a timeout.")
        self.setViewPage("errored")
        self.resetDownload()
        return

    if reply.error() == QNetworkReply.HostNotFoundError:
        Logger.log("w", "Unable to reach server.")
        self.setViewPage("errored")
        self.resetDownload()
        return

    if reply.operation() == QNetworkAccessManager.GetOperation:
        for response_type, url in self._request_urls.items():
            if reply.url() == url:
                if reply.attribute(QNetworkRequest.HttpStatusCodeAttribute) == 200:
                    try:
                        json_data = json.loads(bytes(reply.readAll()).decode("utf-8"))

                        # Check for errors:
                        if "errors" in json_data:
                            for error in json_data["errors"]:
                                Logger.log("e", "%s", error["title"])
                            return

                        # Create model and apply metadata:
                        if not self._models[response_type]:
                            Logger.log("e", "Could not find the %s model.", response_type)
                            break

                        self._server_response_data[response_type] = json_data["data"]
                        self._models[response_type].setMetadata(self._server_response_data[response_type])

                        if response_type == "packages":
                            self._models[response_type].setFilter({"type": "plugin"})
                            self.reBuildMaterialsModels()
                            self.reBuildPluginsModels()
                            self._notifyPackageManager()
                        elif response_type == "authors":
                            # NOTE(review): the second setFilter call appears to replace the
                            # first ("package_types") filter rather than add to it — confirm
                            # against the model's setFilter semantics.
                            self._models[response_type].setFilter({"package_types": "material"})
                            self._models[response_type].setFilter({"tags": "generic"})
                        elif response_type == "updates":
                            # Tell the package manager that there's a new set of updates available.
                            packages = set([pkg["package_id"] for pkg in self._server_response_data[response_type]])
                            self._package_manager.setPackagesWithUpdate(packages)
                        elif response_type == "subscribed_packages":
                            self._checkCompatibilities(json_data["data"])

                        self.metadataChanged.emit()

                        if self.isLoadingComplete():
                            self.setViewPage("overview")
                    except json.decoder.JSONDecodeError:
                        Logger.log("w", "Received invalid JSON for %s.", response_type)
                        break
                else:
                    Logger.log("w", "Unable to connect with the server, we got a response code %s while trying to connect to %s", reply.attribute(QNetworkRequest.HttpStatusCodeAttribute), reply.url())
                    self.setViewPage("errored")
                    self.resetDownload()
def disable(self, plugin_id: str) -> None: self._plugin_registry.disablePlugin(plugin_id) self.enabledChanged.emit() Logger.log("i", "%s was set as 'deactive'.", plugin_id) self._restart_required = True self.restartRequiredChanged.emit()
def _verifyAuthentication(self): Logger.log("d", "Attempting to verify authentication") # This will ensure that the "_onAuthenticationRequired" is triggered, which will setup the authenticator. self.get("auth/verify", on_finished=self._onVerifyAuthenticationCompleted)
def processCliStream(self, stream: str) -> Optional[SteSlicerSceneNode]:
    """Parse a CLI (Common Layer Interface) stream into a scene node.

    Two passes over the stream: the first counts lines and detects layer
    markers, the second builds layer polygons and a synthetic g-code list.
    Returns the decorated scene node, or None when the user cancelled.
    """
    Logger.log("d", "Preparing to load CLI")
    self._cancelled = False
    self._setPrintSettings()
    self._is_layers_in_file = False

    scene_node = SteSlicerSceneNode()

    gcode_list = []
    self._writeStartCode(gcode_list)
    # Placeholder; patched with the real count once parsing is done.
    gcode_list.append(";LAYER_COUNT\n")

    # Reading starts here
    file_lines = 0
    current_line = 0
    # First pass: count lines (for progress reporting) and detect layer keywords.
    for line in stream.split("\n"):
        file_lines += 1
        if not self._is_layers_in_file and line[:len(self._layer_keyword)] == self._layer_keyword:
            self._is_layers_in_file = True

    # Update progress at most ~100 times regardless of file size.
    file_step = max(math.floor(file_lines / 100), 1)

    self._clearValues()

    self._message = Message(catalog.i18nc("@info:status", "Parsing CLI"), lifetime=0, title=catalog.i18nc("@info:title", "CLI Details"))

    assert(self._message is not None)  # use for typing purposes
    self._message.setProgress(0)
    self._message.show()

    Logger.log("d", "Parsing CLI...")

    self._position = Position(0, 0, 0, 0, 0, 1, 0, [0])
    self._gcode_position = Position(0, 0, 0, 0, 0, 0, 0, [0])
    current_path = []  # type: List[List[float]]
    geometry_start = False
    # Second pass: the actual parse.
    for line in stream.split("\n"):
        if self._cancelled:
            Logger.log("d", "Parsing CLI file cancelled")
            return None
        current_line += 1

        if current_line % file_step == 0:
            self._message.setProgress(math.floor(current_line / file_lines * 100))
            Job.yieldThread()
        if len(line) == 0:
            continue
        if line == "$$GEOMETRYSTART":
            geometry_start = True
            continue
        if not geometry_start:
            # Everything before $$GEOMETRYSTART is header data; skip it.
            continue

        if self._is_layers_in_file and line[:len(self._layer_keyword)] == self._layer_keyword:
            try:
                layer_height = float(line[len(self._layer_keyword):])
                self._current_layer_thickness = layer_height - self._current_layer_height
                # NOTE(review): thickness > 0.4 is clamped to 0.2 — presumably a guard
                # against bogus layer data; confirm the intended threshold/fallback.
                if self._current_layer_thickness > 0.4:
                    self._current_layer_thickness = 0.2
                self._current_layer_height = layer_height
                self._createPolygon(self._current_layer_thickness, current_path, self._extruder_offsets.get(self._extruder_number, [0, 0]))
                current_path.clear()
                # Start the new layer at the end position of the last layer
                current_path.append([self._position.x, self._position.y, self._position.z, self._position.a, self._position.b, self._position.c, self._position.f, self._position.e[self._extruder_number], LayerPolygon.MoveCombingType])
                self._layer_number += 1
                gcode_list.append(";LAYER:%s\n" % self._layer_number)
            except:
                # Malformed layer line: ignore it and keep parsing.
                pass
        if line.find(self._body_type_keyword) == 0:
            self._layer_type = LayerPolygon.Inset0Type
        if line.find(self._support_type_keyword) == 0:
            self._layer_type = LayerPolygon.SupportType
        if line.find(self._perimeter_type_keyword) == 0:
            self._layer_type = LayerPolygon.Inset0Type
        if line.find(self._skin_type_keyword) == 0:
            self._layer_type = LayerPolygon.SkinType
        if line.find(self._infill_type_keyword) == 0:
            self._layer_type = LayerPolygon.InfillType

        # Comment line
        if line.startswith("//"):
            continue

        # Polyline processing
        self.processPolyline(line, current_path, gcode_list)

    # "Flush" leftovers. Last layer paths are still stored
    if len(current_path) > 1:
        if self._createPolygon(self._current_layer_thickness, current_path, self._extruder_offsets.get(self._extruder_number, [0, 0])):
            self._layer_number += 1
            current_path.clear()

    # Patch the placeholder with the real layer count.
    layer_count_idx = gcode_list.index(";LAYER_COUNT\n")
    if layer_count_idx > 0:
        gcode_list[layer_count_idx] = ";LAYER_COUNT:%s\n" % self._layer_number
    end_gcode = self._global_stack.getProperty("machine_end_gcode", "value")
    gcode_list.append(end_gcode + "\n")

    # Fixed 8-entry palette; extruder index selects the row.
    material_color_map = numpy.zeros((8, 4), dtype=numpy.float32)
    material_color_map[0, :] = [0.0, 0.7, 0.9, 1.0]
    material_color_map[1, :] = [0.7, 0.9, 0.0, 1.0]
    material_color_map[2, :] = [0.9, 0.0, 0.7, 1.0]
    material_color_map[3, :] = [0.7, 0.0, 0.0, 1.0]
    material_color_map[4, :] = [0.0, 0.7, 0.0, 1.0]
    material_color_map[5, :] = [0.0, 0.0, 0.7, 1.0]
    material_color_map[6, :] = [0.3, 0.3, 0.3, 1.0]
    material_color_map[7, :] = [0.7, 0.7, 0.7, 1.0]
    layer_mesh = self._layer_data_builder.build(material_color_map)
    decorator = LayerDataDecorator()
    decorator.setLayerData(layer_mesh)
    scene_node.addDecorator(decorator)

    gcode_list_decorator = GCodeListDecorator()
    gcode_list_decorator.setGCodeList(gcode_list)
    scene_node.addDecorator(gcode_list_decorator)

    # gcode_dict stores gcode_lists for a number of build plates.
    active_build_plate_id = SteSlicerApplication.getInstance().getMultiBuildPlateModel().activeBuildPlate
    gcode_dict = {active_build_plate_id: gcode_list}  # type: ignore #Because gcode_dict is generated dynamically.
    SteSlicerApplication.getInstance().getController().getScene().gcode_dict = gcode_dict

    Logger.log("d", "Finished parsing CLI file")
    self._message.hide()

    if self._layer_number == 0:
        Logger.log("w", "File doesn't contain any valid layers")

    # Re-center the model when the machine origin is at a corner.
    if not self._global_stack.getProperty("machine_center_is_zero", "value"):
        machine_width = self._global_stack.getProperty("machine_width", "value")
        machine_depth = self._global_stack.getProperty("machine_depth", "value")
        scene_node.setPosition(Vector(-machine_width / 2, 0, machine_depth / 2))

    Logger.log("d", "CLI loading finished")

    if SteSlicerApplication.getInstance().getPreferences().getValue("gcodereader/show_caution"):
        caution_message = Message(catalog.i18nc("@info:generic", "Make sure the g-code is suitable for your printer and printer configuration before sending the file to it. The g-code representation may not be accurate."), lifetime=0, title=catalog.i18nc("@info:title", "G-code Details"))
        caution_message.show()

    # Slicing is pointless for a pre-sliced file; disable the backend.
    backend = SteSlicerApplication.getInstance().getBackend()
    backend.backendStateChange.emit(Backend.BackendState.Disabled)
    return scene_node
def cancelDownload(self) -> None: Logger.log("i", "User cancelled the download of a package.") self.resetDownload()
def exportProfile(self, instance_ids, file_name, file_type):
    """Export the given profile containers to a file on disk.

    :param instance_ids: ids of the instance containers (global + extruders) to export.
    :param file_name: destination path; the extension is appended when missing.
    :param file_type: filter string of the form "<description> (*.<extension>)",
        used to look up the matching profile writer plug-in.
    """
    # Parse the fileType to deduce what plugin can save the file format.
    # fileType has the format "<description> (*.<extension>)"
    split = file_type.rfind(" (*.")  # Find where the description ends and the extension starts.
    if split < 0:  # Not found. Invalid format.
        Logger.log("e", "Invalid file format identifier %s", file_type)
        return
    description = file_type[:split]
    extension = file_type[split + 4:-1]  # Leave out the " (*." and ")".
    if not file_name.endswith("." + extension):  # Auto-fill the extension if the user did not provide any.
        file_name += "." + extension

    # On Windows, QML FileDialog properly asks for overwrite confirm, but not on other platforms, so handle those ourself.
    if not Platform.isWindows():
        if os.path.exists(file_name):
            result = QMessageBox.question(None, catalog.i18nc("@title:window", "File Already Exists"), catalog.i18nc("@label", "The file <filename>{0}</filename> already exists. Are you sure you want to overwrite it?").format(file_name))
            if result == QMessageBox.No:
                return
    found_containers = []
    extruder_positions = []
    for instance_id in instance_ids:
        containers = ContainerRegistry.getInstance().findInstanceContainers(id=instance_id)
        if containers:
            found_containers.append(containers[0])

            # Determine the position of the extruder of this container
            extruder_id = containers[0].getMetaDataEntry("extruder", "")
            if extruder_id == "":
                # Global stack, sorts before all extruders.
                extruder_positions.append(-1)
            else:
                extruder_containers = ContainerRegistry.getInstance().findDefinitionContainers(id=extruder_id)
                if extruder_containers:
                    extruder_positions.append(int(extruder_containers[0].getMetaDataEntry("position", 0)))
                else:
                    extruder_positions.append(0)
    # Ensure the profiles are always exported in order (global, extruder 0, extruder 1, ...)
    # NOTE(review): if two entries share a position, sorted() falls back to
    # comparing the container objects themselves, which may raise TypeError —
    # confirm positions are always unique here.
    found_containers = [containers for (positions, containers) in sorted(zip(extruder_positions, found_containers))]

    profile_writer = self._findProfileWriter(extension, description)

    try:
        success = profile_writer.write(file_name, found_containers)
    except Exception as e:
        Logger.log("e", "Failed to export profile to %s: %s", file_name, str(e))
        m = Message(catalog.i18nc("@info:status", "Failed to export profile to <filename>{0}</filename>: <message>{1}</message>", file_name, str(e)), lifetime=0)
        m.show()
        return
    if not success:
        Logger.log("w", "Failed to export profile to %s: Writer plugin reported failure.", file_name)
        m = Message(catalog.i18nc("@info:status", "Failed to export profile to <filename>{0}</filename>: Writer plugin reported failure.", file_name), lifetime=0)
        m.show()
        return
    m = Message(catalog.i18nc("@info:status", "Exported profile to <filename>{0}</filename>", file_name))
    m.show()
def createExtruderTrain(self, extruder_definition: DefinitionContainerInterface, machine_definition: DefinitionContainerInterface, position, machine_id: str) -> None:
    """Build and register a complete extruder container stack for a machine.

    Assembles the stack in fixed order: extruder definition, variant,
    material, quality, (empty) quality-changes and the user profile, falling
    back to the machine's "preferred_*" metadata entries where available.

    :param extruder_definition: definition container of the extruder itself.
    :param machine_definition: definition of the machine that owns it.
    :param position: extruder position index stored in the stack metadata.
    :param machine_id: id of the global machine stack this extruder belongs to.
    """
    # Cache some things.
    container_registry = ContainerRegistry.getInstance()
    machine_definition_id = Application.getInstance().getMachineManager().getQualityDefinitionId(machine_definition)

    # Create a container stack for this extruder.
    extruder_stack_id = container_registry.uniqueName(extruder_definition.getId())
    container_stack = ContainerStack(extruder_stack_id)
    container_stack.setName(extruder_definition.getName())  # Take over the display name to display the stack with.
    container_stack.addMetaDataEntry("type", "extruder_train")
    container_stack.addMetaDataEntry("machine", machine_id)
    container_stack.addMetaDataEntry("position", position)
    container_stack.addContainer(extruder_definition)

    # Find the variant to use for this extruder.
    variant = container_registry.findInstanceContainers(id = "empty_variant")[0]
    if machine_definition.getMetaDataEntry("has_variants"):
        # First add any variant. Later, overwrite with preference if the preference is valid.
        variants = container_registry.findInstanceContainers(definition = machine_definition_id, type = "variant")
        if len(variants) >= 1:
            variant = variants[0]
        preferred_variant_id = machine_definition.getMetaDataEntry("preferred_variant")
        if preferred_variant_id:
            preferred_variants = container_registry.findInstanceContainers(id = preferred_variant_id, definition = machine_definition_id, type = "variant")
            if len(preferred_variants) >= 1:
                variant = preferred_variants[0]
            else:
                Logger.log("w", "The preferred variant \"%s\" of machine %s doesn't exist or is not a variant profile.", preferred_variant_id, machine_id)
                # And leave it at the default variant.
    container_stack.addContainer(variant)

    # Find a material to use for this variant.
    material = container_registry.findInstanceContainers(id = "empty_material")[0]
    if machine_definition.getMetaDataEntry("has_materials"):
        # First add any material. Later, overwrite with preference if the preference is valid.
        machine_has_variant_materials = machine_definition.getMetaDataEntry("has_variant_materials", default = False)
        # The metadata value may be a bool or the string "True", hence the double check.
        if machine_has_variant_materials or machine_has_variant_materials == "True":
            materials = container_registry.findInstanceContainers(type = "material", definition = machine_definition_id, variant = variant.getId())
        else:
            materials = container_registry.findInstanceContainers(type = "material", definition = machine_definition_id)
        if len(materials) >= 1:
            material = materials[0]
        preferred_material_id = machine_definition.getMetaDataEntry("preferred_material")
        if preferred_material_id:
            # Diameter is matched approximately, so round to the nearest millimetre.
            global_stack = ContainerRegistry.getInstance().findContainerStacks(id = machine_id)
            if global_stack:
                approximate_material_diameter = round(global_stack[0].getProperty("material_diameter", "value"))
            else:
                approximate_material_diameter = round(machine_definition.getProperty("material_diameter", "value"))
            search_criteria = { "type": "material", "id": preferred_material_id, "approximate_diameter": approximate_material_diameter}
            if machine_definition.getMetaDataEntry("has_machine_materials"):
                search_criteria["definition"] = machine_definition_id
                if machine_definition.getMetaDataEntry("has_variants") and variant:
                    search_criteria["variant"] = variant.id
            else:
                search_criteria["definition"] = "fdmprinter"
            preferred_materials = container_registry.findInstanceContainers(**search_criteria)
            if len(preferred_materials) >= 1:
                # In some cases we get multiple materials. In that case, prefer materials that are marked as read only.
                read_only_preferred_materials = [preferred_material for preferred_material in preferred_materials if preferred_material.isReadOnly()]
                if len(read_only_preferred_materials) >= 1:
                    material = read_only_preferred_materials[0]
                else:
                    material = preferred_materials[0]
            else:
                Logger.log("w", "The preferred material \"%s\" of machine %s doesn't exist or is not a material profile.", preferred_material_id, machine_id)
                # And leave it at the default material.
    container_stack.addContainer(material)

    # Find a quality to use for this extruder.
    quality = container_registry.getEmptyInstanceContainer()
    search_criteria = { "type": "quality" }
    if machine_definition.getMetaDataEntry("has_machine_quality"):
        search_criteria["definition"] = machine_definition_id
        if machine_definition.getMetaDataEntry("has_materials") and material:
            search_criteria["material"] = material.id
    else:
        search_criteria["definition"] = "fdmprinter"
    preferred_quality = machine_definition.getMetaDataEntry("preferred_quality")
    if preferred_quality:
        search_criteria["id"] = preferred_quality
    containers = ContainerRegistry.getInstance().findInstanceContainers(**search_criteria)
    if not containers and preferred_quality:
        # Preferred quality not found: retry the same search without the id constraint.
        Logger.log("w", "The preferred quality \"%s\" of machine %s doesn't exist or is not a quality profile.", preferred_quality, machine_id)
        search_criteria.pop("id", None)
        containers = ContainerRegistry.getInstance().findInstanceContainers(**search_criteria)
    if containers:
        quality = containers[0]

    container_stack.addContainer(quality)

    empty_quality_changes = container_registry.findInstanceContainers(id = "empty_quality_changes")[0]
    container_stack.addContainer(empty_quality_changes)

    user_profile = container_registry.findInstanceContainers(type = "user", extruder = extruder_stack_id)
    if user_profile:
        # There was already a user profile, loaded from settings.
        user_profile = user_profile[0]
    else:
        user_profile = InstanceContainer(extruder_stack_id + "_current_settings")  # Add an empty user profile.
        user_profile.addMetaDataEntry("type", "user")
        user_profile.addMetaDataEntry("extruder", extruder_stack_id)
        # Imported locally to avoid a circular import at module load time.
        from cura.CuraApplication import CuraApplication
        user_profile.addMetaDataEntry("setting_version", CuraApplication.SettingVersion)
        user_profile.setDefinition(machine_definition)
        container_registry.addContainer(user_profile)
    container_stack.addContainer(user_profile)

    # regardless of what the next stack is, we have to set it again, because of signal routing.
    container_stack.setNextStack(Application.getInstance().getGlobalContainerStack())

    container_registry.addContainer(container_stack)
def _populate(self): from cura.CuraApplication import CuraApplication items = [] for file_path in Resources.getAllResourcesOfType( CuraApplication.ResourceTypes.SettingVisibilityPreset): try: mime_type = MimeTypeDatabase.getMimeTypeForFile(file_path) except MimeTypeNotFoundError: Logger.log("e", "Could not determine mime type of file %s", file_path) continue item_id = urllib.parse.unquote_plus( mime_type.stripExtension(os.path.basename(file_path))) if not os.path.isfile(file_path): Logger.log("e", "[%s] is not a file", file_path) continue parser = ConfigParser( allow_no_value=True) # accept options without any value, try: parser.read([file_path]) if not parser.has_option("general", "name") or not parser.has_option( "general", "weight"): continue settings = [] for section in parser.sections(): if section == 'general': continue settings.append(section) for option in parser[section].keys(): settings.append(option) items.append({ "id": item_id, "name": catalog.i18nc("@action:inmenu", parser["general"]["name"]), "weight": parser["general"]["weight"], "settings": settings, }) except Exception: Logger.logException("e", "Failed to load setting preset %s", file_path) items.sort(key=lambda k: (int(k["weight"]), k["id"])) # Put "custom" at the top items.insert( 0, { "id": "custom", "name": "Custom selection", "weight": -100, "settings": [] }) self.setItems(items)
def _installPackage(self, installation_package_data: dict): package_info = installation_package_data["package_info"] filename = installation_package_data["filename"] package_id = package_info["package_id"] Logger.log("i", "Installing package [%s] from file [%s]", package_id, filename) # Load the cached package file and extract all contents to a temporary directory if not os.path.exists(filename): Logger.log( "w", "Package [%s] file '%s' is missing, cannot install this package", package_id, filename) return try: with zipfile.ZipFile(filename, "r") as archive: temp_dir = tempfile.TemporaryDirectory() archive.extractall(temp_dir.name) except Exception: Logger.logException("e", "Failed to install package from file [%s]", filename) return # Remove it first and then install try: self._purgePackage(package_id) except Exception as e: message = Message(catalog.i18nc( "@error:update", "There was an error uninstalling the package {package} before installing " "new version:\n{error}.\nPlease try to upgrade again later.". format(package=package_id, error=str(e))), title=catalog.i18nc("@info:title", "Updating error")) message.show() return # Copy the folders there for sub_dir_name, installation_root_dir in self._installation_dirs_dict.items( ): src_dir_path = os.path.join(temp_dir.name, "files", sub_dir_name) dst_dir_path = os.path.join(installation_root_dir, package_id) if not os.path.exists(src_dir_path): continue self.__installPackageFiles(package_id, src_dir_path, dst_dir_path) # Remove the file try: os.remove(filename) except Exception: Logger.log("w", "Tried to delete file [%s], but it failed", filename) # Move the info to the installed list of packages only when it succeeds self._installed_package_dict[ package_id] = self._to_install_package_dict[package_id] self._installed_package_dict[package_id]["package_info"][ "is_installed"] = True
def _connect(self):
    """Try to establish a serial connection to the printer on self._serial_port.

    First probes via the STK500v2 programmer protocol (Arduino-based
    boards), then auto-detects the baud rate by sending M105 and looking
    for a temperature ("T:") reply. Calls setIsConnected(True/False) with
    the outcome.
    """
    Logger.log("d", "Attempting to connect to %s", self._serial_port)
    self._is_connecting = True
    programmer = stk500v2.Stk500v2()
    try:
        # Connect with the serial; if this succeeds, it's an Arduino-based USB device.
        programmer.connect(self._serial_port)
        self._serial = programmer.leaveISP()
    except ispBase.IspError as e:
        Logger.log("i", "Could not establish connection on %s: %s. Device is not arduino based." % (self._serial_port, str(e)))
    except Exception as e:
        Logger.log("i", "Could not establish connection on %s, unknown reasons. Device is not arduino based." % self._serial_port)
    # If the programmer connected, we know it's an ATmega-based board. Not all that
    # useful, but it does give some debugging information.
    for baud_rate in self._getBaudrateList():  # Cycle all baud rates (auto detect)
        if self._serial is None:
            try:
                self._serial = serial.Serial(str(self._serial_port), baud_rate, timeout=3, writeTimeout=10000)
            except serial.SerialException:
                # Port cannot be opened at all; no point trying other baud rates.
                Logger.log("i", "Could not open port %s" % self._serial_port)
                return
        else:
            if not self.setBaudRate(baud_rate):
                continue  # Could not set the baud rate, go to the next
        # Ensure that we are not talking to the bootloader. 1.5 sec seems to be the magic number.
        time.sleep(1.5)
        sucesfull_responses = 0
        timeout_time = time.time() + 5
        self._serial.write(b"\n")
        # Request temperature; at the correct baud rate this yields a reply containing "T:".
        self._sendCommand("M105")
        while timeout_time > time.time():
            line = self._readline()
            if line is None:
                # Something went wrong with reading; could be that close() was called.
                self.setIsConnected(False)
                return
            if b"T:" in line:
                self._serial.timeout = 0.5
                sucesfull_responses += 1
                if sucesfull_responses >= self._required_responses_auto_baud:
                    self._serial.timeout = 2  # Reset serial timeout to the normal operating value.
                    self.setIsConnected(True)
                    Logger.log("i", "Established printer connection on port %s" % self._serial_port)
                    return
            # Send M105 as long as we are listening, otherwise we end up in an undefined state.
            self._sendCommand("M105")
    Logger.log("e", "Baud rate detection for %s failed", self._serial_port)
    self.close()  # Unable to connect, wrap up.
    self.setIsConnected(False)
def importProfile(self, file_name):
    """Import a quality profile from a file using a matching profile-reader plugin.

    :param file_name: Path to the profile file; its extension selects the reader.
    :return: A dict with "status" ("ok"/"error") and a localized "message",
        or None when there is no global container stack.

    A reader may return either a single profile or a list: the first list
    entry is the global profile, subsequent entries are per-extruder
    profiles matched to this machine's extruders by position.
    """
    Logger.log("d", "Attempting to import profile %s", file_name)
    if not file_name:
        return {"status": "error",
                "message": catalog.i18nc("@info:status", "Failed to import profile from <filename>{0}</filename>: <message>{1}</message>", file_name, "Invalid path")}
    plugin_registry = PluginRegistry.getInstance()
    extension = file_name.split(".")[-1]
    global_container_stack = Application.getInstance().getGlobalContainerStack()
    if not global_container_stack:
        return
    # Machine extruders, sorted by their position so list indices line up with extruder numbers.
    machine_extruders = list(ExtruderManager.getInstance().getMachineExtruders(global_container_stack.getId()))
    machine_extruders.sort(key=lambda k: k.getMetaDataEntry("position"))
    for plugin_id, meta_data in self._getIOPlugins("profile_reader"):
        if meta_data["profile_reader"][0]["extension"] != extension:
            continue
        profile_reader = plugin_registry.getPluginObject(plugin_id)
        try:
            # Try to open the file with the profile reader.
            profile_or_list = profile_reader.read(file_name)
        except Exception as e:
            # Note that this will fail quickly: if any profile reader throws an exception,
            # it stops reading. Reading only continues if the reader returned None.
            Logger.log("e", "Failed to import profile from %s: %s while using profile reader. Got exception %s", file_name, profile_reader.getPluginId(), str(e))
            return {"status": "error",
                    "message": catalog.i18nc("@info:status", "Failed to import profile from <filename>{0}</filename>: <message>{1}</message>", file_name, str(e))}
        if profile_or_list:  # Success!
            name_seed = os.path.splitext(os.path.basename(file_name))[0]
            new_name = self.uniqueName(name_seed)
            if type(profile_or_list) is not list:
                # Single profile: configure and register it directly.
                profile = profile_or_list
                self._configureProfile(profile, name_seed, new_name)
                return {"status": "ok",
                        "message": catalog.i18nc("@info:status", "Successfully imported profile {0}", profile.getName())}
            else:
                # profile_index -1 = global profile; 0.. = per-extruder profiles.
                profile_index = -1
                global_profile = None
                for profile in profile_or_list:
                    if profile_index >= 0:
                        if len(machine_extruders) > profile_index:
                            extruder_id = Application.getInstance().getMachineManager().getQualityDefinitionId(machine_extruders[profile_index].getBottom())
                            # Ensure the extruder profiles get non-conflicting names.
                            # NB: these are not user-facing.
                            if "extruder" in profile.getMetaData():
                                profile.setMetaDataEntry("extruder", extruder_id)
                            else:
                                profile.addMetaDataEntry("extruder", extruder_id)
                            profile_id = (extruder_id + "_" + name_seed).lower().replace(" ", "_")
                        elif profile_index == 0:
                            # Importing a multi-extrusion profile into a single-extrusion machine;
                            # merge the first extruder profile into the global profile.
                            profile._id = self.uniqueName("temporary_profile")
                            self.addContainer(profile)
                            ContainerManager.getInstance().mergeContainers(global_profile.getId(), profile.getId())
                            self.removeContainer(profile.getId())
                            break
                        else:
                            # The imported composite profile has a profile for an extruder that
                            # this machine does not have. Ignore this extruder-profile.
                            break
                    else:
                        global_profile = profile
                        profile_id = (global_container_stack.getBottom().getId() + "_" + name_seed).lower().replace(" ", "_")
                    self._configureProfile(profile, profile_id, new_name)
                    profile_index += 1
                return {"status": "ok",
                        "message": catalog.i18nc("@info:status", "Successfully imported profile {0}", profile_or_list[0].getName())}
    # If it hasn't returned by now, none of the plugins loaded the profile successfully.
    return {"status": "error",
            "message": catalog.i18nc("@info:status", "Profile {0} has an unknown file type or is corrupted.", file_name)}
def log(self, msg):
    """Forward *msg* to the application log at debug level."""
    Logger.log("d", msg)
def _listen(self):
    """Background thread loop that reads and interprets the printer's serial replies.

    While connected it: polls temperatures every ~5 s, propagates firmware
    error lines, parses extruder/bed temperature and endstop reports, and
    while printing drives the g-code send queue including resend handling.
    Exits when the connection is dropped or reading fails.
    """
    Logger.log("i", "Printer connection listen thread started for %s" % self._serial_port)
    temperature_request_timeout = time.time()
    ok_timeout = time.time()
    while self._is_connected:
        line = self._readline()
        if line is None:
            break  # None is only returned when something went wrong. Stop listening.
        # Periodic temperature polling, cycling through the extruders one per request.
        if time.time() > temperature_request_timeout:
            if self._extruder_count > 0:
                self._temperature_requested_extruder_index = (self._temperature_requested_extruder_index + 1) % self._extruder_count
                self.sendCommand("M105 T%d" % (self._temperature_requested_extruder_index))
            else:
                self.sendCommand("M105")
            temperature_request_timeout = time.time() + 5
        if line.startswith(b"Error:"):
            # Oh YEAH, consistency.
            # Marlin reports a MIN/MAX temp error as "Error:x\n: Extruder switched off. MAXTEMP triggered !\n"
            # but a bed temp error as "Error: Temperature heated bed switched off. MAXTEMP triggered !!",
            # so there can be an extra newline in the most common case. Awesome work, people.
            if re.match(b"Error:[0-9]\n", line):
                line = line.rstrip() + self._readline()
            # Skip the communication errors, as those get corrected.
            if b"Extruder switched off" in line or b"Temperature heated bed switched off" in line or b"Something is wrong, please turn off the printer." in line:
                if not self.hasError():
                    self._setErrorState(line[6:])
        elif b" T:" in line or line.startswith(b"T:"):  # Temperature message
            try:
                self._setExtruderTemperature(self._temperature_requested_extruder_index, float(re.search(b"T: *([0-9\.]*)", line).group(1)))
            except:
                pass  # Malformed temperature report; ignore and wait for the next one.
            if b"B:" in line:  # Check if it's a bed temperature
                try:
                    self._setBedTemperature(float(re.search(b"B: *([0-9\.]*)", line).group(1)))
                except Exception as e:
                    pass
            # TODO: temperature changed callback
        elif b"_min" in line or b"_max" in line:
            # Endstop report, e.g. "x_min:H"; 'H' or 'TRIGGERED' means the switch is hit.
            tag, value = line.split(b':', 1)
            self._setEndstopState(tag, (b'H' in value or b'TRIGGERED' in value))
        if self._is_printing:
            if line == b"" and time.time() > ok_timeout:
                line = b"ok"  # Force a timeout (basically: send the next command).
            if b"ok" in line:
                ok_timeout = time.time() + 5
                if not self._command_queue.empty():
                    self._sendCommand(self._command_queue.get())
                else:
                    self._sendNextGcodeLine()
            elif b"resend" in line.lower() or b"rs" in line:
                # A resend can be requested with either "resend" or "rs".
                try:
                    self._gcode_position = int(line.replace(b"N:", b" ").replace(b"N", b" ").replace(b":", b" ").split()[-1])
                except:
                    if b"rs" in line:
                        self._gcode_position = int(line.split()[1])
        else:
            # Request the temperature on comm timeout (every 2 seconds) when we are not printing.
            if line == b"":
                if self._extruder_count > 0:
                    self._temperature_requested_extruder_index = (self._temperature_requested_extruder_index + 1) % self._extruder_count
                    self.sendCommand("M105 T%d" % self._temperature_requested_extruder_index)
                else:
                    self.sendCommand("M105")
    Logger.log("i", "Printer connection listen thread stopped for %s" % self._serial_port)
def getUsedExtruderStacks(self) -> List["ContainerStack"]:
    """Return the extruder stacks actually used by the current print job.

    Considers: the extruder assigned to each selectable mesh, per-feature
    "limit to extruder" settings, support (infill/first layer/bottom/roof)
    extruders when support is enabled, and the platform-adhesion extruder.
    For single-extrusion machines the global stack itself is returned.

    :return: List of used extruder ContainerStacks, or [] if any referenced
        extruder cannot be found in the registry.
    """
    global_stack = Application.getInstance().getGlobalContainerStack()
    container_registry = ContainerRegistry.getInstance()
    if global_stack.getProperty("machine_extruder_count", "value") <= 1:
        # For single extrusion.
        return [global_stack]
    used_extruder_stack_ids = set()
    # Get the extruders of all meshes in the scene.
    support_enabled = False
    support_bottom_enabled = False
    support_roof_enabled = False
    scene_root = Application.getInstance().getController().getScene().getRoot()
    # Only use the nodes that will be printed.
    meshes = [node for node in DepthFirstIterator(scene_root) if type(node) is SceneNode and node.isSelectable()]
    for mesh in meshes:
        extruder_stack_id = mesh.callDecoration("getActiveExtruder")
        if not extruder_stack_id:
            # No per-object settings for this node; it prints with extruder 0.
            extruder_stack_id = self.extruderIds["0"]
        used_extruder_stack_ids.add(extruder_stack_id)
        # Get whether any of them use support.
        stack_to_use = mesh.callDecoration("getStack")  # If there is a per-mesh stack, we use it.
        if not stack_to_use:
            # If there is no per-mesh stack, we use the build extruder for this mesh.
            stack_to_use = container_registry.findContainerStacks(id=extruder_stack_id)[0]
        support_enabled |= stack_to_use.getProperty("support_enable", "value")
        support_bottom_enabled |= stack_to_use.getProperty("support_bottom_enable", "value")
        support_roof_enabled |= stack_to_use.getProperty("support_roof_enable", "value")
    # Check limit to extruders: per-feature extruder overrides (value -1 = no override).
    limit_to_extruder_feature_list = ["wall_0_extruder_nr",
                                      "wall_x_extruder_nr",
                                      "roofing_extruder_nr",
                                      "top_bottom_extruder_nr",
                                      "infill_extruder_nr",
                                      ]
    for extruder_nr_feature_name in limit_to_extruder_feature_list:
        extruder_nr = int(global_stack.getProperty(extruder_nr_feature_name, "value"))
        if extruder_nr == -1:
            continue
        used_extruder_stack_ids.add(self.extruderIds[str(extruder_nr)])
    # Check support extruders.
    if support_enabled:
        used_extruder_stack_ids.add(self.extruderIds[str(global_stack.getProperty("support_infill_extruder_nr", "value"))])
        used_extruder_stack_ids.add(self.extruderIds[str(global_stack.getProperty("support_extruder_nr_layer_0", "value"))])
    if support_bottom_enabled:
        used_extruder_stack_ids.add(self.extruderIds[str(global_stack.getProperty("support_bottom_extruder_nr", "value"))])
    if support_roof_enabled:
        used_extruder_stack_ids.add(self.extruderIds[str(global_stack.getProperty("support_roof_extruder_nr", "value"))])
    # The platform adhesion extruder. Not used if using none.
    if global_stack.getProperty("adhesion_type", "value") != "none":
        used_extruder_stack_ids.add(self.extruderIds[str(global_stack.getProperty("adhesion_extruder_nr", "value"))])
    try:
        return [container_registry.findContainerStacks(id=stack_id)[0] for stack_id in used_extruder_stack_ids]
    except IndexError:
        # One or more of the extruders was not found.
        Logger.log("e", "Unable to find one or more of the extruders in %s", used_extruder_stack_ids)
        return []
def updateFirmware(self, file_name):
    """Start an asynchronous firmware update from the given firmware file.

    :param file_name: Path to the firmware image to flash; stored for the
        worker thread to pick up.
    """
    Logger.log("i", "Updating firmware of %s using %s", self._serial_port, file_name)
    self._firmware_file_name = file_name
    self._update_firmware_thread.start()
def _onNetworkRequestFinished(self, reply: "QNetworkReply") -> None:
    """Handle a finished discovery HTTP reply for a manually added printer.

    Two reply kinds are handled, distinguished by the request URL:
    - ".../system": basic device info; (re-)registers the device and, for
      cluster-capable firmware, fires a follow-up ".../printers" request.
    - ".../printers": cluster member list; completes the device's
      properties with the cluster size.
    Non-200 replies remove the corresponding manually added device.
    """
    reply_url = reply.url().toString()
    address = reply.url().host()
    device = None
    properties = {}  # type: Dict[bytes, bytes]
    if reply.attribute(QNetworkRequest.HttpStatusCodeAttribute) != 200:
        # Either:
        # - Something went wrong with checking the firmware version!
        # - Something went wrong with checking the amount of printers the cluster has!
        # - Couldn't find printer at the address when trying to add it manually.
        if address in self._manual_instances:
            key = "manual:" + address
            self.removeManualDevice(key, address)
        return
    if "system" in reply_url:
        try:
            system_info = json.loads(bytes(reply.readAll()).decode("utf-8"))
        except:
            Logger.log("e", "Something went wrong converting the JSON.")
            return
        if address in self._manual_instances:
            manual_printer_request = self._manual_instances[address]
            manual_printer_request.network_reply = None
            if manual_printer_request.callback is not None:
                # Notify the original caller (on the main thread) that the device responded.
                self._application.callLater(manual_printer_request.callback, True, address)
        has_cluster_capable_firmware = Version(system_info["firmware"]) > self._min_cluster_version
        instance_name = "manual:%s" % address
        properties = {
            b"name": (system_info["name"] + " (manual)").encode("utf-8"),
            b"address": address.encode("utf-8"),
            b"firmware_version": system_info["firmware"].encode("utf-8"),
            b"manual": b"true",
            b"machine": str(system_info['hardware']["typeid"]).encode("utf-8")
        }
        if has_cluster_capable_firmware:
            # Cluster needs an additional request before its registration is complete.
            properties[b"incomplete"] = b"true"
        # Check if the device is still in the list & re-add it with the updated information.
        if instance_name in self._discovered_devices:
            self._onRemoveDevice(instance_name)
            self._onAddDevice(instance_name, address, properties)
        if has_cluster_capable_firmware:
            # We need to request more info in order to figure out the size of the cluster.
            cluster_url = QUrl("http://" + address + self._cluster_api_prefix + "printers/")
            cluster_request = QNetworkRequest(cluster_url)
            self._network_manager.get(cluster_request)
    elif "printers" in reply_url:
        # So we confirmed that the device is in fact a cluster printer,
        # and we should now know how big it is.
        try:
            cluster_printers_list = json.loads(bytes(reply.readAll()).decode("utf-8"))
        except:
            Logger.log("e", "Something went wrong converting the JSON.")
            return
        instance_name = "manual:%s" % address
        if instance_name in self._discovered_devices:
            device = self._discovered_devices[instance_name]
            properties = device.getProperties().copy()
            if b"incomplete" in properties:
                del properties[b"incomplete"]
            properties[b"cluster_size"] = str(len(cluster_printers_list)).encode("utf-8")
            self._onRemoveDevice(instance_name)
            self._onAddDevice(instance_name, address, properties)
def requestWrite(self, nodes, file_name=None, filter_by_machine=False, file_handler=None, **kwargs):
    """Write the given scene nodes to this removable drive as a print file.

    :param nodes: Scene nodes to serialize.
    :param file_name: Target file name; auto-generated from the nodes when None.
    :param filter_by_machine: Ignored - always treated as True for this device.
    :param file_handler: Optional handler supplying writers; defaults to the
        application's mesh file handler.
    :raises OutputDeviceError.DeviceBusyError: if a write is already running.
    :raises OutputDeviceError.WriteRequestFailedError: if no format fits or the OS refuses the write.
    :raises OutputDeviceError.PermissionDeniedError: on a permission failure.
    """
    filter_by_machine = True  # This plugin is intended to be used by machine (regardless of what it was told to do).
    if self._writing:
        raise OutputDeviceError.DeviceBusyError()
    # Formats supported by this application (file types that we can actually write).
    if file_handler:
        file_formats = file_handler.getSupportedFileTypesWrite()
    else:
        file_formats = Application.getInstance().getMeshFileHandler().getSupportedFileTypesWrite()
    if filter_by_machine:
        container = Application.getInstance().getGlobalContainerStack().findContainer({"file_formats": "*"})
        # Create a list from the supported file formats string.
        machine_file_formats = [file_type.strip() for file_type in container.getMetaDataEntry("file_formats").split(";")]
        # Take the intersection between file_formats and machine_file_formats.
        format_by_mimetype = {format["mime_type"]: format for format in file_formats}
        # Keep them ordered according to the preference in machine_file_formats.
        file_formats = [format_by_mimetype[mimetype] for mimetype in machine_file_formats]
    if len(file_formats) == 0:
        Logger.log("e", "There are no file formats available to write with!")
        raise OutputDeviceError.WriteRequestFailedError(catalog.i18nc("@info:status", "There are no file formats available to write with!"))
    preferred_format = file_formats[0]  # Just take the first file format available.
    if file_handler is not None:
        writer = file_handler.getWriterByMimeType(preferred_format["mime_type"])
    else:
        writer = Application.getInstance().getMeshFileHandler().getWriterByMimeType(preferred_format["mime_type"])
    extension = preferred_format["extension"]
    if file_name is None:
        file_name = self._automaticFileName(nodes)
    if extension:  # Not empty string.
        extension = "." + extension
    # self.getId()[8:] strips the device-id prefix, leaving the mount path.
    file_name = os.path.join(self.getId()[8:], os.path.splitext(file_name)[0] + extension)
    try:
        Logger.log("d", "Writing to %s", file_name)
        # Using buffering greatly reduces the write time for many lines of gcode.
        if preferred_format["mode"] == FileWriter.OutputMode.TextMode:
            self._stream = open(file_name, "wt", buffering=1, encoding="utf-8")
        else:  # Binary mode.
            self._stream = open(file_name, "wb", buffering=1)
        job = WriteFileJob(writer, self._stream, nodes, preferred_format["mode"])
        job.setFileName(file_name)
        job.progress.connect(self._onProgress)
        job.finished.connect(self._onFinished)
        message = Message(catalog.i18nc("@info:progress Don't translate the XML tags <filename>!", "Saving to Removable Drive <filename>{0}</filename>").format(self.getName()), 0, False, -1, catalog.i18nc("@info:title", "Saving"))
        message.show()
        self.writeStarted.emit(self)
        job.setMessage(message)
        self._writing = True
        job.start()
    except PermissionError as e:
        Logger.log("e", "Permission denied when trying to write to %s: %s", file_name, str(e))
        raise OutputDeviceError.PermissionDeniedError(catalog.i18nc("@info:status Don't translate the XML tags <filename> or <message>!", "Could not save to <filename>{0}</filename>: <message>{1}</message>").format(file_name, str(e))) from e
    except OSError as e:
        Logger.log("e", "Operating system would not let us write to %s: %s", file_name, str(e))
        raise OutputDeviceError.WriteRequestFailedError(catalog.i18nc("@info:status Don't translate the XML tags <filename> or <message>!", "Could not save to <filename>{0}</filename>: <message>{1}</message>").format(file_name, str(e))) from e
def read(self, file_name):
    """Reads a g-code file, loading the profile from it.

    :param file_name: The name of the file to read the profile from.
    :return: The profiles that were in the specified file, if any. If the
        specified file was no g-code or contained no parsable profile,
        None is returned.
    :raises NoProfileException: when the g-code is valid but carries no
        embedded profile.
    """
    if file_name.split(".")[-1] != "gcode":
        return None
    # Profile lines are stored as ";SETTING_<version> <json fragment>" comments.
    prefix = ";SETTING_" + str(GCodeProfileReader.version) + " "
    prefix_length = len(prefix)
    # Loading all settings from the file.
    # They are all at the end, but Python has no reverse seek any more since Python 3.
    # TODO: Consider moving settings to the start?
    serialized = ""  # Will be filled with the serialized profile.
    try:
        with open(file_name, "r", encoding="utf-8") as f:
            for line in f:
                if line.startswith(prefix):
                    # Remove the prefix and the newline from the line and add it to the rest.
                    serialized += line[prefix_length:-1]
    except IOError as e:
        Logger.log("e", "Unable to open file %s for reading: %s", file_name, str(e))
        return None
    serialized = unescapeGcodeComment(serialized)
    serialized = serialized.strip()
    if not serialized:
        Logger.log("i", "No custom profile to import from this g-code: %s", file_name)
        raise NoProfileException()
    # Serialized data can be invalid JSON.
    try:
        json_data = json.loads(serialized)
    except Exception as e:
        Logger.log("e", "Could not parse serialized JSON data from g-code %s, error: %s", file_name, e)
        return None
    profiles = []
    global_profile = readQualityProfileFromString(json_data["global_quality"])
    # This is a fix for profiles created with 2.3.0: for some reason it added the
    # "extruder" property to the global profile.
    # The fix is simple and safe, as a global profile should never have the extruder entry.
    if global_profile.getMetaDataEntry("extruder", None) is not None:
        global_profile.setMetaDataEntry("extruder", None)
    profiles.append(global_profile)
    for profile_string in json_data.get("extruder_quality", []):
        profiles.append(readQualityProfileFromString(profile_string))
    return profiles
def checkCloudFlowIsPossible(self, cluster: Optional[CloudOutputDevice]) -> None:
    """Run the pre-flight checks for offering the cloud-connection flow.

    Emits cloudFlowIsPossible only when every check passes; otherwise logs
    which check failed and returns silently. The checks, in order: machine
    not already cloud-configured, user has not opted out, user is logged
    in, machine has a network connection, and firmware is recent enough.
    """
    Logger.log("d", "Checking if cloud connection is possible...")
    active_machine = self._application.getMachineManager().activeMachine  # type: Optional[GlobalStack]
    if not active_machine:
        # No active machine: nothing to offer the flow for.
        return
    # Check 1A: machine already has a cloud connection configured.
    if ConnectionType.CloudConnection.value in active_machine.configuredConnectionTypes:
        Logger.log("d", "Active machine was already configured for cloud.")
        return
    # Check 1B: a previous cloud flow already completed for this machine.
    if active_machine.getMetaDataEntry("cloud_flow_complete", False):
        Logger.log("d", "Active machine was already configured for cloud.")
        return
    # Check 2: the user clicked "Don't ask me again".
    if active_machine.getMetaDataEntry("do_not_show_cloud_message", False):
        Logger.log("d", "Active machine shouldn't ask about cloud anymore.")
        return
    # Check 3: an Ultimaker account session is active.
    if not self._account.isLoggedIn:
        Logger.log("d", "Cloud Flow not possible: User not logged in!")
        return
    # Check 4: the machine is reachable over the network.
    if not self._application.getMachineManager().activeMachineHasNetworkConnection:
        Logger.log("d", "Cloud Flow not possible: Machine is not connected!")
        return
    # Check 5: the firmware version exceeds the cloud minimum.
    firmware_version = self._application.getMachineManager().activeMachineFirmwareVersion  # type: str
    if not Version(firmware_version) > self._min_cloud_version:
        Logger.log("d", "Cloud Flow not possible: Machine firmware (%s) is too low! (Requires version %s)", firmware_version, self._min_cloud_version)
        return
    Logger.log("d", "Cloud flow is possible!")
    self.cloudFlowIsPossible.emit()
def setFilters(self, model_type: str, filter_dict: dict) -> None:
    """Apply *filter_dict* as the new filter on the model registered under
    *model_type*, then notify listeners via filterChanged. Logs a warning
    and does nothing when no such model is registered.
    """
    model = self._models[model_type]
    if model:
        model.setFilter(filter_dict)
        self.filterChanged.emit()
    else:
        Logger.log("w", "Couldn't filter %s model because it doesn't exist.", model_type)
def stop(self):
    """Shut down zeroconf discovery (when it was started) and stop the
    cloud output device manager.
    """
    zero_conf = self._zero_conf
    if zero_conf is not None:
        Logger.log("d", "zeroconf close...")
        zero_conf.close()
    self._cloud_output_device_manager.stop()
def filterModelByProp(self, model_type: str, filter_type: str, parameter: str) -> None:
    """Filter the model registered under *model_type* on a single property
    (filter_type -> parameter), then notify listeners via filterChanged.
    Logs a warning and does nothing when no such model is registered.
    """
    model = self._models[model_type]
    if model:
        model.setFilter({filter_type: parameter})
        self.filterChanged.emit()
    else:
        Logger.log("w", "Couldn't filter %s model because it doesn't exist.", model_type)
def removeFilters(self, model_type: str) -> None:
    """Clear any active filter on the model registered under *model_type*
    (by applying an empty filter), then notify listeners via filterChanged.
    Logs a warning and does nothing when no such model is registered.
    """
    model = self._models[model_type]
    if model:
        model.setFilter({})
        self.filterChanged.emit()
    else:
        Logger.log("w", "Couldn't remove filters on %s model because it doesn't exist.", model_type)