Example no. 1
 def load_workspace_from_file(self,
                              filename,
                              remember_uri=True,
                              default_content=None):
     if remember_uri:
         self.last_workspace_uri = pycam.Utils.URIHandler(filename)
         self.settings.get("set_last_filename")(filename)
     log.info("Loading workspace from file: %s", filename)
     try:
         with open_file_context(filename, "r", True) as in_file:
             content = in_file.read()
     except OSError as exc:
         if default_content:
             content = default_content
         else:
             log.error("Failed to read workspace file (%s): %s", filename,
                       exc)
             return False
     try:
         return self.load_workspace_from_description(content)
     except PycamBaseException as exc:
         log.warning(
             "Failed to load workspace description from file (%s): %s",
             filename, exc)
         if default_content:
             log.info("Falling back to default workspace due to load error")
             self.load_workspace_from_description(default_content)
         return False
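
The method above layers two fallbacks: an I/O failure falls back to default_content, while a parse failure is only reported (and the default workspace is loaded if one was supplied). A minimal, self-contained sketch of the same read-with-fallback pattern, using plain open() and json instead of the pycam helpers (all names here are illustrative, not pycam APIs):

import json
import logging

log = logging.getLogger(__name__)

def load_config(filename, default_content=None):
    """Read a JSON config file, falling back to default_content on I/O errors."""
    try:
        with open(filename, "r") as in_file:
            content = in_file.read()
    except OSError as exc:
        if default_content is None:
            log.error("Failed to read config file (%s): %s", filename, exc)
            return None
        content = default_content
    try:
        return json.loads(content)
    except ValueError as exc:
        log.warning("Failed to parse config file (%s): %s", filename, exc)
        return None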
Example no. 2
 def run(self):
     if self._is_running:
         log.warning(
             "Refusing to run main loop again, while we are running")
         return
     self._is_running = True
     try:
         self._gtk.main()
     except KeyboardInterrupt:
         pass
     self._is_running = False
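
The run() method uses _is_running as a re-entrancy guard around the GTK main loop. A small generic sketch of the same guard, written with try/finally so the flag is reset even if the wrapped loop raises something other than KeyboardInterrupt (an illustration of the pattern, not pycam code):

class LoopRunner:

    def __init__(self, loop_callable):
        self._loop = loop_callable
        self._is_running = False

    def run(self):
        if self._is_running:
            print("Refusing to run the main loop again while it is already running")
            return
        self._is_running = True
        try:
            self._loop()
        except KeyboardInterrupt:
            pass
        finally:
            # reset the guard even if the loop exits with an unexpected error
            self._is_running = False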
Example no. 3
def get_mainloop(use_gtk=False):
    """create new or return an existing mainloop

    @param use_gtk: supply Gtk with timeslots for event handling (active if this parameter is True
        at least once)
    """
    try:
        mainloop = __mainloop[0]
    except IndexError:
        try:
            mainloop = GtkMainLoop()
        except ImportError:
            log.warning("No event loop is available")
            mainloop = None
        __mainloop.append(mainloop)
    return mainloop
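
get_mainloop() caches the single main loop instance in the module-level list __mainloop: an empty list means "not created yet", and the IndexError raised by __mainloop[0] triggers the one-time construction (even a failed construction is cached as None, so the warning is emitted only once). A self-contained sketch of this lazy-singleton idiom with generic names:

_cache = []

def get_shared_resource(factory):
    """Create the shared object on first use; afterwards return the cached instance."""
    try:
        resource = _cache[0]
    except IndexError:
        try:
            resource = factory()
        except RuntimeError:
            # construction failed - cache None so we do not retry on every call
            resource = None
        _cache.append(resource)
    return resource

loop = get_shared_resource(list)            # first call creates the object
assert loop is get_shared_resource(list)    # later calls return the same one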
Example no. 4
 def load_preferences(self):
     """ load all settings (see Preferences window) from a file in the user's home directory """
     config = ConfigParser()
     try:
         with pycam.Gui.Settings.open_preferences_file() as in_file:
             config.read_file(in_file)
     except FileNotFoundError as exc:
         log.info(
             "No preferences file found (%s). Starting with default preferences.",
             exc)
     except OSError as exc:
         log.error("Failed to read preferences: %s", exc)
         return
     # report any ignored (obsolete) preference keys present in the file
     for item, value in config.items("DEFAULT"):
         if item not in PREFERENCES_DEFAULTS:
             log.warning("Skipping obsolete preference item: %s", item)
     for item in PREFERENCES_DEFAULTS:
         if not config.has_option("DEFAULT", item):
             # a new preference setting is missing in the (old) file
             continue
         value_json = config.get("DEFAULT", item)
         try:
             value = json.loads(value_json)
         except ValueError as exc:
             log.warning("Failed to parse configuration setting '%s': %s",
                         item, exc)
             value = PREFERENCES_DEFAULTS[item]
         wanted_type = type(PREFERENCES_DEFAULTS[item])
         if wanted_type is float:
             # int is accepted for floats, too
             wanted_type = (float, int)
         if not isinstance(value, wanted_type):
             log.warning(
                 "Falling back to default configuration setting for '%s' due to "
                 "an invalid value type being parsed: %s != %s", item,
                 type(value), wanted_type)
             value = PREFERENCES_DEFAULTS[item]
         self.settings.set(item, value)
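
Every preference is stored as a JSON-encoded string in the ConfigParser file and validated against the type of its default before being applied. A short, self-contained sketch of that round trip; the PREFERENCES_DEFAULTS dictionary below is a stand-in with made-up keys, not pycam's real table:

import json

PREFERENCES_DEFAULTS = {"unit": "mm", "tool_progress_max_fps": 30.0}

def parse_preference(item, value_json):
    """Decode a JSON-encoded preference, falling back to the default on errors."""
    try:
        value = json.loads(value_json)
    except ValueError:
        return PREFERENCES_DEFAULTS[item]
    wanted_type = type(PREFERENCES_DEFAULTS[item])
    if wanted_type is float:
        # an int is accepted where a float is expected
        wanted_type = (float, int)
    if not isinstance(value, wanted_type):
        return PREFERENCES_DEFAULTS[item]
    return value

print(parse_preference("tool_progress_max_fps", "25"))   # -> 25 (int accepted for a float default)
print(parse_preference("unit", "42"))                    # -> 'mm' (type mismatch, default used)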
Example no. 5
def generate_toolpath(model, tool_settings=None,
        bounds=None, direction="x",
        path_generator="DropCutter", path_postprocessor="ZigZagCutter",
        material_allowance=0, overlap_percent=0, step_down=0, engrave_offset=0,
        milling_style="ignore", pocketing_type="none",
        support_grid_type=None, support_grid_distance_x=None,
        support_grid_distance_y=None, support_grid_thickness=None,
        support_grid_height=None, support_grid_offset_x=None,
        support_grid_offset_y=None, support_grid_adjustments_x=None,
        support_grid_adjustments_y=None, support_grid_average_distance=None,
        support_grid_minimum_bridges=None, support_grid_length=None,
        calculation_backend=None, callback=None):
    """ abstract interface for generating a toolpath

    @type model: pycam.Geometry.Model.Model
    @value model: a model containing surface triangles or a contour
    @type tool_settings: dict
    @value tool_settings: contains at least the following keys (depending on
        the tool type):
        "shape": any of possible cutter shape (see "pycam.Cutters")
        "tool_radius": main radius of the tools
        "torus_radius": (only for ToroidalCutter) second toroidal radius
    @type bounds: pycam.Toolpath.Bounds | None
    @value bounds: the processing boundary (used for the center of the tool);
        if None, the bounding box of the model is used
        (absolute limits order: minx, miny, minz / maxx, maxy, maxz)
    @type direction: str
    @value direction: any member of the DIRECTIONS set (e.g. "x", "y" or "xy")
    @type path_generator: str
    @value path_generator: any member of the PATH_GENERATORS set
    @type path_postprocessor: str
    @value path_postprocessor: any member of the PATH_POSTPROCESSORS set
    @type material_allowance: float
    @value material_allowance: the minimum distance between the tool and the model
    @type overlap_percent: int
    @value overlap_percent: the overlap between two adjacent tool paths (0..100) given in percent
    @type step_down: float
    @value step_down: maximum height of each layer (for PushCutter)
    @type engrave_offset: float
    @value engrave_offset: toolpath distance to the contour model
    @type support_grid_distance_x: float
    @value support_grid_distance_x: distance between support grid lines along x
    @type support_grid_distance_y: float
    @value support_grid_distance_y: distance between support grid lines along y
    @type support_grid_thickness: float
    @value support_grid_thickness: thickness of the support grid
    @type support_grid_height: float
    @value support_grid_height: height of the support grid
    @type support_grid_offset_x: float
    @value support_grid_offset_x: shift the support grid by this value along x
    @type support_grid_offset_y: float
    @value support_grid_offset_y: shift the support grid by this value along y
    @type support_grid_adjustments_x: list(float)
    @value support_grid_adjustments_x: manual adjustment of each x-grid bar
    @type support_grid_adjustments_y: list(float)
    @value support_grid_adjustments_y: manual adjustment of each y-grid bar
    @type calculation_backend: str | None
    @value calculation_backend: any member of the CALCULATION_BACKENDS set
        The default is the triangular collision detection.
    @rtype: pycam.Toolpath.Toolpath | str
    @return: the resulting toolpath object or an error string in case of invalid
        arguments
    """
    log.debug("Starting toolpath generation")
    step_down = number(step_down)
    engrave_offset = number(engrave_offset)
    if bounds is None:
        # no bounds were given - we use the boundaries of the model
        bounds = pycam.Toolpath.Bounds(pycam.Toolpath.Bounds.TYPE_CUSTOM,
                (model.minx, model.miny, model.minz),
                (model.maxx, model.maxy, model.maxz))
    bounds_low, bounds_high = bounds.get_absolute_limits()
    minx, miny, minz = [number(value) for value in bounds_low]
    maxx, maxy, maxz = [number(value) for value in bounds_high]
    # trimesh model or contour model?
    if isinstance(model, pycam.Geometry.Model.ContourModel):
        # contour model
        trimesh_models = []
        contour_model = model
    else:
        # trimesh model
        trimesh_models = [model]
        contour_model = None
    # Due to some weirdness the height of the drill must be bigger than the
    # object's size. Otherwise some collisions are not detected.
    cutter_height = 4 * abs(maxz - minz)
    cutter = pycam.Cutters.get_tool_from_settings(tool_settings, cutter_height)
    if isinstance(cutter, basestring):
        return cutter
    if path_generator not in ("EngraveCutter", "ContourFollow"):
        # material allowance is not available for these two strategies
        cutter.set_required_distance(material_allowance)
    # create the grid model if requested
    if (support_grid_type == "grid") \
            and ((support_grid_distance_x is not None) \
            or (support_grid_distance_y is not None)) \
            and (support_grid_thickness is not None):
        # grid height defaults to the thickness
        if support_grid_height is None:
            support_grid_height = support_grid_thickness
        if (support_grid_distance_x < 0) or (support_grid_distance_y < 0):
            return "The distance of the support grid must be a positive value"
        if not ((support_grid_distance_x > 0) or (support_grid_distance_y > 0)):
            return "Both distance values for the support grid may not be " \
                    + "zero at the same time"
        if support_grid_thickness <= 0:
            return "The thickness of the support grid must be a positive value"
        if support_grid_height <= 0:
            return "The height of the support grid must be a positive value"
        if callback is not None:
            callback(text="Preparing support grid model ...")
        support_grid_model = pycam.Toolpath.SupportGrid.get_support_grid(
                minx, maxx, miny, maxy, minz, support_grid_distance_x,
                support_grid_distance_y, support_grid_thickness,
                support_grid_height, offset_x=support_grid_offset_x,
                offset_y=support_grid_offset_y,
                adjustments_x=support_grid_adjustments_x,
                adjustments_y=support_grid_adjustments_y)
        trimesh_models.append(support_grid_model)
    elif (support_grid_type in ("distributed_edges", "distributed_corners")) \
            and (support_grid_average_distance is not None) \
            and (support_grid_thickness is not None) \
            and (support_grid_length is not None):
        if support_grid_height is None:
            support_grid_height = support_grid_thickness
        if support_grid_minimum_bridges is None:
            support_grid_minimum_bridges = 2
        if support_grid_average_distance <= 0:
            return "The average support grid distance must be a positive value"
        if support_grid_minimum_bridges <= 0:
            return "The minimum number of bridged per polygon must be a " \
                    + "positive value"
        if support_grid_thickness <= 0:
            return "The thickness of the support grid must be a positive value"
        if support_grid_height <= 0:
            return "The height of the support grid must be a positive value"
        if callback is not None:
            callback(text="Preparing support grid model ...")
        # check which model to choose
        if contour_model:
            model = contour_model
        else:
            model = trimesh_models[0]
        start_at_corners = (support_grid_type == "distributed_corners")
        support_grid_model = pycam.Toolpath.SupportGrid.get_support_distributed(
                model, minz, support_grid_average_distance,
                support_grid_minimum_bridges, support_grid_thickness,
                support_grid_height, support_grid_length,
                bounds, start_at_corners=start_at_corners)
        trimesh_models.append(support_grid_model)
    elif (not support_grid_type) or (support_grid_type == "none"):
        pass
    else:
        return "Invalid support grid type selected: %s" % support_grid_type
    # Adapt the contour_model to the engraving offset. This offset is
    # considered to be part of the material_allowance.
    if contour_model and (engrave_offset != 0):
        if callback is not None:
            callback(text="Preparing contour model with offset ...")
        contour_model = contour_model.get_offset_model(engrave_offset,
                callback=callback)
        if not contour_model:
            return "Failed to calculate offset polygons"
        if callback is not None:
            # reset percentage counter after the contour model calculation
            callback(percent=0)
            if callback(text="Checking contour model with offset for " \
                    + "collisions ..."):
                # quit requested
                return None
            progress_callback = ProgressCounter(
                    len(contour_model.get_polygons()), callback).increment
        else:
            progress_callback = None
        result = contour_model.check_for_collisions(callback=progress_callback)
        if result is None:
            return None
        elif result:
            warning = "The contour model contains colliding line groups. " + \
                    "This can cause problems with an engraving offset.\n" + \
                    "A collision was detected at (%.2f, %.2f, %.2f)." % \
                    (result.x, result.y, result.z)
            log.warning(warning)
        else:
            # no collisions and no user interruption
            pass
    # check the pocketing type
    if contour_model and (pocketing_type != "none"):
        if callback is not None:
            callback(text="Generating pocketing polygons ...")
        pocketing_offset = cutter.radius * 1.8
        # TODO: this is an arbitrary limit to avoid infinite loops
        pocketing_limit = 1000
        base_polygons = []
        other_polygons = []
        if pocketing_type == "holes":
            # fill polygons with negative area
            for poly in contour_model.get_polygons():
                if poly.is_closed and not poly.is_outer():
                    base_polygons.append(poly)
                else:
                    other_polygons.append(poly)
        elif pocketing_type == "enclosed":
            # fill polygons with positive area
            pocketing_offset *= -1
            for poly in contour_model.get_polygons():
                if poly.is_closed and poly.is_outer():
                    base_polygons.append(poly)
                else:
                    other_polygons.append(poly)
        else:
            return "Unknown pocketing type given (not one of 'none', " + \
                    "'holes', 'enclosed'): %s" % str(pocketing_type)
        # For now we use only the polygons that do not surround any other
        # polygons. Sorry - the pocketing is currently very simple ...
        base_filtered_polygons = []
        for candidate in base_polygons:
            if callback and callback():
                return "Interrupted"
            for other in other_polygons:
                if candidate.is_polygon_inside(other):
                    break
            else:
                base_filtered_polygons.append(candidate)
        # start the pocketing for all remaining polygons
        pocket_polygons = []
        for base_polygon in base_filtered_polygons:
            current_queue = [base_polygon]
            next_queue = []
            pocket_depth = 0
            while current_queue and (pocket_depth < pocketing_limit):
                if callback and callback():
                    return "Interrupted"
                for poly in current_queue:
                    result = poly.get_offset_polygons(pocketing_offset)
                    pocket_polygons.extend(result)
                    next_queue.extend(result)
                    pocket_depth += 1
                current_queue = next_queue
                next_queue = []
        # use a copy instead of the original
        contour_model = contour_model.get_copy()
        for pocket in pocket_polygons:
            contour_model.append(pocket)
    # limit the contour model to the bounding box
    if contour_model:
        # use minz/maxz of the contour model (in other words: ignore z)
        contour_model = contour_model.get_cropped_model(minx, maxx, miny, maxy,
                contour_model.minz, contour_model.maxz)
        if not contour_model:
            return "No part of the contour model is within the bounding box."
    physics = _get_physics(trimesh_models, cutter, calculation_backend)
    if isinstance(physics, basestring):
        return physics
    generator = _get_pathgenerator_instance(trimesh_models, contour_model,
            cutter, path_generator, path_postprocessor, physics,
            milling_style)
    if isinstance(generator, basestring):
        return generator
    overlap = overlap_percent / 100.0
    if (overlap < 0) or (overlap >= 1):
        return "Invalid overlap value (%f): should be greater or equal 0 " \
                + "and lower than 1"
    # factor "2" since we are based on radius instead of diameter
    line_stepping = 2 * number(tool_settings["tool_radius"]) * (1 - overlap)
    if path_generator == "PushCutter":
        step_width = None
    else:
        # the step_width is only used for the DropCutter
        step_width = tool_settings["tool_radius"] / 4
    if path_generator == "DropCutter":
        layer_distance = None
    else:
        layer_distance = step_down
    direction_dict = {"x": pycam.Toolpath.MotionGrid.GRID_DIRECTION_X,
            "y": pycam.Toolpath.MotionGrid.GRID_DIRECTION_Y,
            "xy": pycam.Toolpath.MotionGrid.GRID_DIRECTION_XY}
    milling_style_grid = {
            "ignore": pycam.Toolpath.MotionGrid.MILLING_STYLE_IGNORE,
            "conventional": pycam.Toolpath.MotionGrid.MILLING_STYLE_CONVENTIONAL,
            "climb": pycam.Toolpath.MotionGrid.MILLING_STYLE_CLIMB}
    if path_generator in ("DropCutter", "PushCutter"):
        motion_grid = pycam.Toolpath.MotionGrid.get_fixed_grid(bounds,
                layer_distance, line_stepping, step_width=step_width,
                grid_direction=direction_dict[direction],
                milling_style=milling_style_grid[milling_style])
        if path_generator == "DropCutter":
            toolpath = generator.GenerateToolPath(motion_grid, minz, maxz,
                    callback)
        else:
            toolpath = generator.GenerateToolPath(motion_grid, callback)
    elif path_generator == "EngraveCutter":
        if step_down > 0:
            dz = step_down
        else:
            dz = maxz - minz
        toolpath = generator.GenerateToolPath(minz, maxz, step_width, dz,
                callback)
    elif path_generator == "ContourFollow":
        if step_down > 0:
            dz = step_down
        else:
            dz = maxz - minz
            if dz <= 0:
                dz = 1
        toolpath = generator.GenerateToolPath(minx, maxx, miny, maxy, minz,
                maxz, dz, callback)
    elif path_generator == "Contour2dCutter": # JULIEN
        toolpath = generator.GenerateToolPath(callback)
    else:
        return "Invalid path generator (%s): not one of %s" \
                % (path_generator, PATH_GENERATORS)
    return toolpath
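
A minimal call sketch for this variant of generate_toolpath(). It assumes a triangle-mesh model object is already available (loading it is outside this snippet), the tool_settings keys follow the docstring above, and the shape name "CylindricalCutter" is an assumption about the names accepted by pycam.Cutters; the string-on-error return convention is the one documented above:

# hypothetical setup: `model` is assumed to be a pycam.Geometry.Model.Model instance
tool_settings = {"shape": "CylindricalCutter",   # assumed shape name (see pycam.Cutters)
                 "tool_radius": 1.5}
toolpath = generate_toolpath(model, tool_settings=tool_settings,
                             path_generator="DropCutter",
                             path_postprocessor="ZigZagCutter",
                             overlap_percent=60, material_allowance=0.2,
                             milling_style="ignore")
if isinstance(toolpath, str):
    # invalid arguments are reported as an error string (basestring under Python 2)
    log.error("Toolpath generation failed: %s", toolpath)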
Example no. 6
def generate_toolpath(model,
                      tool_settings=None,
                      bounds=None,
                      direction="x",
                      path_generator="DropCutter",
                      path_postprocessor="ZigZagCutter",
                      material_allowance=0,
                      overlap_percent=0,
                      step_down=0,
                      engrave_offset=0,
                      milling_style="ignore",
                      pocketing_type="none",
                      support_model=None,
                      calculation_backend=None,
                      callback=None):
    """ abstract interface for generating a toolpath

    @type model: pycam.Geometry.Model.Model
    @value model: a model containing surface triangles or a contour
    @type tool_settings: dict
    @value tool_settings: contains at least the following keys (depending on
        the tool type):
        "shape": any of possible cutter shape (see "pycam.Cutters")
        "tool_radius": main radius of the tools
        "torus_radius": (only for ToroidalCutter) second toroidal radius
    @type bounds: pycam.Toolpath.Bounds | None
    @value bounds: the processing boundary (used for the center of the tool);
        if None, the bounding box of the model is used
        (absolute limits order: minx, miny, minz / maxx, maxy, maxz)
    @type direction: str
    @value direction: any member of the DIRECTIONS set (e.g. "x", "y" or "xy")
    @type path_generator: str
    @value path_generator: any member of the PATH_GENERATORS set
    @type path_postprocessor: str
    @value path_postprocessor: any member of the PATH_POSTPROCESSORS set
    @type material_allowance: float
    @value material_allowance: the minimum distance between the tool and the model
    @type overlap_percent: int
    @value overlap_percent: the overlap between two adjacent tool paths (0..100) given in percent
    @type step_down: float
    @value step_down: maximum height of each layer (for PushCutter)
    @type engrave_offset: float
    @value engrave_offset: toolpath distance to the contour model
    @type calculation_backend: str | None
    @value calculation_backend: any member of the CALCULATION_BACKENDS set
        The default is the triangular collision detection.
    @rtype: pycam.Toolpath.Toolpath | str
    @return: the resulting toolpath object or an error string in case of invalid
        arguments
    """
    log.debug("Starting toolpath generation")
    step_down = number(step_down)
    engrave_offset = number(engrave_offset)
    if bounds is None:
        # no bounds were given - we use the boundaries of the model
        bounds = pycam.Toolpath.Bounds(pycam.Toolpath.Bounds.TYPE_CUSTOM,
                                       (model.minx, model.miny, model.minz),
                                       (model.maxx, model.maxy, model.maxz))
    bounds_low, bounds_high = bounds.get_absolute_limits()
    minx, miny, minz = [number(value) for value in bounds_low]
    maxx, maxy, maxz = [number(value) for value in bounds_high]
    # trimesh model or contour model?
    if isinstance(model, pycam.Geometry.Model.ContourModel):
        # contour model
        trimesh_models = []
        contour_model = model
    else:
        # trimesh model
        trimesh_models = [model]
        contour_model = None
    # Due to some weirdness the height of the drill must be bigger than the
    # object's size. Otherwise some collisions are not detected.
    cutter_height = 4 * abs(maxz - minz)
    cutter = pycam.Cutters.get_tool_from_settings(tool_settings, cutter_height)
    if isinstance(cutter, basestring):
        return cutter
    if path_generator not in ("EngraveCutter", "ContourFollow"):
        # material allowance is not available for these two strategies
        cutter.set_required_distance(material_allowance)
    # create the grid model if requested
    if support_model:
        trimesh_models.append(support_model)
    # Adapt the contour_model to the engraving offset. This offset is
    # considered to be part of the material_allowance.
    if contour_model and (engrave_offset != 0):
        if callback is not None:
            callback(text="Preparing contour model with offset ...")
        contour_model = contour_model.get_offset_model(engrave_offset,
                                                       callback=callback)
        if not contour_model:
            return "Failed to calculate offset polygons"
        if callback is not None:
            # reset percentage counter after the contour model calculation
            callback(percent=0)
            if callback(text="Checking contour model with offset for " \
                    + "collisions ..."):
                # quit requested
                return None
            progress_callback = ProgressCounter(
                len(contour_model.get_polygons()), callback).increment
        else:
            progress_callback = None
        result = contour_model.check_for_collisions(callback=progress_callback)
        if result is None:
            return None
        elif result:
            warning = "The contour model contains colliding line groups. " + \
                    "This can cause problems with an engraving offset.\n" + \
                    "A collision was detected at (%.2f, %.2f, %.2f)." % \
                    (result.x, result.y, result.z)
            log.warning(warning)
        else:
            # no collisions and no user interruption
            pass
    # check the pocketing type
    if contour_model and (pocketing_type != "none"):
        if callback is not None:
            callback(text="Generating pocketing polygons ...")
        pocketing_offset = cutter.radius * 1.8
        # TODO: this is an arbitrary limit to avoid infinite loops
        pocketing_limit = 1000
        base_polygons = []
        other_polygons = []
        if pocketing_type == "holes":
            # fill polygons with negative area
            for poly in contour_model.get_polygons():
                if poly.is_closed and not poly.is_outer():
                    base_polygons.append(poly)
                else:
                    other_polygons.append(poly)
        elif pocketing_type == "enclosed":
            # fill polygons with positive area
            pocketing_offset *= -1
            for poly in contour_model.get_polygons():
                if poly.is_closed and poly.is_outer():
                    base_polygons.append(poly)
                else:
                    other_polygons.append(poly)
        else:
            return "Unknown pocketing type given (not one of 'none', " + \
                    "'holes', 'enclosed'): %s" % str(pocketing_type)
        # For now we use only the polygons that do not surround any other
        # polygons. Sorry - the pocketing is currently very simple ...
        base_filtered_polygons = []
        for candidate in base_polygons:
            if callback and callback():
                return "Interrupted"
            for other in other_polygons:
                if candidate.is_polygon_inside(other):
                    break
            else:
                base_filtered_polygons.append(candidate)
        # start the pocketing for all remaining polygons
        pocket_polygons = []
        for base_polygon in base_filtered_polygons:
            current_queue = [base_polygon]
            next_queue = []
            pocket_depth = 0
            while current_queue and (pocket_depth < pocketing_limit):
                if callback and callback():
                    return "Interrupted"
                for poly in current_queue:
                    result = poly.get_offset_polygons(pocketing_offset)
                    pocket_polygons.extend(result)
                    next_queue.extend(result)
                    pocket_depth += 1
                current_queue = next_queue
                next_queue = []
        # use a copy instead of the original
        contour_model = contour_model.get_copy()
        for pocket in pocket_polygons:
            contour_model.append(pocket)
    # limit the contour model to the bounding box
    if contour_model:
        # use minz/maxz of the contour model (in other words: ignore z)
        contour_model = contour_model.get_cropped_model(
            minx, maxx, miny, maxy, contour_model.minz, contour_model.maxz)
        if not contour_model:
            return "No part of the contour model is within the bounding box."
    physics = _get_physics(trimesh_models, cutter, calculation_backend)
    if isinstance(physics, basestring):
        return physics
    generator = _get_pathgenerator_instance(trimesh_models, contour_model,
                                            cutter, path_generator,
                                            path_postprocessor, physics,
                                            milling_style)
    if isinstance(generator, basestring):
        return generator
    overlap = overlap_percent / 100.0
    if (overlap < 0) or (overlap >= 1):
        return "Invalid overlap value (%f): should be greater or equal 0 " \
                + "and lower than 1"
    # factor "2" since we are based on radius instead of diameter
    line_stepping = 2 * number(tool_settings["tool_radius"]) * (1 - overlap)
    if path_generator == "PushCutter":
        step_width = None
    else:
        # the step_width is only used for the DropCutter
        step_width = tool_settings["tool_radius"] / 4
    if path_generator == "DropCutter":
        layer_distance = None
    else:
        layer_distance = step_down
    direction_dict = {
        "x": pycam.Toolpath.MotionGrid.GRID_DIRECTION_X,
        "y": pycam.Toolpath.MotionGrid.GRID_DIRECTION_Y,
        "xy": pycam.Toolpath.MotionGrid.GRID_DIRECTION_XY
    }
    milling_style_grid = {
        "ignore": pycam.Toolpath.MotionGrid.MILLING_STYLE_IGNORE,
        "conventional": pycam.Toolpath.MotionGrid.MILLING_STYLE_CONVENTIONAL,
        "climb": pycam.Toolpath.MotionGrid.MILLING_STYLE_CLIMB
    }
    if path_generator in ("DropCutter", "PushCutter"):
        motion_grid = pycam.Toolpath.MotionGrid.get_fixed_grid(
            (bounds_low, bounds_high),
            layer_distance,
            line_stepping,
            step_width=step_width,
            grid_direction=direction_dict[direction],
            milling_style=milling_style_grid[milling_style])
        if path_generator == "DropCutter":
            toolpath = generator.GenerateToolPath(motion_grid, minz, maxz,
                                                  callback)
        else:
            toolpath = generator.GenerateToolPath(motion_grid, callback)
    elif path_generator == "EngraveCutter":
        if step_down > 0:
            dz = step_down
        else:
            dz = maxz - minz
        toolpath = generator.GenerateToolPath(minz, maxz, step_width, dz,
                                              callback)
    elif path_generator == "ContourFollow":
        if step_down > 0:
            dz = step_down
        else:
            dz = maxz - minz
            if dz <= 0:
                dz = 1
        toolpath = generator.GenerateToolPath(minx, maxx, miny, maxy, minz,
                                              maxz, dz, callback)
    else:
        return "Invalid path generator (%s): not one of %s" \
                % (path_generator, PATH_GENERATORS)
    return toolpath
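
Both generate_toolpath() variants filter the pocketing candidates with Python's for ... else idiom: the else branch of the inner loop runs only when the loop finished without hitting break, i.e. only when no other polygon was found inside the candidate. A tiny self-contained illustration of that idiom with plain numbers:

candidates = [2, 3, 4, 9, 25]
blockers = [3, 5]
kept = []
for candidate in candidates:
    for blocker in blockers:
        if candidate % blocker == 0:
            # a blocker matched - skip this candidate
            break
    else:
        # no break happened: the candidate passed every check
        kept.append(candidate)
print(kept)  # -> [2, 4]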
Example no. 7
def init_threading(number_of_processes=None,
                   enable_server=False,
                   remote=None,
                   run_server=False,
                   server_credentials="",
                   local_port=DEFAULT_PORT):
    global __multiprocessing, __num_of_processes, __manager, __closing, \
            __task_source_uuid
    if __multiprocessing:
        # kill the manager and clean everything up for a re-initialization
        cleanup()
    if (not is_server_mode_available()) and (enable_server or run_server):
        # server mode is disabled for the Windows pyinstaller standalone
        # due to "pickle errors". How to reproduce: run the standalone binary
        # with "--enable-server --server-auth-key foo".
        feature_matrix_text = "Take a look at the wiki for a matrix of " + \
                "platforms and available features: " + \
                "http://sf.net/apps/mediawiki/pycam/?title=" + \
                "Parallel_Processing_on_different_Platforms"
        if enable_server:
            log.warn("Unable to enable server mode with your current " + \
                    "setup.\n" + feature_matrix_text)
        elif run_server:
            log.warn("Unable to run in server-only mode with the Windows " + \
                    "standalone executable.\n" + feature_matrix_text)
        else:
            # no further warnings required
            pass
        enable_server = False
        run_server = False
    # only local -> no server settings allowed
    if (not enable_server) and (not run_server):
        remote = None
        run_server = None
        server_credentials = ""
    try:
        import multiprocessing
        mp_is_available = True
    except ImportError:
        mp_is_available = False
    if not mp_is_available:
        __multiprocessing = False
        # Maybe a multiprocessing feature was explicitly requested?
        # Issue some warnings if necessary.
        multiprocessing_missing_text = "Please use Python 2.6 or install " \
                + "the 'python-multiprocessing' package."
        if enable_server:
            log.warn("Failed to enable server mode due to a lack of " \
                    + "'multiprocessing' capabilities. " \
                    + multiprocessing_missing_text)
        elif run_server:
            log.warn("Failed to run in server-only mode due to a lack of " \
                    + "'multiprocessing' capabilities. " \
                    + multiprocessing_missing_text)
        else:
            # no further warnings required
            pass
    else:
        if number_of_processes is None:
            # use defaults
            # don't enable threading for a single cpu
            if (multiprocessing.cpu_count() > 1) or remote or run_server or \
                    enable_server:
                __multiprocessing = multiprocessing
                __num_of_processes = multiprocessing.cpu_count()
            else:
                __multiprocessing = False
        elif (number_of_processes < 1) and (remote is None) and \
                (enable_server is None):
            # Zero processes are allowed if we use a remote server or offer a
            # server.
            __multiprocessing = False
        else:
            __multiprocessing = multiprocessing
            __num_of_processes = number_of_processes
    # initialize the manager
    if not __multiprocessing:
        __manager = None
        log.info("Disabled parallel processing")
    elif not enable_server and not run_server:
        __manager = None
        log.info("Enabled %d parallel local processes" % __num_of_processes)
    else:
        # with multiprocessing
        log.info("Enabled %d parallel local processes" % __num_of_processes)
        log.info("Allow remote processing")
        # initialize the uuid list for all workers
        worker_uuid_list = [
            str(uuid.uuid1()) for index in range(__num_of_processes)
        ]
        __task_source_uuid = str(uuid.uuid1())
        if remote is None:
            # try to guess an appropriate interface for binding
            if pycam.Utils.get_platform() == pycam.Utils.PLATFORM_WINDOWS:
                # Windows does not support a wildcard interface listener
                all_ips = pycam.Utils.get_all_ips()
                if all_ips:
                    address = (all_ips[0], local_port)
                    log.info("Binding to local interface with IP %s" % \
                            str(all_ips[0]))
                else:
                    return "Failed to find any local IP"
            else:
                # empty hostname -> wildcard interface
                # (this does not work with Windows - see above)
                address = ('', local_port)
        else:
            if ":" in remote:
                host, port = remote.split(":", 1)
                try:
                    port = int(port)
                except ValueError:
                    log.warning(("Invalid port specified: '%s' - using " + \
                            "default port (%d) instead") % \
                            (port, DEFAULT_PORT))
                    port = DEFAULT_PORT
            else:
                host = remote
                port = DEFAULT_PORT
            address = (host, port)
        if remote is None:
            tasks_queue = multiprocessing.Queue()
            results_queue = multiprocessing.Queue()
            statistics = ProcessStatistics()
            cache = ProcessDataCache()
            pending_tasks = PendingTasks()
            info = ManagerInfo(tasks_queue, results_queue, statistics, cache,
                               pending_tasks)
            TaskManager.register("tasks", callable=info.get_tasks_queue)
            TaskManager.register("results", callable=info.get_results_queue)
            TaskManager.register("statistics", callable=info.get_statistics)
            TaskManager.register("cache", callable=info.get_cache)
            TaskManager.register("pending_tasks",
                                 callable=info.get_pending_tasks)
        else:
            TaskManager.register("tasks")
            TaskManager.register("results")
            TaskManager.register("statistics")
            TaskManager.register("cache")
            TaskManager.register("pending_tasks")
        __manager = TaskManager(address=address, authkey=server_credentials)
        # run the local server, connect to a remote one or begin serving
        try:
            if remote is None:
                __manager.start()
                log.info("Started a local server.")
            else:
                __manager.connect()
                log.info("Connected to a remote task server.")
        except (multiprocessing.AuthenticationError, socket.error) as err_msg:
            __manager = None
            return err_msg
        except EOFError:
            __manager = None
            return "Failed to bind to socket for unknown reasons"
Example no. 8
def init_threading(number_of_processes=None, enable_server=False, remote=None, run_server=False,
                   server_credentials="", local_port=DEFAULT_PORT):
    global __multiprocessing, __num_of_processes, __manager, __closing, __task_source_uuid
    if __multiprocessing:
        # kill the manager and clean everything up for a re-initialization
        cleanup()
    if (not is_server_mode_available()) and (enable_server or run_server):
        # server mode is disabled for the Windows pyinstaller standalone
        # due to "pickle errors". How to reproduce: run the standalone binary
        # with "--enable-server --server-auth-key foo".
        feature_matrix_text = ("Take a look at the wiki for a matrix of platforms and available "
                               "features: http://pycam.sourceforge.net/parallel-processing")
        if enable_server:
            log.warn("Unable to enable server mode with your current setup.\n%s",
                     feature_matrix_text)
        elif run_server:
            log.warn("Unable to run in server-only mode with the Windows standalone "
                     "executable.\n%s", feature_matrix_text)
        else:
            # no further warnings required
            pass
        enable_server = False
        run_server = False
    # only local -> no server settings allowed
    if (not enable_server) and (not run_server):
        remote = None
        run_server = None
        server_credentials = ""
    if not is_multiprocessing_available():
        __multiprocessing = False
        # Maybe a multiprocessing feature was explicitly requested?
        # Issue some warnings if necessary.
        multiprocessing_missing_text = (
            "Please use Python 2.6 or install the 'python-multiprocessing' package.")
        if enable_server:
            log.warning("Failed to enable server mode due to a lack of 'multiprocessing' "
                        "capabilities. %s", multiprocessing_missing_text)
        elif run_server:
            log.warning("Failed to run in server-only mode due to a lack of 'multiprocessing' "
                        "capabilities. %s", multiprocessing_missing_text)
        else:
            # no further warnings required
            pass
    else:
        import multiprocessing
        if number_of_processes is None:
            # use defaults
            # don't enable threading for a single cpu
            if (multiprocessing.cpu_count() > 1) or remote or run_server or enable_server:
                __multiprocessing = multiprocessing
                __num_of_processes = multiprocessing.cpu_count()
            else:
                __multiprocessing = False
        elif (number_of_processes < 1) and (remote is None) and (enable_server is None):
            # Zero processes are allowed if we use a remote server or offer a
            # server.
            __multiprocessing = False
        else:
            __multiprocessing = multiprocessing
            __num_of_processes = number_of_processes
    # initialize the manager
    if not __multiprocessing:
        __manager = None
        log.info("Disabled parallel processing")
    elif not enable_server and not run_server:
        __manager = None
        log.info("Enabled %d parallel local processes", __num_of_processes)
    else:
        # with multiprocessing
        log.info("Enabled %d parallel local processes", __num_of_processes)
        log.info("Allow remote processing")
        # initialize the uuid list for all workers
        worker_uuid_list = [str(uuid.uuid1()) for index in range(__num_of_processes)]
        __task_source_uuid = str(uuid.uuid1())
        if remote is None:
            # try to guess an appropriate interface for binding
            if pycam.Utils.get_platform() == pycam.Utils.OSPlatform.WINDOWS:
                # Windows does not support a wildcard interface listener
                all_ips = pycam.Utils.get_all_ips()
                if all_ips:
                    address = (all_ips[0], local_port)
                    log.info("Binding to local interface with IP %s", str(all_ips[0]))
                else:
                    raise CommunicationError("Failed to find any local IP")
            else:
                # empty hostname -> wildcard interface
                # (this does not work with Windows - see above)
                address = ('', local_port)
        else:
            if ":" in remote:
                host, port = remote.split(":", 1)
                try:
                    port = int(port)
                except ValueError:
                    log.warning("Invalid port specified: '%s' - using default port (%d) instead",
                                port, DEFAULT_PORT)
                    port = DEFAULT_PORT
            else:
                host = remote
                port = DEFAULT_PORT
            address = (host, port)
        if remote is None:
            tasks_queue = multiprocessing.Queue()
            results_queue = multiprocessing.Queue()
            statistics = ProcessStatistics()
            cache = ProcessDataCache()
            pending_tasks = PendingTasks()
            info = ManagerInfo(tasks_queue, results_queue, statistics, cache, pending_tasks)
            TaskManager.register("tasks", callable=info.get_tasks_queue)
            TaskManager.register("results", callable=info.get_results_queue)
            TaskManager.register("statistics", callable=info.get_statistics)
            TaskManager.register("cache", callable=info.get_cache)
            TaskManager.register("pending_tasks", callable=info.get_pending_tasks)
        else:
            TaskManager.register("tasks")
            TaskManager.register("results")
            TaskManager.register("statistics")
            TaskManager.register("cache")
            TaskManager.register("pending_tasks")
        __manager = TaskManager(address=address, authkey=server_credentials)
        # run the local server, connect to a remote one or begin serving
        try:
            if remote is None:
                __manager.start()
                log.info("Started a local server.")
            else:
                __manager.connect()
                log.info("Connected to a remote task server.")
        except (multiprocessing.AuthenticationError, socket.error) as err_msg:
            __manager = None
            return err_msg
        except EOFError:
            __manager = None
            raise CommunicationError("Failed to bind to socket for unknown reasons")
        # create the spawning process
        __closing = __manager.Value("b", False)
        if __num_of_processes > 0:
            # only start the spawner, if we want to use local workers
            spawner = __multiprocessing.Process(name="spawn", target=_spawn_daemon,
                                                args=(__manager, __num_of_processes,
                                                      worker_uuid_list))
            spawner.start()
        else:
            spawner = None
        # wait forever - in case of a server
        if run_server:
            log.info("Running a local server and waiting for remote connections.")
            # the server can be stopped via CTRL-C - it is caught later
            if spawner is not None:
                spawner.join()
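
The spawner at the end of this variant is a plain multiprocessing.Process that keeps the local workers supplied with tasks; in server-only mode the caller then just blocks on join(). A self-contained sketch of that start/join shape with a trivial stand-in for _spawn_daemon (whose internals are not reproduced here):

import multiprocessing
import time

def _fake_spawn_daemon(number_of_workers):
    """Stand-in supervisor: the real _spawn_daemon feeds the worker pool."""
    for index in range(number_of_workers):
        print("would supervise worker", index)
    time.sleep(1)

if __name__ == "__main__":
    spawner = multiprocessing.Process(name="spawn", target=_fake_spawn_daemon, args=(4,))
    spawner.start()
    # in server mode the caller blocks here until the spawner exits (or CTRL-C)
    spawner.join()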
Example no. 9
def init_threading(number_of_processes=None, enable_server=False, remote=None,
        run_server=False, server_credentials="", local_port=DEFAULT_PORT):
    global __multiprocessing, __num_of_processes, __manager, __closing, \
            __task_source_uuid
    if __multiprocessing:
        # kill the manager and clean everything up for a re-initialization
        cleanup()
    if (not is_server_mode_available()) and (enable_server or run_server):
        # server mode is disabled for the Windows pyinstaller standalone
        # due to "pickle errors". How to reproduce: run the standalone binary
        # with "--enable-server --server-auth-key foo".
        feature_matrix_text = "Take a look at the wiki for a matrix of " + \
                "platforms and available features: " + \
                "http://sf.net/apps/mediawiki/pycam/?title=" + \
                "Parallel_Processing_on_different_Platforms"
        if enable_server:
            log.warn("Unable to enable server mode with your current " + \
                    "setup.\n" + feature_matrix_text)
        elif run_server:
            log.warn("Unable to run in server-only mode with the Windows " + \
                    "standalone executable.\n" + feature_matrix_text)
        else:
            # no further warnings required
            pass
        enable_server = False
        run_server = False
    # only local -> no server settings allowed
    if (not enable_server) and (not run_server):
        remote = None
        run_server = None
        server_credentials = ""
    try:
        import multiprocessing
        mp_is_available = True
    except ImportError:
        mp_is_available = False
    if not mp_is_available:
        __multiprocessing = False
        # Maybe a multiprocessing feature was explicitly requested?
        # Issue some warnings if necessary.
        multiprocessing_missing_text = "Please use Python 2.6 or install " \
                + "the 'python-multiprocessing' package."
        if enable_server:
            log.warn("Failed to enable server mode due to a lack of " \
                    + "'multiprocessing' capabilities. " \
                    + multiprocessing_missing_text)
        elif run_server:
            log.warn("Failed to run in server-only mode due to a lack of " \
                    + "'multiprocessing' capabilities. " \
                    + multiprocessing_missing_text)
        else:
            # no further warnings required
            pass
    else:
        if number_of_processes is None:
            # use defaults
            # don't enable threading for a single cpu
            if (multiprocessing.cpu_count() > 1) or remote or run_server or \
                    enable_server:
                __multiprocessing = multiprocessing
                __num_of_processes = multiprocessing.cpu_count()
            else:
                __multiprocessing = False
        elif (number_of_processes < 1) and (remote is None) and \
                (enable_server is None):
            # Zero processes are allowed if we use a remote server or offer a
            # server.
            __multiprocessing = False
        else:
            __multiprocessing = multiprocessing
            __num_of_processes = number_of_processes
    # initialize the manager
    if not __multiprocessing:
        __manager = None
        log.info("Disabled parallel processing")
    elif not enable_server and not run_server:
        __manager = None
        log.info("Enabled %d parallel local processes" % __num_of_processes)
    else:
        # with multiprocessing
        log.info("Enabled %d parallel local processes" % __num_of_processes)
        log.info("Allow remote processing")
        # initialize the uuid list for all workers
        worker_uuid_list = [str(uuid.uuid1())
                for index in range(__num_of_processes)]
        __task_source_uuid = str(uuid.uuid1())
        if remote is None:
            # try to guess an appropriate interface for binding
            if pycam.Utils.get_platform() == pycam.Utils.PLATFORM_WINDOWS:
                # Windows does not support a wildcard interface listener
                all_ips = pycam.Utils.get_all_ips()
                if all_ips:
                    address = (all_ips[0], local_port)
                    log.info("Binding to local interface with IP %s" % \
                            str(all_ips[0]))
                else:
                    return "Failed to find any local IP"
            else:
                # empty hostname -> wildcard interface
                # (this does not work with Windows - see above)
                address = ('', local_port)
        else:
            if ":" in remote:
                host, port = remote.split(":", 1)
                try:
                    port = int(port)
                except ValueError:
                    log.warning(("Invalid port specified: '%s' - using " + \
                            "default port (%d) instead") % \
                            (port, DEFAULT_PORT))
                    port = DEFAULT_PORT
            else:
                host = remote
                port = DEFAULT_PORT
            address = (host, port)
        if remote is None:
            tasks_queue = multiprocessing.Queue()
            results_queue = multiprocessing.Queue()
            statistics = ProcessStatistics()
            cache = ProcessDataCache()
            pending_tasks = PendingTasks()
            info = ManagerInfo(tasks_queue, results_queue, statistics, cache,
                    pending_tasks)
            TaskManager.register("tasks", callable=info.get_tasks_queue)
            TaskManager.register("results", callable=info.get_results_queue)
            TaskManager.register("statistics", callable=info.get_statistics)
            TaskManager.register("cache", callable=info.get_cache)
            TaskManager.register("pending_tasks",
                    callable=info.get_pending_tasks)
        else:
            TaskManager.register("tasks")
            TaskManager.register("results")
            TaskManager.register("statistics")
            TaskManager.register("cache")
            TaskManager.register("pending_tasks")
        __manager = TaskManager(address=address, authkey=server_credentials)
        # run the local server, connect to a remote one or begin serving
        try:
            if remote is None:
                __manager.start()
                log.info("Started a local server.")
            else:
                __manager.connect()
                log.info("Connected to a remote task server.")
        except (multiprocessing.AuthenticationError, socket.error) as err_msg:
            __manager = None
            return err_msg
        except EOFError:
            __manager = None
            return "Failed to bind to socket for unknown reasons"