Example #1
 def __init__(self, waterlines=False, physics=None):
     if physics is None:
         log.debug("Starting PushCutter (without ODE)")
     else:
         log.debug("Starting PushCutter (with ODE)")
     self.physics = physics
     self.waterlines = waterlines
Example #2
 def call_chain(self, name, *args, **kwargs):
     if name in self.chains:
         for data in self.chains[name]:
             data.func(*args, **kwargs)
     else:
         # this may happen during startup
         log.debug("Called an unknown chain: %s", name)
Example #3
def _cleanup_job(job_id, tasks_queue, pending_tasks, finished_jobs):
    # flush the task queue
    try:
        queue_len = tasks_queue.qsize()
    except NotImplementedError:
        # this can happen on MacOS (according to the multiprocessing doc)
        # -> no cleanup of old processes
        queue_len = 0
    # remove all remaining tasks with the current job id
    removed_job_counter = 0
    for index in range(queue_len):
        try:
            this_job_id, task_id, func, args = tasks_queue.get(timeout=0.1)
        except Queue.Empty:
            break
        if this_job_id != job_id:
            tasks_queue.put((this_job_id, task_id, func, args))
        else:
            removed_job_counter += 1
    if removed_job_counter > 0:
        log.debug("Removed %d remaining tasks for %s" %
                  (removed_job_counter, job_id))
    # remove all stale tasks
    pending_tasks.remove(job_id)
    # limit the number of stored finished jobs
    finished_jobs.append(job_id)
    while len(finished_jobs) > 30:
        finished_jobs.pop(0)
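The flush loop above leans on two documented quirks of multiprocessing queues: qsize() may raise NotImplementedError (e.g. on macOS) and is only a snapshot, so non-matching items are re-queued instead of the queue being rebuilt. A generic restatement of that pattern as a standalone helper (hypothetical, for illustration):

import queue

def drain_matching(task_queue, predicate, timeout=0.1):
    # remove items for which predicate(item) is true; re-queue the rest
    try:
        snapshot_len = task_queue.qsize()
    except NotImplementedError:
        # e.g. multiprocessing.Queue.qsize() on macOS
        snapshot_len = 0
    removed = 0
    for _ in range(snapshot_len):
        try:
            item = task_queue.get(timeout=timeout)
        except queue.Empty:
            break
        if predicate(item):
            removed += 1
        else:
            task_queue.put(item)
    return removed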
Example #4
 def get_waterline_contour(self, plane, callback=None):
     collision_lines = []
     progress_max = 2 * len(self._triangles)
     counter = 0
     for t in self._triangles:
         if callback and callback(percent=100.0 * counter / progress_max):
             return
         collision_line = plane.intersect_triangle(t,
                                                   counter_clockwise=True)
         if collision_line is not None:
             collision_lines.append(collision_line)
         else:
             counter += 1
         counter += 1
     # combine these lines into polygons
     contour = ContourModel(plane=plane)
     for line in collision_lines:
         if callback and callback(percent=100.0 * counter / progress_max):
             return
         contour.append(line)
         counter += 1
     log.debug("Waterline: %f - %d - %s", plane.p[2],
               len(contour.get_polygons()),
               [len(p.get_lines()) for p in contour.get_polygons()])
     return contour
Example #5
 def unregister_ui_section(self, section):
     if section in self.ui_sections:
         ui_section = self.ui_sections[section]
         while ui_section[UI_WIDGET_INDEX]:
             ui_section[UI_WIDGET_INDEX].pop()
         del self.ui_sections[section]
     else:
         log.debug("Trying to unregister a non-existent ui section: %s" % \
                 str(section))
Example #6
 def unblock_event(self, event):
     if event in self.event_handlers:
         if self.event_handlers[event][EVENT_BLOCKER_INDEX] > 0:
             self.event_handlers[event][EVENT_BLOCKER_INDEX] -= 1
         else:
             log.debug("Trying to unblock non-blocked event '%s'" % \
                     str(event))
     else:
         log.debug("Trying to unblock an unknown event: %s" % str(event))
Example #7
 def unblock_event(self, event):
     if event in self.event_handlers:
         if self.event_handlers[event].blocker_tokens:
             log.debug2("Unblocking an event: %s", event)
             self.event_handlers[event].blocker_tokens.pop()
         else:
             log.debug("Trying to unblock non-blocked event '%s'", event)
     else:
         log.info("Trying to unblock an unknown event: %s", event)
Example #8
 def __init__(self, path_processor, physics=None):
     if physics is None:
         log.debug("Starting PushCutter (without ODE)")
     else:
         log.debug("Starting PushCutter (with ODE)")
     self.pa = path_processor
     self.physics = physics
     # check if we use a PolygonExtractor
     self._use_polygon_extractor = hasattr(self.pa, "polygon_extractor")
Example #9
 def add_point(p_array, bulge):
     # fill all "None" values with zero
     for index in range(len(p_array)):
         if p_array[index] is None:
             if (index == 0) or (index == 1):
                 log.debug("DXFImporter: weird LWPOLYLINE input " + \
                         "date in line %d: %s" % \
                         (self.line_number, p_array))
             p_array[index] = 0
     points.append((Point(p_array[0], p_array[1], p_array[2]), bulge))
Example #10
 def add_point(p_array, bulge):
     # fill all "None" values with zero
     for index in range(len(p_array)):
         if p_array[index] is None:
             if (index == 0) or (index == 1):
                 log.debug(
                     "DXFImporter: weird LWPOLYLINE input date in line %d: %s",
                     self.line_number, p_array)
             p_array[index] = 0
     points.append(((p_array[0], p_array[1], p_array[2]), bulge))
Example #11
 def unregister_chain(self, name, func):
     if name in self.chains:
         for index, data in enumerate(self.chains[name]):
             if data[CHAIN_FUNC_INDEX] == func:
                 self.chains[name].pop(index)
                 break
         else:
             log.debug("Trying to unregister unknown function from " + \
                     "%s: %s" % (name, func))
     else:
         log.debug("Trying to unregister from unknown chain: %s" % name)
Example #12
 def _store_undo_state(self):
     # for now we only store the model
     if not self.settings.get("models"):
         return
     # TODO: store all models
     self._undo_states.append(pickle.dumps(
             self.settings.get("models")[0].model, PICKLE_PROTOCOL))
     log.debug("Stored the current state of the model for undo")
     while len(self._undo_states) > MAX_UNDO_STATES:
         self._undo_states.pop(0)
     self.gui.get_object("UndoButton").set_sensitive(True)
Example #13
 def emit_event(self, event, *args, **kwargs):
     log.debug2("Event emitted: %s", event)
     if event in self.event_handlers:
         if self.event_handlers[event].blocker_tokens:
             log.debug2("Ignoring blocked event: %s", event)
             return
         # prevent infinite recursion
         with self.blocked_events({event}):
             for handler in self.event_handlers[event].handlers:
                 handler.func(*(handler.args + args), **kwargs)
     else:
         log.debug("No events registered for event '%s'", event)
Example #14
 def unregister_event(self, event, func):
     if event in self.event_handlers:
         removal_list = []
         handlers = self.event_handlers[event]
         for index, item in enumerate(handlers[EVENT_HANDLER_INDEX]):
             if func == item[HANDLER_FUNC_INDEX]:
                 removal_list.append(index)
         removal_list.reverse()
         for index in removal_list:
             handlers[EVENT_HANDLER_INDEX].pop(index)
     else:
         log.debug("Trying to unregister an unknown event: %s" % event)
Example #15
 def store_undo_state(self):
     # for now we only store the model
     if not self.settings.get("models"):
         return
     # TODO: store all models
     self._undo_states.append(
         pickle.dumps(
             self.settings.get("models")[0].model, PICKLE_PROTOCOL))
     log.debug("Stored the current state of the model for undo")
     while len(self._undo_states) > MAX_UNDO_STATES:
         self._undo_states.pop(0)
     self.settings.emit_event("undo-states-changed")
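The while/pop loop keeps the undo history bounded; collections.deque achieves the same through its maxlen argument (a sketch, not the project's code):

import collections

MAX_UNDO_STATES = 10  # illustrative limit

undo_states = collections.deque(maxlen=MAX_UNDO_STATES)
for state in range(15):
    undo_states.append(state)  # the oldest entries are dropped automatically
assert len(undo_states) == MAX_UNDO_STATES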
Example #16
 def emit_event(self, event, *args, **kwargs):
     log.debug2("Event emitted: %s", str(event))
     if event in self.event_handlers:
         if self.event_handlers[event].blocker_tokens:
             return
         # prevent infinite recursion
         self.block_event(event)
         for handler in self.event_handlers[event].handlers:
             handler.func(*(handler.args + args), **kwargs)
         self.unblock_event(event)
     else:
         log.debug("No events registered for event '%s'", str(event))
Example #17
def get_icons_pixbuffers():
    result = []
    for icon_filename in WINDOW_ICON_FILENAMES:
        abs_filename = get_ui_file_location(icon_filename, silent=True)
        if abs_filename:
            try:
                result.append(gtk.gdk.pixbuf_new_from_file(abs_filename))
            except gobject.GError, err_msg:
                # ignore icons that are not found
                log.debug("Failed to process window icon (%s): %s" \
                        % (abs_filename, err_msg))
        else:
            log.debug("Failed to locate window icon: %s" % icon_filename)
    return result
Example #18
 def get_waterline_contour(self, plane):
     collision_lines = []
     for t in self._triangles:
         collision_line = plane.intersect_triangle(t, counter_clockwise=True)
         if not collision_line is None:
             collision_lines.append(collision_line)
     # combine these lines into polygons
     contour = ContourModel(plane=plane)
     for line in collision_lines:
         contour.append(line)
     log.debug("Waterline: %f - %d - %s" % (plane.p.z,
             len(contour.get_polygons()),
             [len(p.get_lines()) for p in contour.get_polygons()]))
     return contour
Example #19
 def emit_event(self, event, *args, **kwargs):
     log.debug2("Event emitted: %s" % str(event))
     if event in self.event_handlers:
         if self.event_handlers[event][EVENT_BLOCKER_INDEX] != 0:
             return
         # prevent infinite recursion
         self.block_event(event)
         for handler in self.event_handlers[event][EVENT_HANDLER_INDEX]:
             func = handler[HANDLER_FUNC_INDEX]
             data = handler[HANDLER_ARG_INDEX]
             func(*(data + args), **kwargs)
         self.unblock_event(event)
     else:
         log.debug("No events registered for event '%s'" % str(event))
Example #20
 def unblock_event(self, event, disable_log=False):
     if event in self.event_handlers:
         if self.event_handlers[event].blocker_tokens:
             self.event_handlers[event].blocker_tokens.pop()
             if not disable_log:
                 log.debug2(
                     "Unblocking an event: %s (%d blockers remaining)",
                     event, len(self.event_handlers[event].blocker_tokens))
         else:
             if not disable_log:
                 log.debug("Trying to unblock non-blocked event '%s'",
                           event)
     else:
         # "disable_log" is only relevant for the debugging messages above
         log.info("Trying to unblock an unknown event: %s", event)
Example #21
 def unregister_ui(self, section, widget):
     if (section in self.ui_sections) or (None in self.ui_sections):
         if not section in self.ui_sections:
             section = None
         ui_section = self.ui_sections[section]
         removal_list = []
         for index, item in enumerate(ui_section[UI_WIDGET_INDEX]):
             if item[WIDGET_OBJ_INDEX] == widget:
                 removal_list.append(index)
         removal_list.reverse()
         for index in removal_list:
             ui_section[UI_WIDGET_INDEX].pop(index)
         self._rebuild_ui_section(section)
     else:
         log.debug("Trying to unregister unknown ui section: %s" % section)
Example #22
def _spawn_daemon(manager, number_of_processes, worker_uuid_list):
    """ wait for items in the 'tasks' queue to appear and then spawn workers
    """
    global __multiprocessing, __closing
    tasks = manager.tasks()
    results = manager.results()
    stats = manager.statistics()
    cache = manager.cache()
    pending_tasks = manager.pending_tasks()
    log.debug("Spawner daemon started with %d processes" % number_of_processes)
    log.debug("Registering %d worker threads: %s" \
            % (len(worker_uuid_list), worker_uuid_list))
    last_cache_update = time.time()
    # use only the hostname (for brevity) - no domain part
    hostname = platform.node().split(".", 1)[0]
    try:
        while not __closing.get():
            # check the expire timeout of the cache from time to time
            if last_cache_update + 30 < time.time():
                cache.expire_cache_items()
                last_cache_update = time.time()
            if not tasks.empty():
                workers = []
                for task_id in worker_uuid_list:
                    task_name = "%s-%s" % (hostname, task_id)
                    worker = __multiprocessing.Process(
                        name=task_name,
                        target=_handle_tasks,
                        args=(tasks, results, stats, cache, pending_tasks,
                              __closing))
                    worker.start()
                    workers.append(worker)
                # wait until all workers are finished
                for worker in workers:
                    worker.join()
            else:
                time.sleep(1.0)
    except KeyboardInterrupt:
        log.info("Spawner daemon killed by keyboard interrupt")
        # set the "closing" flag and just exit
        try:
            __closing.set(True)
        except (IOError, EOFError):
            pass
    except (IOError, EOFError):
        # the connection was closed
        log.info("Spawner daemon lost connection to server")
Example #23
 def is_point_inside(self, p):
     """ Test if a given point is inside of the polygon.
     The result is True if the point is on a line (or very close to it).
     """
     if not self.is_closed:
         return False
     # First: check if the point is within the boundary of the polygon.
     if not pis_inside(p, self.minx, self.maxx, self.miny, self.maxy,
                       self.minz, self.maxz):
         # the point is outside the rectangle boundary
         return False
     # see http://www.alienryderflex.com/polygon/
     # Count the number of intersections of a ray along the x axis through
     # all polygon lines.
     # Odd number -> point is inside
     intersection_count_left = 0
     intersection_count_right = 0
     for index in range(len(self._points)):
         p1 = self._points[index]
         p2 = self._points[(index + 1) % len(self._points)]
         # Only count intersections with lines that are partly below
         # the y level of the point. This solves the problem of intersections
         # through shared vertices or lines that go along the y level of the
         # point.
         if ((p1[1] < p[1]) and (p[1] <= p2[1])) \
                 or ((p2[1] < p[1]) and (p[1] <= p1[1])):
             part_y = (p[1] - p1[1]) / (p2[1] - p1[1])
             intersection_x = p1[0] + part_y * (p2[0] - p1[0])
             if intersection_x < p[0] + epsilon:
                 # count intersections to the left
                 intersection_count_left += 1
             if intersection_x > p[0] - epsilon:
                 # count intersections to the right
                 intersection_count_right += 1
     # odd intersection count -> inside
     left_odd = intersection_count_left % 2 == 1
     right_odd = intersection_count_right % 2 == 1
     if left_odd and right_odd:
         # clear decision: we are inside
         return True
     elif not left_odd and not right_odd:
         # clear decision: we are outside
         return False
     else:
         # it seems like we are on the line -> inside
         log.debug("polygon.is_point_inside: unclear decision")
         return True
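The double left/right count above exists to classify points sitting (almost) on an edge; the textbook even-odd test uses a single ray and offers no such boundary handling. A standalone version for comparison:

def point_in_polygon_2d(point, vertices):
    # even-odd rule with one ray towards +x; boundary points are unreliable
    x, y = point
    inside = False
    for index in range(len(vertices)):
        x1, y1 = vertices[index]
        x2, y2 = vertices[(index + 1) % len(vertices)]
        # only consider edges crossing the ray's y level (half-open test)
        if (y1 < y <= y2) or (y2 < y <= y1):
            intersection_x = x1 + (y - y1) / (y2 - y1) * (x2 - x1)
            if intersection_x > x:
                inside = not inside
    return inside

print(point_in_polygon_2d((0.5, 0.5), [(0, 0), (1, 0), (1, 1), (0, 1)]))  # True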
Example #24
 def _rebuild_ui_section(self, section):
     if section in self.ui_sections:
         ui_section = self.ui_sections[section]
         if ui_section[UI_FUNC_INDEX]:
             add_func, clear_func = ui_section[UI_FUNC_INDEX]
             ui_section[UI_WIDGET_INDEX].sort(
                 key=lambda x: x[WIDGET_WEIGHT_INDEX])
             clear_func()
             for item in ui_section[UI_WIDGET_INDEX]:
                 if item[WIDGET_ARGS_INDEX]:
                     args = item[WIDGET_ARGS_INDEX]
                 else:
                     args = {}
                 add_func(item[WIDGET_OBJ_INDEX], item[WIDGET_NAME_INDEX],
                          **args)
     else:
         log.debug("Failed to rebuild unknown ui section: %s", str(section))
Example #25
 def _rebuild_ui_section(self, section):
     if section in self.ui_sections:
         ui_section = self.ui_sections[section]
         if ui_section[UI_FUNC_INDEX]:
             add_func, clear_func = ui_section[UI_FUNC_INDEX]
             ui_section[UI_WIDGET_INDEX].sort(
                     key=lambda x: x[WIDGET_WEIGHT_INDEX])
             clear_func()
             for item in ui_section[UI_WIDGET_INDEX]:
                 if item[WIDGET_ARGS_INDEX]:
                     args = item[WIDGET_ARGS_INDEX]
                 else:
                     args = {}
                 add_func(item[WIDGET_OBJ_INDEX], item[WIDGET_NAME_INDEX],
                         **args)
     else:
         log.debug("Failed to rebuild unknown ui section: %s" % str(section))
Example #26
 def register_ui(self, section, name, widget, weight=0, args_dict=None):
     if not section in self.ui_sections:
         self.ui_sections[section] = [None, None]
         self.ui_sections[section][UI_WIDGET_INDEX] = []
     assert WIDGET_NAME_INDEX == 0
     assert WIDGET_OBJ_INDEX == 1
     assert WIDGET_WEIGHT_INDEX == 2
     assert WIDGET_ARGS_INDEX == 3
     current_widgets = [item[1]
             for item in self.ui_sections[section][UI_WIDGET_INDEX]]
     if (not widget is None) and (widget in current_widgets):
         log.debug("Tried to register widget twice: %s -> %s" % \
                 (section, name))
         return
     self.ui_sections[section][UI_WIDGET_INDEX].append((name, widget,
             weight, args_dict))
     self._rebuild_ui_section(section)
Example #27
 def emit_event(self, event):
     log.debug2("Event emitted: %s", event)
     if event in self.event_handlers:
         self.event_handlers[event].statistics["emitted"] += 1
         if self.event_handlers[event].blocker_tokens:
             self.event_handlers[event].statistics["blocked"] += 1
             log.debug2("Ignoring blocked event: %s", event)
         else:
             # prevent infinite recursion
             with self.blocked_events({event}, disable_log=True):
                 self.event_handlers[event].statistics["handled"] += 1
                 for handler in self.event_handlers[event].handlers:
                     log.debug2("Calling event handler: %s", handler)
                     if isinstance(handler, str):
                         # event names are acceptable
                         self.emit_event(handler)
                     else:
                         handler()
     else:
         log.debug("No events registered for event '%s'", event)
Example #28
def cleanup():
    global __multiprocessing, __manager, __closing
    if __multiprocessing and __closing:
        log.debug("Shutting down process handler")
        try:
            __closing.set(True)
        except (IOError, EOFError):
            log.debug("Connection to manager lost during cleanup")
        # Only managers that were started via ".start()" implement a "shutdown".
        # Managers started via ".connect" may skip this.
        if hasattr(__manager, "shutdown"):
            # wait for the spawner and the worker threads to go down
            time.sleep(2.5)
            #__manager.shutdown()
            time.sleep(0.1)
            # check if it is still alive and kill it if necessary
            if __manager._process.is_alive():
                __manager._process.terminate()
    __manager = None
    __closing = None
    __multiprocessing = None
Example #29
 def parse_content(self):
     key, value = self._read_key_value()
     while (key is not None) and not ((key == self.KEYS["MARKER"]) and
                                      (value == "EOF")):
         if self.callback and self.callback():
             return
         if key == self.KEYS["MARKER"]:
             if value in ("SECTION", "TABLE", "LAYER", "ENDTAB", "ENDSEC"):
                 # we don't handle this meta-information
                 pass
             elif value == "LINE":
                 self.parse_line()
             elif value == "LWPOLYLINE":
                 self.parse_lwpolyline()
             elif value == "POLYLINE":
                 self.parse_polyline(True)
             elif value == "VERTEX":
                 self.parse_vertex()
             elif value == "SEQEND":
                 self.close_sequence()
             elif value == "ARC":
                 self.parse_arc()
             elif value == "CIRCLE":
                 self.parse_arc(circle=True)
             elif value == "TEXT":
                 self.parse_text()
             elif value == "MTEXT":
                 self.parse_mtext()
             elif value == "3DFACE":
                 self.parse_3dface()
             elif value in self.IGNORE_KEYS:
                 log.debug(
                     "DXFImporter: Ignored a blacklisted element in line %d: %s",
                     self.line_number, value)
             else:
                 # not supported
                 log.warn(
                     "DXFImporter: Ignored unsupported element in line %d: %s",
                     self.line_number, value)
         key, value = self._read_key_value()
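The elif ladder maps each DXF marker value to one parser method; the same routing can be written as a dispatch table (a sketch assuming the method names above, with functools.partial covering the parameterized cases):

import functools

def build_marker_dispatch(parser):
    return {
        "LINE": parser.parse_line,
        "LWPOLYLINE": parser.parse_lwpolyline,
        "POLYLINE": functools.partial(parser.parse_polyline, True),
        "VERTEX": parser.parse_vertex,
        "SEQEND": parser.close_sequence,
        "ARC": parser.parse_arc,
        "CIRCLE": functools.partial(parser.parse_arc, circle=True),
        "TEXT": parser.parse_text,
        "MTEXT": parser.parse_mtext,
        "3DFACE": parser.parse_3dface,
    }

# usage inside the loop:
#     handler = build_marker_dispatch(self).get(value)
#     if handler is not None:
#         handler()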
Example #30
 def parse_content(self):
     key, value = self._read_key_value()
     while (not key is None) \
             and not ((key == self.KEYS["MARKER"]) and (value == "EOF")):
         if self.callback and self.callback():
             return
         if key == self.KEYS["MARKER"]:
             if value in ("SECTION", "TABLE", "LAYER", "ENDTAB", "ENDSEC"):
                 # we don't handle this meta-information
                 pass
             elif value == "LINE":
                 self.parse_line()
             elif value == "LWPOLYLINE":
                 self.parse_lwpolyline()
             elif value == "POLYLINE":
                 self.parse_polyline(True)
             elif value == "VERTEX":
                 self.parse_vertex()
             elif value == "SEQEND":
                 self.close_sequence()
             elif value == "ARC":
                 self.parse_arc()
             elif value == "CIRCLE":
                 self.parse_arc(circle=True)
             elif value == "TEXT":
                 self.parse_text()
             elif value == "MTEXT":
                 self.parse_mtext()
             elif value == "3DFACE":
                 self.parse_3dface()
             elif value in self.IGNORE_KEYS:
                 log.debug("DXFImporter: Ignored a blacklisted element " \
                         + "in line %d: %s" % (self.line_number, value))
             else:
                 # not supported
                 log.warn("DXFImporter: Ignored unsupported element " \
                         + "in line %d: %s" % (self.line_number, value))
         key, value = self._read_key_value()
Example #31
 def get_waterline_contour(self, plane, callback=None):
     collision_lines = []
     progress_max = 2 * len(self._triangles)
     counter = 0
     for t in self._triangles:
         if callback and callback(percent=100.0 * counter / progress_max):
             return
         collision_line = plane.intersect_triangle(t, counter_clockwise=True)
         if not collision_line is None:
             collision_lines.append(collision_line)
         else:
             counter += 1
         counter += 1
     # combine these lines into polygons
     contour = ContourModel(plane=plane)
     for line in collision_lines:
         if callback and callback(percent=100.0 * counter / progress_max):
             return
         contour.append(line)
         counter += 1
     log.debug("Waterline: %f - %d - %s" % (plane.p[2],
             len(contour.get_polygons()),
             [len(p.get_lines()) for p in contour.get_polygons()]))
     return contour
Example #32
def retrieve_cached_download(storage_filename, download_url):
    """ retrieve the full filename of a locally cached download

    @throws OSError in case of any problems (download or data storage)
    @returns absolute filename
    """
    # this may raise an OSError
    cache_dir = get_cache_directory()
    full_filename = os.path.join(cache_dir, storage_filename)
    if os.path.exists(full_filename):
        log.debug("Use cached file (%s) instead of downloading '%s'", full_filename, download_url)
    else:
        log.info("Downloading '%s' to '%s'", download_url, full_filename)
        # download the file
        temporary_filename = full_filename + ".part"
        # remove the file if it was left there in a previous attempt
        try:
            os.remove(temporary_filename)
        except OSError:
            pass
        # this may raise an HTTP-related error (inherited from OSError)
        urllib.request.urlretrieve(download_url, temporary_filename)
        os.rename(temporary_filename, full_filename)
    return full_filename
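A typical call, with made-up values (get_cache_directory decides where the file ends up):

try:
    local_path = retrieve_cached_download(
        "fonts.zip", "https://example.org/downloads/fonts.zip")
except OSError as exc:
    log.error("Download failed: %s", exc)
else:
    log.debug("Using local file: %s", local_path)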
Example #33
 def stop(self):
     if self._is_running:
         log.debug("Stopping main loop")
         self._gtk.main_quit()
     else:
         log.info("Main loop was stopped before")
Example #34
 def revise_directions(self, callback=None):
     """ Go through all open polygons and try to merge them regardless of
     their direction. Afterwards all closed polygons are analyzed regarding
     their inside/outside relationships.
     Beware: never use this function if the direction of lines may not
     change.
     """
     number_of_initial_closed_polygons = len([poly
             for poly in self.get_polygons() if poly.is_closed])
     open_polygons = [poly for poly in self.get_polygons()
             if not poly.is_closed]
     if callback:
         progress_callback = pycam.Utils.ProgressCounter(
                 2 * number_of_initial_closed_polygons + len(open_polygons),
                 callback).increment
     else:
         progress_callback = None
     # try to connect all open polygons
     for poly in open_polygons:
         self._line_groups.remove(poly)
     poly_open_before = len(open_polygons)
     for poly in open_polygons:
         for line in poly.get_lines():
             self.append(line, allow_reverse=True)
         if progress_callback and progress_callback():
             return
     poly_open_after = len([poly for poly in self.get_polygons()
             if not poly.is_closed])
     if poly_open_before != poly_open_after:
         log.info("Reduced the number of open polygons from " + \
                 "%d down to %d" % (poly_open_before, poly_open_after))
     else:
         log.debug("No combineable open polygons found")
     # auto-detect directions of closed polygons: inside and outside
     finished = []
     remaining_polys = [poly for poly in self.get_polygons()
             if poly.is_closed]
     if progress_callback:
         # shift the counter back by the number of new closed polygons
         progress_callback(2 * (number_of_initial_closed_polygons - \
                 len(remaining_polys)))
     remaining_polys.sort(key=lambda poly: abs(poly.get_area()))
     while remaining_polys:
         # pick the largest polygon
         current = remaining_polys.pop()
         # start with the smallest finished polygon
         for comp, is_outer in finished:
             if comp.is_polygon_inside(current):
                 finished.insert(0, (current, not is_outer))
                 break
         else:
             # no enclosing polygon was found
             finished.insert(0, (current, True))
         if progress_callback and progress_callback():
             return
     # Adjust the directions of all polygons according to the result
     # of the previous analysis.
     change_counter = 0
     for polygon, is_outer in finished:
         if polygon.is_outer() != is_outer:
             polygon.reverse_direction()
             change_counter += 1
         if progress_callback and progress_callback():
             self.reset_cache()
             return
     log.info("The winding of %d polygon(s) was fixed." % change_counter)
     self.reset_cache()
Example #35
 def block_event(self, event):
     if event in self.event_handlers:
         self.event_handlers[event][EVENT_BLOCKER_INDEX] += 1
     else:
         log.debug("Trying to block an unknown event: %s" % str(event))
Example #36
def run_in_parallel_remote(func, args_list, unordered=False,
        disable_multiprocessing=False, callback=None):
    global __multiprocessing, __num_of_processes, __manager, \
            __task_source_uuid, __finished_jobs
    if __multiprocessing is None:
        # threading was not configured before
        init_threading()
    if __multiprocessing and not disable_multiprocessing:
        job_id = str(uuid.uuid1())
        log.debug("Starting parallel tasks: %s" % job_id)
        tasks_queue = __manager.tasks()
        results_queue = __manager.results()
        remote_cache = __manager.cache()
        stats = __manager.statistics()
        pending_tasks = __manager.pending_tasks()
        # add all tasks of this job to the queue
        for index, args in enumerate(args_list):
            if callback:
                callback()
            start_time = time.time()
            result_args = []
            for arg in args:
                # add the argument to the cache if possible
                if hasattr(arg, "uuid"):
                    data_uuid = ProcessDataCacheItemID(arg.uuid)
                    if not remote_cache.contains(data_uuid):
                        log.debug("Adding cache item for job %s: %s - %s" % \
                                (job_id, arg.uuid, arg.__class__))
                        remote_cache.add(data_uuid, arg)
                    result_args.append(data_uuid)
                elif isinstance(arg, (list, set, tuple)):
                    # a list with - maybe containing cacheable items
                    new_arg_list = []
                    for item in arg:
                        try:
                            data_uuid = ProcessDataCacheItemID(item.uuid)
                        except AttributeError:
                            # non-cacheable item
                            new_arg_list.append(item)
                            continue
                        if not remote_cache.contains(data_uuid):
                            log.debug("Adding cache item from list for " \
                                    + "job %s: %s - %s" \
                                    % (job_id, item.uuid, item.__class__))
                            remote_cache.add(data_uuid, item)
                        new_arg_list.append(data_uuid)
                    result_args.append(new_arg_list)
                else:
                    result_args.append(arg)
            tasks_queue.put((job_id, index, func, result_args))
            stats.add_queueing_time(__task_source_uuid,
                    time.time() - start_time)
        log.debug("Added %d tasks for job %s" % (len(args_list), job_id))
        result_buffer = {}
        index = 0
        cancelled = False
        # wait for all results of this job
        while (index < len(args_list)) and not cancelled:
            if callback and callback():
                # cancel requested
                cancelled = True
                break
            # re-inject stale tasks if necessary
            stale_task = pending_tasks.get_stale_task()
            if stale_task:
                stale_job_id, stale_task_id = stale_task[:2]
                if stale_job_id in __finished_jobs:
                    log.debug("Throwing away stale task of an old " + \
                            "job: %s" % stale_job_id)
                    pending_tasks.remove(stale_job_id, stale_task_id)
                elif stale_job_id == job_id:
                    log.debug("Reinjecting stale task: %s / %s" % \
                            (job_id, stale_task_id))
                    stale_func, stale_args = stale_task[2]
                    tasks_queue.put((job_id, stale_task_id, stale_func,
                            stale_args))
                    pending_tasks.remove(job_id, stale_task_id)
                else:
                    # non-local task
                    log.debug("Ignoring stale non-local task: %s / %s" \
                            % (stale_job_id, stale_task_id))
            try:
                result_job_id, task_id, result = results_queue.get(
                        timeout=1.0)
            except Queue.Empty:
                time.sleep(1.0)
                continue
            if result_job_id == job_id:
                log.debug("Received the result of a task: %s / %s" % \
                        (job_id, task_id))
                try:
                    if unordered:
                        # just return the values in any order
                        yield result
                        index += 1
                    else:
                        # return the results in order (based on task_id)
                        if task_id == index:
                            yield result
                            index += 1
                            while index in result_buffer.keys():
                                yield result_buffer[index]
                                del result_buffer[index]
                                index += 1
                        else:
                            result_buffer[task_id] = result
                except GeneratorExit:
                    # This exception is triggered when the caller stops
                    # requesting more items from the generator.
                    log.debug("Parallel processing cancelled: %s" % job_id)
                    _cleanup_job(job_id, tasks_queue, pending_tasks,
                            __finished_jobs)
                    # re-raise the GeneratorExit exception to finish destruction
                    raise
            elif result_job_id in __finished_jobs:
                # throw away this result of an old job
                log.debug("Throwing away one result of an old job: %s" % \
                        result_job_id)
            else:
                log.debug("Skipping result of non-local job: %s" % \
                        result_job_id)
                # put the result back to the queue for the next manager
                results_queue.put((result_job_id, task_id, result))
                # wait a little bit to get some idle CPU cycles
                time.sleep(0.2)
        _cleanup_job(job_id, tasks_queue, pending_tasks, __finished_jobs)
        if cancelled:
            log.debug("Parallel processing cancelled: %s" % job_id)
        else:
            log.debug("Parallel processing finished: %s" % job_id)
    else:
        for args in args_list:
            yield func(args)
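Since this function is a generator, results arrive lazily and the GeneratorExit branch above runs when the consumer stops early. A hypothetical consumer (compute_slice, all_args, abort_requested and process are placeholders):

results = run_in_parallel_remote(compute_slice, all_args, unordered=True)
for result in results:
    if abort_requested():
        results.close()  # raises GeneratorExit inside the generator -> cleanup
        break
    process(result)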
Example #37
    while len(header_lines) < 2:
        line = f.readline(200)
        if len(line) == 0:
            # empty line (not even a line-feed) -> EOF
            log.error("STLImporter: No valid lines found in '%s'" % filename)
            return None
        # ignore comment lines
        # note: partial comments (starting within a line) are not handled
        if not line.startswith(";"):
            header_lines.append(line)
    header = "".join(header_lines)
    # read byte 80 to 83 - they contain the "numfacets" value in binary format
    f.seek(80)
    numfacets = unpack("<I", f.read(4))[0]
    binary = False
    log.debug("STL import info: %s / %s / %s / %s" % \
            (f.len, numfacets, header.find("solid"), header.find("facet")))

    if f.len == (84 + 50*numfacets):
        binary = True
    elif header.find("solid") >= 0 and header.find("facet") >= 0:
        binary = False
        f.seek(0)
    else:
        log.error("STLImporter: STL binary/ascii detection failed")
        return None

    if use_kdtree:
        kdtree = PointKdtree([], 3, 1, epsilon)
    model = Model(use_kdtree)

    t = None
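The binary/ascii detection relies on the fixed binary STL layout: an 80-byte header, a 4-byte little-endian facet count, then 50 bytes per facet (twelve 4-byte floats plus a 2-byte attribute word). A standalone restatement of the size check (sketch):

import os
import struct

def looks_like_binary_stl(filename):
    size = os.path.getsize(filename)
    if size < 84:
        return False
    with open(filename, "rb") as f:
        f.seek(80)
        (numfacets,) = struct.unpack("<I", f.read(4))
    return size == 84 + 50 * numfacets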
Example #38
def generate_toolpath(model, tool_settings=None,
        bounds=None, direction="x",
        path_generator="DropCutter", path_postprocessor="ZigZagCutter",
        material_allowance=0, overlap_percent=0, step_down=0, engrave_offset=0,
        milling_style="ignore", pocketing_type="none",
        support_grid_type=None, support_grid_distance_x=None,
        support_grid_distance_y=None, support_grid_thickness=None,
        support_grid_height=None, support_grid_offset_x=None,
        support_grid_offset_y=None, support_grid_adjustments_x=None,
        support_grid_adjustments_y=None, support_grid_average_distance=None,
        support_grid_minimum_bridges=None, support_grid_length=None,
        calculation_backend=None, callback=None):
    """ abstract interface for generating a toolpath

    @type model: pycam.Geometry.Model.Model
    @value model: a model containing surface triangles or a contour
    @type tool_settings: dict
    @value tool_settings: contains at least the following keys (depending on
        the tool type):
        "shape": any of possible cutter shape (see "pycam.Cutters")
        "tool_radius": main radius of the tools
        "torus_radius": (only for ToroidalCutter) second toroidal radius
    @type bounds: pycam.Toolpath.Bounds | None
    @value bounds: the processing boundary (used for the center of the tool);
        defaults to the bounding box of the model
    @type direction: str
    @value direction: any member of the DIRECTIONS set (e.g. "x", "y" or "xy")
    @type path_generator: str
    @value path_generator: any member of the PATH_GENERATORS set
    @type path_postprocessor: str
    @value path_postprocessor: any member of the PATH_POSTPROCESSORS set
    @type material_allowance: float
    @value material_allowance: the minimum distance between the tool and the model
    @type overlap_percent: int
    @value overlap_percent: the overlap between two adjacent tool paths (0..100) given in percent
    @type step_down: float
    @value step_down: maximum height of each layer (for PushCutter)
    @type engrave_offset: float
    @value engrave_offset: toolpath distance to the contour model
    @type support_grid_distance_x: float
    @value support_grid_distance_x: distance between support grid lines along x
    @type support_grid_distance_y: float
    @value support_grid_distance_y: distance between support grid lines along y
    @type support_grid_thickness: float
    @value support_grid_thickness: thickness of the support grid
    @type support_grid_height: float
    @value support_grid_height: height of the support grid
    @type support_grid_offset_x: float
    @value support_grid_offset_x: shift the support grid by this value along x
    @type support_grid_offset_y: float
    @value support_grid_offset_y: shift the support grid by this value along y
    @type support_grid_adjustments_x: list(float)
    @value support_grid_adjustments_x: manual adjustment of each x-grid bar
    @type support_grid_adjustments_y: list(float)
    @value support_grid_adjustments_y: manual adjustment of each y-grid bar
    @type calculation_backend: str | None
    @value calculation_backend: any member of the CALCULATION_BACKENDS set
        The default is the triangular collision detection.
    @rtype: pycam.Toolpath.Toolpath | str
    @return: the resulting toolpath object or an error string in case of invalid
        arguments
    """
    log.debug("Starting toolpath generation")
    step_down = number(step_down)
    engrave_offset = number(engrave_offset)
    if bounds is None:
        # no bounds were given - we use the boundaries of the model
        bounds = pycam.Toolpath.Bounds(pycam.Toolpath.Bounds.TYPE_CUSTOM,
                (model.minx, model.miny, model.minz),
                (model.maxx, model.maxy, model.maxz))
    bounds_low, bounds_high = bounds.get_absolute_limits()
    minx, miny, minz = [number(value) for value in bounds_low]
    maxx, maxy, maxz = [number(value) for value in bounds_high]
    # trimesh model or contour model?
    if isinstance(model, pycam.Geometry.Model.ContourModel):
        # contour model
        trimesh_models = []
        contour_model = model
    else:
        # trimesh model
        trimesh_models = [model]
        contour_model = None
    # Due to some weirdness the height of the drill must be bigger than the
    # object's size. Otherwise some collisions are not detected.
    cutter_height = 4 * abs(maxz - minz)
    cutter = pycam.Cutters.get_tool_from_settings(tool_settings, cutter_height)
    if isinstance(cutter, basestring):
        return cutter
    if not path_generator in ("EngraveCutter", "ContourFollow"):
        # material allowance is not available for these two strategies
        cutter.set_required_distance(material_allowance)
    # create the grid model if requested
    if (support_grid_type == "grid") \
            and (((not support_grid_distance_x is None) \
            or (not support_grid_distance_y is None)) \
            and (not support_grid_thickness is None)):
        # grid height defaults to the thickness
        if support_grid_height is None:
            support_grid_height = support_grid_thickness
        if (support_grid_distance_x < 0) or (support_grid_distance_y < 0):
            return "The distance of the support grid must be a positive value"
        if not ((support_grid_distance_x > 0) or (support_grid_distance_y > 0)):
            return "Both distance values for the support grid may not be " \
                    + "zero at the same time"
        if support_grid_thickness <= 0:
            return "The thickness of the support grid must be a positive value"
        if support_grid_height <= 0:
            return "The height of the support grid must be a positive value"
        if not callback is None:
            callback(text="Preparing support grid model ...")
        support_grid_model = pycam.Toolpath.SupportGrid.get_support_grid(
                minx, maxx, miny, maxy, minz, support_grid_distance_x,
                support_grid_distance_y, support_grid_thickness,
                support_grid_height, offset_x=support_grid_offset_x,
                offset_y=support_grid_offset_y,
                adjustments_x=support_grid_adjustments_x,
                adjustments_y=support_grid_adjustments_y)
        trimesh_models.append(support_grid_model)
    elif (support_grid_type in ("distributed_edges", "distributed_corners")) \
            and (not support_grid_average_distance is None) \
            and (not support_grid_thickness is None) \
            and (not support_grid_length is None):
        if support_grid_height is None:
            support_grid_height = support_grid_thickness
        if support_grid_minimum_bridges is None:
            support_grid_minimum_bridges = 2
        if support_grid_average_distance <= 0:
            return "The average support grid distance must be a positive value"
        if support_grid_minimum_bridges <= 0:
            return "The minimum number of bridged per polygon must be a " \
                    + "positive value"
        if support_grid_thickness <= 0:
            return "The thickness of the support grid must be a positive value"
        if support_grid_height <= 0:
            return "The height of the support grid must be a positive value"
        if not callback is None:
            callback(text="Preparing support grid model ...")
        # check which model to choose
        if contour_model:
            model = contour_model
        else:
            model = trimesh_models[0]
        start_at_corners = (support_grid_type == "distributed_corners")
        support_grid_model = pycam.Toolpath.SupportGrid.get_support_distributed(
                model, minz, support_grid_average_distance,
                support_grid_minimum_bridges, support_grid_thickness,
                support_grid_height, support_grid_length,
                bounds, start_at_corners=start_at_corners)
        trimesh_models.append(support_grid_model)
    elif (not support_grid_type) or (support_grid_type == "none"):
        pass
    else:
        return "Invalid support grid type selected: %s" % support_grid_type
    # Adapt the contour_model to the engraving offset. This offset is
    # considered to be part of the material_allowance.
    if contour_model and (engrave_offset != 0):
        if not callback is None:
            callback(text="Preparing contour model with offset ...")
        contour_model = contour_model.get_offset_model(engrave_offset,
                callback=callback)
        if not contour_model:
            return "Failed to calculate offset polygons"
        if not callback is None:
            # reset percentage counter after the contour model calculation
            callback(percent=0)
            if callback(text="Checking contour model with offset for " \
                    + "collisions ..."):
                # quit requested
                return None
            progress_callback = ProgressCounter(
                    len(contour_model.get_polygons()), callback).increment
        else:
            progress_callback = None
        result = contour_model.check_for_collisions(callback=progress_callback)
        if result is None:
            return None
        elif result:
            warning = "The contour model contains colliding line groups. " + \
                    "This can cause problems with an engraving offset.\n" + \
                    "A collision was detected at (%.2f, %.2f, %.2f)." % \
                    (result.x, result.y, result.z)
            log.warning(warning)
        else:
            # no collisions and no user interruption
            pass
    # check the pocketing type
    if contour_model and (pocketing_type != "none"):
        if not callback is None:
            callback(text="Generating pocketing polygons ...")
        pocketing_offset = cutter.radius * 1.8
        # TODO: this is an arbitrary limit to avoid infinite loops
        pocketing_limit = 1000
        base_polygons = []
        other_polygons = []
        if pocketing_type == "holes":
            # fill polygons with negative area
            for poly in contour_model.get_polygons():
                if poly.is_closed and not poly.is_outer():
                    base_polygons.append(poly)
                else:
                    other_polygons.append(poly)
        elif pocketing_type == "enclosed":
            # fill polygons with positive area
            pocketing_offset *= -1
            for poly in contour_model.get_polygons():
                if poly.is_closed and poly.is_outer():
                    base_polygons.append(poly)
                else:
                    other_polygons.append(poly)
        else:
            return "Unknown pocketing type given (not one of 'none', " + \
                    "'holes', 'enclosed'): %s" % str(pocketing_type)
        # For now we use only the polygons that do not surround any other
        # polygons. Sorry - the pocketing is currently very simple ...
        base_filtered_polygons = []
        for candidate in base_polygons:
            if callback and callback():
                return "Interrupted"
            for other in other_polygons:
                if candidate.is_polygon_inside(other):
                    break
            else:
                base_filtered_polygons.append(candidate)
        # start the pocketing for all remaining polygons
        pocket_polygons = []
        for base_polygon in base_filtered_polygons:
            current_queue = [base_polygon]
            next_queue = []
            pocket_depth = 0
            while current_queue and (pocket_depth < pocketing_limit):
                if callback and callback():
                    return "Interrupted"
                for poly in current_queue:
                    result = poly.get_offset_polygons(pocketing_offset)
                    pocket_polygons.extend(result)
                    next_queue.extend(result)
                    pocket_depth += 1
                current_queue = next_queue
                next_queue = []
        # use a copy instead of the original
        contour_model = contour_model.get_copy()
        for pocket in pocket_polygons:
            contour_model.append(pocket)
    # limit the contour model to the bounding box
    if contour_model:
        # use minz/maxz of the contour model (in other words: ignore z)
        contour_model = contour_model.get_cropped_model(minx, maxx, miny, maxy,
                contour_model.minz, contour_model.maxz)
        if not contour_model:
            return "No part of the contour model is within the bounding box."
    physics = _get_physics(trimesh_models, cutter, calculation_backend)
    if isinstance(physics, basestring):
        return physics
    generator = _get_pathgenerator_instance(trimesh_models, contour_model,
            cutter, path_generator, path_postprocessor, physics,
            milling_style)
    if isinstance(generator, basestring):
        return generator
    overlap = overlap_percent / 100.0
    if (overlap < 0) or (overlap >= 1):
        return "Invalid overlap value (%f): should be greater or equal 0 " \
                + "and lower than 1"
    # factor "2" since we are based on radius instead of diameter
    line_stepping = 2 * number(tool_settings["tool_radius"]) * (1 - overlap)
    if path_generator == "PushCutter":
        step_width = None
    else:
        # the step_width is only used for the DropCutter
        step_width = tool_settings["tool_radius"] / 4
    if path_generator == "DropCutter":
        layer_distance = None
    else:
        layer_distance = step_down
    direction_dict = {"x": pycam.Toolpath.MotionGrid.GRID_DIRECTION_X,
            "y": pycam.Toolpath.MotionGrid.GRID_DIRECTION_Y,
            "xy": pycam.Toolpath.MotionGrid.GRID_DIRECTION_XY}
    milling_style_grid = {
            "ignore": pycam.Toolpath.MotionGrid.MILLING_STYLE_IGNORE,
            "conventional": pycam.Toolpath.MotionGrid.MILLING_STYLE_CONVENTIONAL,
            "climb": pycam.Toolpath.MotionGrid.MILLING_STYLE_CLIMB}
    if path_generator in ("DropCutter", "PushCutter"):
        motion_grid = pycam.Toolpath.MotionGrid.get_fixed_grid(bounds,
                layer_distance, line_stepping, step_width=step_width,
                grid_direction=direction_dict[direction],
                milling_style=milling_style_grid[milling_style])
        if path_generator == "DropCutter":
            toolpath = generator.GenerateToolPath(motion_grid, minz, maxz,
                    callback)
        else:
            toolpath = generator.GenerateToolPath(motion_grid, callback)
    elif path_generator == "EngraveCutter":
        if step_down > 0:
            dz = step_down
        else:
            dz = maxz - minz
        toolpath = generator.GenerateToolPath(minz, maxz, step_width, dz,
                callback)
    elif path_generator == "ContourFollow":
        if step_down > 0:
            dz = step_down
        else:
            dz = maxz - minz
            if dz <= 0:
                dz = 1
        toolpath = generator.GenerateToolPath(minx, maxx, miny, maxy, minz,
                maxz, dz, callback)
    elif path_generator == "Contour2dCutter": # JULIEN
        toolpath = generator.GenerateToolPath(callback)
    else:
        return "Invalid path generator (%s): not one of %s" \
                % (path_generator, PATH_GENERATORS)
    return toolpath
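
As an aside, the stepping arithmetic above is compact enough to misread. Below is a minimal, self-contained sketch of the same computation; the helper name and the assert are ours for illustration, not part of PyCAM:

def get_line_stepping(tool_radius, overlap_percent):
    # scale the tool diameter (2 * radius) by the non-overlapping
    # fraction, mirroring the "factor 2" comment above
    overlap = overlap_percent / 100.0
    if not (0 <= overlap < 1):
        raise ValueError("overlap must be >= 0 and < 1: %f" % overlap)
    return 2 * tool_radius * (1 - overlap)

# e.g. a 3.0 mm radius tool with 25% overlap advances 4.5 mm per line
assert get_line_stepping(3.0, 25.0) == 4.5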
Example #53
0
 def call_chain(self, name, *args, **kwargs):
     if name in self.chains:
         for data in self.chains[name]:
             data[CHAIN_FUNC_INDEX](*args, **kwargs)
     else:
         log.debug("Called an unknown chain: %s" % name)
Example #54
0
 def revise_directions(self, callback=None):
     """ Go through all open polygons and try to merge them regardless of
     their direction. Afterwards all closed polygons are analyzed regarding
     their inside/outside relationships.
     Beware: never use this function if the direction of lines may not
     change.
     """
     number_of_initial_closed_polygons = len(
         [poly for poly in self.get_polygons() if poly.is_closed])
     open_polygons = [
         poly for poly in self.get_polygons() if not poly.is_closed
     ]
     if callback:
         progress_callback = pycam.Utils.ProgressCounter(
             2 * number_of_initial_closed_polygons + len(open_polygons),
             callback).increment
     else:
         progress_callback = None
     # try to connect all open polygons
     for poly in open_polygons:
         self._line_groups.remove(poly)
     poly_open_before = len(open_polygons)
     for poly in open_polygons:
         for line in poly.get_lines():
             self.append(line, allow_reverse=True)
         if progress_callback and progress_callback():
             return
     poly_open_after = len(
         [poly for poly in self.get_polygons() if not poly.is_closed])
     if poly_open_before != poly_open_after:
         log.info("Reduced the number of open polygons from %d down to %d",
                  poly_open_before, poly_open_after)
     else:
         log.debug("No combineable open polygons found")
     # auto-detect directions of closed polygons: inside and outside
     finished = []
     remaining_polys = [
         poly for poly in self.get_polygons() if poly.is_closed
     ]
     if progress_callback:
         # shift the counter back by the number of new closed polygons
         progress_callback(
             2 * (number_of_initial_closed_polygons - len(remaining_polys)))
     remaining_polys.sort(key=lambda poly: abs(poly.get_area()))
     while remaining_polys:
         # pick the largest polygon
         current = remaining_polys.pop()
         # start with the smallest finished polygon
         for comp, is_outer in finished:
             if comp.is_polygon_inside(current):
                 finished.insert(0, (current, not is_outer))
                 break
         else:
             # no enclosing polygon was found
             finished.insert(0, (current, True))
         if progress_callback and progress_callback():
             return
     # Adjust the directions of all polygons according to the result
     # of the previous analysis.
     change_counter = 0
     for polygon, is_outer in finished:
         if polygon.is_outer() != is_outer:
             polygon.reverse_direction()
             change_counter += 1
         if progress_callback and progress_callback():
             self.reset_cache()
             return
     log.info("The winding of %d polygon(s) was fixed.", change_counter)
     self.reset_cache()
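
The inside/outside pass above boils down to a parity rule: walking the closed polygons from largest to smallest area, a polygon's orientation flips once per level of nesting. A toy sketch of just that rule, assuming the nesting depth is already known (the helper is hypothetical):

def is_outer_by_depth(nesting_depth):
    # even nesting depth -> outer boundary, odd depth -> hole;
    # this is the parity that the "not is_outer" flip above tracks
    return nesting_depth % 2 == 0

assert is_outer_by_depth(0)        # outermost contour
assert not is_outer_by_depth(1)    # first-level hole
assert is_outer_by_depth(2)        # island inside a hole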
Example #55
0
def get_collision_waterline_of_triangle(model, cutter, up_vector, triangle, z):
    # TODO: there are problems with "material allowance > 0"
    plane = Plane(Point(0, 0, z), up_vector)
    if triangle.minz >= z:
        # no point of the triangle is below z
        # try all edges
        # Case (4)
        proj_points = []
        for p in triangle.get_points():
            proj_p = plane.get_point_projection(p)
            if proj_p not in proj_points:
                proj_points.append(proj_p)
        if len(proj_points) == 3:
            edges = []
            for index in range(3):
                edge = Line(proj_points[index - 1], proj_points[index])
                # the edge should be clockwise around the model
                if edge.dir.cross(triangle.normal).dot(up_vector) < 0:
                    edge = Line(edge.p2, edge.p1)
                edges.append((edge, proj_points[index - 2]))
            outer_edges = []
            for edge, other_point in edges:
                # pick only edges where the other point is on the right side
                if other_point.sub(edge.p1).cross(edge.dir).dot(up_vector) > 0:
                    outer_edges.append(edge)
            if len(outer_edges) == 0:
                # the points seem to be on one line
                # pick the longest edge
                long_edge = edges[0][0]
                for edge, other_point in edges[1:]:
                    if edge.len > long_edge.len:
                        long_edge = edge
                outer_edges = [long_edge]
        else:
            edge = Line(proj_points[0], proj_points[1])
            if edge.dir.cross(triangle.normal).dot(up_vector) < 0:
                edge = Line(edge.p2, edge.p1)
            outer_edges = [edge]
    else:
        # some parts of the triangle are above and some below the cutter level
        # Cases (2a), (2b), (3a) and (3b)
        points_above = [plane.get_point_projection(p)
                for p in triangle.get_points() if p.z > z]
        waterline = plane.intersect_triangle(triangle)
        if waterline is None:
            if len(points_above) == 0:
                # the highest point of the triangle is at z
                outer_edges = []
            else:
                if abs(triangle.minz - z) < epsilon:
                    # This is just an accuracy issue (see the
                    # "triangle.minz >= z" statement above).
                    outer_edges = []
                elif not [p for p in triangle.get_points()
                        if p.z > z + epsilon]:
                    # same as above: fix for floating point inaccuracies
                    outer_edges = []
                else:
                    # this should not happen
                    raise ValueError(("Could not find a waterline, but " \
                            + "there are points above z level (%f): " \
                            + "%s / %s") % (z, triangle, points_above))
        else:
            # remove points that are not part of the waterline
            points_above = [p for p in points_above
                    if (p != waterline.p1) and (p != waterline.p2)]
            if len(points_above) == 0:
                # part of case (2a)
                outer_edges = [waterline]
            elif len(points_above) == 1:
                other_point = points_above[0]
                dot = other_point.sub(waterline.p1).cross(waterline.dir).dot(
                        up_vector)
                if dot > 0:
                    # Case (2b)
                    outer_edges = [waterline]
                elif dot < 0:
                    # Case (3b)
                    edges = []
                    edges.append(Line(waterline.p1, other_point))
                    edges.append(Line(waterline.p2, other_point))
                    outer_edges = []
                    for edge in edges:
                        if edge.dir.cross(triangle.normal).dot(up_vector) < 0:
                            outer_edges.append(Line(edge.p2, edge.p1))
                        else:
                            outer_edges.append(edge)
                else:
                    # the three points are on one line
                    # part of case (2a)
                    edges = []
                    edges.append(waterline)
                    edges.append(Line(waterline.p1, other_point))
                    edges.append(Line(waterline.p2, other_point))
                    edges.sort(key=lambda x: x.len)
                    edge = edges[-1]
                    if edge.dir.cross(triangle.normal).dot(up_vector) < 0:
                        outer_edges = [Line(edge.p2, edge.p1)]
                    else:
                        outer_edges = [edge]
            else:
                # two points above
                other_point = points_above[0]
                dot = other_point.sub(waterline.p1).cross(waterline.dir).dot(
                        up_vector)
                if dot > 0:
                    # Case (2b)
                    # the other two points are on the right side
                    outer_edges = [waterline]
                elif dot < 0:
                    # Case (3a)
                    edge = Line(points_above[0], points_above[1])
                    if edge.dir.cross(triangle.normal).dot(up_vector) < 0:
                        outer_edges = [Line(edge.p2, edge.p1)]
                    else:
                        outer_edges = [edge]
                else:
                    edges = []
                    # pick the longest combination of two of these points
                    # part of case (2a)
                    # TODO: maybe we should use the waterline instead?
                    # (Otherwise the line could be too long and thus
                    # connections to the adjacent waterlines might not be
                    # discovered. Test this with an appropriate test model.)
                    points = [waterline.p1, waterline.p2] + points_above
                    for p1 in points:
                        for p2 in points:
                            if p1 is not p2:
                                edges.append(Line(p1, p2))
                    edges.sort(key=lambda x: x.len)
                    edge = edges[-1]
                    if edge.dir.cross(triangle.normal).dot(up_vector) < 0:
                        outer_edges = [Line(edge.p2, edge.p1)]
                    else:
                        outer_edges = [edge]
    # calculate the maximum diagonal length within the model
    x_dim = abs(model.maxx - model.minx)
    y_dim = abs(model.maxy - model.miny)
    z_dim = abs(model.maxz - model.minz)
    max_length = sqrt(x_dim ** 2 + y_dim ** 2 + z_dim ** 2)
    result = []
    for edge in outer_edges:
        direction = up_vector.cross(edge.dir).normalized()
        if direction is None:
            continue
        direction = direction.mul(max_length)
        edge_dir = edge.p2.sub(edge.p1)
        # TODO: Adapt the number of potential starting positions to the length
        # of the line. Don't use 0.0 and 1.0 - this could result in ambiguous
        # collisions with triangles sharing these vertices.
        for factor in (0.5, epsilon, 1.0 - epsilon, 0.25, 0.75):
            start = edge.p1.add(edge_dir.mul(factor))
            # We need to use the triangle collision algorithm here - because we
            # need the point of collision in the triangle.
            collisions = get_free_paths_triangles([model], cutter, start,
                    start.add(direction), return_triangles=True)
            for index, coll in enumerate(collisions):
                if (index % 2 == 0) and (coll[1] is not None) \
                        and (coll[2] is not None) \
                        and (coll[0].sub(start).dot(direction) > 0):
                    cl, hit_t, cp = coll
                    break
            else:
                log.debug("Failed to detect any collision: " \
                        + "%s / %s -> %s" % (edge, start, direction))
                continue
            proj_cp = plane.get_point_projection(cp)
            # e.g. the Spherical Cutter often does not collide exactly above
            # the potential collision line.
            # TODO: maybe an "is cp inside of the triangle" check would be good?
            if (triangle is hit_t) or (edge.is_point_inside(proj_cp)):
                result.append((cl, edge))
                # continue with the next outer_edge
                break
    # Don't check triangles again that are completely above the z level and
    # did not return any collisions.
    if (len(result) == 0) and (triangle.minz > z):
        # None indicates that the triangle needs no further evaluation
        return None
    return result
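
The probe rays above are extended by the bounding-box diagonal, which is an upper bound on any distance inside the model. A standalone sketch of that bound; the function name is ours:

from math import sqrt

def bounding_box_diagonal(minx, maxx, miny, maxy, minz, maxz):
    # no collision ray that stays inside the box can be longer
    # than the box's space diagonal
    return sqrt((maxx - minx) ** 2 + (maxy - miny) ** 2
            + (maxz - minz) ** 2)

# a 3 x 4 x 12 box has a 13-unit space diagonal
assert bounding_box_diagonal(0, 3, 0, 4, 0, 12) == 13.0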
Example #56
0
def _handle_tasks(tasks, results, stats, cache, pending_tasks, closing):
    global __multiprocessing
    name = __multiprocessing.current_process().name
    local_cache = ProcessDataCache()
    timeout_limit = 60
    timeout_counter = 0
    last_worker_notification = 0
    log.debug("Worker thread started: %s" % name)
    try:
        while (timeout_counter < timeout_limit) and not closing.get():
            if last_worker_notification + 30 < time.time():
                stats.worker_notification(name)
                last_worker_notification = time.time()
            start_time = time.time()
            try:
                job_id, task_id, func, args = tasks.get(timeout=0.2)
            except Queue.Empty:
                time.sleep(1.8)
                timeout_counter += 1
                continue
            # TODO: if the client aborts/disconnects between "tasks.get" and
            # "pending_tasks.add", the task is lost. We should use some kind
            # of backup mechanism.
            pending_tasks.add(job_id, task_id, (func, args))
            log.debug("Worker %s processes %s / %s" % (name, job_id, task_id))
            # reset the timeout counter, if we found another item in the queue
            timeout_counter = 0
            real_args = []
            for arg in args:
                if isinstance(arg, ProcessDataCacheItemID):
                    try:
                        value = local_cache.get(arg)
                    except KeyError:
                        # TODO: we will break hard if the item is expired
                        value = cache.get(arg)
                        local_cache.add(arg, value)
                    real_args.append(value)
                elif isinstance(arg, list) and any(
                        isinstance(item, ProcessDataCacheItemID)
                        for item in arg):
                    # check if any item in the list is cacheable
                    args_list = []
                    for item in arg:
                        if isinstance(item, ProcessDataCacheItemID):
                            try:
                                value = local_cache.get(item)
                            except KeyError:
                                value = cache.get(item)
                                local_cache.add(item, value)
                            args_list.append(value)
                        else:
                            args_list.append(item)
                    real_args.append(args_list)
                else:
                    real_args.append(arg)
            stats.add_transfer_time(name, time.time() - start_time)
            start_time = time.time()
            results.put((job_id, task_id, func(real_args)))
            pending_tasks.remove(job_id, task_id)
            stats.add_process_time(name, time.time() - start_time)
    except KeyboardInterrupt:
        pass
    log.debug("Worker thread finished after %d seconds of inactivity: %s" \
            % (timeout_counter, name))
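
The loop above follows a common worker idiom: poll with a short timeout, back off briefly when the queue is empty, and exit after a run of consecutive misses. A stripped-down sketch of just that pattern; poll_until_idle and handle_task are placeholder names of ours, while the Queue import matches the Python 2 code above:

import time
import Queue  # Python 2 module, as used by the worker above

def poll_until_idle(tasks, handle_task, timeout_limit=60):
    # give up after timeout_limit consecutive empty polls;
    # any successful get() resets the counter
    timeout_counter = 0
    while timeout_counter < timeout_limit:
        try:
            task = tasks.get(timeout=0.2)
        except Queue.Empty:
            time.sleep(1.8)
            timeout_counter += 1
            continue
        timeout_counter = 0
        handle_task(task)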