Code Example #1
    def replaceTagMatch(self, m):
        import time

        pre = m.group(1)
        tag = m.group(2)

        if tag == 'time':
            return pre + time.strftime('%H:%M:%S')
        if tag == 'date':
            return pre + time.strftime('%d-%m-%Y')
        if tag == 'day':
            return pre + ['Sun', 'Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat'][int(
                time.strftime('%w'))]
        if tag == 'profile_string':
            return pre + 'CURA_OCTO_PROFILE_STRING:%s' % (
                self.get_profile_string())

        if pre == 'F' and tag == 'max_z_speed':
            f = self.get_float("travel_speed") * 60
        elif pre == 'F' and tag in [
                'print_speed', 'retraction_speed', 'travel_speed',
                'bottom_layer_speed', 'cool_min_feedrate'
        ]:
            f = self.get_float(tag) * 60
        elif self.get(tag):
            f = self.get(tag)
        else:
            return '%s?%s?' % (pre, tag)

        if (f % 1) == 0:
            return pre + to_unicode(int(f))

        return pre + to_unicode(f)
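As Code Example #27 further below shows, replaceTagMatch is passed to re.sub as the replacement callable for a {tag}-style template. A minimal, self-contained sketch of that pattern; the lookup table and tag names here are illustrative, not taken from the plugin:

import re

def replace_tag(m):
    # m.group(1) is the character before "{", m.group(2) is the tag name
    pre, tag = m.group(1), m.group(2)
    values = {"print_speed": "3600"}  # hypothetical lookup table
    return pre + values.get(tag, "?%s?" % tag)

gcode = "G1 F{print_speed}\nM117 {greeting}"
print(re.sub(r"(.)\{([^}]*)\}", replace_tag, gcode))
# G1 F3600
# M117 ?greeting?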
Code Example #2
 def on_firmware_info_received(self, comm_instance, firmware_name,
                               firmware_data):
     self._run_checks(
         "m115", to_unicode(firmware_name, errors="replace"),
         dict((to_unicode(key, errors="replace"),
               to_unicode(value, errors="replace"))
              for key, value in firmware_data.items()))
     self._scan_received = False
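The dict comprehension above decodes both keys and values defensively before they are passed on. The same idea stand-alone, with made-up firmware data:

firmware_data = {b"FIRMWARE_NAME": b"Marlin", b"MACHINE_TYPE": b"Gen\xffric"}
decoded = {k.decode("utf-8", errors="replace"): v.decode("utf-8", errors="replace")
           for k, v in firmware_data.items()}
print(decoded)  # {'FIRMWARE_NAME': 'Marlin', 'MACHINE_TYPE': 'Gen�ric'}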
Code Example #3
    def execute(self, *args):
        if self.refresh:
            self.trigger_refresh()

        if self._command is None:
            raise UnknownPip()

        command = [self._command] + list(args)
        if self._use_sudo:
            command = ["sudo"] + command

        joined_command = " ".join(command)
        self._logger.debug(u"Calling: {}".format(joined_command))
        self.on_log_call(joined_command)

        p = sarge.run(command,
                      async_=True,  # sarge renamed "async" to "async_" ("async" is a reserved word in Python 3)
                      stdout=sarge.Capture(),
                      stderr=sarge.Capture())
        p.wait_events()

        all_stdout = []
        all_stderr = []
        try:
            while p.returncode is None:
                line = p.stderr.readline(timeout=0.5)
                if line:
                    line = to_unicode(line, errors="replace")
                    self._log_stderr(line)
                    all_stderr.append(line)

                line = p.stdout.readline(timeout=0.5)
                if line:
                    line = to_unicode(line, errors="replace")
                    self._log_stdout(line)
                    all_stdout.append(line)

                p.commands[0].poll()

        finally:
            p.close()

        stderr = p.stderr.text
        if stderr:
            split_lines = stderr.split("\n")
            self._log_stderr(*split_lines)
            all_stderr += split_lines

        stdout = p.stdout.text
        if stdout:
            split_lines = stdout.split("\n")
            self._log_stdout(*split_lines)
            all_stdout += split_lines

        return p.returncode, all_stdout, all_stderr
Code Example #4
File: views.py  Project: Jaesin/OctoPrint
	def wizard_key_extractor(d, k):
		if d[1].get("_key", None) == "plugin_corewizard_acl":
			# Ultra special case - we MUST always have the ACL wizard first since otherwise any steps that follow and
			# that require to access APIs to function will run into errors since those APIs won't work before ACL
			# has been configured. See also #2140
			return u"0:{}".format(to_unicode(d[0]))
		elif d[1].get("mandatory", False):
			# Other mandatory steps come before the optional ones
			return u"1:{}".format(to_unicode(d[0]))
		else:
			# Finally everything else
			return u"2:{}".format(to_unicode(d[0]))
Code Example #5
	def wizard_key_extractor(d, k):
		if d[1].get("_key", None) == "plugin_corewizard_acl":
			# Ultra special case - we MUST always have the ACL wizard first since otherwise any steps that follow and
			# that require to access APIs to function will run into errors since those APIs won't work before ACL
			# has been configured. See also #2140
			return u"0:{}".format(to_unicode(d[0]))
		elif d[1].get("mandatory", False):
			# Other mandatory steps come before the optional ones
			return u"1:{}".format(to_unicode(d[0]))
		else:
			# Finally everything else
			return u"2:{}".format(to_unicode(d[0]))
Code Example #6
    def get_gcode_template(self, key, extruder_count=1):
        if key in self._profile:
            gcode = self._profile[key]
        else:
            gcode = defaults[key]

        if key in ("start_gcode", "end_gcode"):
            return self.regex_broken_replacements.sub(
                "{\\1}", to_unicode(gcode[extruder_count - 1]))
        else:
            return self.regex_broken_replacements.sub("{\\1}",
                                                      to_unicode(gcode))
Code Example #7
File: pip.py  Project: MaxOLydian/OctoPrint
	def execute(self, *args):
		if self.refresh:
			self.trigger_refresh()

		if self._command is None:
			raise UnknownPip()

		command = [self._command] + list(args)
		if self._use_sudo:
			command = ["sudo"] + command

		joined_command = " ".join(command)
		self._logger.debug(u"Calling: {}".format(joined_command))
		self.on_log_call(joined_command)

		p = sarge.run(command, async_=True, stdout=sarge.Capture(), stderr=sarge.Capture())
		p.wait_events()

		all_stdout = []
		all_stderr = []
		try:
			while p.returncode is None:
				line = p.stderr.readline(timeout=0.5)
				if line:
					line = to_unicode(line, errors="replace")
					self._log_stderr(line)
					all_stderr.append(line)

				line = p.stdout.readline(timeout=0.5)
				if line:
					line = to_unicode(line, errors="replace")
					self._log_stdout(line)
					all_stdout.append(line)

				p.commands[0].poll()

		finally:
			p.close()

		stderr = p.stderr.text
		if stderr:
			split_lines = stderr.split("\n")
			self._log_stderr(*split_lines)
			all_stderr += split_lines

		stdout = p.stdout.text
		if stdout:
			split_lines = stdout.split("\n")
			self._log_stdout(*split_lines)
			all_stdout += split_lines

		return p.returncode, all_stdout, all_stderr
Code Example #8
def prefix_multilines(text, prefix=": "):
    # type: (Union[unicode, bytes], unicode) -> unicode
    from octoprint.util import to_unicode

    lines = text.splitlines()
    if not lines:
        return ""

    if len(lines) == 1:
        return to_unicode(lines[0])

    return (to_unicode(lines[0]) + "\n" +
            "\n".join(map(lambda line: prefix + to_unicode(line), lines[1:])))
Code Example #9
File: __init__.py  Project: tinkercnc/OctoPrint
def prefix_multilines(text: str, prefix: str = ": ") -> str:
    from octoprint.util import to_unicode

    lines = text.splitlines()
    if not lines:
        return ""

    if len(lines) == 1:
        return to_unicode(lines[0])

    return (
        to_unicode(lines[0])
        + "\n"
        + "\n".join(map(lambda line: prefix + to_unicode(line), lines[1:]))
    )
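Behaviourally, prefix_multilines keeps the first line untouched and prefixes every continuation line, which is useful when indenting multi-line error details in a log. A self-contained sketch of the same logic without the OctoPrint import:

def prefix_multilines_demo(text: str, prefix: str = ": ") -> str:
    lines = text.splitlines()
    if not lines:
        return ""
    if len(lines) == 1:
        return lines[0]
    return lines[0] + "\n" + "\n".join(prefix + line for line in lines[1:])

print(prefix_multilines_demo("error: something failed\ndetail one\ndetail two"))
# error: something failed
# : detail one
# : detail two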
Code Example #10
        def ensure_gst_process():
            ring_buffer = deque(maxlen=50)
            gst_backoff = ExpoBackoff(60 * 10)
            while True:
                err = to_unicode(self.gst_proc.stderr.readline(),
                                 errors='replace')
                if not err:  # EOF when process ends?
                    if self.shutting_down:
                        return

                    returncode = self.gst_proc.wait()
                    msg = 'STDERR:\n{}\n'.format('\n'.join(ring_buffer))
                    _logger.debug(msg)
                    self.sentry.captureMessage(
                        'GST exited unexpectedly. Exit code: {}'.format(
                            returncode),
                        tags=get_tags())
                    gst_backoff.more(
                        'GST exited unexpectedly. Exit code: {}'.format(
                            returncode))

                    ring_buffer = deque(maxlen=50)
                    gst_cmd = os.path.join(GST_DIR, 'run_gst.sh')
                    _logger.debug('Popen: {}'.format(gst_cmd))
                    self.gst_proc = subprocess.Popen(gst_cmd,
                                                     stdout=subprocess.PIPE,
                                                     stderr=subprocess.PIPE)
                else:
                    ring_buffer.append(err)
Code Example #11
File: commandline.py  Project: tinkercnc/OctoPrint
def clean_ansi(line: Union[str, bytes]) -> Union[str, bytes]:
    """
    Removes ANSI control codes from ``line``.

    Note: This function also still supports an input of ``bytes``, leading to an
    output of ``bytes``. This is for backwards compatibility only; it should be
    considered deprecated and will be removed in a future version of OctoPrint.
    A warning will be logged.

    Parameters:
        line (str or bytes): the line to process

    Returns:
        (str or bytes) The line without any ANSI control codes

    .. versionchanged:: 1.8.0

       Usage as ``clean_ansi(line: bytes) -> bytes`` is now deprecated and will be removed
       in a future version of OctoPrint.
    """
    # TODO: bytes support is deprecated, remove in 2.0.0
    if isinstance(line, bytes):
        warnings.warn(
            "Calling clean_ansi with bytes is deprecated, call with str instead",
            DeprecationWarning,
        )
        return to_bytes(_ANSI_REGEX.sub("", to_unicode(line)))
    return _ANSI_REGEX.sub("", line)
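The exact pattern behind _ANSI_REGEX lives in octoprint.util.commandline; as a rough, illustrative stand-in, a regex that strips common CSI escape sequences (color codes, cursor show/hide) looks like this:

import re

# Illustrative only: matches ESC [ ... <final letter>, e.g. \x1b[31m or \x1b[?25l
ANSI_CSI = re.compile(r"\x1b\[[0-9;?]*[A-Za-z]")

line = "some \x1b[31mred\x1b[39m text\x1b[?25l"
print(ANSI_CSI.sub("", line))  # -> some red text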
Code Example #12
File: commandline.py  Project: tinkercnc/OctoPrint
 def process_lines(lines, logger):
     if not lines:
         return []
     processed = self._preprocess_lines(
         *map(lambda x: to_unicode(x, errors="replace"), lines))
     logger(*processed)
     return list(processed)
Code Example #13
File: virtual.py  Project: foosel/OctoPrint
	def write(self, data):
		data = to_bytes(data, errors="replace")
		u_data = to_unicode(data, errors="replace")

		if self._debug_awol:
			return len(data)

		if self._debug_drop_connection:
			self._logger.info("Debug drop of connection requested, raising SerialTimeoutException")
			raise SerialTimeoutException()

		with self._incoming_lock:
			if self.incoming is None or self.outgoing is None:
				return 0

			if "M112" in data and self._supportM112:
				self._seriallog.info(u"<<< {}".format(u_data))
				self._kill()
				return len(data)

			try:
				written = self.incoming.put(data, timeout=self._write_timeout, partial=True)
				self._seriallog.info(u"<<< {}".format(u_data))
				return written
			except queue.Full:
				self._logger.info("Incoming queue is full, raising SerialTimeoutException")
				raise SerialTimeoutException()
Code Example #14
    def sanitize(text, safe_chars="-_.", demoji=True):
        """
        Sanitizes text by running it through slugify and optionally emoji translating.
        Examples:
        >>> sanitize("Hello World!") # doctest: +ALLOW_UNICODE
        'Hello-World'
        >>> sanitize("Hello World!", safe_chars="-_. ") # doctest: +ALLOW_UNICODE
        'Hello World'
        >>> sanitize("\u2764") # doctest: +ALLOW_UNICODE
        'red_heart'
        >>> sanitize("\u2764\ufe00") # doctest: +ALLOW_UNICODE
        'red_heart'
        >>> sanitize("\u2764", demoji=False) # doctest: +ALLOW_UNICODE
        ''
        Args:
            text: the text to sanitize
            safe_chars: characters to consider safe and to keep after sanitization
            demoji: whether to also convert emoji to text
        Returns: the sanitized text
        """
        slugify = _SLUGIFIES.get(safe_chars)
        if slugify is None:
            slugify = Slugify()
            slugify.safe_chars = safe_chars
            _SLUGIFIES[safe_chars] = slugify

        text = to_unicode(text)
        if demoji:
            text = remove_unicode_variations(text)
            text = demojize(text, delimiters=("", ""))
        return slugify(text)
Code Example #15
def get_user_for_authorization_header(header):
    if not settings().getBoolean(["accessControl", "trustBasicAuthentication"
                                  ]):
        return None

    if header is None:
        return None

    if not header.startswith("Basic "):
        # we currently only support Basic Authentication
        return None

    header = header.replace("Basic ", "", 1)
    try:
        header = to_unicode(base64.b64decode(header))
    except TypeError:
        return None

    name, password = header.split(":", 1)
    if not octoprint.server.userManager.enabled:
        return None

    user = octoprint.server.userManager.find_user(userid=name)
    if settings().getBoolean([
            "accessControl", "checkBasicAuthenticationPassword"
    ]) and not octoprint.server.userManager.check_password(name, password):
        # password check enabled and the password doesn't match
        return None

    return user
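What gets decoded here is the standard "Basic <base64(user:password)>" header. A quick stand-alone illustration; note that on Python 3 a malformed header makes base64.b64decode raise binascii.Error (a ValueError subclass) rather than the TypeError caught above:

import base64

header = "Basic " + base64.b64encode(b"alice:secret").decode("ascii")
payload = base64.b64decode(header.split(" ", 1)[1]).decode("utf-8")
name, password = payload.split(":", 1)
print(name, password)  # -> alice secret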
Code Example #16
 def split_path(self, path):
     path = to_unicode(path)
     split = path.split(u"/")
     if len(split) == 1:
         return u"", split[0]
     else:
         return self.join_path(*split[:-1]), split[-1]
Code Example #17
    def create_backup(self, name, data, backup_time=None):
        """
        Creates a backup on the filesystem of data, with a name & time
        :param name: what to call the backup
        :param data: json data from the EEPROM
        :param backup_time: override the default time of 'now'
        :return: None
        """
        for backup in self.metadata.backups:
            if backup["name"] == name:
                raise BackupNameTakenError(
                    "Backup {} already exists!".format(name))

        if not backup_time:
            now = time.strftime("%Y-%m-%d %H:%M:%S")
        else:
            now = backup_time

        with io.open(self._get_backup_filename(name), "wt") as backup_file:
            backup_file.write(
                to_unicode(
                    json.dumps({
                        "name": name,
                        "time": now,
                        "version": BACKUP_VERSION,
                        "data": data,
                    })))

        self.metadata.add_backup(name, now)
Code Example #18
File: net.py  Project: osubuu/OctoPrint
def resolve_host(host):
    import socket
    from octoprint.util import to_unicode

    try:
        return [to_unicode(x[4][0]) for x in socket.getaddrinfo(host, 80)]
    except Exception:
        return []
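socket.getaddrinfo returns tuples of (family, type, proto, canonname, sockaddr), and sockaddr[0] is the resolved IP address, which is all resolve_host keeps. For example:

import socket

addresses = {x[4][0] for x in socket.getaddrinfo("localhost", 80)}
print(sorted(addresses))  # typically ['127.0.0.1', '::1'], depending on the system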
Code Example #19
 def save_metadata(self):
     """
     Write metadata to disk
     :return: None
     """
     data = {"version": self.version, "backups": self.backups}
     with io.open(self.path, "wt", encoding="utf-8") as metadata_file:
         metadata_file.write(to_unicode(json.dumps(data)))
Code Example #20
 def cancel_slicing(self, machinecode_path):
     with self._job_mutex:
         if machinecode_path in self._slicing_commands:
             self._cancelled_jobs.append(machinecode_path)
             command = self._slicing_commands[machinecode_path]
             if command is not None:
                 command.terminate()
             self._logger.info(u"Cancelled slicing of {}".format(
                 to_unicode(machinecode_path, errors="replace")))
Code Example #21
File: __init__.py  Project: ByReaL/OctoPrint
	def cancel_slicing(self, machinecode_path):
		with self._job_mutex:
			if machinecode_path in self._slicing_commands:
				self._cancelled_jobs.append(machinecode_path)
				command = self._slicing_commands[machinecode_path]
				if command is not None:
					command.terminate()
				self._logger.info(u"Cancelled slicing of {}"
				                  .format(to_unicode(machinecode_path, errors="replace")))
Code Example #22
        def monitor_ffmpeg_process():
            # It's pointless to restart ffmpeg without calling pi_camera.record with the new input.
            # Just capture unexpected exits so we can see whether they point to a bigger problem.
            ring_buffer = deque(maxlen=50)
            while True:
                err = to_unicode(self.ffmpeg_proc.stderr.readline())
                if not err:  # EOF when process ends?
                    if self.shutting_down:
                        return

                    returncode = self.ffmpeg_proc.wait()
                    msg = 'STDERR:\n{}\n'.format('\n'.join(ring_buffer))
                    _logger.error(msg)
                    self.sentry.captureMessage('ffmpeg quit! This should not happen. Exit code: {}'.format(returncode), tags=get_tags())
                    return
                else:
                    ring_buffer.append(err)
Code Example #23
        def run_janus():
            janus_backoff = ExpoBackoff(60*1)
            janus_cmd = os.path.join(JANUS_DIR, 'run_janus.sh')
            _logger.debug('Popen: {}'.format(janus_cmd))
            self.janus_proc = subprocess.Popen(janus_cmd.split(), stdout=subprocess.PIPE, stderr=subprocess.STDOUT)

            while not self.shutting_down:
                line = to_unicode(self.janus_proc.stdout.readline())
                if line:
                    _logger.debug('JANUS: ' + line)
                elif not self.shutting_down:
                    self.janus_proc.wait()
                    msg = 'Janus quit! This should not happen. Exit code: {}'.format(self.janus_proc.returncode)
                    self.sentry.captureMessage(msg, tags=get_tags())
                    janus_backoff.more(msg)
                    self.janus_proc = subprocess.Popen(janus_cmd.split(), stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
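Both watchdog loops above follow the same pattern: read the child's output line by line, decode it defensively, and treat an empty read as EOF, i.e. the process having exited. A minimal generic sketch of that pattern; the shell command is just a stand-in:

import subprocess

proc = subprocess.Popen(["sh", "-c", "echo one; echo two 1>&2"],
                        stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
while True:
    raw = proc.stdout.readline()
    if not raw:                 # EOF: the child closed its output / exited
        print("exited with", proc.wait())
        break
    print(raw.decode("utf-8", errors="replace").rstrip())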
Code Example #24
    def _preprocess(text):
        """
        Strips ANSI and VT100 cursor control characters from line and makes sure it's a unicode.

        Parameters:
            text (str or unicode): The text to process

        Returns:
            (unicode) The processed text as a unicode, stripped of ANSI and VT100 cursor show/hide codes

        Example::

            >>> text = b'some text with some\x1b[?25h ANSI codes for \x1b[31mred words\x1b[39m and\x1b[?25l also some cursor control codes'
            >>> PipCaller._preprocess(text)
            u'some text with some ANSI codes for red words and also some cursor control codes'
        """
        return to_unicode(clean_ansi(text))
Code Example #25
File: pip.py  Project: Jaesin/OctoPrint
	def _preprocess(text):
		"""
		Strips ANSI and VT100 cursor control characters from line and makes sure it's a unicode.

		Parameters:
		    text (str or unicode): The text to process

		Returns:
		    (unicode) The processed text as a unicode, stripped of ANSI and VT100 cursor show/hide codes

		Example::

		    >>> text = b'some text with some\x1b[?25h ANSI codes for \x1b[31mred words\x1b[39m and\x1b[?25l also some cursor control codes'
		    >>> PipCaller._preprocess(text)
		    u'some text with some ANSI codes for red words and also some cursor control codes'
		"""
		return to_unicode(clean_ansi(text))
Code Example #26
File: files.py  Project: lciscon/OctoPrint
def sanitize_filename(name, really_universal=False):
    """
    Sanitizes the provided filename. Implementation differs between Python versions.

    On Python 2, the name will be ASCII-fied, using ``octoprint.util.text.sanitize`` with
    safe chars ``-_.()[] `` and all spaces replaced by ``_``.

    On Python 3, ``pathvalidate.sanitize_filename`` will be used instead, leaving the
    name as intact as possible while still being a legal file name under all operating
    systems.

    In all cases, a single leading ``.`` will be removed (as it denotes hidden files
    on *nix).

    Args:
        name:          The file name to sanitize. Only the name, no path elements.
        really_universal: If ``True``, the Python 2 method of sanitization will always
                          be used. Defaults to ``False``.

    Returns:
        the sanitized file name
    """
    from octoprint.util import to_unicode

    name = to_unicode(name)

    if name is None:
        return None

    if "/" in name or "\\" in name:
        raise ValueError("name must not contain / or \\")

    try:
        from pathvalidate import sanitize_filename as sfn
    except ImportError:
        sfn = _sfn_really_universal

    if really_universal:
        result = _sfn_really_universal(name)
    else:
        result = sfn(name)

    return result.lstrip(".")
Code Example #27
    def get_gcode(self, key, extruder_count=1):
        prefix = ""
        postfix = ""

        if self.get("gcode_flavor") == GcodeFlavors.ULTIGCODE:
            if key == "end_gcode":
                return "M25 ;Stop reading from this point on.\n;CURA_OCTO_PROFILE_STRING:%s\n" % (
                    self.get_profile_string())
            return ""

        if key == "start_gcode":
            contents = self.get_gcode_template("start_gcode",
                                               extruder_count=extruder_count)
            prefix += self.get_start_gcode_prefix(
                contents, extruder_count=extruder_count)

        else:
            contents = self.get_gcode_template(key,
                                               extruder_count=extruder_count)

        return to_unicode(prefix + re.sub(
            r"(.)\{([^\}]*)\}", self.replaceTagMatch, contents).rstrip() +
                          '\n' + postfix).strip() + '\n'
Code Example #28
File: virtual.py  Project: foosel/OctoPrint
	def readline(self):
		if self._debug_awol:
			time.sleep(self._read_timeout)
			return ""

		if self._debug_drop_connection:
			raise SerialTimeoutException()

		if self._debug_sleep > 0:
			# if we are supposed to sleep, we sleep not longer than the read timeout
			# (and then on the next call sleep again if there's time to sleep left)
			sleep_for = min(self._debug_sleep, self._read_timeout)
			self._debug_sleep -= sleep_for
			time.sleep(sleep_for)

			if self._debug_sleep > 0:
				# we slept the full read timeout, return an empty line
				return ""

			# otherwise our left over timeout is the read timeout minus what we already
			# slept for
			timeout = self._read_timeout - sleep_for

		else:
			# use the full read timeout as timeout
			timeout = self._read_timeout

		try:
			# fetch a line from the queue, wait no longer than timeout
			line = to_unicode(self.outgoing.get(timeout=timeout), errors="replace")
			self._seriallog.info(u">>> {}".format(line.strip()))
			self.outgoing.task_done()
			return to_bytes(line, errors="replace")
		except queue.Empty:
			# queue empty? return empty line
			return ""
Code Example #29
File: standard.py  Project: mrbeam/OctoPrint
	def on_comm_log(self, message):
		"""
		 Callback method for the comm object, called upon log output.
		"""
		self._addLog(to_unicode(message, "utf-8", errors="replace"))
Code Example #30
 def on_gcode_received(self, comm_instance, line, *args, **kwargs):
     if self._scan_received:
         self._run_checks("received", to_unicode(line, errors="replace"))
     return line
Code Example #31
File: __init__.py  Project: ByReaL/OctoPrint
	def do_slice(self, model_path, printer_profile, machinecode_path=None, profile_path=None, position=None,
	             on_progress=None, on_progress_args=None, on_progress_kwargs=None):
		try:
			with self._job_mutex:
				if not profile_path:
					profile_path = self._settings.get(["default_profile"])
				if not machinecode_path:
					path, _ = os.path.splitext(model_path)
					machinecode_path = path + ".gco"

				if position and isinstance(position, dict) and "x" in position and "y" in position:
					pos_x = position["x"]
					pos_y = position["y"]
				else:
					pos_x = None
					pos_y = None

				if on_progress:
					if not on_progress_args:
						on_progress_args = ()
					if not on_progress_kwargs:
						on_progress_kwargs = dict()

				self._cura_logger.info(u"### Slicing {} to {} using profile stored at {}"
				                       .format(to_unicode(model_path, errors="replace"),
				                               to_unicode(machinecode_path, errors="replace"),
				                               to_unicode(profile_path, errors="replace")))

				executable = normalize_path(self._settings.get(["cura_engine"]))
				if not executable:
					return False, u"Path to CuraEngine is not configured "

				working_dir = os.path.dirname(executable)

				slicing_profile = Profile(self._load_profile(profile_path), printer_profile, pos_x, pos_y)

				# NOTE: We can assume an extruder count of 1 here since the only way we currently
				# support dual extrusion in this implementation is by using the second extruder for support (which
				# the engine conversion will automatically detect and adapt accordingly).
				#
				# We currently do only support STL files as sliceables, which by default can only contain one mesh,
				# so no risk of having to slice multi-objects at the moment, which would necessitate a full analysis
				# of the objects to slice to determine amount of needed extruders to use here. If we ever decide to
				# also support dual extrusion slicing (including composition from multiple STLs or support for OBJ or
				# AMF files and the like), this code needs to be adapted!
				#
				# The extruder count is needed to decide which start/end gcode will be used from the Cura profile.
				# Stock Cura implementation counts the number of objects in the scene for this (and also takes a look
				# at the support usage, like the engine conversion here does). We only ever have one object.
				engine_settings = self._convert_to_engine(profile_path, printer_profile,
				                                          pos_x=pos_x, pos_y=pos_y,
				                                          used_extruders=1)

				# Start building the argument list for the CuraEngine command execution
				args = [executable, '-v', '-p']

				# Add the settings (sorted alphabetically) to the command
				for k, v in sorted(engine_settings.items(), key=lambda s: s[0]):
					args += ["-s", "%s=%s" % (k, str(v))]
				args += ["-o", machinecode_path, model_path]

				self._logger.info(u"Running {!r} in {}".format(u" ".join(map(lambda x: to_unicode(x, errors="replace"),
				                                                             args)),
				                                               working_dir))

				import sarge
				p = sarge.run(args, cwd=working_dir, async_=True, stdout=sarge.Capture(), stderr=sarge.Capture())
				p.wait_events()
				self._slicing_commands[machinecode_path] = p.commands[0]

			try:
				layer_count = None
				step_factor = dict(
					inset=0,
					skin=1,
					export=2
				)
				analysis = None
				while p.returncode is None:
					line = p.stderr.readline(timeout=0.5)
					if not line:
						p.commands[0].poll()
						continue

					line = to_unicode(line, errors="replace")
					self._cura_logger.debug(line.strip())

					if on_progress is not None:
						# The Cura slicing process has three individual steps, each consisting of <layer_count> substeps:
						#
						#   - inset
						#   - skin
						#   - export
						#
						# So each layer will be processed three times, once for each step, resulting in a total amount of
						# substeps of 3 * <layer_count>.
						#
						# The CuraEngine reports the calculated layer count and the continuous progress on stderr.
						# The layer count gets reported right at the beginning in a line of the format:
						#
						#   Layer count: <layer_count>
						#
						# The individual progress per each of the three steps gets reported on stderr in a line of
						# the format:
						#
						#   Progress:<step>:<current_layer>:<layer_count>
						#
						# Thus, for determining the overall progress the following formula applies:
						#
						#   progress = (<step_factor> * <layer_count> + <current_layer>) / (<layer_count> * 3)
						#
						# with <step_factor> being 0 for "inset", 1 for "skin" and 2 for "export".

						if line.startswith(u"Layer count:") and layer_count is None:
							try:
								layer_count = float(line[len(u"Layer count:"):].strip())
							except:
								pass

						elif line.startswith(u"Progress:"):
							split_line = line[len(u"Progress:"):].strip().split(":")
							if len(split_line) == 3:
								step, current_layer, _ = split_line
								try:
									current_layer = float(current_layer)
								except:
									pass
								else:
									if not step in step_factor:
										continue
									on_progress_kwargs["_progress"] = (step_factor[step] * layer_count + current_layer) / (layer_count * 3)
									on_progress(*on_progress_args, **on_progress_kwargs)

						elif line.startswith(u"Print time:"):
							try:
								print_time = int(line[len(u"Print time:"):].strip())
								if analysis is None:
									analysis = dict()
								analysis["estimatedPrintTime"] = print_time
							except:
								pass

						# Get the filament usage

						elif line.startswith(u"Filament:") or line.startswith(u"Filament2:"):
							if line.startswith(u"Filament:"):
								filament_str = line[len(u"Filament:"):].strip()
								tool_key = "tool0"
							else:
								filament_str = line[len(u"Filament2:"):].strip()
								tool_key = "tool1"

							try:
								filament = int(filament_str)

								if analysis is None:
									analysis = dict()
								if not "filament" in analysis:
									analysis["filament"] = dict()
								if not tool_key in analysis["filament"]:
									analysis["filament"][tool_key] = dict()

								if slicing_profile.get_float("filament_diameter") is not None:
									if slicing_profile.get("gcode_flavor") == GcodeFlavors.ULTIGCODE or slicing_profile.get("gcode_flavor") == GcodeFlavors.REPRAP_VOLUME:
										analysis["filament"][tool_key] = _get_usage_from_volume(filament, slicing_profile.get_float("filament_diameter"))
									else:
										analysis["filament"][tool_key] = _get_usage_from_length(filament, slicing_profile.get_float("filament_diameter"))

							except:
								pass
			finally:
				p.close()

			with self._job_mutex:
				if machinecode_path in self._cancelled_jobs:
					self._cura_logger.info(u"### Cancelled")
					raise octoprint.slicing.SlicingCancelled()

			self._cura_logger.info(u"### Finished, returncode %d" % p.returncode)
			if p.returncode == 0:
				return True, dict(analysis=analysis)
			else:
				self._logger.warn(u"Could not slice via Cura, got return code %r" % p.returncode)
				return False, "Got returncode %r" % p.returncode

		except octoprint.slicing.SlicingCancelled as e:
			raise e
		except:
			self._logger.exception(u"Could not slice via Cura, got an unknown error")
			return False, "Unknown error, please consult the log file"

		finally:
			with self._job_mutex:
				if machinecode_path in self._cancelled_jobs:
					self._cancelled_jobs.remove(machinecode_path)
				if machinecode_path in self._slicing_commands:
					del self._slicing_commands[machinecode_path]

			self._cura_logger.info("-" * 40)
Code Example #32
File: pip.py  Project: leductan-nguyen/RaionPi
	def _convert_line(line):
		return to_unicode(_clean_ansi(line), errors="replace")
Code Example #33
File: virtual.py  Project: foosel/OctoPrint
	def _processIncoming(self):
		next_wait_timeout = monotonic_time() + self._waitInterval
		buf = ""
		while self.incoming is not None and not self._killed:
			self._simulateTemps()

			if self._heatingUp:
				time.sleep(1)
				continue

			try:
				data = self.incoming.get(timeout=0.01)
				self.incoming.task_done()
			except queue.Empty:
				if self._sendWait and monotonic_time() > next_wait_timeout:
					self._send("wait")
					next_wait_timeout = monotonic_time() + self._waitInterval
				continue

			buf += data
			if "\n" in buf:
				data = buf[:buf.find("\n") + 1]
				buf = buf[buf.find("\n") + 1:]
			else:
				continue

			next_wait_timeout = monotonic_time() + self._waitInterval

			if data is None:
				continue

			if self._dont_answer:
				self._dont_answer = False
				continue

			data = to_unicode(data.strip(), errors="replace")

			# strip checksum
			if "*" in data:
				checksum = int(data[data.rfind("*") + 1:])
				data = data[:data.rfind("*")]
				if not checksum == self._calculate_checksum(data):
					self._triggerResend(expected=self.currentLine + 1)
					continue

				self.currentLine += 1
			elif settings().getBoolean(["devel", "virtualPrinter", "forceChecksum"]):
				self._send(self._error("checksum_missing"))
				continue

			# track N = N + 1
			if data.startswith("N") and "M110" in data:
				linenumber = int(re.search("N([0-9]+)", data).group(1))
				self.lastN = linenumber
				self.currentLine = linenumber

				self._triggerResendAt100 = True
				self._triggerResendWithTimeoutAt105 = True

				self._sendOk()
				continue
			elif data.startswith("N"):
				linenumber = int(re.search("N([0-9]+)", data).group(1))
				expected = self.lastN + 1
				if linenumber != expected:
					self._triggerResend(actual=linenumber)
					continue
				elif linenumber == 100 and self._triggerResendAt100:
					# simulate a resend at line 100
					self._triggerResendAt100 = False
					self._triggerResend(expected=100)
					continue
				elif linenumber == 105 and self._triggerResendWithTimeoutAt105 and not self._writingToSd:
					# simulate a resend with timeout at line 105
					self._triggerResendWithTimeoutAt105 = False
					self._triggerResend(expected=105)
					self._dont_answer = True
					self.lastN = linenumber
					continue
				elif linenumber == 110 and self._triggerResendWithMissingLinenoAt110 and not self._writingToSd:
					self._triggerResendWithMissingLinenoAt110 = False
					self._send(self._error("lineno_missing", self.lastN))
					continue
				elif linenumber == 115 and self._triggerResendWithChecksumMismatchAt115 and not self._writingToSd:
					self._triggerResendWithChecksumMismatchAt115 = False
					self._triggerResend(checksum=True)
					continue
				elif len(self._prepared_errors):
					prepared = self._prepared_errors.pop(0)
					if callable(prepared):
						prepared(linenumber, self.lastN, data)
						continue
					elif isinstance(prepared, basestring):
						self._send(prepared)
						continue
				else:
					self.lastN = linenumber
				data = data.split(None, 1)[1].strip()

			data += "\n"

			if data.startswith("!!DEBUG:") or data.strip() == "!!DEBUG":
				debug_command = ""
				if data.startswith("!!DEBUG:"):
					debug_command = data[len("!!DEBUG:"):].strip()
				self._debugTrigger(debug_command)
				continue

			# shortcut for writing to SD
			if self._writingToSd and self._writingToSdHandle is not None and not "M29" in data:
				self._writingToSdHandle.write(data)
				self._sendOk()
				continue

			if data.strip() == "version":
				from octoprint import __version__
				self._send("OctoPrint VirtualPrinter v" + __version__)
				continue

			# if we are sending oks before command output, send it now
			if len(data.strip()) > 0 and self._okBeforeCommandOutput:
				self._sendOk()

			# actual command handling
			command_match = VirtualPrinter.command_regex.match(data)
			if command_match is not None:
				command = command_match.group(0)
				letter = command_match.group(1)

				try:
					# if we have a method _gcode_G, _gcode_M or _gcode_T, execute that first
					letter_handler = "_gcode_{}".format(letter)
					if hasattr(self, letter_handler):
						code = command_match.group(2)
						handled = getattr(self, letter_handler)(code, data)
						if handled:
							continue

					# then look for a method _gcode_<command> and execute that if it exists
					command_handler = "_gcode_{}".format(command)
					if hasattr(self, command_handler):
						handled = getattr(self, command_handler)(data)
						if handled:
							continue

				finally:
					# make sure that the debug sleepAfter and sleepAfterNext stuff works even
					# if we continued above
					if len(self._sleepAfter) or len(self._sleepAfterNext):
						interval = None
						if command in self._sleepAfter:
							interval = self._sleepAfter[command]
						elif command in self._sleepAfterNext:
							interval = self._sleepAfterNext[command]
							del self._sleepAfterNext[command]

						if interval is not None:
							self._send("// sleeping for {interval} seconds".format(interval=interval))
							time.sleep(interval)

			# if we are sending oks after command output, send it now
			if len(data.strip()) > 0 and not self._okBeforeCommandOutput:
				self._sendOk()

		self._logger.info("Closing down read loop")
Code Example #34
    def do_slice(self,
                 model_path,
                 printer_profile,
                 machinecode_path=None,
                 profile_path=None,
                 position=None,
                 on_progress=None,
                 on_progress_args=None,
                 on_progress_kwargs=None):
        try:
            with self._job_mutex:
                if not profile_path:
                    profile_path = self._settings.get(["default_profile"])
                if not machinecode_path:
                    path, _ = os.path.splitext(model_path)
                    machinecode_path = path + ".gco"

                if position and isinstance(
                        position,
                        dict) and "x" in position and "y" in position:
                    pos_x = position["x"]
                    pos_y = position["y"]
                else:
                    pos_x = None
                    pos_y = None

                if on_progress:
                    if not on_progress_args:
                        on_progress_args = ()
                    if not on_progress_kwargs:
                        on_progress_kwargs = dict()

                self._cura_logger.info(
                    u"### Slicing {} to {} using profile stored at {}".format(
                        to_unicode(model_path, errors="replace"),
                        to_unicode(machinecode_path, errors="replace"),
                        to_unicode(profile_path, errors="replace")))

                executable = normalize_path(self._settings.get(["cura_engine"
                                                                ]))
                if not executable:
                    return False, u"Path to CuraEngine is not configured "

                working_dir = os.path.dirname(executable)

                slicing_profile = Profile(self._load_profile(profile_path),
                                          printer_profile, pos_x, pos_y)

                # NOTE: We can assume an extruder count of 1 here since the only way we currently
                # support dual extrusion in this implementation is by using the second extruder for support (which
                # the engine conversion will automatically detect and adapt accordingly).
                #
                # We currently do only support STL files as sliceables, which by default can only contain one mesh,
                # so no risk of having to slice multi-objects at the moment, which would necessitate a full analysis
                # of the objects to slice to determine amount of needed extruders to use here. If we ever decide to
                # also support dual extrusion slicing (including composition from multiple STLs or support for OBJ or
                # AMF files and the like), this code needs to be adapted!
                #
                # The extruder count is needed to decide which start/end gcode will be used from the Cura profile.
                # Stock Cura implementation counts the number of objects in the scene for this (and also takes a look
                # at the support usage, like the engine conversion here does). We only ever have one object.
                engine_settings = self._convert_to_engine(profile_path,
                                                          printer_profile,
                                                          pos_x=pos_x,
                                                          pos_y=pos_y,
                                                          used_extruders=1)

                # Start building the argument list for the CuraEngine command execution
                args = [executable, '-v', '-p']

                # Add the settings (sorted alphabetically) to the command
                for k, v in sorted(engine_settings.items(),
                                   key=lambda s: s[0]):
                    args += ["-s", "%s=%s" % (k, str(v))]
                args += ["-o", machinecode_path, model_path]

                self._logger.info(u"Running {!r} in {}".format(
                    u" ".join(
                        map(lambda x: to_unicode(x, errors="replace"), args)),
                    working_dir))

                import sarge
                p = sarge.run(args,
                              cwd=working_dir,
                              async_=True,  # "async" is a reserved word in Python 3; newer sarge uses "async_"
                              stdout=sarge.Capture(),
                              stderr=sarge.Capture())
                p.wait_events()
                self._slicing_commands[machinecode_path] = p.commands[0]

            try:
                layer_count = None
                step_factor = dict(inset=0, skin=1, export=2)
                analysis = None
                while p.returncode is None:
                    line = p.stderr.readline(timeout=0.5)
                    if not line:
                        p.commands[0].poll()
                        continue

                    line = to_unicode(line, errors="replace")
                    self._cura_logger.debug(line.strip())

                    if on_progress is not None:
                        # The Cura slicing process has three individual steps, each consisting of <layer_count> substeps:
                        #
                        #   - inset
                        #   - skin
                        #   - export
                        #
                        # So each layer will be processed three times, once for each step, resulting in a total amount of
                        # substeps of 3 * <layer_count>.
                        #
                        # The CuraEngine reports the calculated layer count and the continuous progress on stderr.
                        # The layer count gets reported right at the beginning in a line of the format:
                        #
                        #   Layer count: <layer_count>
                        #
                        # The individual progress per each of the three steps gets reported on stderr in a line of
                        # the format:
                        #
                        #   Progress:<step>:<current_layer>:<layer_count>
                        #
                        # Thus, for determining the overall progress the following formula applies:
                        #
                        #   progress = (<step_factor> * <layer_count> + <current_layer>) / (<layer_count> * 3)
                        #
                        # with <step_factor> being 0 for "inset", 1 for "skin" and 2 for "export".

                        if line.startswith(
                                u"Layer count:") and layer_count is None:
                            try:
                                layer_count = float(
                                    line[len(u"Layer count:"):].strip())
                            except:
                                pass

                        elif line.startswith(u"Progress:"):
                            split_line = line[len(u"Progress:"):].strip(
                            ).split(":")
                            if len(split_line) == 3:
                                step, current_layer, _ = split_line
                                try:
                                    current_layer = float(current_layer)
                                except:
                                    pass
                                else:
                                    if not step in step_factor:
                                        continue
                                    on_progress_kwargs["_progress"] = (
                                        step_factor[step] * layer_count +
                                        current_layer) / (layer_count * 3)
                                    on_progress(*on_progress_args,
                                                **on_progress_kwargs)

                        elif line.startswith(u"Print time:"):
                            try:
                                print_time = int(
                                    line[len(u"Print time:"):].strip())
                                if analysis is None:
                                    analysis = dict()
                                analysis["estimatedPrintTime"] = print_time
                            except:
                                pass

                        # Get the filament usage

                        elif line.startswith(u"Filament:") or line.startswith(
                                u"Filament2:"):
                            if line.startswith(u"Filament:"):
                                filament_str = line[len(u"Filament:"):].strip()
                                tool_key = "tool0"
                            else:
                                filament_str = line[len(u"Filament2:"):].strip(
                                )
                                tool_key = "tool1"

                            try:
                                filament = int(filament_str)

                                if analysis is None:
                                    analysis = dict()
                                if not "filament" in analysis:
                                    analysis["filament"] = dict()
                                if not tool_key in analysis["filament"]:
                                    analysis["filament"][tool_key] = dict()

                                if slicing_profile.get_float(
                                        "filament_diameter") is not None:
                                    if slicing_profile.get(
                                            "gcode_flavor"
                                    ) == GcodeFlavors.ULTIGCODE or slicing_profile.get(
                                            "gcode_flavor"
                                    ) == GcodeFlavors.REPRAP_VOLUME:
                                        analysis["filament"][
                                            tool_key] = _get_usage_from_volume(
                                                filament,
                                                slicing_profile.get_float(
                                                    "filament_diameter"))
                                    else:
                                        analysis["filament"][
                                            tool_key] = _get_usage_from_length(
                                                filament,
                                                slicing_profile.get_float(
                                                    "filament_diameter"))

                            except:
                                pass
            finally:
                p.close()

            with self._job_mutex:
                if machinecode_path in self._cancelled_jobs:
                    self._cura_logger.info(u"### Cancelled")
                    raise octoprint.slicing.SlicingCancelled()

            self._cura_logger.info(u"### Finished, returncode %d" %
                                   p.returncode)
            if p.returncode == 0:
                return True, dict(analysis=analysis)
            else:
                self._logger.warn(
                    u"Could not slice via Cura, got return code %r" %
                    p.returncode)
                return False, "Got returncode %r" % p.returncode

        except octoprint.slicing.SlicingCancelled as e:
            raise e
        except:
            self._logger.exception(
                u"Could not slice via Cura, got an unknown error")
            return False, "Unknown error, please consult the log file"

        finally:
            with self._job_mutex:
                if machinecode_path in self._cancelled_jobs:
                    self._cancelled_jobs.remove(machinecode_path)
                if machinecode_path in self._slicing_commands:
                    del self._slicing_commands[machinecode_path]

            self._cura_logger.info("-" * 40)
Code Example #35
File: __init__.py  Project: mrbeam/OctoPrint
	def _populated_check(self, target, check):
		if not "type" in check:
			raise exceptions.UnknownCheckType()

		result = dict(check)

		if target == "octoprint":
			from flask.ext.babel import gettext

			result["displayName"] = to_unicode(check.get("displayName"), errors="replace")
			if result["displayName"] is None:
				# displayName missing or set to None
				result["displayName"] = to_unicode(gettext("OctoPrint"), errors="replace")

			result["displayVersion"] = to_unicode(check.get("displayVersion"), errors="replace")
			if result["displayVersion"] is None:
				# displayVersion missing or set to None
				result["displayVersion"] = u"{octoprint_version}"

			stable_branch = "master"
			release_branches = []
			if "stable_branch" in check:
				release_branches.append(check["stable_branch"]["branch"])
				stable_branch = check["stable_branch"]["branch"]
			if "prerelease_branches" in check:
				release_branches += [x["branch"] for x in check["prerelease_branches"]]
			result["released_version"] = not release_branches or BRANCH in release_branches

			if check["type"] == "github_commit":
				result["current"] = REVISION if REVISION else "unknown"
			else:
				result["current"] = VERSION

				if check["type"] == "github_release" and (check.get("prerelease", None) or BRANCH != stable_branch):
					# we are tracking github releases and are either also tracking prerelease OR are currently installed
					# from something that is not the stable (master) branch => we need to change some parameters

					# we compare versions fully, not just the base so that we see a difference
					# between RCs + stable for the same version release
					result["force_base"] = False

					if check.get("update_script", None):
						# if we are using the update_script, we need to set our update_branch and force
						# to install the exact version we requested

						if check.get("prerelease", None):
							# we are tracking prereleases => we want to be on the correct prerelease channel/branch
							channel = check.get("prerelease_channel", None)
							if channel:
								# if we have a release channel, we also set our update_branch here to our release channel
								# in case it's not already set
								result["update_branch"] = check.get("update_branch", channel)

							# we also force our target version in the update
							result["force_exact_version"] = True

						else:
							# we are not tracking prereleases, but aren't on the stable branch either => switch back
							# to stable branch on update
							result["update_branch"] = check.get("update_branch", stable_branch)


						if BRANCH != result.get("prerelease_channel"):
							# we force python unequality check here because that will also allow us to
							# downgrade on a prerelease channel change (rc/devel => rc/maintenance)
							#
							# we detect channel changes by comparing the current branch with the target
							# branch of the release channel - unequality means we might have to handle
							# a downgrade
							result["release_compare"] = "python_unequal"

		else:
			result["displayName"] = to_unicode(check.get("displayName"), errors="replace")
			if result["displayName"] is None:
				# displayName missing or None
				result["displayName"] = to_unicode(target, errors="replace")

			result["displayVersion"] = to_unicode(check.get("displayVersion", check.get("current")), errors="replace")
			if result["displayVersion"] is None:
				# displayVersion AND current missing or None
				result["displayVersion"] = u"unknown"

			if check["type"] in ("github_commit",):
				result["current"] = check.get("current", None)
			else:
				result["current"] = check.get("current", check.get("displayVersion", None))

		if "pip" in result:
			if not "pip_command" in check and self._settings.get(["pip_command"]) is not None:
				result["pip_command"] = self._settings.get(["pip_command"])

		return result
Code Example #36
 def on_firmware_cap_received(self, comm_instance, cap, enabled, all_caps):
     self._run_checks("cap", to_unicode(cap, errors="replace"), enabled)
Code Example #37
File: encoding.py  Project: tinkercnc/OctoPrint
    def dumps(cls, obj: Any) -> str:
        """
        Dump an object to JSON, handles additional types that the JSON encoder can't, like
        bytes and frozendicts.
        """
        return json.dumps(
            obj,
            default=cls.encode,
            separators=(",", ":"),
            indent=None,
            allow_nan=False,
        )

    @classmethod
    def loads(cls, s: str) -> Any:
        return json.loads(s)

    @classmethod
    def encode(cls, obj):
        for type_, encoder in cls.encoders.items():  # "type_" avoids shadowing the builtin type() used below
            if isinstance(obj, type_):
                return encoder(obj)
        raise TypeError(f"Unserializable type {type(obj)}")


JsonEncoding.add_encoder(frozendict, lambda obj: dict(obj))
JsonEncoding.add_encoder(bytes, lambda obj: to_unicode(obj))

dumps = JsonEncoding.dumps
loads = JsonEncoding.loads
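The encode hook above is plugged into json.dumps via its default parameter, which the standard library calls for any value it cannot serialize natively. The same mechanism in a self-contained sketch:

import json

print(json.dumps({"firmware": b"Marlin 2.0"},
                 default=lambda o: o.decode("utf-8", "replace") if isinstance(o, bytes) else str(o),
                 separators=(",", ":")))
# -> {"firmware":"Marlin 2.0"}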
Code Example #38
File: __init__.py  Project: foosel/OctoPrint
	def on_firmware_cap_received(self, comm_instance, cap, enabled, all_caps):
		self._run_checks("cap",
		                 to_unicode(cap, errors="replace"),
		                 enabled)
Code Example #39
File: __init__.py  Project: foosel/OctoPrint
	def on_firmware_info_received(self, comm_instance, firmware_name, firmware_data):
		self._run_checks("m115",
		                 to_unicode(firmware_name, errors="replace"),
		                 dict((to_unicode(key, errors="replace"), to_unicode(value, errors="replace"))
		                      for key, value in firmware_data.items()))
		self._scan_received = False
Code Example #40
File: __init__.py  Project: foosel/OctoPrint
	def on_gcode_received(self, comm_instance, line, *args, **kwargs):
		if self._scan_received:
			self._run_checks("received", to_unicode(line, errors="replace"))
		return line
Code Example #41
File: files.py  Project: tinkercnc/OctoPrint
def find_collision_free_name(filename,
                             extension,
                             existing_filenames,
                             max_power=2):
    """
    Tries to find a collision free translation of "<filename>.<extension>" to the 8.3 DOS compatible format,
    preventing collisions with any of the ``existing_filenames``.

    First strips all of ``."/\\[]:;=,`` from the filename and extensions, converts them to lower case and truncates
    the ``extension`` to a maximum length of 3 characters.

    If the filename is already equal or less than 8 characters in length after that procedure and "<filename>.<extension>"
    are not contained in the ``existing_files``, that concatenation will be returned as the result.

    If not, the following algorithm will be applied to try to find a collision free name::

        set counter := power := 1
        while counter < 10^max_power:
            set truncated := substr(filename, 0, 6 - power + 1) + "~" + counter
            set result := "<truncated>.<extension>"
            if result is collision free:
                return result
            counter++
            if counter >= 10 ** power:
                power++
        raise ValueError

    This will basically -- for a given original filename of ``some_filename`` and an extension of ``gco`` -- iterate
    through names of the format ``some_f~1.gco``, ``some_f~2.gco``, ..., ``some_~10.gco``, ``some_~11.gco``, ...,
    ``<prefix>~<n>.gco`` for ``n`` less than 10 ^ ``max_power``, returning as soon as one is found that is not colliding.

    Arguments:
        filename (string): The filename without the extension to convert to 8.3.
        extension (string): The extension to convert to 8.3 -- will be truncated to 3 characters if it's longer than
            that.
        existing_filenames (list): A list of existing filenames to prevent name collisions with.
        max_power (int): Limits the possible attempts of generating a collision free name to 10 ^ ``max_power``
            variations. Defaults to 2, so the name generation will maximally reach ``<name>~99.<ext>`` before
            aborting and raising an exception.

    Returns:
        string: A 8.3 representation of the provided original filename, ensured to not collide with the provided
            ``existing_filenames``

    Raises:
        ValueError: No collision free name could be found.

    Examples:

        >>> find_collision_free_name("test1234", "gco", [])
        'test1234.gco'
        >>> find_collision_free_name("test1234", "gcode", [])
        'test1234.gco'
        >>> find_collision_free_name("test12345", "gco", [])
        'test12~1.gco'
        >>> find_collision_free_name("test 123", "gco", [])
        'test_123.gco'
        >>> find_collision_free_name("test1234", "g o", [])
        'test1234.g_o'
        >>> find_collision_free_name("test12345", "gco", ["/test12~1.gco"])
        'test12~2.gco'
        >>> many_files = ["/test12~{}.gco".format(x) for x in range(10)[1:]]
        >>> find_collision_free_name("test12345", "gco", many_files)
        'test1~10.gco'
        >>> many_more_files = many_files + ["/test1~{}.gco".format(x) for x in range(10, 99)]
        >>> find_collision_free_name("test12345", "gco", many_more_files)
        'test1~99.gco'
        >>> many_more_files_plus_one = many_more_files + ["/test1~99.gco"]
        >>> find_collision_free_name("test12345", "gco", many_more_files_plus_one)
        Traceback (most recent call last):
        ...
        ValueError: Can't create a collision free filename
        >>> find_collision_free_name("test12345", "gco", many_more_files_plus_one, max_power=3)
        'test~100.gco'

    """
    import re

    from octoprint.util import to_unicode

    filename = to_unicode(filename)
    extension = to_unicode(extension)

    if filename.startswith("/"):
        filename = filename[1:]
    existing_filenames = [
        to_unicode(x[1:] if x.startswith("/") else x)
        for x in existing_filenames
    ]

    def make_valid(text):
        return re.sub(r"\s+", "_",
                      text.translate({ord(i): None
                                      for i in r".\"/\[]:;=,"})).lower()

    filename = make_valid(filename)
    extension = make_valid(extension)
    extension = extension[:3] if len(extension) > 3 else extension

    full_name_format = "{filename}.{extension}" if extension else "{filename}"

    result = full_name_format.format(filename=filename, extension=extension)
    if len(filename) <= 8 and result not in existing_filenames:
        # early exit
        return result

    counter = 1
    power = 1
    prefix_format = "{segment}~{counter}"
    while counter < (10**max_power):
        prefix = prefix_format.format(segment=filename[:(6 - power + 1)],
                                      counter=str(counter))
        result = full_name_format.format(filename=prefix, extension=extension)
        if result not in existing_filenames:
            return result
        counter += 1
        if counter >= 10**power:
            power += 1

    raise ValueError("Can't create a collision free filename")
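As a quick usage sketch on top of the doctests above, the loop below feeds each generated name back in as an existing filename, so every call yields the next free ~N variant.

# Usage sketch: generate a few collision-free 8.3 names in sequence.
existing = []
for _ in range(3):
    name = find_collision_free_name("some_filename", "gcode", existing)
    existing.append(name)
    print(name)
# prints some_f~1.gco, some_f~2.gco, some_f~3.gco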
Code example #42
File: __init__.py Project: Jaesin/OctoPrint
	def _populated_check(self, target, check):
		if "type" not in check:
			raise exceptions.UnknownCheckType()

		result = dict(check)

		if target == "octoprint":
			from flask_babel import gettext

			from octoprint.util.version import is_released_octoprint_version, is_stable_octoprint_version

			result["displayName"] = to_unicode(check.get("displayName"), errors="replace")
			if result["displayName"] is None:
				# displayName missing or set to None
				result["displayName"] = to_unicode(gettext("OctoPrint"), errors="replace")

			result["displayVersion"] = to_unicode(check.get("displayVersion"), errors="replace")
			if result["displayVersion"] is None:
				# displayVersion missing or set to None
				result["displayVersion"] = u"{octoprint_version}"

			stable_branch = "master"
			release_branches = []
			if "stable_branch" in check:
				release_branches.append(check["stable_branch"]["branch"])
				stable_branch = check["stable_branch"]["branch"]
			if "prerelease_branches" in check:
				release_branches += [x["branch"] for x in check["prerelease_branches"]]
			result["released_version"] = is_released_octoprint_version()

			if check["type"] in self.COMMIT_TRACKING_TYPES:
				result["current"] = REVISION if REVISION else "unknown"
			else:
				result["current"] = VERSION

				if check["type"] == "github_release" and (check.get("prerelease", None) or not is_stable_octoprint_version()):
					# we are tracking github releases and are either also tracking prereleases OR are currently running
					# a non-stable version => we need to change some parameters

					# we compare versions fully, not just the base so that we see a difference
					# between RCs + stable for the same version release
					result["force_base"] = False

					if check.get("prerelease", None):
						# we are tracking prereleases => we want to be on the correct prerelease channel/branch
						channel = check.get("prerelease_channel", None)
						if channel:
							# if we have a release channel, we also set our update_branch here to our release channel
							# in case it's not already set
							result["update_branch"] = check.get("update_branch", channel)

					else:
						# we are not tracking prereleases, but aren't on the stable branch either => switch back
						# to stable branch on update
						result["update_branch"] = check.get("update_branch", stable_branch)

					if check.get("update_script", None):
						# we force an exact version & Python inequality check, to be able to downgrade
						result["force_exact_version"] = True
						result["release_compare"] = "python_unequal"

					elif check.get("pip", None):
						# we force a Python inequality check for pip installs, to be able to downgrade
						result["release_compare"] = "python_unequal"

		else:
			result["displayName"] = to_unicode(check.get("displayName"), errors="replace")
			if result["displayName"] is None:
				# displayName missing or None
				result["displayName"] = to_unicode(target, errors="replace")

			result["displayVersion"] = to_unicode(check.get("displayVersion", check.get("current")), errors="replace")
			if result["displayVersion"] is None:
				# displayVersion AND current missing or None
				result["displayVersion"] = u"unknown"

			if check["type"] in self.COMMIT_TRACKING_TYPES:
				result["current"] = check.get("current", None)
			else:
				result["current"] = check.get("current", check.get("displayVersion", None))

		if "pip" in result:
			if "pip_command" not in check and self._settings.get(["pip_command"]) is not None:
				result["pip_command"] = self._settings.get(["pip_command"])

		return result
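To make the plugin-target branch (the final else above) more tangible, here is a standalone sketch that applies the same default filling to a hypothetical check dict; the target name and repository fields are assumptions, and the commit tracking and settings/pip handling are deliberately left out.

# Standalone sketch mirroring the non-"octoprint" branch above for a
# hypothetical plugin check; commit tracking and settings lookup are omitted.
from octoprint.util import to_unicode

target = "some_plugin"      # hypothetical target name
check = {
	"type": "github_release",
	"user": "someuser",     # hypothetical repository owner
	"repo": "SomePlugin",   # hypothetical repository name
	"current": "1.0.0",
}

result = dict(check)

result["displayName"] = to_unicode(check.get("displayName"), errors="replace")
if result["displayName"] is None:
	result["displayName"] = to_unicode(target, errors="replace")

result["displayVersion"] = to_unicode(check.get("displayVersion", check.get("current")), errors="replace")
if result["displayVersion"] is None:
	result["displayVersion"] = u"unknown"

result["current"] = check.get("current", check.get("displayVersion", None))
# result now carries displayName "some_plugin", displayVersion "1.0.0", current "1.0.0"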
Code example #43
File: __init__.py Project: 3DKIN/fw-OctoPrint
	def _populated_check(self, target, check):
		if "type" not in check:
			raise exceptions.UnknownCheckType()

		result = dict(check)

		if target == "octoprint":
			from flask_babel import gettext

			from octoprint.util.version import is_released_octoprint_version, is_stable_octoprint_version

			result["displayName"] = to_unicode(check.get("displayName"), errors="replace")
			if result["displayName"] is None:
				# displayName missing or set to None
				result["displayName"] = to_unicode(gettext("OctoPrint"), errors="replace")

			result["displayVersion"] = to_unicode(check.get("displayVersion"), errors="replace")
			if result["displayVersion"] is None:
				# displayVersion missing or set to None
				result["displayVersion"] = u"{octoprint_version}"

			stable_branch = "master"
			release_branches = []
			if "stable_branch" in check:
				release_branches.append(check["stable_branch"]["branch"])
				stable_branch = check["stable_branch"]["branch"]
			if "prerelease_branches" in check:
				release_branches += [x["branch"] for x in check["prerelease_branches"]]
			result["released_version"] = is_released_octoprint_version()

			if check["type"] in self.COMMIT_TRACKING_TYPES:
				result["current"] = REVISION if REVISION else "unknown"
			else:
				result["current"] = VERSION

				if check["type"] == "github_release" and (check.get("prerelease", None) or not is_stable_octoprint_version()):
					# we are tracking github releases and are either also tracking prereleases OR are currently running
					# a non-stable version => we need to change some parameters

					# we compare versions fully, not just the base so that we see a difference
					# between RCs + stable for the same version release
					result["force_base"] = False

					if check.get("prerelease", None):
						# we are tracking prereleases => we want to be on the correct prerelease channel/branch
						channel = check.get("prerelease_channel", None)
						if channel:
							# if we have a release channel, we also set our update_branch here to our release channel
							# in case it's not already set
							result["update_branch"] = check.get("update_branch", channel)

					else:
						# we are not tracking prereleases, but aren't on the stable branch either => switch back
						# to stable branch on update
						result["update_branch"] = check.get("update_branch", stable_branch)

					if check.get("update_script", None):
						# we force an exact version & Python inequality check, to be able to downgrade
						result["force_exact_version"] = True
						result["release_compare"] = "python_unequal"

					elif check.get("pip", None):
						# we force a Python inequality check for pip installs, to be able to downgrade
						result["release_compare"] = "python_unequal"

		else:
			result["displayName"] = to_unicode(check.get("displayName"), errors="replace")
			if result["displayName"] is None:
				# displayName missing or None
				result["displayName"] = to_unicode(target, errors="replace")

			result["displayVersion"] = to_unicode(check.get("displayVersion", check.get("current")), errors="replace")
			if result["displayVersion"] is None:
				# displayVersion AND current missing or None
				result["displayVersion"] = u"unknown"

			if check["type"] in self.COMMIT_TRACKING_TYPES:
				result["current"] = check.get("current", None)
			else:
				result["current"] = check.get("current", check.get("displayVersion", None))

		if "pip" in result:
			if "pip_command" not in check and self._settings.get(["pip_command"]) is not None:
				result["pip_command"] = self._settings.get(["pip_command"])

		return result
Code example #44
File: standard.py Project: mrbeam/OctoPrint
	def on_comm_message(self, message):
		"""
		Callback method for the comm object, called upon message exchanges via serial.
		Stores the message in the message buffer, truncating the buffer to the last 300 lines.
		"""
		self._addMessage(to_unicode(message, "utf-8", errors="replace"))
Code example #46
	def on_comm_log(self, message):
		"""
		Callback method for the comm object, called upon log output.
		"""
		self._addLog(to_unicode(message, "utf-8", errors="replace"))
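_addMessage and _addLog themselves are not shown in these snippets; purely as a rough idea of the "last 300 lines" truncation their docstrings mention, a bounded buffer could be sketched as follows (class name and size are assumptions, not the project's actual implementation).

# Hypothetical sketch of a bounded message/log buffer as hinted at by the
# docstrings above; not taken from the project itself.
from collections import deque


class MessageBuffer(object):
	def __init__(self, maxlen=300):
		# a deque with maxlen silently drops the oldest entries on append
		self._buffer = deque(maxlen=maxlen)

	def add(self, line):
		self._buffer.append(line)

	def lines(self):
		return list(self._buffer)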