def run(base_dir, timeout):
    """Run the compiled program (./a.out) inside *base_dir*.

    stdin is fed from ``test.in`` and stdout is captured to ``test.out``,
    both located in *base_dir*.

    :param base_dir: directory containing a.out, test.in and test.out.
    :param timeout: seconds to wait before killing the program.
    :return: tuple ``(ok?, msg)`` — ``(True, '')`` on exit code 0,
             otherwise ``(False, reason)``.
    """
    cmd = ['./a.out']
    out_path = os.path.join(base_dir, 'test.out')
    in_path = os.path.join(base_dir, 'test.in')
    with open(out_path, 'w') as fout, open(in_path) as fin:
        p = Popen(cmd, stdin=fin, stdout=fout, cwd=base_dir)
        try:
            p.wait(timeout)
        except TimeoutExpired:
            p.kill()
            # Reap the killed child; without this wait() the process
            # lingers as a zombie until the interpreter exits.
            p.wait()
            return False, 'time limit exceed'
        else:
            if p.returncode == 0:
                return True, ''
            return False, 'runtime error'
def test_streams(self):
    """Verify log_std_streams captures stdout/stderr only while a context
    with an explicit logger is active.

    The assertions at the bottom depend on the exact sequence of prints
    and contexts above them.
    """
    # Install the recording handler on the root logger (project helper).
    self.sniff_log(logging.getLogger(''))
    print('test1')  # outside any context -> must NOT be captured
    with log_std_streams(logger=self.captured_logger, stdout_level=logging.DEBUG):
        print('test2')  # captured at DEBUG
    with log_std_streams(stdout_level=logging.DEBUG):
        print('test3')  # no logger passed -> not captured
    with log_std_streams(stdout_level=logging.DEBUG):
        sys.stdout.write('test3')  # direct write, also not captured
    with log_std_streams(logger=self.captured_logger, stdout_level=logging.DEBUG):
        # child process output is captured too
        process = Popen(['echo', '"test5"'])
        process.wait()
    missed_file = get_uniq_name('.', 'test6', '')
    with log_std_streams(logger=self.captured_logger, stderr_level=logging.WARNING):
        # Listing a file that does not exist forces output on stderr.
        # NOTE(review): 'dir' is a cmd.exe builtin on Windows; this Popen
        # presumably relies on GNU coreutils' 'dir' binary — verify
        # portability against the platforms this suite runs on.
        process = Popen(['dir', missed_file])
        process.wait()
    debug_buf = self.log_recorder.debug_buff.getvalue()
    warn_buf = self.log_recorder.warn_buff.getvalue()
    self.assertNotIn('test1', debug_buf)
    self.assertIn('test2', debug_buf)
    self.assertNotIn('test3', debug_buf)
    self.assertIn('test5', debug_buf)
    self.assertTrue(len(warn_buf) > 0)
def test_streams(self):
    """Verify log_std_streams captures stdout/stderr only while a context
    with an explicit logger is active (RecordingHandler variant).

    The handler is installed on the root logger for the duration of the
    test and removed before the buffers are inspected; the assertions
    depend on the exact sequence of prints and contexts above them.
    """
    self.log = logging.getLogger('')
    handler = RecordingHandler()
    self.log.addHandler(handler)
    print('test1')  # outside any context -> must NOT be captured
    with log_std_streams(logger=self.log, stdout_level=logging.DEBUG):
        print('test2')  # captured at DEBUG
    with log_std_streams(stdout_level=logging.DEBUG):
        print('test3')  # no logger passed -> not captured
    with log_std_streams(stdout_level=logging.DEBUG):
        sys.stdout.write('test3')  # direct write, also not captured
    with log_std_streams(logger=self.log, stdout_level=logging.DEBUG):
        # child process output is captured too
        process = Popen(['echo', '"test5"'])
        process.wait()
    missed_file = get_uniq_name('.', 'test6', '')
    with log_std_streams(logger=self.log, stderr_level=logging.WARNING):
        # Listing a file that does not exist forces output on stderr.
        # NOTE(review): 'dir' is a cmd.exe builtin on Windows; this Popen
        # presumably relies on GNU coreutils' 'dir' binary — verify.
        process = Popen(['dir', missed_file])
        process.wait()
    # detach before asserting so later tests don't record into the buffers
    self.log.removeHandler(handler)
    debug_buf = handler.debug_buff.getvalue()
    warn_buf = handler.warn_buff.getvalue()
    self.assertNotIn('test1', debug_buf)
    self.assertIn('test2', debug_buf)
    self.assertNotIn('test3', debug_buf)
    self.assertIn('test5', debug_buf)
    self.assertTrue(len(warn_buf) > 0)
def checkTools(source):
    """Ensure the external tools needed to process *source* are installed.

    Probes each tool by launching it without arguments and inspecting its
    exit status; prints an error and exits the program when one is missing.
    """
    source = source.upper()
    if source.endswith(('.CBR', '.RAR')):
        probe = Popen('unrar', stdout=PIPE, stderr=STDOUT, stdin=PIPE, shell=True)
        status = probe.wait()
        # unrar with no args reports usage via one of these codes
        if status not in (0, 1, 7):
            print('ERROR: UnRAR is missing!')
            exit(1)
    elif source.endswith(('.CB7', '.7Z')):
        probe = Popen('7za', stdout=PIPE, stderr=STDOUT, stdin=PIPE, shell=True)
        status = probe.wait()
        if status not in (0, 7):
            print('ERROR: 7za is missing!')
            exit(1)
    if options.format == 'MOBI':
        probe = Popen('kindlegen -locale en', stdout=PIPE, stderr=STDOUT,
                      stdin=PIPE, shell=True)
        if probe.wait() != 0:
            print('ERROR: KindleGen is missing!')
            exit(1)
def stop_scan_cleanup(
    self,
    kbdb: BaseDB,
    scan_id: str,
    ovas_process: psutil.Popen,  # pylint: disable=arguments-differ
):
    """Set a key in redis to indicate the wrapper is stopped.

    It is done through redis because it is a new multiprocess instance
    and it is not possible to reach the variables of the grandchild
    process. Indirectly sends SIGUSR1 to the running openvas scan
    process via an invocation of openvas with the --scan-stop option
    to stop it.

    :param kbdb: redis-backed knowledge base for this scan; falsy kbdb
                 makes the whole method a no-op.
    :param scan_id: id of the scan being stopped.
    :param ovas_process: handle of the running openvas process.
    """
    if kbdb:
        # Set stop flag in redis
        kbdb.stop_scan(scan_id)

        # Check if openvas is running
        if ovas_process.is_running():
            # Cleaning in case of Zombie Process
            if ovas_process.status() == psutil.STATUS_ZOMBIE:
                logger.debug(
                    '%s: Process with PID %s is a Zombie process.'
                    ' Cleaning up...',
                    scan_id,
                    ovas_process.pid,
                )
                # wait() reaps the zombie so the PID is released
                ovas_process.wait()
            # Stop openvas process and wait until it stopped
            else:
                can_stop_scan = Openvas.stop_scan(
                    scan_id,
                    not self.is_running_as_root and self.sudo_available,
                )
                if not can_stop_scan:
                    logger.debug(
                        'Not possible to stop scan process: %s.',
                        ovas_process,
                    )
                    return

                logger.debug('Stopping process: %s', ovas_process)
                # Busy-wait (100 ms poll) until the process exits,
                # reaping it if it turns into a zombie meanwhile.
                while ovas_process.is_running():
                    if ovas_process.status() == psutil.STATUS_ZOMBIE:
                        ovas_process.wait()
                    else:
                        time.sleep(0.1)
        else:
            logger.debug(
                "%s: Process with PID %s already stopped",
                scan_id,
                ovas_process.pid,
            )

        # Clean redis db: hand every per-scan database back to the pool
        for scan_db in kbdb.get_scan_databases():
            self.main_db.release_database(scan_db)
def is_openvas_process_alive(openvas_process: psutil.Popen) -> bool:
    """Return True when the given openvas process is still running.

    A zombie child is reaped via wait() first so that it is not
    reported as alive; a vanished process yields False.
    """
    try:
        current_status = openvas_process.status()
        if current_status == psutil.STATUS_ZOMBIE:
            logger.debug("Process is a Zombie, waiting for it to clean up")
            openvas_process.wait()
    except psutil.NoSuchProcess:
        # process already disappeared entirely
        return False

    return openvas_process.is_running()
def system(cmd, timeout=60):
    """Run *cmd* in a shell, streaming its stdout lines to the log.

    A watchdog :class:`Timer` terminates the process if it runs longer
    than *timeout* seconds.

    :param cmd: shell command line to execute.
    :param timeout: watchdog timeout in seconds (default 60).
    :return: the finished Popen object (module-global ``proc``).
    """
    global proc
    logger().info(u"Executing command '{0}'".format(cmd))
    proc = Popen(cmd, close_fds=True, shell=True, stdout=PIPE)
    logger().info(u"PID is {0} for command '{1}'".format(proc.pid, cmd))
    # Arm the watchdog BEFORE consuming stdout: the read loop below blocks
    # until the child closes its stdout, so starting the timer only after
    # the loop (as the previous version did) made the timeout ineffective.
    _timer = Timer(timeout, terminate_process, [proc])
    _timer.start()
    try:
        for line in proc.stdout:
            logger().info(u"Process output: {0}".format(line.decode('latin1')))
        ## Wait for process to complete, increase timeout parameter if default 3 seconds is not enough
        proc.wait(timeout=3)
    finally:
        # Always disarm the watchdog, even if wait() times out or raises.
        _timer.cancel()
    return proc
def close_spider(self, spider):
    """Generate one MOBI file per scraped chapter directory.

    Called by Scrapy when the spider closes. Runs the external ``kcc-c2e``
    converter for each chapter folder under the images store and renames
    the resulting .mobi to the chapter title.
    """
    logger = logging.getLogger()
    logger.info('Starting MOBI generation of scrapped images')
    storage = IMAGES_STORE
    folder_name = spider.folder_name
    results_path = path.abspath(path.join(storage, folder_name))
    chapter_directories = listdir(results_path)
    for i, chapter_directory in enumerate(chapter_directories):
        logger.info('Generating MOBI file for %s (%d/%d)' %
                    (chapter_directory, i + 1, len(chapter_directories)))
        # Fractional chapters ("10.5") get a wider, zero-padded tag.
        if '.' in chapter_directory:
            chapter_number = "##%05.1f" % float(chapter_directory)
        else:
            chapter_number = "#%03d" % int(chapter_directory)
        chapter_title = folder_name + " " + chapter_number
        pipe = Popen(
            'kcc-c2e -m -p KPW --whiteborders -f MOBI -u -r 2 -t "%s" "%s"'
            % (chapter_title, path.join(results_path, chapter_directory)),
            stdout=PIPE, stderr=STDOUT, stdin=PIPE, shell=True)
        try:
            pipe.wait(60)
            generated_file_name = path.join(results_path, chapter_directory) + ".mobi"
            rename(generated_file_name, path.join(results_path, chapter_title) + ".mobi")
            logger.info('MOBI file generated successfully')
        except TimeoutExpired:
            # Kill and reap the stuck converter; previously it was left
            # running (and unreaped) after the timeout.
            pipe.kill()
            pipe.wait()
            logger.error('Unable to generate MOBI file for %s' % (chapter_directory))
    logger.info('Finalized MOBI generation of scrapped images')
def raise_process(self):
    """Run the package as ``python -m <package_name>`` in its virtualenv.

    Stores ``(returncode, decoded stdout)`` in
    ``self._process_return_value``.
    """
    venv = self.ensure_environment()
    args = [
        os.path.join(venv.path, "bin", "python"),
        "-m",
        self.package_name,
    ]
    process = Popen(args, env=self.env, stdout=subprocess.PIPE)
    # communicate() drains the pipe while waiting. The previous
    # wait()-then-read order can deadlock once the child fills the
    # stdout pipe buffer, because nothing is reading the other end.
    stdout_data, _ = process.communicate()
    retcode = process.returncode
    output = stdout_data.decode("utf-8")
    self._process_return_value = retcode, output
def make(base_dir, timeout=5):
    """Compile main.cpp with clang++ inside *base_dir*.

    :param base_dir: directory containing main.cpp.
    :param timeout: seconds to allow the compiler to run (default 5).
    :return: tuple ``(ok?, msg)`` — ``(True, '')`` on success, otherwise
             ``(False, reason)`` where *reason* is the captured stderr
             for a compile error.
    """
    cmd = ['clang++', '-std=c++11', 'main.cpp']
    p = Popen(cmd, stderr=PIPE, cwd=base_dir)
    try:
        # communicate() both waits and drains stderr; plain wait() can
        # deadlock when the compiler fills the stderr pipe buffer.
        _, err = p.communicate(timeout=timeout)
    except TimeoutExpired:
        # Kill the stuck compiler and reap it instead of leaking it.
        p.kill()
        p.communicate()
        return False, 'compilation take too much time.'
    if p.returncode == 0:
        return True, ''
    return False, err
def check():
    """Execute posted PHP code with a 5-second limit.

    Writes the submitted code to a temp .php file, runs it with the
    configured disabled functions, and returns JSON with the exit code
    and combined stdout/stderr output.
    """
    data = request.json
    t = time()
    out_file = f'output{t}.txt'
    code_file = f'code{t}.php'
    output = open(out_file, 'w+')
    with open(code_file, 'w') as f:
        f.write(data['code'])
    process = Popen(['php', '-d', php_disable_functions, code_file],
                    stdout=output, stderr=output)
    exit_code = -1
    result = ''
    try:
        exit_code = process.wait(5)
    except Exception as e:
        print(e)
    # poll() works for both subprocess.Popen and psutil.Popen; the
    # previous is_running() call is psutil-only and raises
    # AttributeError on a plain subprocess.Popen.
    if process.poll() is None:
        process.kill()
        # reap the killed child so it does not linger as a zombie
        process.wait()
    output.seek(0)
    result = output.read()
    output.close()
    os.remove(out_file)
    os.remove(code_file)
    return jsonify({'exit_code': exit_code, 'result': result})
def checkTools(source):
    """Check that the archive/conversion tools required for *source* exist.

    Each tool is probed by running it without arguments; when the exit
    status indicates the tool is absent, an error is printed and the
    program exits.
    """
    def probe(command):
        # launch the tool silently and report its exit status
        child = Popen(command, stdout=PIPE, stderr=STDOUT, stdin=PIPE, shell=True)
        return child.wait()

    source = source.upper()
    if source.endswith('.CBR') or source.endswith('.RAR'):
        if probe('unrar') not in (0, 7):
            print('ERROR: UnRAR is missing!')
            exit(1)
    elif source.endswith('.CB7') or source.endswith('.7Z'):
        if probe('7za') not in (0, 7):
            print('ERROR: 7za is missing!')
            exit(1)
    if options.format == 'MOBI':
        if probe('kindlegen -locale en') != 0:
            print('ERROR: KindleGen is missing!')
            exit(1)
def test_streams(self):
    """Verify log_std_streams captures stdout/stderr only while a context
    with an explicit logger is active (cross-platform variant).

    Windows gets its commands wrapped in ``cmd /c`` because ``echo`` and
    ``dir`` are shell builtins there; POSIX uses ``ls`` for the stderr
    case. The assertions depend on the exact sequence of prints and
    contexts above them.
    """
    self.sniff_log()
    print('test1')  # outside any context -> must NOT be captured
    with log_std_streams(logger=self.captured_logger, stdout_level=logging.DEBUG):
        print('test2')  # captured at DEBUG
    with log_std_streams(stdout_level=logging.DEBUG):
        print('test3')  # no logger passed -> not captured
    with log_std_streams(stdout_level=logging.DEBUG):
        sys.stdout.write('test3')  # direct write, also not captured
    with log_std_streams(logger=self.captured_logger, stdout_level=logging.DEBUG):
        # child process output is captured too
        cmd = ['echo', '"test5"']
        if is_windows():
            cmd = ['cmd', '/c'] + cmd
        process = Popen(cmd)
        process.wait()
    missed_file = get_uniq_name('.', 'test6', '')
    with log_std_streams(logger=self.captured_logger, stderr_level=logging.WARNING):
        # listing a non-existing file forces output on stderr
        if is_windows():
            cmd = ['cmd', '/c', 'dir']
        else:
            cmd = ['ls']
        process = Popen(cmd + [missed_file])
        process.wait()
    debug_buf = self.log_recorder.debug_buff.getvalue()
    warn_buf = self.log_recorder.warn_buff.getvalue()
    self.assertNotIn('test1', debug_buf)
    self.assertIn('test2', debug_buf)
    self.assertNotIn('test3', debug_buf)
    self.assertIn('test5', debug_buf)
    self.assertTrue(len(warn_buf) > 0)
def find_device(self):
    """Locate a connected Kindle.

    Returns the mount point of a removable drive that looks like a Kindle
    (contains both 'system' and 'documents' directories). When no such
    drive is found and SSH is enabled in the config, probes the
    configured Kindle IP with plink and returns the IP on success,
    setting ``self.ssh``. Raises OSError when no device can be reached.
    """
    for partition in disk_partitions(False):
        if 'removable' not in partition[3]:
            continue
        mount = partition[1]
        if os.path.isdir(os.path.join(mount, 'system')) and \
                os.path.isdir(os.path.join(mount, 'documents')):
            return mount
    if self.config['GENERAL']['SSHEnabled'] != "True":
        raise OSError('Not found any connected Kindle!')
    # USB detection failed; fall back to an SSH login probe via plink.
    command = '"' + self.config['SSH']['PLinkPath'] + '" root@' + \
              self.config['SSH']['KindleIP'] + ' whoami'
    probe = Popen(command, stdout=PIPE, stderr=STDOUT, shell=True)
    if probe.wait() != 0:
        raise OSError('Can\'t connect to Kindle!')
    self.ssh = True
    return self.config['SSH']['KindleIP']
class Process(object):
    """Wraps a process.

    Options:

    - **wid**: the process unique identifier. This value will be used to
      replace the *$WID* string in the command line if present.

    - **cmd**: the command to run. May contain any of the variables
      available that are being passed to this class. They will be
      replaced using the python format syntax.

    - **args**: the arguments for the command to run. Can be a list or
      a string. If **args** is a string, it's splitted using
      :func:`shlex.split`. Defaults to None.

    - **executable**: When executable is given, the first item in
      the args sequence obtained from **cmd** is still treated by most
      programs as the command name, which can then be different from the
      actual executable name. It becomes the display name for the
      executing program in utilities such as **ps**.

    - **working_dir**: the working directory to run the command in. If
      not provided, will default to the current working directory.

    - **shell**: if *True*, will run the command in the shell
      environment. *False* by default. **warning: this is a
      security hazard**.

    - **uid**: if given, is the user id or name the command should run
      with. The current uid is the default.

    - **gid**: if given, is the group id or name the command should run
      with. The current gid is the default.

    - **env**: a mapping containing the environment variables the command
      will run with. Optional.

    - **rlimits**: a mapping containing rlimit names and values that will
      be set before the command runs.

    - **use_fds**: if True, will not close the fds in the subprocess. Must
      be be set to True on Windows if stdout or stderr are redirected.
      default: False.

    - **pipe_stdout**: if True, will open a PIPE on stdout. default: True.

    - **pipe_stderr**: if True, will open a PIPE on stderr. default: True.

    - **close_child_stdin**: If True, redirects the child process' stdin
      to /dev/null after the fork. default: True.

    - **close_child_stdout**: If True, redirects the child process'
      stdout to /dev/null after the fork. default: False.

    - **close_child_stderr**: If True, redirects the child process'
      stdout to /dev/null after the fork. default: False.
    """
    def __init__(self, name, wid, cmd, args=None, working_dir=None,
                 shell=False, uid=None, gid=None, env=None, rlimits=None,
                 executable=None, use_fds=False, watcher=None, spawn=True,
                 pipe_stdout=True, pipe_stderr=True, close_child_stdin=True,
                 close_child_stdout=False, close_child_stderr=False):
        self.name = name
        self.wid = wid
        self.cmd = cmd
        self.args = args
        self.working_dir = working_dir or get_working_dir()
        self.shell = shell
        if uid:
            self.uid = to_uid(uid)
            self.username = get_username_from_uid(self.uid)
        else:
            self.username = None
            self.uid = None
        self.gid = to_gid(gid) if gid else None
        self.env = env or {}
        self.rlimits = rlimits or {}
        self.executable = executable
        self.use_fds = use_fds
        self.watcher = watcher
        self.pipe_stdout = pipe_stdout
        self.pipe_stderr = pipe_stderr
        self.close_child_stdin = close_child_stdin
        self.close_child_stdout = close_child_stdout
        self.close_child_stderr = close_child_stderr
        self.stopping = False
        # sockets created before fork, should be let go after.
        self._sockets = []
        self._worker = None
        self.redirected = False
        self.started = 0
        # a uid without an explicit gid falls back to the uid's default group
        if self.uid is not None and self.gid is None:
            self.gid = get_default_gid(self.uid)

        if IS_WINDOWS:
            if not self.use_fds and (self.pipe_stderr or self.pipe_stdout):
                raise ValueError("On Windows, you can't close the fds if "
                                 "you are redirecting stdout or stderr")

        if spawn:
            self.spawn()

    def _null_streams(self, streams):
        # Redirect each of the given std streams to /dev/null.
        devnull = os.open(os.devnull, os.O_RDWR)
        try:
            for stream in streams:
                if not hasattr(stream, 'fileno'):
                    # we're probably dealing with a file-like
                    continue
                try:
                    try:
                        stream.flush()
                    except AttributeError:
                        # pytest's stdin replacement has no flush()
                        if type(stream).__name__ == 'DontReadFromInput':
                            continue
                        else:
                            raise
                    os.dup2(devnull, stream.fileno())
                except IOError:
                    # some streams, like stdin - might be already closed.
                    pass
        finally:
            os.close(devnull)

    def _get_sockets_fds(self):
        """Returns sockets dict.

        If this worker's cmd indicates use of a SO_REUSEPORT socket, a new
        socket is created and bound. This new socket's FD replaces
        original socket's FD in returned dict. This method populates
        `self._sockets` list. This list should be let go after `fork()`.
        """
        sockets_fds = None

        if self.watcher is not None and self.watcher.sockets is not None:
            sockets_fds = self.watcher._get_sockets_fds()
            reuseport_sockets = tuple((sn, s) for (sn, s)
                                      in self.watcher.sockets.items()
                                      if s.so_reuseport)
            for sn, s in reuseport_sockets:
                # watcher.cmd uses this reuseport socket
                if 'circus.sockets.%s' % sn in self.watcher.cmd:
                    sock = CircusSocket.load_from_config(s._cfg)
                    sock.bind_and_listen()
                    # replace original socket's fd
                    sockets_fds[sn] = sock.fileno()
                    # keep new socket until fork returns
                    self._sockets.append(sock)

        return sockets_fds

    def _get_stdin_socket_fd(self):
        # Delegates to the watcher; returns None when there is no watcher.
        if self.watcher is not None:
            return self.watcher._get_stdin_socket_fd()

    def spawn(self):
        """Fork/exec the wrapped command as self._worker."""
        self.started = time.time()
        sockets_fds = self._get_sockets_fds()
        args = self.format_args(sockets_fds=sockets_fds)

        def preexec():
            # Runs in the child between fork and exec (POSIX only).
            streams = []

            if self.close_child_stdin:
                streams.append(sys.stdin)

            if self.close_child_stdout:
                streams.append(sys.stdout)

            if self.close_child_stderr:
                streams.append(sys.stderr)

            self._null_streams(streams)
            # detach from the controlling terminal / process group
            os.setsid()

            if resource:
                for limit, value in self.rlimits.items():
                    res = getattr(
                        resource, 'RLIMIT_%s' % limit.upper(), None
                    )
                    if res is None:
                        raise ValueError('unknown rlimit "%s"' % limit)

                    # TODO(petef): support hard/soft limits

                    # for the NOFILE limit, if we fail to set an unlimited
                    # value then check the existing hard limit because we
                    # probably can't bypass it due to a kernel limit - so
                    # just assume that the caller means they want to use
                    # the kernel limit when they pass the unlimited value.
                    # This is better than failing to start the process and
                    # forcing the caller to always be aware of what the
                    # kernel configuration is.
                    # If they do pass in a real limit value, then we'll
                    # just raise the failure as they should know that
                    # their expectations couldn't be met.
                    # TODO - we can't log here as this occurs in the child
                    # process after the fork but it would be very good to
                    # notify the admin of the situation somehow.
                    retry = False
                    try:
                        resource.setrlimit(res, (value, value))
                    except ValueError:
                        if res == resource.RLIMIT_NOFILE and \
                                value == resource.RLIM_INFINITY:
                            _soft, value = resource.getrlimit(res)
                            retry = True
                        else:
                            raise
                    if retry:
                        resource.setrlimit(res, (value, value))

            if self.gid:
                try:
                    os.setgid(self.gid)
                except OverflowError:
                    if not ctypes:
                        raise
                    # versions of python < 2.6.2 don't manage unsigned int
                    # for groups like on osx or fedora
                    os.setgid(-ctypes.c_int(-self.gid).value)

                if self.username is not None:
                    try:
                        os.initgroups(self.username, self.gid)
                    except (OSError, AttributeError):
                        # not support on Mac or 2.6
                        pass

            if self.uid:
                os.setuid(self.uid)

            # wire the watcher-provided stdin socket (if any) onto fd 0
            stdin_socket_fd = self._get_stdin_socket_fd()
            if stdin_socket_fd is not None:
                os.dup2(stdin_socket_fd, 0)

        if IS_WINDOWS:
            # On Windows we can't use a pre-exec function
            preexec_fn = None
        else:
            preexec_fn = preexec

        extra = {}
        if self.pipe_stdout:
            extra['stdout'] = PIPE

        if self.pipe_stderr:
            extra['stderr'] = PIPE

        self._worker = Popen(args, cwd=self.working_dir,
                             shell=self.shell, preexec_fn=preexec_fn,
                             env=self.env, close_fds=not self.use_fds,
                             executable=self.executable, **extra)

        # let go of sockets created only for self._worker to inherit
        self._sockets = []

    def format_args(self, sockets_fds=None):
        """ It's possible to use environment variables and some other
        variables that are available in this context, when spawning the
        processes.
        """
        logger.debug('cmd: ' + bytestring(self.cmd))
        logger.debug('args: ' + str(self.args))

        current_env = ObjectDict(self.env.copy())

        format_kwargs = {
            'wid': self.wid, 'shell': self.shell, 'args': self.args,
            'env': current_env, 'working_dir': self.working_dir,
            'uid': self.uid, 'gid': self.gid, 'rlimits': self.rlimits,
            'executable': self.executable, 'use_fds': self.use_fds}

        if sockets_fds is not None:
            format_kwargs['sockets'] = sockets_fds

        if self.watcher is not None:
            # expose all watcher options to the format string as well
            for option in self.watcher.optnames:
                if option not in format_kwargs \
                        and hasattr(self.watcher, option):
                    format_kwargs[option] = getattr(self.watcher, option)

        cmd = replace_gnu_args(self.cmd, **format_kwargs)

        if '$WID' in cmd or (self.args and '$WID' in self.args):
            msg = "Using $WID in the command is deprecated. You should use "\
                  "the python string format instead. In your case, this "\
                  "means replacing the $WID in your command by $(WID)."
            warnings.warn(msg, DeprecationWarning)
            self.cmd = cmd.replace('$WID', str(self.wid))

        if self.args is not None:
            if isinstance(self.args, string_types):
                args = shlex.split(bytestring(replace_gnu_args(
                    self.args, **format_kwargs)))
            else:
                args = [bytestring(replace_gnu_args(arg, **format_kwargs))
                        for arg in self.args]
            args = shlex.split(bytestring(cmd), posix=not IS_WINDOWS) + args
        else:
            args = shlex.split(bytestring(cmd), posix=not IS_WINDOWS)

        if self.shell:
            # subprocess.Popen(shell=True) implies that 1st arg is the
            # requested command, remaining args are applied to sh.
            args = [' '.join(quote(arg) for arg in args)]
            shell_args = format_kwargs.get('shell_args', None)
            if shell_args and IS_WINDOWS:
                logger.warn("shell_args won't apply for "
                            "windows platforms: %s", shell_args)
            elif isinstance(shell_args, string_types):
                args += shlex.split(bytestring(replace_gnu_args(
                    shell_args, **format_kwargs)))
            elif shell_args:
                args += [bytestring(replace_gnu_args(arg, **format_kwargs))
                         for arg in shell_args]
        elif format_kwargs.get('shell_args', False):
            logger.warn("shell_args is defined but won't be used "
                        "in this context: %s", format_kwargs['shell_args'])

        logger.debug("process args: %s", args)
        return args

    def returncode(self):
        # Exit code of the worker (None while still running).
        return self._worker.returncode

    @debuglog
    def poll(self):
        return self._worker.poll()

    @debuglog
    def is_alive(self):
        return self.poll() is None

    @debuglog
    def send_signal(self, sig):
        """Sends a signal **sig** to the process."""
        logger.debug("sending signal %s to %s" % (sig, self.pid))
        return self._worker.send_signal(sig)

    @debuglog
    def stop(self):
        """Stop the process and close stdout/stderr

        If the corresponding process is still here
        (normally it's already killed by the watcher),
        a SIGTERM is sent, then a SIGKILL after 1 second.

        The shutdown process (SIGTERM then SIGKILL) is
        normally taken by the watcher. So if the process
        is still there here, it's a kind of bad behavior
        because the graceful timeout won't be respected here.
        """
        try:
            try:
                if self.is_alive():
                    try:
                        return self._worker.terminate()
                    except AccessDenied:
                        # It can happen on Windows if the process
                        # dies after poll returns (unlikely)
                        pass
            finally:
                # streams are closed even when terminate() fails
                self.close_output_channels()
        except NoSuchProcess:
            pass

    def close_output_channels(self):
        # Close our ends of the stdout/stderr pipes (if they were piped).
        if self._worker.stderr is not None:
            self._worker.stderr.close()
        if self._worker.stdout is not None:
            self._worker.stdout.close()

    def wait(self, timeout=None):
        """
        Wait for the process to terminate, in the fashion
        of waitpid.

        Accepts a timeout in seconds.
        """
        self._worker.wait(timeout)

    def age(self):
        """Return the age of the process in seconds."""
        return time.time() - self.started

    def info(self):
        """Return process info.

        The info returned is a mapping with these keys:

        - **mem_info1**: Resident Set Size Memory in bytes (RSS)
        - **mem_info2**: Virtual Memory Size in bytes (VMS).
        - **cpu**: % of cpu usage.
        - **mem**: % of memory usage.
        - **ctime**: process CPU (user + system) time in seconds.
        - **pid**: process id.
        - **username**: user name that owns the process.
        - **nice**: process niceness (between -20 and 20)
        - **cmdline**: the command line the process was run with.
        """
        try:
            info = get_info(self._worker)
        except NoSuchProcess:
            return "No such process (stopped?)"

        info["age"] = self.age()
        info["started"] = self.started
        info["children"] = []
        info['wid'] = self.wid
        for child in get_children(self._worker):
            info["children"].append(get_info(child))

        return info

    def children(self, recursive=False):
        """Return a list of children pids."""
        return [child.pid for child in
                get_children(self._worker, recursive)]

    def is_child(self, pid):
        """Return True is the given *pid* is a child of that process."""
        pids = [child.pid for child in get_children(self._worker)]
        if pid in pids:
            return True
        return False

    @debuglog
    def send_signal_child(self, pid, signum):
        """Send signal *signum* to child *pid*."""
        children = dict((child.pid, child)
                        for child in get_children(self._worker))
        try:
            children[pid].send_signal(signum)
        except KeyError:
            raise NoSuchProcess(pid)

    @debuglog
    def send_signal_children(self, signum, recursive=False):
        """Send signal *signum* to all children."""
        for child in get_children(self._worker, recursive):
            try:
                child.send_signal(signum)
            except OSError as e:
                # a child that already exited is not an error
                if e.errno != errno.ESRCH:
                    raise

    @property
    def status(self):
        """Return the process status as a constant

        - RUNNING
        - DEAD_OR_ZOMBIE
        - UNEXISTING
        - OTHER
        """
        try:
            if get_status(self._worker) in (STATUS_ZOMBIE, STATUS_DEAD):
                return DEAD_OR_ZOMBIE
        except NoSuchProcess:
            return UNEXISTING

        if self._worker.is_running():
            return RUNNING
        return OTHER

    @property
    def pid(self):
        """Return the *pid*"""
        return self._worker.pid

    @property
    def stdout(self):
        """Return the *stdout* stream"""
        return self._worker.stdout

    @property
    def stderr(self):
        """Return the *stderr* stream"""
        return self._worker.stderr

    def __eq__(self, other):
        # identity comparison only: two wrappers are never "equal"
        return self is other

    def __lt__(self, other):
        return self.started < other.started

    def __gt__(self, other):
        return self.started > other.started
def save_file(self, cover, directory, getcover, cloud, cover_size):
    """Prepare the cover thumbnail and the processed e-book, then save
    both to the connected Kindle ('reader' mode) or to disk ('pc' mode).

    :param cover: path to an external cover image, or '' to search for /
                  extract one from the book itself.
    :param directory: optional sub-directory under 'documents' on the
                      device (reader mode, non-SSH path only).
    :param getcover: 'search' to look up the cover via
                     get_cover_image(), anything else to extract it
                     from the book file.
    :param cloud: 'no' for a personally-purchased book (EBOK thumbnail
                  using self.asin); otherwise the ASIN is read from the
                  book's metadata.
    :param cover_size: unused here — the method reads self.cover_size
                       instead; TODO confirm whether this parameter
                       should replace the attribute.

    NOTE(review): this code was recovered from a whitespace-mangled
    source; the block nesting below was reconstructed and should be
    compared against the original file.
    """
    if(self.mode=='reader'):
        # need.cover means that directory system/thumbnails has been found on the Kindle (this is Kindle PW)
        if self.kindle.need_cover:
            if cover != '':
                # an external cover file was supplied
                try:
                    ready_cover = Image.open(cover)
                    # ready_cover.thumbnail((217, 330), Image.ANTIALIAS)
                    if self.cover_size =='pw':
                        ready_cover = ready_cover.resize((217, 330), Image.ANTIALIAS)
                    else:
                        ready_cover = ready_cover.resize((330, 470), Image.ANTIALIAS)
                    ready_cover = ready_cover.convert('L')
                    self.txt2img(self.title, self.seqnumber, ready_cover, self.position)
                except:
                    raise OSError('Failed to load custom cover!')
            else:
                if getcover=='search':
                    # search for cover
                    try:
                        ready_cover = self.get_cover_image()
                    except:
                        if(self.write_thumb):
                            #raise OSError('Failed to extract cover!')
                            # fall back to the bundled default banner;
                            # sys.frozen/sys.importers exist only in a
                            # py2exe-style frozen build, hence the
                            # AttributeError fallback for plain runs
                            try:
                                if sys.frozen or sys.importers:
                                    butler_dir = os.path.dirname(sys.executable)
                            except AttributeError:
                                butler_dir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
                            path_to_cover = butler_dir + "\\default_banner.jpeg"
                            ready_cover = Image.open(path_to_cover)
                            if self.cover_size =='pw':
                                ready_cover = ready_cover.resize((217, 330), Image.ANTIALIAS)
                            else:
                                ready_cover = ready_cover.resize((330, 470), Image.ANTIALIAS)
                            ready_cover = ready_cover.convert('L')
                            self.txt2defaultcover(self.title, self.author, self.seqnumber, ready_cover, self.position )
                else:
                    # extract cover
                    try:
                        # ready_cover = self.get_cover_image()
                        extractcover_34.extractThumbnail(self.path, "tmpdir.$$$");
                        shutil.rmtree("tmpdir.$$$")
                        coverfile = 'images.$$$' + '\\' + self.infilename +'.cover' + '.jpeg'
                        ready_cover = Image.open(coverfile)
                        ready_cover = ready_cover.resize((217, 330), Image.ANTIALIAS)
                        ready_cover = ready_cover.convert('L')
                        self.txt2img(self.title, self.seqnumber, ready_cover, self.position)
                        shutil.rmtree("images.$$$")
                    except:
                        if(self.write_thumb):
                            #shutil.rmtree("images.$$$")
                            #raise OSError('Failed to extract cover!')
                            # same default-banner fallback as above
                            try:
                                if sys.frozen or sys.importers:
                                    butler_dir = os.path.dirname(sys.executable)
                            except AttributeError:
                                butler_dir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
                            path_to_cover = butler_dir + "\\default_banner.jpeg"
                            ready_cover = Image.open(path_to_cover)
                            if self.cover_size =='pw':
                                ready_cover = ready_cover.resize((217, 330), Image.ANTIALIAS)
                            else:
                                ready_cover = ready_cover.resize((330, 470), Image.ANTIALIAS)
                            ready_cover = ready_cover.convert('L')
                            self.txt2defaultcover(self.title, self.author, self.seqnumber, ready_cover, self.position)
            if self.kindle.ssh:
                # upload the thumbnail over SSH via pscp
                tmp_cover = os.path.join(gettempdir(), 'KindleButlerCover')
                ready_cover.save(tmp_cover, 'JPEG')
                ssh = Popen('"' + self.config['SSH']['PSCPPath'] + '" "' + tmp_cover + '" root@'
                            + self.kindle.path + ':/mnt/us/system/thumbnails/thumbnail_' + self.asin
                            + '_EBOK_portrait.jpg', stdout=PIPE, stderr=STDOUT, shell=True)
                ssh_check = ssh.wait()
                if ssh_check != 0:
                    raise OSError('Failed to upload cover!')
                os.remove(tmp_cover)
            else:
                if(self.write_thumb):
                    if(cloud=='no'):
                        ready_cover.save(os.path.join(self.kindle.path, 'system', 'thumbnails',
                                                      'thumbnail_' + self.asin + '_EBOK_portrait.jpg'), 'JPEG')
                    else:
                        # get ASIN from file
                        section = KindleUnpack.Sectionizer(self.path)
                        mhlst = [KindleUnpack.MobiHeader(section, 0)]
                        mh = mhlst[0]
                        metadata = mh.getmetadata()
                        assa = metadata.get('ASIN')
                        assassin = assa[0].decode("utf-8")
                        if assassin==None:
                            assassin='None'
                        ready_cover.save(os.path.join(self.kindle.path, 'system', 'thumbnails',
                                                      'thumbnail_' + assassin + '_PDOC_portrait.jpg'), 'JPEG')
    # for all modes prepare processed file
    try:
        # noinspection PyArgumentList
        ready_file = DualMetaFix.DualMobiMetaFix(self.path, bytes(self.asin,'UTF-8'),cloud)
    except:
        raise OSError('E-Book modification failed!')
    ready_file, source_size = ready_file.getresult()
    # save processed file to reader
    if(self.mode=='reader'):
        if source_size < self.kindle.get_free_space():
            if self.kindle.ssh:
                # write to a temp file and upload via pscp, parsing its
                # progress output to drive the progress bar
                tmp_book = os.path.join(gettempdir(), os.path.basename(self.path))
                open(tmp_book, 'wb').write(ready_file.getvalue())
                ssh = Popen('"' + self.config['SSH']['PSCPPath'] + '" "' + tmp_book + '" root@'
                            + self.kindle.path + ':/mnt/us/documents/', stdout=PIPE, stderr=STDOUT, shell=True)
                for line in ssh.stdout:
                    for inside_line in line.split(b'\r'):
                        if b'|' in inside_line:
                            # pscp progress lines look like "... | nn%"
                            inside_line = inside_line.decode('utf-8').split(' | ')[-1].rstrip()[:-1]
                            self.progressbar['value'] = int(inside_line)
                ssh_check = ssh.wait()
                os.remove(tmp_book)
                if ssh_check != 0:
                    raise OSError('Failed to upload E-Book!')
                # wake the Kindle UI so it rescans the library
                Popen('"' + self.config['SSH']['PLinkPath'] + '" root@' + self.kindle.path
                      + ' "dbus-send --system /default com.lab126.powerd.resuming int32:1"',
                      stdout=PIPE, stderr=STDOUT, shell=True)
            else:
                if cloud=='no':
                    saved = 0
                    if directory == None:
                        target = open(os.path.join(self.kindle.path, 'documents',
                                                   os.path.basename(self.path)), 'wb')
                    else:
                        new_dir = self.kindle.path + 'documents' + '\\' + directory
                        self.make_sure_path_exists(new_dir)
                        target = open(new_dir + '\\' + os.path.basename(self.path), 'wb')
                    # target = open(os.path.join(self.kindle.path, 'documents' + '\\directory', os.path.basename(self.path)), 'wb')
                    # chunked copy so the progress bar can be updated
                    while True:
                        chunk = ready_file.read(32768)
                        if not chunk:
                            break
                        target.write(chunk)
                        saved += len(chunk)
                        self.progressbar['value'] = int((saved/source_size)*100)
        else:
            raise OSError('Not enough space on target device!')
    # save cover and processed book to pc
    if(self.mode=='pc'):
        # prepare and save cover
        # if key -a asin then self.write_thumb=False and no need to save cover
        if cover != '':
            # means that cover was imported from external file
            try:
                ready_cover = Image.open(cover)
                if self.cover_size =='pw':
                    ready_cover = ready_cover.resize((217, 330), Image.ANTIALIAS)
                else:
                    ready_cover = ready_cover.resize((330, 470), Image.ANTIALIAS)
                ready_cover = ready_cover.convert('L')
                self.txt2img(self.title, self.seqnumber, ready_cover, self.position)
            except:
                raise OSError('Failed to load custom cover!')
        else:
            # search/extract cover from inside the book
            if(getcover=='search'):
                #search for cover
                try:
                    ready_cover = self.get_cover_image()
                except:
                    if(self.write_thumb):
                        #raise OSError('Failed to extract cover!')
                        self.no_cover = True
                        try:
                            if sys.frozen or sys.importers:
                                butler_dir = os.path.dirname(sys.executable)
                        except AttributeError:
                            butler_dir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
                        path_to_cover = butler_dir + "\\default_banner.jpeg"
                        ready_cover = Image.open(path_to_cover)
                        if self.cover_size =='pw':
                            ready_cover = ready_cover.resize((217, 330), Image.ANTIALIAS)
                        else:
                            ready_cover = ready_cover.resize((330, 470), Image.ANTIALIAS)
                        ready_cover = ready_cover.convert('L')
                        self.txt2defaultcover(self.title, self.author, self.seqnumber, ready_cover, self.position)
            else:
                #extract the cover from the book
                try:
                    # ready_cover = self.get_cover_image()
                    extractcover_34.extractThumbnail(self.path, "tmpdir.$$$");
                    shutil.rmtree("tmpdir.$$$")
                    coverfile = 'images.$$$' + '\\' + self.infilename +'.cover' + '.jpeg'
                    ready_cover = Image.open(coverfile)
                    if self.cover_size =='pw':
                        ready_cover = ready_cover.resize((217, 330), Image.ANTIALIAS)
                    else:
                        ready_cover = ready_cover.resize((330, 470), Image.ANTIALIAS)
                    ready_cover = ready_cover.convert('L')
                    self.txt2img(self.title, self.seqnumber, ready_cover, self.position)
                except:
                    if(self.write_thumb):
                        # shutil.rmtree("images.$$$")
                        #raise OSError('Failed to extract cover!')
                        self.no_cover = True
                        try:
                            if sys.frozen or sys.importers:
                                butler_dir = os.path.dirname(sys.executable)
                        except AttributeError:
                            butler_dir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
                        path_to_cover = butler_dir + "\\default_banner.jpeg"
                        ready_cover = Image.open(path_to_cover)
                        if self.cover_size =='pw':
                            ready_cover = ready_cover.resize((217, 330), Image.ANTIALIAS)
                        else:
                            ready_cover = ready_cover.resize((330, 470), Image.ANTIALIAS)
                        ready_cover = ready_cover.convert('L')
                        self.txt2defaultcover(self.title, self.author, self.seqnumber, ready_cover, self.position)
        if(self.write_thumb):
            if cloud=='no':
                ready_cover.save('thumbnail_' + self.asin + '_EBOK_portrait.jpg', 'JPEG')
            else:
                # get ASIN from file
                section = KindleUnpack.Sectionizer(self.path)
                mhlst = [KindleUnpack.MobiHeader(section, 0)]
                mh = mhlst[0]
                metadata = mh.getmetadata()
                assa = metadata.get('ASIN')
                assassin = assa[0].decode("utf-8")
                if assassin==None:
                    assassin='None'
                ready_cover.save('thumbnail_' + assassin + '_EBOK_portrait.jpg', 'JPEG')
        # extraction leaves images.$$$ behind when txt2img succeeded
        if getcover !='search' and cover == '' and self.no_cover == False:
            shutil.rmtree("images.$$$")
        if cloud=='no':
            #save processed file
            saved = 0
            # ready_file.seek(0)
            target = open(self.infilename + '.processed' + self.infileext, 'wb')
            while True:
                chunk = ready_file.read(32768)
                if not chunk:
                    break
                target.write(chunk)
                saved += len(chunk)
                self.progressbar['value'] = int((saved/source_size)*100)
def _execute(self, cmd, stdin, meta=None, callback=None):
    """
    The "real" execute function.
    Just like executing a command in the shell on the compute node.
    Do not use directly. Call the execute() wrapper instead.

    :param cmd: shell command line (passed to Popen with shell=True).
    :param stdin: bytes/str fed to the child's standard input.
    :param meta: optional dict of post-processing directives
        (replace_text/replace_stdout/replace_stderr, compress_*, encode_*,
        output); consumed keys are deleted, remaining keys are returned
        under result['meta'].
    :param callback: None for implicit logging, False to disable it, or a
        list [task_name, kwargs, expire] describing a follow-up task.
    :return: result dict (shape depends on meta['output'], see below).
    """
    request = self.request
    # New session (os.setsid) so the whole child process group can be
    # signalled together via os.killpg below.
    p = Popen(cmd, shell=True, bufsize=0, close_fds=True,
              stdin=PIPE, stdout=PIPE, stderr=PIPE, preexec_fn=os.setsid)
    exec_time = datetime.utcnow()

    try:
        stdout, stderr = p.communicate(input=stdin)
    except (Terminated, KeyboardInterrupt, SystemExit) as exc:
        # This is mainly used for fetching SIGTERM
        # The SIGTERM signal will be caught here as Terminated exception
        # and the SIGKILL will never be caught here.
        sig = _exc_signal(exc)
        logger.error(
            'Task %s received %r exception -> sending signal %d to %d',
            request.id, exc, sig, p.pid)
        try:
            os.killpg(p.pid, sig)  # Send signal to process group
        except OSError:
            pass
        try:
            p.send_signal(sig)  # Send signal to process and wait
            p.wait()
        except (OSError, NoSuchProcess):
            pass
        raise exc

    finish_time = datetime.utcnow()

    if meta is None:
        meta = {}
    elif meta:
        # Each directive below is applied at most once and then removed
        # from meta so it does not leak into the returned metadata.
        if 'replace_text' in meta:
            # Pairs applied to both streams (e.g. to scrub secrets).
            for i in meta['replace_text']:
                stdout = stdout.replace(i[0], i[1])
                stderr = stderr.replace(i[0], i[1])
            del meta['replace_text']
        if 'replace_stdout' in meta:
            for i in meta['replace_stdout']:
                stdout = stdout.replace(i[0], i[1])
            del meta['replace_stdout']
        if 'replace_stderr' in meta:
            for i in meta['replace_stderr']:
                stderr = stderr.replace(i[0], i[1])
            del meta['replace_stderr']
        if 'compress_stdout' in meta:
            stdout = compress(stdout)
            del meta['compress_stdout']
        if 'compress_stderr' in meta:
            stderr = compress(stderr)
            del meta['compress_stderr']
        if 'encode_stdout' in meta:
            stdout = b64encode(stdout)
            del meta['encode_stdout']
        if 'encode_stderr' in meta:
            stderr = b64encode(stderr)
            del meta['encode_stderr']

    meta['exec_time'] = exec_time.isoformat()
    meta['finish_time'] = finish_time.isoformat()

    if 'output' in meta:
        # Caller-shaped result: meta['output'] maps the canonical keys
        # (stdout/stderr/returncode) onto custom key names; a falsy
        # mapping value drops that field entirely.
        result = meta.pop('output', {})
        result['meta'] = meta
        _stdout = result.pop('stdout', None)
        if _stdout:
            result[_stdout] = stdout.strip()
        _stderr = result.pop('stderr', None)
        if _stderr:
            result[_stderr] = stderr.strip()
        _returncode = result.pop('returncode', None)
        if _returncode:
            result[_returncode] = p.returncode
    else:
        # Default result shape.
        result = {
            'returncode': p.returncode,
            'stdout': stdout,
            'stderr': stderr,
            'meta': meta,
        }

    # Implicit logging if no callback is specified
    # Use callback=False to disable automatic logging
    if callback is None:
        callback = [LOGTASK, meta, None]

    if callback:
        nolog = meta.get('nolog', False)
        cb_name = callback[0]
        cb_kwargs = {}
        cb_expire = None
        if len(callback) > 1:
            cb_kwargs = callback[1]
        if len(callback) > 2:
            cb_expire = callback[2]
        # NOTE(review): send_task_forever is a project helper; presumably it
        # retries dispatch until accepted -- verify against its definition.
        t = send_task_forever(request.id, cb_name, nolog=nolog,
                              args=(result, request.id), kwargs=cb_kwargs,
                              queue=Q_MGMT, expires=cb_expire,
                              task_id=task_id_from_task_id(request.id))
        result['meta']['cb_name'] = cb_name
        result['meta']['callback'] = t.id

    # Do not run emergency callback in after_return
    self.all_done = True

    return result
def check_cmd_output(cmd): proc = Popen(cmd, stdout=PIPE, shell=True) exit_code = proc.wait(timeout=5) if exit_code != 0: raise ValueError(u"No HDHR device found") return proc.stdout
class Run(object):
    """Class to handle processes.

    :ivar cmds: The ``cmds`` argument passed to the __init__ method
        (a command line passed in a list, or a list of command lines passed
        as a list of list).
    :ivar status: The exit status. As the exit status is only meaningful
        after the process has exited, its initial value is None. When a
        problem running the command is detected and a process does not get
        created, its value gets set to the special value 127.
    :ivar out: process standard output (if instanciated with output = PIPE)
    :ivar err: same as out but for standard error
    :ivar pid: PID. Set to -1 if the command failed to run.
    """

    def __init__(self, cmds, cwd=None, output=PIPE,
                 error=STDOUT, input=None, bg=False, timeout=None,
                 env=None, set_sigpipe=True, parse_shebang=False,
                 ignore_environ=True):
        """Spawn a process.

        :param cmds: two possibilities:
            1) a command line: a tool name and its arguments, passed
               in a list. e.g. ['ls', '-a', '.']
            2) a list of command lines (as defined in (1)): the
               different commands will be piped. This means that
               [['ps', '-a'], ['grep', 'vxsim']] will be equivalent to
               the system command line 'ps -a | grep vxsim'.
        :type cmds: list[str] | list[list[str]]
        :param cwd: directory in which the process should be executed
            (string or None). If None then current directory is used
        :type cwd: str | None
        :param output: can be PIPE (default), a filename string, a fd on an
            already opened file, a python file object or None (for stdout).
        :type output: int | str | file | None
        :param error: same as output or STDOUT, which indicates that the
            stderr data from the applications should be captured into the
            same file handle as for stdout.
        :type error: int | str | file | None
        :param input: same as output
        :type input: int | str | file | None
        :param bg: if True then run in background
        :type bg: bool
        :param timeout: limit execution time (in seconds), None means
            unlimited
        :type timeout: int | None
        :param env: dictionary for environment variables (e.g. os.environ)
        :type env: dict
        :param set_sigpipe: reset SIGPIPE handler to default value
        :type set_sigpipe: bool
        :param parse_shebang: take the #! interpreter line into account
        :type parse_shebang: bool
        :param ignore_environ: Applies only when env parameter is not None.
            When set to True (the default), the only environment variables
            passed to the program are the ones provided by the env
            parameter. Otherwise, the environment passed to the program
            consists of the environment variables currently defined
            (os.environ) augmented by the ones provided in env.
        :type ignore_environ: bool

        :raise OSError: when trying to execute a non-existent file.

        If you specify a filename for output or stderr then file content is
        reseted (equiv. to > in shell). If you prepend the filename with '+'
        then the file will be opened in append mode (equiv. to >> in shell)
        If you prepend the input with '|', then the content of input string
        will be used for process stdin.
        """
        def add_interpreter_command(cmd_line):
            """Add the interpreter defined in the #! line to cmd_line.

            If the #! line cannot be parsed, just return the cmd_line
            unchanged

            On windows, /usr/bin/env will be ignored to avoid a dependency
            on cygwin and /bin/bash & /bin/sh are replaced by $SHELL if
            defined.

            :param cmd_line: command line
            :type cmd_line: list[str]
            """
            if not parse_shebang:
                # nothing to do
                return cmd_line
            prog = which(cmd_line[0], default=None)
            if prog is None:
                # Not found. Do not modify the command line
                return cmd_line

            with open(prog) as f:
                try:
                    header = f.read()[0:2]
                except UnicodeDecodeError:  # py3-only
                    # unknown header - cannot decode the first two bytes
                    return cmd_line
                if header != "#!":
                    # Unknown header
                    return cmd_line
                # Header found, get the interpreter command in the first
                # line
                f.seek(0)
                line = f.readline()
                interpreter_cmds = [l.strip() for l in
                                    line[line.find('!') + 1:].split()]
                # Pass the program path to the interpreter
                if len(cmd_line) > 1:
                    cmd_line = [prog] + list(cmd_line[1:])
                else:
                    cmd_line = [prog]

                if sys.platform == 'win32':  # unix: no cover
                    if interpreter_cmds[0] == '/usr/bin/env':
                        return interpreter_cmds[1:] + cmd_line
                    elif interpreter_cmds[0] in ('/bin/bash', '/bin/sh') \
                            and 'SHELL' in os.environ:
                        return [os.environ['SHELL']] + cmd_line
                return interpreter_cmds + cmd_line

        # First resolve output, error and input
        self.input_file = File(input, 'r')
        self.output_file = File(output, 'w')
        self.error_file = File(error, 'w')

        self.status = None
        self.out = ''
        self.err = ''
        self.cmds = []

        if env is not None and not ignore_environ:
            # ignore_environ is False, so get a copy of the current
            # environment and update it with the env dictionnary.
            tmp = os.environ.copy()
            tmp.update(env)
            env = tmp

        rlimit_args = []
        if timeout is not None:
            # Timeouts are enforced by wrapping the command with the
            # external ``rlimit`` helper rather than in-process.
            rlimit = get_rlimit()
            if os.path.exists(rlimit):
                rlimit_args = [rlimit, '%d' % timeout]
            else:
                logger.warning('cannot find rlimit at %s', rlimit)
                rlimit_args = []

        try:
            if isinstance(cmds[0], basestring):
                # Single command line.
                self.cmds = rlimit_args + list(add_interpreter_command(cmds))
            else:
                # Pipeline: only the first command gets the rlimit wrapper.
                self.cmds = [add_interpreter_command(c) for c in cmds]
                self.cmds[0] = rlimit_args + list(self.cmds[0])

            cmdlogger.debug('Run: cd %s; %s',
                            cwd if cwd is not None else os.getcwd(),
                            self.command_line_image())

            if isinstance(cmds[0], basestring):
                popen_args = {
                    'stdin': self.input_file.fd,
                    'stdout': self.output_file.fd,
                    'stderr': self.error_file.fd,
                    'cwd': cwd,
                    'env': env,
                    'universal_newlines': True}

                if sys.platform != 'win32' and \
                        set_sigpipe:  # windows: no cover
                    # preexec_fn is no supported on windows
                    popen_args['preexec_fn'] = subprocess_setup

                if WIN_NEW_PG and sys.platform == 'win32':
                    popen_args['creationflags'] = \
                        subprocess.CREATE_NEW_PROCESS_GROUP

                self.internal = Popen(self.cmds, **popen_args)

            else:
                runs = []
                for index, cmd in enumerate(self.cmds):
                    if index == 0:
                        stdin = self.input_file.fd
                    else:
                        stdin = runs[index - 1].stdout

                    # When connecting two processes using a Pipe don't use
                    # universal_newlines mode. Indeed commands transmitting
                    # binary data between them will crash
                    # (e.g. gzip -dc foo.txt | tar -xf -)
                    if index == len(self.cmds) - 1:
                        stdout = self.output_file.fd
                        txt_mode = True
                    else:
                        stdout = subprocess.PIPE
                        txt_mode = False

                    popen_args = {
                        'stdin': stdin,
                        'stdout': stdout,
                        'stderr': self.error_file.fd,
                        'cwd': cwd,
                        'env': env,
                        'universal_newlines': txt_mode}

                    if sys.platform != 'win32' and \
                            set_sigpipe:  # windows: no cover
                        # preexec_fn is no supported on windows
                        popen_args['preexec_fn'] = subprocess_setup

                    if WIN_NEW_PG and sys.platform == 'win32':
                        popen_args['creationflags'] = \
                            subprocess.CREATE_NEW_PROCESS_GROUP

                    try:
                        runs.append(Popen(cmd, **popen_args))
                    except OSError:
                        logger.error('error when spawning %s', cmd)
                        # We have an error (e.g. file not found), try to
                        # kill all processes already started.
                        for p in runs:
                            p.terminate()
                        raise

                self.internal = runs[-1]

        except Exception as e:  # defensive code
            self.__error(e, self.cmds)
            raise

        self.pid = self.internal.pid

        if not bg:
            self.wait()

    def command_line_image(self):
        """Get shell command line image of the spawned command(s).

        :rtype: str

        This just a convenient wrapper around the function of the same
        name.
        """
        return command_line_image(self.cmds)

    def close_files(self):
        """Close all file descriptors."""
        self.output_file.close()
        self.error_file.close()
        self.input_file.close()

    def __error(self, error, cmds):
        """Set pid to -1 and status to 127 before closing files."""
        # BUGFIX: the class/docstring contract promises status == 127 and
        # pid == -1 when the process could not be created, but these
        # attributes were previously never set here.
        self.status = 127
        self.pid = -1
        self.close_files()
        logger.error(error)

        def not_found(path):
            """Raise OSError.

            :param path: path of the executable
            :type path: str
            """
            logger.error("%s not found", path)
            e3.log.debug('PATH=%s', os.environ['PATH'])
            raise OSError(errno.ENOENT,
                          'No such file or directory, %s not found' % path)

        # Try to send an helpful message if one of the executable has not
        # been found.
        if isinstance(cmds[0], basestring):
            if which(cmds[0], default=None) is None:
                not_found(cmds[0])
        else:
            for cmd in cmds:
                if which(cmd[0], default=None) is None:
                    not_found(cmd[0])

    def wait(self):
        """Wait until process ends and return its status.

        :return: exit code of the process
        :rtype: int
        """
        if self.status is not None:
            # Wait has already been called
            return self.status

        # If there is no pipe in the loop then just do a wait. Otherwise
        # in order to avoid blocked processes due to full pipes, use
        # communicate.
        if self.output_file.fd != subprocess.PIPE and \
                self.error_file.fd != subprocess.PIPE and \
                self.input_file.fd != subprocess.PIPE:
            self.status = self.internal.wait()
        else:
            tmp_input = None
            if self.input_file.fd == subprocess.PIPE:
                tmp_input = self.input_file.get_command()

            (self.out, self.err) = self.internal.communicate(tmp_input)
            self.status = self.internal.returncode

        self.close_files()
        return self.status

    def poll(self):
        """Check the process status and set self.status if available.

        This method checks whether the underlying process has exited
        or not. If it hasn't, then it just returns None immediately.
        Otherwise, it stores the process' exit code in self.status
        and then returns it.

        :return: None if the process is still alive; otherwise, returns
          the process exit status.
        :rtype: int | None
        """
        if self.status is not None:
            # Process is already terminated and wait been called
            return self.status

        result = self.internal.poll()
        if result is not None:
            # Process is finished, call wait to finalize it (closing
            # handles, ...)
            return self.wait()
        else:
            return None

    def kill(self, recursive=True, timeout=3):
        """Kill the process.

        :param recursive: if True, try to kill the complete process tree
        :type recursive: bool
        :param timeout: wait timeout (in seconds) after sending the kill
            signal (when recursive=True)
        :type timeout: int
        """
        if recursive:
            kill_process_tree(self.internal, timeout=timeout)
        else:
            self.internal.kill()

    def interrupt(self):
        """Send SIGINT to the process, kill on Windows."""
        if sys.platform == 'win32':
            self.kill()  # Ctrl-C event is unreliable on Windows
        else:
            self.internal.send_signal(signal.SIGINT)

    def is_running(self):
        """Check whether the process is running.

        :rtype: bool
        """
        if psutil is None:  # defensive code
            # psutil not imported, use our is_running function
            return is_running(self.pid)
        else:
            return self.internal.is_running()

    def children(self):
        """Return list of child processes (using psutil).

        :rtype: list[psutil.Process]
        """
        if psutil is None:  # defensive code
            raise NotImplementedError('Run.children() require psutil')
        return self.internal.children()
class McStasImage(ImageChannelMixin, PassiveChannel):
    """Image channel based on McStas simulation.

    This channel should be used together with `McStasTimerChannel` which
    provides the preselection [s] for calculating the number of simulated
    neutron counts:

        Ncounts = preselection [s] * ratio [cts/s]

    Note: Please configure **ratio** to match the average simulated neutron
    counts per second on your system.
    """

    # Background readout thread created by doStart (None when idle).
    _mythread = None
    # Popen/psutil handle of the running McStas simulation (None when idle).
    _process = None
    # Truthy while a measurement has been started and not yet finished.
    _started = None

    parameters = {
        'size': Param(
            'Detector size in pixels (x, y)',
            settable=False,
            type=tupleof(intrange(1, 8192), intrange(1, 8192)),
            default=(1, 1),
        ),
        'mcstasprog': Param('Name of the McStas simulation executable',
                            type=str, settable=False),
        'mcstasdir': Param('Directory where McStas stores results',
                           type=str,
                           default='%(session.experiment.dataroot)s'
                                   '/singlecount',
                           settable=False),
        'mcstasfile': Param('Name of the McStas data file',
                            type=str, settable=False),
        'mcsiminfo': Param('Name for the McStas Siminfo file',
                           settable=False, type=str, default='mccode.sim'),
        'ratio': Param(
            'Simulated neutrons per second for this machine. Please'
            ' tune this parameter according to your hardware for '
            ' realistic count times',
            settable=False, type=floatrange(1e3), default=1e6),
        'ci': Param('Constant ci multiplied with simulated intensity I',
                    settable=False, type=floatrange(1.)),
        # preselection time, usually set by McStasTimer
        'preselection': Param('Preset value for this channel', type=float,
                              settable=True, default=1.),
    }

    def doInit(self, mode):
        # Detector data is published as 32-bit unsigned counts.
        self.arraydesc = ArrayDesc(self.name, self.size, '<u4')
        # Remember the cwd at init time; the simulation runs from here.
        self._workdir = os.getcwd()
        self._start_time = None

    def doReadArray(self, quality):
        """Return the current detector image.

        For LIVE quality, ask the running McStas process (via SIGUSR2) to
        flush intermediate data; for FINAL quality, join the readout
        thread first so the data file is complete.
        """
        self.log.debug('quality: %s', quality)
        if quality == LIVE:
            self._send_signal(SIGUSR2)
        elif quality == FINAL:
            if self._mythread and self._mythread.is_alive():
                self._mythread.join(1.)
                if self._mythread.is_alive():
                    self.log.exception("Couldn't join readout thread.")
                else:
                    self._mythread = None
        self._readpsd(quality)
        return self._buf

    def _prepare_params(self):
        """Return a list of key=value strings.

        Each entry defines a parameter setting for the mcstas simulation
        call.

        examples:
            param=10
        """
        raise NotImplementedError('Please implement _prepare_params method')

    def doPrepare(self):
        self._mcstas_params = ' '.join(self._prepare_params())
        self.log.debug('McStas parameters: %s', self._mcstas_params)
        # Buffer is indexed (y, x), hence the reversed size tuple.
        self._buf = np.zeros(self.size[::-1])
        self.readresult = [0]
        self._start_time = None
        self._mcstasdirpath = session.experiment.data.expandNameTemplates(
            self.mcstasdir)[0]

    def valueInfo(self):
        return (Value(self.name + '.sum', unit='cts', type='counter',
                      errors='sqrt', fmtstr='%d'), )

    def doStart(self):
        self._started = True
        # Run the simulation in a background thread so doStart returns
        # immediately; doStatus reports BUSY while it is alive.
        self._mythread = createThread('detector %s' % self, self._run)

    def doStatus(self, maxage=0):
        if self._started or (self._mythread and self._mythread.is_alive()):
            return status.BUSY, 'busy'
        return status.OK, 'idle'

    def doFinish(self):
        self.log.debug('finish')
        self._started = None
        # SIGTERM makes McStas write its results and exit.
        self._send_signal(SIGTERM)

    def _send_signal(self, sig):
        """Send *sig* to the simulation process and wait until it has
        released the data/siminfo files (i.e. finished writing them)."""
        if self._process and self._process.is_running():
            self._process.send_signal(sig)
            # wait for mcstas releasing fds
            datafile = path.join(self._mcstasdirpath, self.mcstasfile)
            siminfo = path.join(self._mcstasdirpath, self.mcsiminfo)
            try:
                while self._process and self._process.is_running():
                    fnames = [f.path for f in self._process.open_files()]
                    if siminfo not in fnames and datafile not in fnames:
                        break
                    session.delay(.01)
            except (AccessDenied, NoSuchProcess):
                self.log.debug(
                    'McStas process already terminated in _send_signal(%r)',
                    sig)
            self.log.debug('McStas process has written file on signal (%r)',
                           sig)

    def _run(self):
        """Run McStas simulation executable.

        The current settings of the instrument parameters will be
        transferred to it.
        """
        try:
            shutil.rmtree(self._mcstasdirpath)
        except OSError:
            self.log.warning('could not remove old data')
        # -n: neutron count derived from requested time * machine ratio.
        command = '%s -n %d -d %s %s' % (
            self.mcstasprog,
            self.ratio * self.preselection,
            self._mcstasdirpath,
            self._mcstas_params,
        )
        self.log.debug('run %s', command)
        try:
            self._start_time = time.time()
            self._process = Popen(command.split(), stdout=PIPE, stderr=PIPE,
                                  cwd=self._workdir)
            out, err = self._process.communicate()
            if out:
                self.log.debug('McStas output:')
                for line in out.splitlines():
                    self.log.debug('[McStas] %s',
                                   line.decode('utf-8', 'ignore'))
            if err:
                self.log.warning('McStas found some problems:')
                for line in err.splitlines():
                    self.log.warning('[McStas] %s',
                                     line.decode('utf-8', 'ignore'))
        except OSError as e:
            self.log.error('Execution failed: %s', e)
        if self._process:
            self._process.wait()
        self._process = None
        self._started = None

    def _readpsd(self, quality):
        """Parse the McStas data file into self._buf / self.readresult.

        Counts are scaled by ci and by the elapsed (or preset) time, since
        the simulation output is a rate.
        """
        try:
            with open(path.join(self._mcstasdirpath, self.mcstasfile),
                      'r') as f:
                # Only the tail of the file holds the data block of
                # interest (header line + size[0] data rows per section).
                lines = f.readlines()[-3 * (self.size[0] + 1):]
            if lines[0].startswith('# Data') and self.mcstasfile in lines[0]:
                if quality == FINAL:
                    seconds = self.preselection
                else:
                    # Live readout: scale by real elapsed time, capped at
                    # the preset.
                    seconds = min(time.time() - self._start_time,
                                  self.preselection)
                self._buf = (np.loadtxt(lines[1:self.size[0] + 1],
                                        dtype=np.float32) * self.ci
                             * seconds).astype(np.uint32)
                self.readresult = [self._buf.sum()]
            elif quality != LIVE:
                raise OSError('Did not find start line: %s' % lines[0])
        except OSError:
            # For LIVE reads the file may legitimately not exist yet.
            if quality != LIVE:
                self.log.exception('Could not read result file')
class Process(object):
    """Wraps a process.

    Options:

    - **wid**: the process unique identifier. This value will be used to
      replace the *$WID* string in the command line if present.

    - **cmd**: the command to run. May contain any of the variables
      available that are being passed to this class. They will be
      replaced using the python format syntax.

    - **args**: the arguments for the command to run. Can be a list or
      a string. If **args** is a string, it's splitted using
      :func:`shlex.split`. Defaults to None.

    - **executable**: When executable is given, the first item in
      the args sequence obtained from **cmd** is still treated by most
      programs as the command name, which can then be different from the
      actual executable name. It becomes the display name for the
      executing program in utilities such as **ps**.

    - **working_dir**: the working directory to run the command in. If
      not provided, will default to the current working directory.

    - **shell**: if *True*, will run the command in the shell
      environment. *False* by default. **warning: this is a
      security hazard**.

    - **uid**: if given, is the user id or name the command should run
      with. The current uid is the default.

    - **gid**: if given, is the group id or name the command should run
      with. The current gid is the default.

    - **env**: a mapping containing the environment variables the command
      will run with. Optional.

    - **rlimits**: a mapping containing rlimit names and values that will
      be set before the command runs.

    - **use_fds**: if True, will not close the fds in the subprocess. Must
      be set to True on Windows if stdout or stderr are redirected.
      default: False.

    - **pipe_stdout**: if True, will open a PIPE on stdout. default: True.

    - **pipe_stderr**: if True, will open a PIPE on stderr. default: True.

    - **close_child_stdin**: If True, redirects the child process' stdin
      to /dev/null after the fork. default: True.

    - **close_child_stdout**: If True, redirects the child process' stdout
      to /dev/null after the fork. default: False.

    - **close_child_stderr**: If True, redirects the child process' stderr
      to /dev/null after the fork. default: False.
    """
    def __init__(self, name, wid, cmd, args=None, working_dir=None,
                 shell=False, uid=None, gid=None, env=None, rlimits=None,
                 executable=None, use_fds=False, watcher=None, spawn=True,
                 pipe_stdout=True, pipe_stderr=True, close_child_stdin=True,
                 close_child_stdout=False, close_child_stderr=False):
        self.name = name
        self.wid = wid
        self.cmd = cmd
        self.args = args
        self.working_dir = working_dir or get_working_dir()
        self.shell = shell
        if uid:
            # Accept either a numeric uid or a user name.
            self.uid = to_uid(uid)
            self.username = get_username_from_uid(self.uid)
        else:
            self.username = None
            self.uid = None
        self.gid = to_gid(gid) if gid else None
        self.env = env or {}
        self.rlimits = rlimits or {}
        self.executable = executable
        self.use_fds = use_fds
        self.watcher = watcher
        self.pipe_stdout = pipe_stdout
        self.pipe_stderr = pipe_stderr
        self.close_child_stdin = close_child_stdin
        self.close_child_stdout = close_child_stdout
        self.close_child_stderr = close_child_stderr
        self.stopping = False
        # sockets created before fork, should be let go after.
        self._sockets = []
        self._worker = None
        self.redirected = False
        self.started = 0

        # A uid without an explicit gid falls back to the user's
        # primary group.
        if self.uid is not None and self.gid is None:
            self.gid = get_default_gid(self.uid)

        if IS_WINDOWS:
            if not self.use_fds and (self.pipe_stderr or self.pipe_stdout):
                raise ValueError("On Windows, you can't close the fds if "
                                 "you are redirecting stdout or stderr")

        if spawn:
            self.spawn()

    def _null_streams(self, streams):
        """Redirect each stream in *streams* to /dev/null (child side)."""
        devnull = os.open(os.devnull, os.O_RDWR)
        try:
            for stream in streams:
                if not hasattr(stream, 'fileno'):
                    # we're probably dealing with a file-like
                    continue
                try:
                    stream.flush()
                    os.dup2(devnull, stream.fileno())
                except IOError:
                    # some streams, like stdin - might be already closed.
                    pass
        finally:
            os.close(devnull)

    def _get_sockets_fds(self):
        """Returns sockets dict. If this worker's cmd indicates use of a
        SO_REUSEPORT socket, a new socket is created and bound. This new
        socket's FD replaces original socket's FD in returned dict. This
        method populates `self._sockets` list. This list should be let go
        after `fork()`.
        """
        sockets_fds = None

        if self.watcher is not None and self.watcher.sockets is not None:
            sockets_fds = self.watcher._get_sockets_fds()
            reuseport_sockets = tuple((sn, s) for (sn, s)
                                      in self.watcher.sockets.items()
                                      if s.so_reuseport)

            for sn, s in reuseport_sockets:
                # watcher.cmd uses this reuseport socket
                if 'circus.sockets.%s' % sn in self.watcher.cmd:
                    sock = CircusSocket.load_from_config(s._cfg)
                    sock.bind_and_listen()
                    # replace original socket's fd
                    sockets_fds[sn] = sock.fileno()
                    # keep new socket until fork returns
                    self._sockets.append(sock)

        return sockets_fds

    def spawn(self):
        """Start the child process (Popen), applying the configured
        stream/rlimit/uid/gid setup in a pre-exec hook on POSIX."""
        self.started = time.time()
        sockets_fds = self._get_sockets_fds()
        args = self.format_args(sockets_fds=sockets_fds)

        def preexec():
            # Runs in the child between fork and exec.
            streams = []

            if self.close_child_stdin:
                streams.append(sys.stdin)

            if self.close_child_stdout:
                streams.append(sys.stdout)

            if self.close_child_stderr:
                streams.append(sys.stderr)

            self._null_streams(streams)
            os.setsid()

            if resource:
                for limit, value in self.rlimits.items():
                    res = getattr(
                        resource, 'RLIMIT_%s' % limit.upper(), None
                    )
                    if res is None:
                        raise ValueError('unknown rlimit "%s"' % limit)

                    # TODO(petef): support hard/soft limits

                    # for the NOFILE limit, if we fail to set an unlimited
                    # value then check the existing hard limit because we
                    # probably can't bypass it due to a kernel limit - so
                    # just assume that the caller means they want to use
                    # the kernel limit when they pass the unlimited value.
                    # This is better than failing to start the process and
                    # forcing the caller to always be aware of what the
                    # kernel configuration is. If they do pass in a real
                    # limit value, then we'll just raise the failure as
                    # they should know that their expectations couldn't be
                    # met.

                    # TODO - we can't log here as this occurs in the child
                    # process after the fork but it would be very good to
                    # notify the admin of the situation somehow.
                    retry = False
                    try:
                        resource.setrlimit(res, (value, value))
                    except ValueError:
                        if res == resource.RLIMIT_NOFILE and \
                                value == resource.RLIM_INFINITY:
                            _soft, value = resource.getrlimit(res)
                            retry = True
                        else:
                            raise
                    if retry:
                        resource.setrlimit(res, (value, value))

            if self.gid:
                try:
                    os.setgid(self.gid)
                except OverflowError:
                    if not ctypes:
                        raise
                    # versions of python < 2.6.2 don't manage unsigned int
                    # for groups like on osx or fedora
                    os.setgid(-ctypes.c_int(-self.gid).value)

                if self.username is not None:
                    try:
                        os.initgroups(self.username, self.gid)
                    except (OSError, AttributeError):
                        # not support on Mac or 2.6
                        pass

            if self.uid:
                os.setuid(self.uid)

        if IS_WINDOWS:
            # On Windows we can't use a pre-exec function
            preexec_fn = None
        else:
            preexec_fn = preexec

        extra = {}
        if self.pipe_stdout:
            extra['stdout'] = PIPE

        if self.pipe_stderr:
            extra['stderr'] = PIPE

        self._worker = Popen(args, cwd=self.working_dir,
                             shell=self.shell, preexec_fn=preexec_fn,
                             env=self.env, close_fds=not self.use_fds,
                             executable=self.executable, **extra)

        # let go of sockets created only for self._worker to inherit
        self._sockets = []

    def format_args(self, sockets_fds=None):
        """ It's possible to use environment variables and some other
        variables that are available in this context, when spawning the
        processes.
        """
        logger.debug('cmd: ' + bytestring(self.cmd))
        logger.debug('args: ' + str(self.args))

        current_env = ObjectDict(self.env.copy())

        format_kwargs = {
            'wid': self.wid, 'shell': self.shell, 'args': self.args,
            'env': current_env, 'working_dir': self.working_dir,
            'uid': self.uid, 'gid': self.gid, 'rlimits': self.rlimits,
            'executable': self.executable, 'use_fds': self.use_fds}

        if sockets_fds is not None:
            format_kwargs['sockets'] = sockets_fds

        if self.watcher is not None:
            for option in self.watcher.optnames:
                if option not in format_kwargs\
                        and hasattr(self.watcher, option):
                    format_kwargs[option] = getattr(self.watcher, option)

        cmd = replace_gnu_args(self.cmd, **format_kwargs)

        # Legacy $WID substitution kept for backward compatibility.
        if '$WID' in cmd or (self.args and '$WID' in self.args):
            msg = "Using $WID in the command is deprecated. You should use "\
                  "the python string format instead. In your case, this "\
                  "means replacing the $WID in your command by $(WID)."
            warnings.warn(msg, DeprecationWarning)
            self.cmd = cmd.replace('$WID', str(self.wid))

        if self.args is not None:
            if isinstance(self.args, string_types):
                args = shlex.split(bytestring(replace_gnu_args(
                    self.args, **format_kwargs)))
            else:
                args = [bytestring(replace_gnu_args(arg, **format_kwargs))
                        for arg in self.args]
            args = shlex.split(bytestring(cmd), posix=not IS_WINDOWS) + args
        else:
            args = shlex.split(bytestring(cmd), posix=not IS_WINDOWS)

        if self.shell:
            # subprocess.Popen(shell=True) implies that 1st arg is the
            # requested command, remaining args are applied to sh.
            args = [' '.join(quote(arg) for arg in args)]
            shell_args = format_kwargs.get('shell_args', None)
            if shell_args and IS_WINDOWS:
                logger.warn("shell_args won't apply for "
                            "windows platforms: %s", shell_args)
            elif isinstance(shell_args, string_types):
                args += shlex.split(bytestring(replace_gnu_args(
                    shell_args, **format_kwargs)))
            elif shell_args:
                args += [bytestring(replace_gnu_args(arg, **format_kwargs))
                         for arg in shell_args]

        elif format_kwargs.get('shell_args', False):
            logger.warn("shell_args is defined but won't be used "
                        "in this context: %s", format_kwargs['shell_args'])
        logger.debug("process args: %s", args)
        return args

    def returncode(self):
        """Return the child's exit code (None while still running)."""
        return self._worker.returncode

    @debuglog
    def poll(self):
        return self._worker.poll()

    @debuglog
    def is_alive(self):
        return self.poll() is None

    @debuglog
    def send_signal(self, sig):
        """Sends a signal **sig** to the process."""
        logger.debug("sending signal %s to %s" % (sig, self.pid))
        return self._worker.send_signal(sig)

    @debuglog
    def stop(self):
        """Stop the process and close stdout/stderr

        If the corresponding process is still here
        (normally it's already killed by the watcher),
        a SIGTERM is sent, then a SIGKILL after 1 second.

        The shutdown process (SIGTERM then SIGKILL) is
        normally taken by the watcher. So if the process
        is still there here, it's a kind of bad behavior
        because the graceful timeout won't be respected here.
        """
        try:
            try:
                if self.is_alive():
                    try:
                        return self._worker.terminate()
                    except AccessDenied:
                        # It can happen on Windows if the process
                        # dies after poll returns (unlikely)
                        pass
            finally:
                self.close_output_channels()
        except NoSuchProcess:
            pass

    def close_output_channels(self):
        """Close our ends of the child's stdout/stderr pipes, if any."""
        if self._worker.stderr is not None:
            self._worker.stderr.close()
        if self._worker.stdout is not None:
            self._worker.stdout.close()

    def wait(self, timeout=None):
        """
        Wait for the process to terminate, in the fashion
        of waitpid.

        Accepts a timeout in seconds.
        """
        self._worker.wait(timeout)

    def age(self):
        """Return the age of the process in seconds."""
        return time.time() - self.started

    def info(self):
        """Return process info.

        The info returned is a mapping with these keys:

        - **mem_info1**: Resident Set Size Memory in bytes (RSS)
        - **mem_info2**: Virtual Memory Size in bytes (VMS).
        - **cpu**: % of cpu usage.
        - **mem**: % of memory usage.
        - **ctime**: process CPU (user + system) time in seconds.
        - **pid**: process id.
        - **username**: user name that owns the process.
        - **nice**: process niceness (between -20 and 20)
        - **cmdline**: the command line the process was run with.
        """
        try:
            info = get_info(self._worker)
        except NoSuchProcess:
            return "No such process (stopped?)"

        info["age"] = self.age()
        info["started"] = self.started
        info["children"] = []
        info['wid'] = self.wid
        for child in get_children(self._worker):
            info["children"].append(get_info(child))

        return info

    def children(self):
        """Return a list of children pids."""
        return [child.pid for child in get_children(self._worker)]

    def is_child(self, pid):
        """Return True is the given *pid* is a child of that process."""
        pids = [child.pid for child in get_children(self._worker)]
        if pid in pids:
            return True
        return False

    @debuglog
    def send_signal_child(self, pid, signum):
        """Send signal *signum* to child *pid*."""
        children = dict((child.pid, child)
                        for child in get_children(self._worker))
        try:
            children[pid].send_signal(signum)
        except KeyError:
            raise NoSuchProcess(pid)

    @debuglog
    def send_signal_children(self, signum, recursive=False):
        """Send signal *signum* to all children."""
        for child in get_children(self._worker, recursive):
            try:
                child.send_signal(signum)
            except OSError as e:
                if e.errno != errno.ESRCH:
                    raise

    @property
    def status(self):
        """Return the process status as a constant

        - RUNNING
        - DEAD_OR_ZOMBIE
        - UNEXISTING
        - OTHER
        """
        try:
            if get_status(self._worker) in (STATUS_ZOMBIE, STATUS_DEAD):
                return DEAD_OR_ZOMBIE
        except NoSuchProcess:
            return UNEXISTING

        if self._worker.is_running():
            return RUNNING
        return OTHER

    @property
    def pid(self):
        """Return the *pid*"""
        return self._worker.pid

    @property
    def stdout(self):
        """Return the *stdout* stream"""
        return self._worker.stdout

    @property
    def stderr(self):
        """Return the *stdout* stream"""
        return self._worker.stderr

    def __eq__(self, other):
        # Identity comparison: two wrappers are equal only if they are
        # the same object.
        return self is other

    def __lt__(self, other):
        # Ordering by start timestamp (used e.g. to find oldest process).
        return self.started < other.started

    def __gt__(self, other):
        return self.started > other.started
default=0) parser.add_argument('--disable-plugin-list', help='plugins to disable by name, separate with commas', type=str, default='chat') parser.add_argument( '--store-plaintext', help= 'store plaintext blocks or not. note that encrypted blocks may not really be encrypted, but we cannot detect that', type=int, default=1) args = parser.parse_args() p = Popen([sub_script, 'version'], stdout=DEVNULL) p.wait() from filepaths import config_file, keys_file from coredb import blockmetadb import onionrcrypto with open(config_file, 'r') as cf: config = ujson.loads(cf.read()) if args.private_key: priv = args.private_key pub = onionrcrypto.cryptoutils.get_pub_key_from_priv(priv) with open(keys_file, "a") as f: f.write(',' + pub.decode() + ',' + priv) config['general']['public_key'] = pub config['plugins']['disabled'] = args.disable_plugin_list.split(',')
class Run(object):
    """Class to handle processes.

    :ivar cmds: The ``cmds`` argument passed to the __init__ method
        (a command line passed in a list, or a list of command lines passed as
        a list of list).
    :ivar status: The exit status. As the exit status is only meaningful after
        the process has exited, its initial value is None. When a problem
        running the command is detected and a process does not get
        created, its value gets set to the special value 127.
    :ivar out: process standard output (if instantiated with output = PIPE)
    :ivar err: same as out but for standard error
    :ivar pid: PID. Set to -1 if the command failed to run.
    """

    def __init__(self, cmds, cwd=None, output=PIPE, error=STDOUT, input=None,
                 bg=False, timeout=None, env=None, set_sigpipe=True,
                 parse_shebang=False, ignore_environ=True,
                 python_executable=sys.executable):
        """Spawn a process.

        :param cmds: two possibilities:
            1) a command line: a tool name and its arguments, passed
            in a list. e.g. ['ls', '-a', '.']
            2) a list of command lines (as defined in (1)): the
            different commands will be piped. This means that
            [['ps', '-a'], ['grep', 'vxsim']] will be equivalent to
            the system command line 'ps -a | grep vxsim'.
        :type cmds: list[str] | list[list[str]]
        :param cwd: directory in which the process should be executed (string
            or None). If None then current directory is used
        :type cwd: str | None
        :param output: can be PIPE (default), a filename string, a fd on an
            already opened file, a python file object or None (for stdout).
        :type output: int | str | file | None
        :param error: same as output or STDOUT, which indicates that the
            stderr data from the applications should be captured into the
            same file handle as for stdout.
        :type error: int | str | file | None
        :param input: same as output
        :type input: int | str | file | None
        :param bg: if True then run in background
        :type bg: bool
        :param timeout: limit execution time (in seconds), None means
            unlimited
        :type timeout: int | None
        :param env: dictionary for environment variables (e.g. os.environ)
        :type env: dict
        :param set_sigpipe: reset SIGPIPE handler to default value
        :type set_sigpipe: bool
        :param parse_shebang: take the #! interpreter line into account
        :type parse_shebang: bool
        :param ignore_environ: Applies only when env parameter is not None.
            When set to True (the default), the only environment variables
            passed to the program are the ones provided by the env parameter.
            Otherwise, the environment passed to the program consists of the
            environment variables currently defined (os.environ) augmented by
            the ones provided in env.
        :type ignore_environ: bool
        :param python_executable: name or path to the python executable
        :type python_executable: str

        :raise OSError: when trying to execute a non-existent file.

        If you specify a filename for output or stderr then file content is
        reseted (equiv. to > in shell). If you prepend the filename with '+'
        then the file will be opened in append mode (equiv. to >> in shell)
        If you prepend the input with '|', then the content of input string
        will be used for process stdin.
        """
        def add_interpreter_command(cmd_line):
            """Add the interpreter defined in the #! line to cmd_line.

            If the #! line cannot be parsed, just return the cmd_line
            unchanged.

            If the interpreter command line contains /usr/bin/env python it
            will be replaced by the value of python_executable.

            On windows, /usr/bin/env will be ignored to avoid a dependency on
            cygwin.

            :param cmd_line: command line
            :type cmd_line: list[str]
            """
            if not parse_shebang:
                # nothing to do
                return cmd_line
            prog = which(cmd_line[0], default=None)
            if prog is None:
                # Not found. Do not modify the command line
                return cmd_line

            with open(prog) as f:
                header = f.read()[0:2]
                if header != "#!":
                    # Unknown header
                    return cmd_line
                # Header found, get the interpreter command in the first line
                f.seek(0)
                line = f.readline()
                interpreter_cmds = [l.strip() for l in
                                    line[line.find('!') + 1:].split()]
                # Pass the program path to the interpreter
                if len(cmd_line) > 1:
                    cmd_line = [prog] + list(cmd_line[1:])
                else:
                    cmd_line = [prog]

                # If the interpreter is '/usr/bin/env python', use
                # python_executable instead to keep the same python executable
                if interpreter_cmds[0:2] == ['/usr/bin/env', 'python']:
                    if len(interpreter_cmds) > 2:
                        return [python_executable] + \
                            interpreter_cmds[2:] + cmd_line
                    else:
                        return [python_executable] + cmd_line
                elif sys.platform == 'win32':  # unix: no cover
                    if interpreter_cmds[0] == '/usr/bin/env':
                        return interpreter_cmds[1:] + cmd_line
                    elif interpreter_cmds[0] in ('/bin/bash', '/bin/sh') and \
                            'SHELL' in os.environ:
                        return [os.environ['SHELL']] + cmd_line
                return interpreter_cmds + cmd_line

        # First resolve output, error and input
        self.input_file = File(input, 'r')
        self.output_file = File(output, 'w')
        self.error_file = File(error, 'w')

        self.status = None
        self.out = ''
        self.err = ''
        self.cmds = []

        if env is not None and not ignore_environ:
            # ignore_environ is False, so get a copy of the current
            # environment and update it with the env dictionary.
            tmp = os.environ.copy()
            tmp.update(env)
            env = tmp

        # When a timeout is requested, wrap the command with the external
        # "rlimit" helper which enforces the limit; fall back to no limit if
        # the helper binary is missing.
        rlimit_args = []
        if timeout is not None:
            rlimit = get_rlimit()
            if os.path.exists(rlimit):
                rlimit_args = [rlimit, '%d' % timeout]
            else:
                logger.warning('cannot find rlimit at %s', rlimit)
                rlimit_args = []

        try:
            # A list whose first element is a string is a single command
            # line; otherwise it is a list of command lines to be piped.
            if isinstance(cmds[0], basestring):
                self.cmds = rlimit_args + list(add_interpreter_command(cmds))
            else:
                self.cmds = [add_interpreter_command(c) for c in cmds]
                self.cmds[0] = rlimit_args + list(self.cmds[0])

            cmdlogger.debug('Run: cd %s; %s' % (
                cwd if cwd is not None else os.getcwd(),
                self.command_line_image()))

            if isinstance(cmds[0], basestring):
                # Single command line case
                popen_args = {
                    'stdin': self.input_file.fd,
                    'stdout': self.output_file.fd,
                    'stderr': self.error_file.fd,
                    'cwd': cwd,
                    'env': env,
                    'universal_newlines': True}

                if sys.platform != 'win32' and set_sigpipe:
                    # preexec_fn is not supported on windows
                    popen_args['preexec_fn'] = subprocess_setup

                self.internal = Popen(self.cmds, **popen_args)

            else:
                # Pipeline case: chain each process' stdout into the next
                # process' stdin.
                runs = []
                for index, cmd in enumerate(self.cmds):
                    if index == 0:
                        stdin = self.input_file.fd
                    else:
                        stdin = runs[index - 1].stdout

                    # When connecting two processes using a Pipe don't use
                    # universal_newlines mode. Indeed commands transmitting
                    # binary data between them will crash
                    # (e.g. gzip -dc foo.txt | tar -xf -)
                    if index == len(self.cmds) - 1:
                        stdout = self.output_file.fd
                        txt_mode = True
                    else:
                        stdout = subprocess.PIPE
                        txt_mode = False

                    popen_args = {
                        'stdin': stdin,
                        'stdout': stdout,
                        'stderr': self.error_file.fd,
                        'cwd': cwd,
                        'env': env,
                        'universal_newlines': txt_mode}

                    if sys.platform != 'win32' and set_sigpipe:
                        # preexec_fn is not supported on windows
                        popen_args['preexec_fn'] = subprocess_setup

                    try:
                        runs.append(Popen(cmd, **popen_args))
                    except OSError as e:
                        logger.error('error when spawning %s', cmd)
                        # We have an error (e.g. file not found), try to kill
                        # all processes already started.
                        for p in runs:
                            p.terminate()
                        raise

                # The pipeline is represented by its last process.
                self.internal = runs[-1]

        except Exception as e:
            self.__error(e, self.cmds)
            raise

        self.pid = self.internal.pid

        if not bg:
            self.wait()

    def command_line_image(self):
        """Get shell command line image of the spawned command(s).

        :rtype: str

        This is just a convenient wrapper around the function of the same
        name.
        """
        return command_line_image(self.cmds)

    def close_files(self):
        """Close all file descriptors."""
        self.output_file.close()
        self.error_file.close()
        self.input_file.close()

    def __error(self, error, cmds):
        """Log the error and close all files.

        NOTE(review): unlike what an older docstring claimed, this method
        does NOT set pid to -1 / status to 127; it only closes files, logs,
        and raises OSError when one of the executables cannot be found.
        """
        self.close_files()
        logger.error(error)

        def not_found(path):
            """Raise OSError.

            :param path: path of the executable
            :type path: str
            """
            logger.error("%s not found", path)
            e3.log.debug('PATH=%s', os.environ['PATH'])
            raise OSError(errno.ENOENT,
                          'No such file or directory, %s not found' % path)

        # Try to send an helpful message if one of the executable has not
        # been found.
        if isinstance(cmds[0], basestring):
            if which(cmds[0], default=None) is None:
                not_found(cmds[0])
        else:
            for cmd in cmds:
                if which(cmd[0], default=None) is None:
                    not_found(cmd[0])

    def wait(self):
        """Wait until process ends and return its status.

        :return: exit code of the process
        :rtype: int
        """
        if self.status is not None:
            # Wait has already been called
            return self.status

        # If there is no pipe in the loop then just do a wait. Otherwise
        # in order to avoid blocked processes due to full pipes, use
        # communicate.
        if self.output_file.fd != subprocess.PIPE and \
                self.error_file.fd != subprocess.PIPE and \
                self.input_file.fd != subprocess.PIPE:
            self.status = self.internal.wait()
        else:
            tmp_input = None
            if self.input_file.fd == subprocess.PIPE:
                tmp_input = self.input_file.get_command()

            (self.out, self.err) = self.internal.communicate(tmp_input)
            self.status = self.internal.returncode

        self.close_files()
        return self.status

    def poll(self):
        """Check the process status and set self.status if available.

        This method checks whether the underlying process has exited
        or not. If it hasn't, then it just returns None immediately.
        Otherwise, it stores the process' exit code in self.status
        and then returns it.

        :return: None if the process is still alive; otherwise, returns the
          process exit status.
        :rtype: int | None
        """
        if self.status is not None:
            # Process is already terminated and wait been called
            return self.status

        result = self.internal.poll()

        if result is not None:
            # Process is finished, call wait to finalize it (closing handles,
            # ...)
            return self.wait()
        else:
            return None

    def kill(self):
        """Kill the process."""
        self.internal.kill()

    def interrupt(self):
        """Send SIGINT CTRL_C_EVENT to the process."""
        # On windows CTRL_C_EVENT is available and SIGINT is not;
        # and the other way around on other platforms.
        interrupt_signal = getattr(signal, 'CTRL_C_EVENT', signal.SIGINT)
        self.internal.send_signal(interrupt_signal)

    def is_running(self):
        """Check whether the process is running.

        :rtype: bool
        """
        if psutil is None:
            # psutil not imported, use our is_running function
            return is_running(self.pid)
        else:
            return self.internal.is_running()

    def children(self):
        """Return list of child processes (using psutil).

        :rtype: list[psutil.Process]
        """
        if psutil is None:
            raise NotImplementedError('Run.children() require psutil')
        return self.internal.children()
def run_subprocess(command, shell=False, doexec=True, monitor=False,
                   tile_id=None):
    """Subprocess runner.

    If subprocess returns non-zero exit code, STDERR is sent to the logger.

    Parameters
    ----------
    command : list of str
        Command to pass to subprocess.run(). Eg ['wget', '-q', '-r', dl_url]
    shell : bool
        Passed to subprocess.run(); when True the command list is joined
        into a single string, as shell=True requires.
    doexec : bool
        Execute the subprocess or just print out the concatenated command
    monitor : bool
        When True, sample system memory usage once for the spawned process
        and emit it on the performance logger, tagged with *tile_id*.
    tile_id : str | None
        Identifier used to tag monitoring/log records.

    Returns
    -------
    bool
        True when the process exited with status 0 (or was not executed),
        False otherwise.
    """
    if not doexec:
        logger.debug("Not executing %s", command)
        return True

    cmd = " ".join(command)
    if shell:
        command = cmd
    logger.debug(command)
    popen = Popen(command, shell=shell, stderr=PIPE, stdout=PIPE)
    if monitor:
        proc = Process(popen.pid)
        with proc.oneshot():
            try:
                logger_perf.debug(
                    "%s;%s;%s" %
                    (tile_id, virtual_memory().used, swap_memory().used))
            # BUG FIX: the original `except NoSuchProcess or ZombieProcess:`
            # evaluates the `or` first, so only NoSuchProcess was ever
            # caught; a tuple is the correct way to catch several types.
            except (NoSuchProcess, ZombieProcess):
                logger.debug("%s is Zombie or NoSuchProcess" % tile_id)
            except AccessDenied as e:
                logger_perf.exception(e)
    # communicate() waits for process termination, so no extra wait() is
    # needed; returncode is set afterwards.
    stdout, stderr = popen.communicate()
    err = stderr.decode(locale.getpreferredencoding(do_setlocale=True))
    if popen.returncode != 0:
        logger.debug("Process returned with non-zero exit code: %s",
                     popen.returncode)
        logger.error(err)
        return False
    return True
class McStasImage(ImageChannelMixin, PassiveChannel):
    """Image channel based on McStas simulation.

    The McStas executable is started in a background thread; LIVE readouts
    are triggered by sending SIGUSR2 to the simulation process, the final
    readout by SIGTERM, after which the McStas data file is parsed into a
    numpy array scaled by ``ci``.
    """

    # Background thread running the simulation (see doStart/_run).
    _mythread = None
    # Handle of the spawned McStas process; None while not running.
    _process = None

    parameters = {
        'size': Param(
            'Detector size in pixels (x, y)',
            settable=False,
            type=tupleof(intrange(1, 8192), intrange(1, 8192)),
            default=(1, 1),
        ),
        'mcstasprog': Param('Name of the McStas simulation executable',
                            type=str, settable=False),
        'mcstasdir': Param('Directory where McStas stores results', type=str,
                           default='singlecount', settable=False),
        'mcstasfile': Param('Name of the McStas data file', type=str,
                            settable=False),
        'mcsiminfo': Param('Name for the McStas Siminfo file', settable=False,
                           type=str, default='mccode.sim'),
        'ci': Param('Constant ci applied to simulated intensity I',
                    settable=False, type=floatrange(0.), default=1e3)
    }

    def doInit(self, mode):
        """Create the array descriptor and remember the working directory."""
        self.arraydesc = ArrayDesc(self.name, self.size, '<u4')
        self._workdir = os.getcwd()

    def doReadArray(self, quality):
        """Return the detector image.

        For LIVE quality the running simulation is asked (via SIGUSR2) to
        dump intermediate data; for FINAL quality the readout thread is
        joined first.
        """
        self.log.debug('quality: %s', quality)
        if quality == LIVE:
            self._send_signal(SIGUSR2)
        elif quality == FINAL:
            if self._mythread and self._mythread.is_alive():
                self._mythread.join(1.)
                if self._mythread.is_alive():
                    self.log.exception("Couldn't join readout thread.")
                else:
                    self._mythread = None
        self._readpsd(quality == LIVE)
        return self._buf

    def _prepare_params(self):
        """Return a list of key=value strings.

        Each entry defines a parameter setting for the mcstas simulation
        call.

        examples:
            param=10
        """
        raise NotImplementedError('Please implement _prepare_params method')

    def doPrepare(self):
        """Collect simulation parameters and reset the image buffer."""
        self._mcstas_params = ' '.join(self._prepare_params())
        self.log.debug('McStas parameters: %s', self._mcstas_params)
        self._buf = np.zeros(self.size[::-1])
        self.readresult = [0]

    def valueInfo(self):
        return (Value(self.name + '.sum', unit='cts', type='counter',
                      errors='sqrt', fmtstr='%d'), )

    def doStart(self):
        """Launch the simulation in a background thread."""
        self._mythread = createThread('detector %s' % self, self._run)

    def doStatus(self, maxage=0):
        if self._mythread and self._mythread.is_alive():
            return status.BUSY, 'busy'
        return status.OK, 'idle'

    def doFinish(self):
        """Terminate the simulation to make it write its final data."""
        self.log.debug('finish')
        self._send_signal(SIGTERM)

    def _send_signal(self, sig):
        """Send *sig* to the McStas process and wait until it has released
        the data files (so they can be read back safely)."""
        if self._process and self._process.is_running():
            self._process.send_signal(sig)
            # wait for mcstas releasing fds
            datafile = path.join(self._workdir, self.mcstasdir,
                                 self.mcstasfile)
            siminfo = path.join(self._workdir, self.mcstasdir, self.mcsiminfo)
            try:
                while self._process and self._process.is_running():
                    fnames = [f.path for f in self._process.open_files()]
                    if siminfo not in fnames and datafile not in fnames:
                        break
                    session.delay(.01)
            except (AccessDenied, NoSuchProcess):
                self.log.debug(
                    'McStas process already terminated in _send_signal(%r)',
                    sig)
            self.log.debug('McStas process has written file on signal (%r)',
                           sig)

    def _run(self):
        """Run McStas simulation executable.

        The current settings of the instrument parameters will be transferred
        to it.
        """
        try:
            shutil.rmtree(self.mcstasdir)
        except (IOError, OSError):
            self.log.info('could not remove old data')
        command = '%s -n 1e8 -d %s %s' % (self.mcstasprog, self.mcstasdir,
                                          self._mcstas_params)
        self.log.debug('run %s', command)
        try:
            self._process = Popen(command.split(), stdout=PIPE, stderr=PIPE,
                                  cwd=self._workdir)
            out, err = self._process.communicate()
            if out:
                self.log.debug('McStas output:')
                for line in out.splitlines():
                    self.log.debug('[McStas] %s', line)
            if err:
                self.log.warning('McStas found some problems:')
                for line in err.splitlines():
                    self.log.warning('[McStas] %s', line)
        except OSError as e:
            self.log.error('Execution failed: %s', e)
        # BUG FIX: if Popen itself failed, self._process is still None and
        # the unconditional wait() raised AttributeError; guard it.
        if self._process is not None:
            self._process.wait()
        self._process = None

    def _readpsd(self, ignore_error=False):
        """Parse the McStas data file into self._buf.

        :param ignore_error: when True (LIVE readout) missing/partial files
            are tolerated silently.
        """
        try:
            with open(path.join(self._workdir, self.mcstasdir,
                                self.mcstasfile), 'r') as f:
                lines = f.readlines()[-3 * (self.size[0] + 1):]
            if lines[0].startswith('# Data') and self.mcstasfile in lines[0]:
                self._buf = (np.loadtxt(lines[1:self.size[0] + 1],
                                        dtype=np.float32)
                             * self.ci).astype(np.uint32)
                self.readresult = [self._buf.sum()]
            elif not ignore_error:
                raise IOError('Did not find start line: %s' % lines[0])
        except IOError:
            if not ignore_error:
                self.log.exception('Could not read result file')
class Run:
    """Class to handle processes.

    :ivar cmds: The ``cmds`` argument passed to the __init__ method
        (a command line passed in a list, or a list of command lines passed
        as a list of list).
    :ivar status: The exit status. As the exit status is only meaningful
        after the process has exited, its initial value is None. When a
        problem running the command is detected and a process does not get
        created, its value gets set to the special value 127.
    :ivar raw_out: process standard output as bytes (if instantiated with
        output = PIPE). Use self.out to get a decoded string.
    :ivar raw_err: same as raw_out but for standard error.
    :ivar pid: PID. Set to -1 if the command failed to run.
    """

    def __init__(
        self,
        cmds: AnyCmdLine,
        cwd: Optional[str] = None,
        output: STDOUT_VALUE | DEVNULL_VALUE | PIPE_VALUE | str | IO | None = PIPE,
        error: STDOUT_VALUE | DEVNULL_VALUE | PIPE_VALUE | str | IO | None = STDOUT,
        input: DEVNULL_VALUE | PIPE_VALUE | str | IO | None = None,  # noqa: A002
        bg: bool = False,
        timeout: Optional[int] = None,
        env: Optional[dict] = None,
        set_sigpipe: bool = True,
        parse_shebang: bool = False,
        ignore_environ: bool = True,
    ) -> None:
        """Spawn a process.

        :param cmds: two possibilities:
            1) a command line: a tool name and its arguments, passed
            in a list. e.g. ['ls', '-a', '.']
            2) a list of command lines (as defined in (1)): the
            different commands will be piped. This means that
            [['ps', '-a'], ['grep', 'vxsim']] will be equivalent to
            the system command line 'ps -a | grep vxsim'.
        :param cwd: directory in which the process should be executed (string
            or None). If None then current directory is used
        :param output: can be PIPE (default), a filename string, a fd on an
            already opened file, a python file object or None (for stdout).
        :param error: same as output or STDOUT, which indicates that the
            stderr data from the applications should be captured into the
            same file handle as for stdout.
        :param input: same as output
        :param bg: if True then run in background
        :param timeout: limit execution time (in seconds), None means
            unlimited
        :param env: dictionary for environment variables (e.g. os.environ)
        :param set_sigpipe: reset SIGPIPE handler to default value
        :param parse_shebang: take the #! interpreter line into account
        :param ignore_environ: Applies only when env parameter is not None.
            When set to True (the default), the only environment variables
            passed to the program are the ones provided by the env parameter.
            Otherwise, the environment passed to the program consists of the
            environment variables currently defined (os.environ) augmented by
            the ones provided in env.

        :raise OSError: when trying to execute a non-existent file.

        If you specify a filename for output or stderr then file content is
        reseted (equiv. to > in shell). If you prepend the filename with '+'
        then the file will be opened in append mode (equiv. to >> in shell)
        If you prepend the input with '|', then the content of input string
        will be used for process stdin.
        """

        def add_interpreter_command(cmd_line: CmdLine) -> CmdLine:
            """Add the interpreter defined in the #! line to cmd_line.

            If the #! line cannot be parsed, just return the cmd_line
            unchanged.

            On windows, /usr/bin/env will be ignored to avoid a dependency on
            cygwin and /bin/bash & /bin/sh are replaced by $SHELL if defined.

            :param cmd_line: command line
            """
            if not parse_shebang:
                # nothing to do
                return cmd_line
            prog = which(cmd_line[0], default=None)
            if prog is None:
                # Not found. Do not modify the command line
                return cmd_line

            with open(prog) as f:
                try:
                    header = f.read()[0:2]
                except UnicodeDecodeError:
                    # unknown header - cannot decode the first two bytes
                    return cmd_line
                if header != "#!":
                    # Unknown header
                    return cmd_line

                # Header found, get the interpreter command in the first line
                f.seek(0)
                line = f.readline()
                interpreter_cmds = [
                    word.strip() for word in line[line.find("!") + 1:].split()
                ]
                # Pass the program path to the interpreter
                if len(cmd_line) > 1:
                    cmd_line = [prog] + list(cmd_line[1:])
                else:
                    cmd_line = [prog]

                if sys.platform == "win32":  # unix: no cover
                    if interpreter_cmds[0] == "/usr/bin/env":
                        # On windows be sure that PATH is taken into account
                        # by using which. In some cases involving python
                        # interpreter, the python interpreter used to run
                        # this module has been used rather than the first one
                        # on the path.
                        interpreter_cmds[1] = which(
                            interpreter_cmds[1], default=interpreter_cmds[1]
                        )
                        return interpreter_cmds[1:] + cmd_line
                    elif (
                        interpreter_cmds[0] in ("/bin/bash", "/bin/sh")
                        and "SHELL" in os.environ
                    ):
                        return [os.environ["SHELL"]] + cmd_line
                return interpreter_cmds + cmd_line

        # First resolve output, error and input
        self.input_file = File(input, "r")
        self.output_file = File(output, "w")
        self.error_file = File(error, "w")

        self.status: Optional[int] = None
        self.raw_out = b""
        self.raw_err = b""
        self.cmds = []

        if env is not None:
            if ignore_environ:
                if sys.platform == "win32":
                    # On Windows not all environment variables can be
                    # discarded. At least SYSTEMDRIVE, SYSTEMROOT should be
                    # set. In order to be portable propagate their value in
                    # case the user does not pass them in env when
                    # ignore_environ is set to True.
                    tmp = {}
                    for var in ("SYSTEMDRIVE", "SYSTEMROOT"):
                        if var not in env and var in os.environ:
                            tmp[var] = os.environ[var]
                    tmp.update(env)
                    env = tmp
            else:
                # ignore_environ is False, so get a copy of the current
                # environment and update it with the env dictionary.
                tmp = os.environ.copy()
                tmp.update(env)
                env = tmp

        # When a timeout is requested, wrap the command with the external
        # "rlimit" helper which enforces the limit; fall back to no limit if
        # the helper binary is missing.
        rlimit_args = []
        if timeout is not None:
            rlimit = get_rlimit()
            if os.path.exists(rlimit):
                rlimit_args = [rlimit, "%d" % timeout]
            else:
                logger.warning("cannot find rlimit at %s", rlimit)
                rlimit_args = []

        try:
            self.cmds = [
                add_interpreter_command(c) for c in to_cmd_lines(cmds)
            ]
            self.cmds[0] = rlimit_args + list(self.cmds[0])

            cmdlogger.debug(
                "Run: cd %s; %s",
                cwd if cwd is not None else os.getcwd(),
                self.command_line_image(),
            )

            if len(self.cmds) == 1:
                # Single command line case
                popen_args = {
                    "stdin": self.input_file.fd,
                    "stdout": self.output_file.fd,
                    "stderr": self.error_file.fd,
                    "cwd": cwd,
                    "env": env,
                    "universal_newlines": False,
                }

                if sys.platform != "win32" and set_sigpipe:
                    # windows: no cover
                    # preexec_fn is not supported on windows
                    popen_args["preexec_fn"] = subprocess_setup  # type: ignore

                if sys.platform == "win32":
                    popen_args[
                        "creationflags"] = subprocess.CREATE_NEW_PROCESS_GROUP

                self.internal = Popen(self.cmds[0], **popen_args)

            else:
                # Pipeline case: chain each process' stdout into the next
                # process' stdin.
                runs: list[subprocess.Popen] = []
                for index, cmd in enumerate(self.cmds):
                    if index == 0:
                        stdin: int | IO[Any] = self.input_file.fd
                    else:
                        previous_stdout = runs[index - 1].stdout
                        assert previous_stdout is not None
                        stdin = previous_stdout

                    # When connecting two processes using a Pipe don't use
                    # universal_newlines mode. Indeed commands transmitting
                    # binary data between them will crash
                    # (e.g. gzip -dc foo.txt | tar -xf -)
                    if index == len(self.cmds) - 1:
                        stdout = self.output_file.fd
                    else:
                        stdout = subprocess.PIPE

                    popen_args = {
                        "stdin": stdin,
                        "stdout": stdout,
                        "stderr": self.error_file.fd,
                        "cwd": cwd,
                        "env": env,
                        "universal_newlines": False,
                    }

                    if sys.platform != "win32" and set_sigpipe:
                        # windows: no cover
                        # preexec_fn is not supported on windows
                        popen_args[
                            "preexec_fn"] = subprocess_setup  # type: ignore

                    if sys.platform == "win32":
                        popen_args[
                            "creationflags"
                        ] = subprocess.CREATE_NEW_PROCESS_GROUP

                    try:
                        runs.append(Popen(cmd, **popen_args))
                    except OSError:
                        logger.error("error when spawning %s", cmd)
                        # We have an error (e.g. file not found), try to kill
                        # all processes already started.
                        for p in runs:
                            p.terminate()
                        raise

                # The pipeline is represented by its last process.
                self.internal = runs[-1]

        except Exception as e:  # defensive code
            self.__error(e, self.cmds)
            raise

        self.pid = self.internal.pid

        if not bg:
            self.wait()

    @property
    def out(self) -> str:
        """Process output as string.

        Attempt is done to decode as utf-8 the output. If the output is not
        in utf-8 a string representation will be returned
        (see e3.text.bytes_as_str).
        """
        return bytes_as_str(self.raw_out)

    @property
    def err(self) -> str:
        """Process error as string.

        Attempt is done to decode as utf-8 the output. If the output is not
        in utf-8 a string representation will be returned
        (see e3.text.bytes_as_str).
        """
        return bytes_as_str(self.raw_err)

    def command_line_image(self) -> str:
        """Get shell command line image of the spawned command(s).

        This is just a convenient wrapper around the function of the same
        name.
        """
        return command_line_image(self.cmds)

    def close_files(self) -> None:
        """Close all file descriptors."""
        self.output_file.close()
        self.error_file.close()
        self.input_file.close()

    def __error(self, error: Exception, cmds: list[CmdLine]) -> None:
        """Log the error and close all files.

        NOTE(review): unlike what an older docstring claimed, this method
        does NOT set pid to -1 / status to 127; it only closes files, logs,
        and raises OSError when one of the executables cannot be found.
        """
        self.close_files()
        logger.error(error)

        def not_found(path: str) -> NoReturn:
            """Raise OSError.

            :param path: path of the executable
            """
            logger.error("%s not found", path)
            e3.log.debug("PATH=%s", os.environ["PATH"])
            raise OSError(
                errno.ENOENT, f"No such file or directory, {path} not found"
            )

        # Try to send an helpful message if one of the executable has not
        # been found.
        for cmd in cmds:
            if which(cmd[0], default=None) is None:
                not_found(cmd[0])

    def wait(self) -> int:
        """Wait until process ends and return its status.

        :return: exit code of the process
        """
        if self.status is not None:
            # Wait has already been called
            return self.status

        # If there is no pipe in the loop then just do a wait. Otherwise
        # in order to avoid blocked processes due to full pipes, use
        # communicate.
        if (
            self.output_file.fd != subprocess.PIPE
            and self.error_file.fd != subprocess.PIPE
            and self.input_file.fd != subprocess.PIPE
        ):
            self.status = self.internal.wait()
        else:
            tmp_input: Optional[str | bytes] = None
            if self.input_file.fd == subprocess.PIPE:
                tmp_input = self.input_file.get_command()
                if isinstance(tmp_input, str):
                    tmp_input = tmp_input.encode("utf-8")

            (self.raw_out, self.raw_err) = self.internal.communicate(
                tmp_input)
            self.status = self.internal.returncode

        self.close_files()
        return self.status

    def poll(self) -> Optional[int]:
        """Check the process status and set self.status if available.

        This method checks whether the underlying process has exited
        or not. If it hasn't, then it just returns None immediately.
        Otherwise, it stores the process' exit code in self.status
        and then returns it.

        :return: None if the process is still alive; otherwise, returns the
          process exit status.
        """
        if self.status is not None:
            # Process is already terminated and wait been called
            return self.status

        result = self.internal.poll()

        if result is not None:
            # Process is finished, call wait to finalize it (closing handles,
            # ...)
            return self.wait()
        else:
            return None

    def kill(self, recursive: bool = True, timeout: int = 3) -> None:
        """Kill the process.

        :param recursive: if True, try to kill the complete process tree
        :param timeout: wait timeout (in seconds) after sending the kill
            signal (when recursive=True)
        """
        if recursive:
            kill_process_tree(self.internal, timeout=timeout)
        else:
            self.internal.kill()

    def interrupt(self) -> None:
        """Send SIGINT to the process, kill on Windows."""
        if sys.platform == "win32":
            self.kill()  # Ctrl-C event is unreliable on Windows
        else:
            self.internal.send_signal(signal.SIGINT)

    def is_running(self) -> bool:
        """Check whether the process is running."""
        if psutil is None:  # defensive code
            # psutil not imported, use our is_running function
            return is_running(self.pid)
        else:
            return self.internal.is_running()

    def children(self) -> list[Any]:
        """Return list of child processes (using psutil)."""
        if psutil is None:  # defensive code
            raise NotImplementedError("Run.children() require psutil")
        return self.internal.children()
class FastDaemon(_PeriodicTaskDaemon):
    """
    Danube Cloud internal fast daemon - runs two threads for monitoring VM
    status changes.
    """
    label = 'FastDaemon'
    # UUID of the compute node this daemon runs on (set in start()).
    node_uuid = None
    # Queue connecting the monitor thread (producer) and the dispatcher
    # thread (consumer).
    vm_status_queue = None
    # Popen handle of the external sysevent watcher process.
    vm_status_watcher = None
    vm_status_dispatcher_thread = None
    vm_status_monitor_thread = None
    # sysevent command emitting zone status changes as JSON lines.
    SYSEVENT = ('sysevent', '-j', '-c', 'com.sun:zones:status', 'status')
    # Mapping from sysevent zone state to Danube Cloud VM state.
    VM_STATUS = frozendict({'running': 'running', 'uninitialized': 'stopped'})

    def __init__(self, parent, **kwargs):
        hostname = parent.hostname
        self._conf = parent.app.conf
        # Only enabled on fast-queue workers when the config switch is on.
        self.enabled = (self._conf.ERIGONES_FAST_DAEMON_ENABLED and
                        hostname.startswith(Q_FAST + '@'))
        super(FastDaemon, self).__init__(parent, **kwargs)

        if self.enabled:
            self._periodic_tasks.append(self._vm_status_thread_check)

    def _vm_status_dispatcher(self):
        """THREAD: Reads VM status changes from queue and creates a
        vm_status_event_cb task for every status change"""
        # Circular imports
        from que.utils import task_id_from_string, send_task_forever

        vm_status_task = self._conf.ERIGONES_VM_STATUS_TASK
        task_user = self._conf.ERIGONES_TASK_USER
        queue = self.vm_status_queue
        logger.info('Emitting VM status changes on node %s via %s',
                    self.node_uuid, vm_status_task)

        while True:
            # Blocks until the monitor thread puts a new event.
            event = queue.get()
            task_id = task_id_from_string(task_user)
            logger.info('Creating task %s for event: "%s"', task_id, event)
            # Create VM status task
            send_task_forever(self.label, vm_status_task,
                              args=(event, task_id), queue=Q_MGMT,
                              expires=None, task_id=task_id)

    def _vm_status_monitor(self, sysevent_stdout):
        """THREAD: Reads line by line from sysevent process and puts relevant
        VM status changes into queue"""
        vm_status = self.VM_STATUS
        node_uuid = self.node_uuid
        queue = self.vm_status_queue
        logger.info('Monitoring VM status changes on node %s', node_uuid)

        # NOTE(review): the '' sentinel assumes text-mode lines; under
        # Python 3 this pipe yields bytes (b'' at EOF) - confirm the runtime
        # is Python 2 or that the stream is wrapped in text mode.
        for line in iter(sysevent_stdout.readline, ''):
            line = line.strip()
            try:
                event = json.loads(line)['data']
            except Exception as e:
                logger.critical('Could not parse (%s), sysevent line: "%s"',
                                e, line)
                continue
            try:
                # Only states listed in VM_STATUS are relevant; everything
                # else is ignored.
                state = vm_status[event.get('newstate')]
            except KeyError:
                logger.debug('Ignoring event "%s"', event)
                continue
            event['node_uuid'] = node_uuid
            event['state'] = state
            logger.info('Got new event: "%s"', event)
            queue.put(event)

    def _vm_status_thread_check(self):
        """Check if both vm_status threads are alive. Run periodically."""
        if not self._stopping:
            if (self.vm_status_monitor_thread and
                    not self.vm_status_monitor_thread.is_alive()):
                err = ('VM status monitoring thread is not running - '
                       'terminating %s!' % self.label)
                logger.critical(err)
                raise SystemExit(err)
            if (self.vm_status_dispatcher_thread and
                    not self.vm_status_dispatcher_thread.is_alive()):
                err = ('VM status dispatcher thread is not running - '
                       'terminating %s!' % self.label)
                logger.critical(err)
                raise SystemExit(err)

    def _set_node_uuid(self):
        """Fetch compute node's UUID"""
        # Circular imports
        from que.utils import fetch_node_uuid
        from que.exceptions import NodeError

        try:
            self.node_uuid = fetch_node_uuid()
        except NodeError as exc:
            err = str(exc)
            logger.critical(err)
            raise SystemExit(err)

    def start(self, parent):
        """Spawn the sysevent watcher process and both worker threads."""
        self._set_node_uuid()
        super(FastDaemon, self).start(parent)
        self.vm_status_queue = Queue()
        # preexec_fn=os.setsid puts the watcher in its own session so it is
        # not killed together with this process group.
        self.vm_status_watcher = Popen(self.SYSEVENT, bufsize=0,
                                       close_fds=True, stdout=PIPE,
                                       stderr=STDOUT, preexec_fn=os.setsid)
        self.vm_status_monitor_thread = Thread(
            target=self._vm_status_monitor, name='VMStatusMonitor',
            args=(self.vm_status_watcher.stdout, ))
        self.vm_status_monitor_thread.daemon = True
        self.vm_status_monitor_thread.start()
        self.vm_status_dispatcher_thread = Thread(
            target=self._vm_status_dispatcher, name='VMStatusDispatcher')
        self.vm_status_dispatcher_thread.daemon = True
        self.vm_status_dispatcher_thread.start()

    def stop(self, parent):
        """Terminate the sysevent watcher process (worker threads are
        daemonic and die with the process)."""
        super(FastDaemon, self).stop(parent)

        if self.vm_status_watcher:
            try:
                self.vm_status_watcher.terminate()
            except NoSuchProcess:
                pass
            else:
                # Reap the terminated watcher to avoid a zombie.
                self.vm_status_watcher.wait()
def save_file(self, cover, directory, getcover, cloud, cover_size):
    """Prepare a cover thumbnail and the ASIN-patched e-book, then save both.

    Behavior branches on self.mode: 'reader' saves to a connected Kindle
    (directly or via SSH/pscp), 'pc' saves next to the source file.

    :param cover: path to an external cover image, or '' to search/extract one
    :param directory: target subdirectory under documents/ on the device, or None
    :param getcover: 'search' to use get_cover_image(), anything else to extract
                     the thumbnail from the book file itself
    :param cloud: 'no' for sideloaded (EBOK) handling; otherwise cloud/PDOC handling
    :param cover_size: NOTE(review): parameter appears unused - the code reads
                       self.cover_size ('pw'/'kv'/other) instead; confirm callers
    :raises OSError: on cover load/extract failure, upload failure, book
                     modification failure, or insufficient device space
    """
    if (self.mode == 'reader'):
        # need.cover means that directory system/thumbnails has been found on the Kindle (this is Kindle PW)
        if self.kindle.need_cover:
            if cover != '':
                # External cover file supplied on the command line
                try:
                    ready_cover = Image.open(cover)
                    # ready_cover.thumbnail((217, 330), Image.ANTIALIAS)
                    # Resize per device class: 'pw' 217x330, 'kv' 303x455, else 333x500
                    if self.cover_size == 'pw':
                        ready_cover = ready_cover.resize((217, 330), Image.ANTIALIAS)
                    else:
                        if self.cover_size == 'kv':
                            ready_cover = ready_cover.resize((303, 455), Image.ANTIALIAS)
                        else:
                            ready_cover = ready_cover.resize((333, 500), Image.ANTIALIAS)
                    # Kindle thumbnails are grayscale
                    ready_cover = ready_cover.convert('L')
                    self.txt2img(self.title, self.seqnumber, ready_cover, self.position)
                except:
                    raise OSError('Failed to load custom cover!')
            else:
                if getcover == 'search':
                    # search for cover
                    try:
                        ready_cover = self.get_cover_image()
                    except:
                        if (self.write_thumb):
                            # raise OSError('Failed to extract cover!')
                            # Fall back to the bundled default banner; locate the
                            # application directory (frozen exe vs. plain script)
                            try:
                                if sys.frozen or sys.importers:
                                    butler_dir = os.path.dirname(sys.executable)
                            except AttributeError:
                                butler_dir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
                            path_to_cover = butler_dir + "\\default_banner.jpeg"
                            ready_cover = Image.open(path_to_cover)
                            if self.cover_size == 'pw':
                                ready_cover = ready_cover.resize((217, 330), Image.ANTIALIAS)
                            else:
                                if self.cover_size == 'kv':
                                    ready_cover = ready_cover.resize((303, 455), Image.ANTIALIAS)
                                else:
                                    ready_cover = ready_cover.resize((333, 500), Image.ANTIALIAS)
                            ready_cover = ready_cover.convert('L')
                            self.txt2defaultcover(self.title, self.author, self.seqnumber, ready_cover, self.position)
                else:
                    # extract cover
                    try:
                        # ready_cover = self.get_cover_image()
                        # extractThumbnail writes into temp dirs in the CWD
                        extractcover_34.extractThumbnail(self.path, "tmpdir.$$$")
                        shutil.rmtree("tmpdir.$$$")
                        coverfile = 'images.$$$' + '\\' + self.infilename + '.cover' + '.jpeg'
                        ready_cover = Image.open(coverfile)
                        ready_cover = ready_cover.resize((217, 330), Image.ANTIALIAS)
                        ready_cover = ready_cover.convert('L')
                        self.txt2img(self.title, self.seqnumber, ready_cover, self.position)
                        shutil.rmtree("images.$$$")
                    except:
                        if (self.write_thumb):
                            # shutil.rmtree("images.$$$")
                            # raise OSError('Failed to extract cover!')
                            # Same default-banner fallback as the 'search' branch
                            try:
                                if sys.frozen or sys.importers:
                                    butler_dir = os.path.dirname(sys.executable)
                            except AttributeError:
                                butler_dir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
                            path_to_cover = butler_dir + "\\default_banner.jpeg"
                            ready_cover = Image.open(path_to_cover)
                            if self.cover_size == 'pw':
                                ready_cover = ready_cover.resize((217, 330), Image.ANTIALIAS)
                            else:
                                if self.cover_size == 'kv':
                                    ready_cover = ready_cover.resize((303, 455), Image.ANTIALIAS)
                                else:
                                    ready_cover = ready_cover.resize((333, 500), Image.ANTIALIAS)
                            ready_cover = ready_cover.convert('L')
                            self.txt2defaultcover(self.title, self.author, self.seqnumber, ready_cover, self.position)
            if self.kindle.ssh:
                # Device reachable over SSH: push thumbnail with pscp
                tmp_cover = os.path.join(gettempdir(), 'KindleButlerCover')
                ready_cover.save(tmp_cover, 'JPEG')
                ssh = Popen('"' + self.config['SSH']['PSCPPath'] + '" "' + tmp_cover + '" root@' + self.kindle.path +
                            ':/mnt/us/system/thumbnails/thumbnail_' + self.asin + '_EBOK_portrait.jpg',
                            stdout=PIPE, stderr=STDOUT, shell=True)
                ssh_check = ssh.wait()
                if ssh_check != 0:
                    raise OSError('Failed to upload cover!')
                os.remove(tmp_cover)
            else:
                if (self.write_thumb):
                    if (cloud == 'no'):
                        # Sideloaded book: EBOK thumbnail named after self.asin
                        ready_cover.save(os.path.join(self.kindle.path, 'system', 'thumbnails',
                                                      'thumbnail_' + self.asin + '_EBOK_portrait.jpg'), 'JPEG')
                    else:
                        # get ASIN from file
                        section = KindleUnpack.Sectionizer(self.path)
                        mhlst = [KindleUnpack.MobiHeader(section, 0)]
                        mh = mhlst[0]
                        metadata = mh.getmetadata()
                        assa = metadata.get('ASIN')
                        assassin = assa[0].decode("utf-8")
                        if assassin == None:
                            assassin = 'None'
                        ready_cover.save(os.path.join(self.kindle.path, 'system', 'thumbnails',
                                                      'thumbnail_' + assassin + '_PDOC_portrait.jpg'), 'JPEG')
    # for all modes prepare processed file
    try:
        # noinspection PyArgumentList
        # Rewrite the book's metadata so it carries the desired ASIN
        ready_file = DualMetaFix.DualMobiMetaFix(self.path, bytes(self.asin, 'UTF-8'), cloud)
    except:
        raise OSError('E-Book modification failed!')
    # getresult() presumably returns (file-like buffer, size in bytes) - TODO confirm
    ready_file, source_size = ready_file.getresult()
    # save processed file to reader
    if (self.mode == 'reader'):
        if source_size < self.kindle.get_free_space():
            if self.kindle.ssh:
                # Upload over SSH: write to a temp file, push with pscp, then
                # poke powerd over dbus so the device refreshes its library
                tmp_book = os.path.join(gettempdir(), os.path.basename(self.path))
                open(tmp_book, 'wb').write(ready_file.getvalue())
                ssh = Popen('"' + self.config['SSH']['PSCPPath'] + '" "' + tmp_book + '" root@' + self.kindle.path +
                            ':/mnt/us/documents/', stdout=PIPE, stderr=STDOUT, shell=True)
                # Parse pscp progress output (CR-separated, "… | NN%" fields);
                # NOTE(review): inside_line is computed but never used - likely a
                # leftover from progress reporting
                for line in ssh.stdout:
                    for inside_line in line.split(b'\r'):
                        if b'|' in inside_line:
                            inside_line = inside_line.decode('utf-8').split(' | ')[-1].rstrip()[:-1]
                ssh_check = ssh.wait()
                os.remove(tmp_book)
                if ssh_check != 0:
                    raise OSError('Failed to upload E-Book!')
                Popen('"' + self.config['SSH']['PLinkPath'] + '" root@' + self.kindle.path +
                      ' "dbus-send --system /default com.lab126.powerd.resuming int32:1"',
                      stdout=PIPE, stderr=STDOUT, shell=True)
            else:
                if cloud == 'no':
                    # USB-mounted device: stream the processed book in 32 KiB chunks
                    saved = 0
                    if directory == None:
                        target = open(os.path.join(self.kindle.path, 'documents', os.path.basename(self.path)), 'wb')
                    else:
                        new_dir = self.kindle.path + 'documents' + '\\' + directory
                        self.make_sure_path_exists(new_dir)
                        target = open(new_dir + '\\' + os.path.basename(self.path), 'wb')
                    # target = open(os.path.join(self.kindle.path, 'documents' + '\\directory', os.path.basename(self.path)), 'wb')
                    while True:
                        chunk = ready_file.read(32768)
                        if not chunk:
                            break
                        target.write(chunk)
                        saved += len(chunk)
        else:
            raise OSError('Not enough space on target device!')
    # save cover and processed book to pc
    if (self.mode == 'pc'):
        # prepare and save cover
        # if key -a asin then self.write_thumb=False and no need to save cover
        if cover != '':
            # means that cover was imported from external file
            try:
                ready_cover = Image.open(cover)
                if self.cover_size == 'pw':
                    ready_cover = ready_cover.resize((217, 330), Image.ANTIALIAS)
                else:
                    if self.cover_size == 'kv':
                        ready_cover = ready_cover.resize((303, 455), Image.ANTIALIAS)
                    else:
                        ready_cover = ready_cover.resize((333, 500), Image.ANTIALIAS)
                ready_cover = ready_cover.convert('L')
                self.txt2img(self.title, self.seqnumber, ready_cover, self.position)
            except:
                raise OSError('Failed to load custom cover!')
        else:
            # search/extract cover from inside the book
            if (getcover == 'search'):
                # search for cover
                try:
                    ready_cover = self.get_cover_image()
                except:
                    if (self.write_thumb):
                        # raise OSError('Failed to extract cover!')
                        # Remember the failure so the images.$$$ cleanup below is skipped
                        self.no_cover = True
                        try:
                            if sys.frozen or sys.importers:
                                butler_dir = os.path.dirname(sys.executable)
                        except AttributeError:
                            butler_dir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
                        path_to_cover = butler_dir + "\\default_banner.jpeg"
                        ready_cover = Image.open(path_to_cover)
                        if self.cover_size == 'pw':
                            ready_cover = ready_cover.resize((217, 330), Image.ANTIALIAS)
                        else:
                            if self.cover_size == 'kv':
                                ready_cover = ready_cover.resize((303, 455), Image.ANTIALIAS)
                            else:
                                ready_cover = ready_cover.resize((333, 500), Image.ANTIALIAS)
                        ready_cover = ready_cover.convert('L')
                        self.txt2defaultcover(self.title, self.author, self.seqnumber, ready_cover, self.position)
            else:
                # extract the cover from the book
                try:
                    # ready_cover = self.get_cover_image()
                    extractcover_34.extractThumbnail(self.path, "tmpdir.$$$")
                    shutil.rmtree("tmpdir.$$$")
                    coverfile = 'images.$$$' + '\\' + self.infilename + '.cover' + '.jpeg'
                    ready_cover = Image.open(coverfile)
                    if self.cover_size == 'pw':
                        ready_cover = ready_cover.resize((217, 330), Image.ANTIALIAS)
                    else:
                        if self.cover_size == 'kv':
                            ready_cover = ready_cover.resize((303, 455), Image.ANTIALIAS)
                        else:
                            ready_cover = ready_cover.resize((333, 500), Image.ANTIALIAS)
                    ready_cover = ready_cover.convert('L')
                    self.txt2img(self.title, self.seqnumber, ready_cover, self.position)
                except:
                    if (self.write_thumb):
                        # shutil.rmtree("images.$$$")
                        # raise OSError('Failed to extract cover!')
                        self.no_cover = True
                        try:
                            if sys.frozen or sys.importers:
                                butler_dir = os.path.dirname(sys.executable)
                        except AttributeError:
                            butler_dir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
                        path_to_cover = butler_dir + "\\default_banner.jpeg"
                        ready_cover = Image.open(path_to_cover)
                        if self.cover_size == 'pw':
                            ready_cover = ready_cover.resize((217, 330), Image.ANTIALIAS)
                        else:
                            if self.cover_size == 'kv':
                                ready_cover = ready_cover.resize((303, 455), Image.ANTIALIAS)
                            else:
                                ready_cover = ready_cover.resize((333, 500), Image.ANTIALIAS)
                        ready_cover = ready_cover.convert('L')
                        self.txt2defaultcover(self.title, self.author, self.seqnumber, ready_cover, self.position)
        if (self.write_thumb):
            if cloud == 'no':
                # Thumbnail is written into the current working directory in pc mode
                ready_cover.save('thumbnail_' + self.asin + '_EBOK_portrait.jpg', 'JPEG')
            else:
                # get ASIN from file
                section = KindleUnpack.Sectionizer(self.path)
                mhlst = [KindleUnpack.MobiHeader(section, 0)]
                mh = mhlst[0]
                metadata = mh.getmetadata()
                assa = metadata.get('ASIN')
                assassin = assa[0].decode("utf-8")
                if assassin == None:
                    assassin = 'None'
                ready_cover.save('thumbnail_' + assassin + '_EBOK_portrait.jpg', 'JPEG')
        # Clean up the extraction scratch dir only when a cover was actually extracted
        if getcover != 'search' and cover == '' and self.no_cover == False:
            shutil.rmtree("images.$$$")
        if cloud == 'no':
            # save processed file
            saved = 0
            # ready_file.seek(0)
            target = open(self.infilename + '.processed' + self.infileext, 'wb')
            while True:
                chunk = ready_file.read(32768)
                if not chunk:
                    break
                target.write(chunk)
                saved += len(chunk)