def makeMOBIWorker(item):
    """Convert one EPUB file to MOBI with the external *kindlegen* tool.

    Parameters
    ----------
    item : sequence
        One-element sequence whose first entry is the EPUB file path.

    Returns
    -------
    list
        ``[kindlegenErrorCode, kindlegenError, item]`` — code 0 on success,
        1 for a generic kindlegen/KCC error (message in ``kindlegenError``),
        23026 when the EPUB is too big for kindlegen.
    """
    item = item[0]
    kindlegenErrorCode = 0
    kindlegenError = ''
    try:
        # kindlegen cannot handle sources >= 600 MiB (629145600 bytes).
        if os.path.getsize(item) < 629145600:
            # BUGFIX: build the command as an argument list with shell=False.
            # The previous '... "' + item + '"' string with shell=True broke
            # (and was shell-injectable) for paths containing quotes or other
            # shell metacharacters.
            output = Popen(['kindlegen', '-dont_append_source', '-locale', 'en', item],
                           stdout=PIPE, stderr=STDOUT, stdin=PIPE, shell=False)
            for line in output.stdout:
                line = line.decode('utf-8')
                # ERROR: Generic error
                if "Error(" in line:
                    kindlegenErrorCode = 1
                    kindlegenError = line
                # ERROR: EPUB too big
                if ":E23026:" in line:
                    kindlegenErrorCode = 23026
                if kindlegenErrorCode > 0:
                    break
                if ":I1036: Mobi file built successfully" in line:
                    # Success line seen: kindlegen is done, cut it off early.
                    output.terminate()
        else:
            # ERROR: EPUB too big
            kindlegenErrorCode = 23026
        return [kindlegenErrorCode, kindlegenError, item]
    except Exception as err:
        # ERROR: KCC unknown generic error (e.g. missing/unreadable file)
        kindlegenErrorCode = 1
        kindlegenError = format(err)
        return [kindlegenErrorCode, kindlegenError, item]
def test_get_info(self):
    """Spawn a short-lived shell worker and validate the info mapping for it."""
    proc = Popen(["python -c 'import time;time.sleep(5)'"], shell=True)
    try:
        details = get_info(proc)
    finally:
        # Always reap the sleeper, even if get_info raised.
        proc.terminate()
    # pid is an integer; a freshly spawned process has default niceness.
    self.assertTrue(isinstance(details['pid'], int))
    self.assertEqual(details['nice'], 0)
def test_get_info(self):
    """Check get_info() reports a numeric pid and default niceness."""
    sleeper = Popen(["python -c 'import time;time.sleep(5)'"], shell=True)
    try:
        report = get_info(sleeper)
    finally:
        # Terminate regardless of whether get_info succeeded.
        sleeper.terminate()
    self.assertTrue(isinstance(report["pid"], int))
    self.assertEqual(report["nice"], 0)
def test_get_info(self):
    """Run a long-lived command ('top') and verify the info get_info returns."""
    child = Popen(['top'], shell=True)
    try:
        stats = get_info(child)
    finally:
        # Clean up the child even on failure inside get_info.
        child.terminate()
    self.assertTrue(isinstance(stats['pid'], int))
    self.assertEqual(stats['nice'], 0)
class ROSWebVis:
    """Runs the ROS web-visualization helper script as a child process.

    Parameters are the ROS port, the websocket port, and a caller-supplied
    uid; the ports are forwarded to ``start_ros.py`` on the command line.
    """

    def __init__(self, ros_port, ws_port, uid):
        self.ros_port = ros_port
        self.ws_port = ws_port
        self.uid = uid
        launch_cmd = 'python start_ros.py {} {}'.format(ros_port, ws_port)
        self.vis = Popen(shlex.split(launch_cmd))

    def terminate(self):
        """Stop the visualization child process."""
        self.vis.terminate()
def test_get_info(self):
    """get_info on a sleeping python child: integer pid, platform-default niceness."""
    child = Popen(["python", "-c", SLEEP % 5])
    try:
        stats = get_info(child)
    finally:
        # Reap the sleeper whether or not get_info raised.
        child.terminate()
    self.assertTrue(isinstance(stats['pid'], int))
    if IS_WINDOWS:
        # Windows expresses priority as a class constant, not a nice value.
        self.assertEqual(stats['nice'], psutil.NORMAL_PRIORITY_CLASS)
    else:
        self.assertEqual(stats['nice'], 0)
def test_get_info(self):
    """Verify pid type and default process priority reported by get_info."""
    sleeper = Popen(["python", "-c", SLEEP % 5])
    try:
        info_map = get_info(sleeper)
    finally:
        sleeper.terminate()
    self.assertTrue(isinstance(info_map['pid'], int))
    # Niceness semantics differ per platform: priority class on Windows,
    # classic nice value elsewhere.
    if IS_WINDOWS:
        self.assertEqual(info_map['nice'], psutil.NORMAL_PRIORITY_CLASS)
    else:
        self.assertEqual(info_map['nice'], 0)
class Process(object):
    """Wraps a process.

    Options:

    - **wid**: the process unique identifier. This value will be used to
      replace the *$WID* string in the command line if present.

    - **cmd**: the command to run. May contain *$WID*, which will be
      replaced by **wid**.

    - **args**: the arguments for the command to run. Can be a list or
      a string. If **args** is a string, it's splitted using
      :func:`shlex.split`. Defaults to None.

    - **executable**: When executable is given, the first item in
      the args sequence obtained from **cmd** is still treated by most
      programs as the command name, which can then be different from the
      actual executable name. It becomes the display name for the
      executing program in utilities such as **ps**.

    - **working_dir**: the working directory to run the command in. If
      not provided, will default to the current working directory.

    - **shell**: if *True*, will run the command in the shell
      environment. *False* by default. **warning: this is a
      security hazard**.

    - **uid**: if given, is the user id or name the command should run
      with. The current uid is the default.

    - **gid**: if given, is the group id or name the command should run
      with. The current gid is the default.

    - **env**: a mapping containing the environment variables the command
      will run with. Optional.

    - **rlimits**: a mapping containing rlimit names and values that will
      be set before the command runs.
    """
    def __init__(self, wid, cmd, args=None, working_dir=None, shell=False,
                 uid=None, gid=None, env=None, rlimits=None, executable=None):
        self.wid = wid
        if working_dir is None:
            self.working_dir = get_working_dir()
        else:
            self.working_dir = working_dir
        self.shell = shell
        self.env = env
        if rlimits is not None:
            self.rlimits = rlimits
        else:
            self.rlimits = {}
        # $WID substitution lets templated command lines embed the worker id.
        self.cmd = cmd.replace('$WID', str(self.wid))
        if uid is None:
            self.uid = None
        else:
            self.uid = to_uid(uid)
        if gid is None:
            self.gid = None
        else:
            self.gid = to_gid(gid)

        def preexec_fn():
            # Runs in the child between fork and exec: new session, rlimits,
            # then privilege drop (gid before uid, so setgid still works).
            os.setsid()
            for limit, value in self.rlimits.items():
                res = getattr(resource, 'RLIMIT_%s' % limit.upper(), None)
                if res is None:
                    raise ValueError('unknown rlimit "%s"' % limit)
                # TODO(petef): support hard/soft limits
                resource.setrlimit(res, (value, value))
            if self.gid:
                try:
                    os.setgid(self.gid)
                except OverflowError:
                    if not ctypes:
                        raise
                    # versions of python < 2.6.2 don't manage unsigned int for
                    # groups like on osx or fedora
                    os.setgid(-ctypes.c_int(-self.gid).value)
            if self.uid:
                os.setuid(self.uid)

        logger.debug('cmd: ' + cmd)
        logger.debug('args: ' + str(args))
        # Final argv is always shlex-split cmd, optionally followed by args.
        if args is not None:
            if isinstance(args, str):
                args_ = shlex.split(bytestring(args))
            else:
                args_ = args[:]
            args_ = shlex.split(bytestring(cmd)) + args_
        else:
            args_ = shlex.split(bytestring(cmd))

        logger.debug('Running %r' % ' '.join(args_))
        self._worker = Popen(args_, cwd=self.working_dir,
                             shell=self.shell, preexec_fn=preexec_fn,
                             env=self.env, close_fds=True, stdout=PIPE,
                             stderr=PIPE, executable=executable)
        self.started = time.time()

    @debuglog
    def poll(self):
        # Non-blocking liveness check; None while still running.
        return self._worker.poll()

    @debuglog
    def send_signal(self, sig):
        """Sends a signal **sig** to the process."""
        return self._worker.send_signal(sig)

    @debuglog
    def stop(self):
        """Terminate the process."""
        try:
            if self._worker.poll() is None:
                return self._worker.terminate()
        finally:
            # Close our ends of the pipes even if terminate() raised.
            self._worker.stderr.close()
            self._worker.stdout.close()

    def age(self):
        """Return the age of the process in seconds."""
        return time.time() - self.started

    def info(self):
        """Return process info.

        The info returned is a mapping with these keys:

        - **mem_info1**: Resident Set Size Memory in bytes (RSS)
        - **mem_info2**: Virtual Memory Size in bytes (VMS).
        - **cpu**: % of cpu usage.
        - **mem**: % of memory usage.
        - **ctime**: process CPU (user + system) time in seconds.
        - **pid**: process id.
        - **username**: user name that owns the process.
        - **nice**: process niceness (between -20 and 20)
        - **cmdline**: the command line the process was run with.
        """
        try:
            info = get_info(self._worker)
        except NoSuchProcess:
            # NOTE(review): returns a string here, a dict otherwise — callers
            # must handle both shapes.
            return "No such process (stopped?)"

        info["children"] = []
        for child in self._worker.get_children():
            info["children"].append(get_info(child))
        return info

    def children(self):
        """Return a list of children pids."""
        return [child.pid for child in self._worker.get_children()]

    def is_child(self, pid):
        """Return True is the given *pid* is a child of that process."""
        pids = [child.pid for child in self._worker.get_children()]
        if pid in pids:
            return True
        return False

    @debuglog
    def send_signal_child(self, pid, signum):
        """Send signal *signum* to child *pid*."""
        children = dict([(child.pid, child) \
                for child in self._worker.get_children()])
        children[pid].send_signal(signum)

    @debuglog
    def send_signal_children(self, signum):
        """Send signal *signum* to all children."""
        for child in self._worker.get_children():
            try:
                child.send_signal(signum)
            except OSError as e:
                # Child already gone (ESRCH) is fine; anything else is not.
                if e.errno != errno.ESRCH:
                    raise

    @property
    def status(self):
        """Return the process status as a constant

        - RUNNING
        - DEAD_OR_ZOMBIE
        - OTHER
        """
        try:
            if self._worker.status in (STATUS_ZOMBIE, STATUS_DEAD):
                return DEAD_OR_ZOMBIE
        except NoSuchProcess:
            return OTHER
        if self._worker.is_running():
            return RUNNING
        return OTHER

    @property
    def pid(self):
        """Return the *pid*"""
        return self._worker.pid

    @property
    def stdout(self):
        """Return the *stdout* stream"""
        return self._worker.stdout

    @property
    def stderr(self):
        """Return the *stderr* stream"""
        return self._worker.stderr
def cmd(command, user=None, input=None, cli_input=None, cli_output=False, communicate=True,
        timeout=None, fail=True, log=None, tries=1, delay_min=5, delay_max=10, **kwargs):
    """
    Calls the `command` and returns a dictionary with process, stdout, stderr, and the returncode.

    Returned returncode, stdout and stderr will be None if `communicate` is set to False.

    :param user: If set, this will use ``sudo -u <user> ...`` to execute `command` as `user`.
    :type user: unicode
    :param input: If set, sended to stdin (if `communicate` is True).
    :type input: unicode
    :param cli_input: If set, sended to stdin (no condition).
    :type cli_input: unicode
    :param cli_output: Set to True to output (in real-time) stdout to stdout and stderr to stderr.
    :type cli_output: bool
    :param fail: Set to False to avoid the exception `subprocess.CalledProcessError`.
    :type fail: bool
    :param log: A function to log/print details about what is executed/any failure, can be a logger.
    :type log: callable, logging.Logger
    :param communicate: Set to True to communicate with the process, this is a locking call
                        (if timeout is None).
    :type communicate: bool
    :param timeout: Time-out for the communication with the process, in seconds.
    :type timeout: float
    :param tries: How many times you want the command to be retried ?
    :type tries: int
    :param delay_min: Minimum delay to sleep after every attempt communicate must be True.
    :type delay: float, int
    :param delay_max: Maximum delay to sleep after every attempt communicate must be True.
    :type delay: float, int

    * Delay will be a random number in range (`delay_min`, `delay_max`)
    * Set kwargs with any argument of the :mod:`subprocess`.Popen constructor
      excepting stdin, stdout and stderr.
    """
    # convert log argument to logging functions
    log_debug = log_warning = log_exception = None
    if isinstance(log, logging.Logger):
        log_debug, log_warning, log_exception = log.debug, log.warning, log.exception
    elif hasattr(log, '__call__'):
        # Plain callable: use it for all three log levels.
        log_debug = log_warning = log_exception = log
    # create a list and a string of the arguments
    if isinstance(command, string_types):
        if user is not None:
            command = 'sudo -u {0} {1}'.format(user, command)
        args_list, args_string = shlex.split(to_bytes(command)), command
    else:
        if user is not None:
            command = ['sudo', '-u', user] + command
        # None entries are silently dropped from list-form commands.
        args_list = [to_bytes(a) for a in command if a is not None]
        args_string = ' '.join([to_unicode(a) for a in command if a is not None])
    # log the execution
    if log_debug:
        log_debug('Execute {0}{1}{2}'.format(
            '' if input is None else 'echo {0}|'.format(repr(input)),
            args_string,
            '' if cli_input is None else ' < {0}'.format(repr(cli_input))))
    for trial in xrange(tries):  # noqa
        # create the sub-process
        try:
            process = Popen(
                args_list,
                stdin=subprocess.PIPE,
                stdout=None if cli_output else subprocess.PIPE,
                stderr=None if cli_output else subprocess.PIPE, **kwargs)
        except OSError as e:
            # unable to execute the program (e.g. does not exist)
            if log_exception:
                log_exception(e)
            if fail:
                raise
            return {'process': None, 'stdout': '', 'stderr': e, 'returncode': 2}
        # write to stdin (answer to questions, ...)
        if cli_input is not None:
            process.stdin.write(to_bytes(cli_input))
            process.stdin.flush()
        # interact with the process and wait for the process to terminate
        if communicate:
            data = {}

            # thanks http://stackoverflow.com/questions/1191374/subprocess-with-timeout
            def communicate_with_timeout(data=None):
                # Runs in a helper thread so the main thread can enforce the timeout.
                data['stdout'], data['stderr'] = process.communicate(input=input)
            thread = threading.Thread(target=communicate_with_timeout, kwargs={'data': data})
            thread.start()
            thread.join(timeout=timeout)
            if thread.is_alive():
                # Timed out: kill the process, then wait for the helper thread to finish.
                try:
                    process.terminate()
                    thread.join()
                except OSError as e:
                    # Manage race condition with process that may terminate just after the call to
                    # thread.is_alive() !
                    if e.errno != errno.ESRCH:
                        raise
            # NOTE(review): if communicate() raised inside the helper thread, 'data'
            # may be empty here and this lookup raises KeyError — confirm intended.
            stdout, stderr = data['stdout'], data['stderr']
        else:
            # get a return code that may be None of course ...
            process.poll()
            stdout = stderr = None
        result = {
            'process': process,
            'stdout': stdout,
            'stderr': stderr,
            'returncode': process.returncode
        }
        if process.returncode == 0:
            # Success: stop retrying.
            break
        # failed attempt, may retry
        do_retry = trial < tries - 1
        delay = random.uniform(delay_min, delay_max)
        if log_warning:
            log_warning('Attempt {0} out of {1}: {2}'.format(
                trial+1, tries,
                'Will retry in {0} seconds'.format(delay) if do_retry else 'Failed'))
        # raise if this is the last try
        if fail and not do_retry:
            raise subprocess.CalledProcessError(process.returncode, args_string, stderr)
        if do_retry:
            time.sleep(delay)
    return result
class Process(object):
    """Wraps a process.

    Options:

    - **wid**: the process unique identifier. This value will be used to
      replace the *$WID* string in the command line if present.

    - **cmd**: the command to run. May contain *$WID*, which will be
      replaced by **wid**.

    - **working_dir**: the working directory to run the command in. If
      not provided, will default to the current working directory.

    - **shell**: if *True*, will run the command in the shell
      environment. *False* by default. **warning: this is a
      security hazard**.

    - **uid**: if given, is the user id or name the command should run
      with. The current uid is the default.

    - **gid**: if given, is the group id or name the command should run
      with. The current gid is the default.

    - **env**: a mapping containing the environment variables the command
      will run with. Optional.
    """
    def __init__(self, wid, cmd, working_dir=None, shell=False, uid=None,
                 gid=None, env=None):
        self.wid = wid
        if working_dir is None:
            self.working_dir = get_working_dir()
        else:
            self.working_dir = working_dir
        self.shell = shell
        self.env = env
        # $WID substitution lets templated command lines embed the worker id.
        self.cmd = cmd.replace('$WID', str(self.wid))
        if uid is None:
            self.uid = None
        else:
            self.uid = to_uid(uid)
        if gid is None:
            self.gid = None
        else:
            self.gid = to_gid(gid)

        def preexec_fn():
            # Runs in the child between fork and exec: new session, then
            # privilege drop (gid before uid so setgid still has permission).
            os.setsid()
            if self.gid:
                try:
                    os.setgid(self.gid)
                except OverflowError:
                    if not ctypes:
                        raise
                    # versions of python < 2.6.2 don't manage unsigned int for
                    # groups like on osx or fedora
                    os.setgid(-ctypes.c_int(-self.gid).value)
            if self.uid:
                os.setuid(self.uid)

        # NOTE(review): naive str.split() — quoted arguments in cmd will be
        # broken apart; other variants of this class use shlex.split.
        self._worker = Popen(self.cmd.split(), cwd=self.working_dir,
                             shell=self.shell, preexec_fn=preexec_fn,
                             env=self.env, close_fds=True, stdout=PIPE,
                             stderr=PIPE)
        self.started = time.time()

    @debuglog
    def poll(self):
        # Non-blocking liveness check; None while still running.
        return self._worker.poll()

    @debuglog
    def send_signal(self, sig):
        """Sends a signal **sig** to the process."""
        return self._worker.send_signal(sig)

    @debuglog
    def stop(self):
        """Terminate the process."""
        if self._worker.poll() is None:
            return self._worker.terminate()

    def age(self):
        """Return the age of the process in seconds."""
        return time.time() - self.started

    def info(self):
        """Return process info.

        The info returned is a mapping with these keys:

        - **mem_info1**: Resident Set Size Memory in bytes (RSS)
        - **mem_info2**: Virtual Memory Size in bytes (VMS).
        - **cpu**: % of cpu usage.
        - **mem**: % of memory usage.
        - **ctime**: process CPU (user + system) time in seconds.
        - **pid**: process id.
        - **username**: user name that owns the process.
        - **nice**: process niceness (between -20 and 20)
        - **cmdline**: the command line the process was run with.
        """
        try:
            info = get_info(self._worker)
        except NoSuchProcess:
            # NOTE(review): returns a string here, a dict otherwise — callers
            # must handle both shapes.
            return "No such process (stopped?)"

        info["children"] = []
        for child in self._worker.get_children():
            info["children"].append(get_info(child))
        return info

    def children(self):
        """Return a list of children pids."""
        return [child.pid for child in self._worker.get_children()]

    def is_child(self, pid):
        """Return True is the given *pid* is a child of that process."""
        pids = [child.pid for child in self._worker.get_children()]
        if pid in pids:
            return True
        return False

    @debuglog
    def send_signal_child(self, pid, signum):
        """Send signal *signum* to child *pid*."""
        children = dict([(child.pid, child) \
                for child in self._worker.get_children()])
        children[pid].send_signal(signum)

    @debuglog
    def send_signal_children(self, signum):
        """Send signal *signum* to all children."""
        for child in self._worker.get_children():
            try:
                child.send_signal(signum)
            except OSError as e:
                # Child already gone (ESRCH) is fine; anything else is not.
                if e.errno != errno.ESRCH:
                    raise

    @property
    def status(self):
        """Return the process status as a constant

        - RUNNING
        - DEAD_OR_ZOMBIE
        - OTHER
        """
        try:
            if self._worker.status in (STATUS_ZOMBIE, STATUS_DEAD):
                return DEAD_OR_ZOMBIE
        except NoSuchProcess:
            return OTHER
        if self._worker.is_running():
            return RUNNING
        return OTHER

    @property
    def pid(self):
        """Return the *pid*"""
        return self._worker.pid

    @property
    def stdout(self):
        """Return the *stdout* stream"""
        return self._worker.stdout

    @property
    def stderr(self):
        """Return the *stderr* stream"""
        return self._worker.stderr
class MovingBobcatEnv(gym.Env):
    """Gym environment driving a simulated Bobcat loader digging into a pile.

    Observations are the 2-D (x, z) position of the bucket tip; actions are
    three continuous commands (drive speed, arm hydraulics, bucket/loader).
    All actuation and sensing goes through ROS topics against a Gazebo sim.
    """
    # Observation is (tip_x, tip_z); action is (drive, hydraulics, loader).
    STATE_SIZE = 2
    ACTION_SIZE = 3

    def __init__(self):
        self.number_actions = rospy.get_param('/moving_cube/n_actions')
        self.field_division = rospy.get_param("/moving_cube/field_division")
        self.seed()
        self.viewer = None
        # Continuous actions in [0, 0.5]; unbounded observation box.
        self.action_space = spaces.Box(0.0, 0.5, (self.ACTION_SIZE, ),
                                       dtype=np.float32)
        self.observation_space = spaces.Box(-np.inf, +np.inf,
                                            shape=(self.STATE_SIZE, ),
                                            dtype=np.float32)

        # get configuration parameters
        self.init_roll_vel = rospy.get_param('/moving_cube/init_roll_vel')
        self.get_link_state = rospy.ServiceProxy("/gazebo/get_link_state",
                                                 GetLinkState)

        # Actions
        self.roll_speed_fixed_value = rospy.get_param(
            '/moving_cube/roll_speed_fixed_value')
        self.roll_speed_increment_value = rospy.get_param(
            '/moving_cube/roll_speed_increment_value')
        self.start_point = Point()
        self.start_point.x = rospy.get_param("/moving_cube/init_cube_pose/x")
        self.start_point.y = rospy.get_param("/moving_cube/init_cube_pose/y")
        self.start_point.z = rospy.get_param("/moving_cube/init_cube_pose/z")
        self.hyd = 0
        self.bucket_vel = 0
        self.depth = 0
        self.m = (2.1213) / (1.9213 + 0.2)  # the slope of the pile
        self.density = 1922  # density of material in kg/m^3
        self.z_collision = 0
        self.last_volume = 0
        self.volume_sum = 0
        self.last_z_pile = 0
        self.last_x_step = 0
        self.last_z_collision = 0
        self.tip_position = Point()
        self.last_reward = 0
        self.pitch = 0
        self.counter = 0
        self.c = 0
        self.flag = 0
        self.max_volume = 850  # max bucket operation load for tool

        # Done
        self.max_pitch_angle = rospy.get_param('/moving_cube/max_pitch_angle')

        # Rewards
        self.move_distance_reward_weight = rospy.get_param(
            "/moving_cube/move_distance_reward_weight")
        self.y_linear_speed_reward_weight = rospy.get_param(
            "/moving_cube/y_linear_speed_reward_weight")
        self.y_axis_angle_reward_weight = rospy.get_param(
            "/moving_cube/y_axis_angle_reward_weight")
        self.end_episode_points = rospy.get_param(
            "/moving_cube/end_episode_points")

        # stablishes connection with simulator
        self.gazebo = GazeboConnection()
        self.gazebo.unpauseSim()
        self.check_all_sensors_ready()
        self.pub_bucket_tip = rospy.Publisher('/bobcat/tip_position', Point,
                                              queue_size=10)
        rospy.Subscriber('/bobcat/arm/hydraulics', Float64, self.get_hyd)
        rospy.Subscriber('/WPD/Speed', TwistStamped, self.get_vel)
        rospy.Subscriber('/bobcat/arm/loader', Float64, self.get_bucket_vel)
        rospy.Subscriber("/Bobby/joint_states", JointState,
                         self.joints_callback)
        rospy.Subscriber("/Bobby/odom", Odometry, self.odom_callback)
        rospy.Subscriber('/robot_bumper', ContactsState, self.get_depth)
        rospy.Subscriber('/Bobby/imu', Imu, self.get_angular_vel)
        rospy.Subscriber("/gazebo/link_states", LinkStates,
                         self.cb_link_states)
        self.pub = rospy.Publisher('/WPD/Speed', TwistStamped, queue_size=10)
        self.pub2 = rospy.Publisher('/bobcat/arm/hydraulics', Float64,
                                    queue_size=10)
        self.pub3 = rospy.Publisher('/bobcat/arm/loader', Float64,
                                    queue_size=10)
        self.pub4 = rospy.Publisher('/volume', Float32, queue_size=10)
        self.pub5 = rospy.Publisher('/penetration_z', Float32, queue_size=10)
        self.gazebo.pauseSim()
        self.command = TwistStamped()
        # Spawned then immediately terminated; reset() relaunches it, so this
        # only guarantees self.node_process exists from the start.
        self.node_process = Popen(
            shlex.split('rosrun robil_lihi C_response_loader.py'))
        self.node_process.terminate()

    def cb_link_states(self, msg):
        # Cache the pose of the loader link from the full link-state message.
        self.link_states = msg
        self.loader_index = self.link_states.name.index("Bobby::loader")
        self.loader_pos = self.link_states.pose[self.loader_index]

    def get_angular_vel(self, msg):
        # IMU callback: store pitch rate and derive roll/pitch/yaw.
        self.angular_vel = msg.angular_velocity.y
        orientation_q = msg.orientation
        orientation_list = [
            orientation_q.x, orientation_q.y, orientation_q.z, orientation_q.w
        ]
        (self.roll, self.pitch,
         self.yaw) = euler_from_quaternion(orientation_list)

    def plot_x(self):
        # Draw the pile slope and (every 20th call) the bucket-tip position.
        rospy.wait_for_service('/gazebo/get_link_state')
        loader_pos = self.get_link_state('Bobby::loader',
                                         'world').link_state.pose
        x = np.linspace(-0.2, 1.5, 100)
        y = self.m * x + 0.2
        plt.plot(x, y, color='goldenrod')
        if self.counter % 20 == 0:
            # 0.96 m offsets from the loader link origin to the bucket tip.
            plt.plot(loader_pos.position.x + 0.96 * math.cos(self.pitch),
                     loader_pos.position.z + 0.96 * math.sin(self.pitch),
                     'bo')
            plt.title("Bucket position")
            plt.ylabel("z axis")
            plt.xlabel("x axis")
            plt.axis([-1, 1.5, 0, 1.5])
            plt.axis('equal')
            plt.draw()
            plt.pause(0.00000000001)
        self.counter += 1

    def seed(self, seed=None):
        # Standard gym seeding helper.
        self.np_random, seed = seeding.np_random(seed)
        return [seed]

    def step(self, action):
        self.gazebo.unpauseSim()
        now = rospy.get_rostime()
        # Rescale each action from [-1, 1] into [0, 0.5] — TODO confirm the
        # incoming range; the affine map assumes a [-1, 1] policy output.
        action[0] = 0.25 * action[0] + 0.25
        action[1] = 0.25 * action[1] + 0.25
        action[2] = 0.25 * action[2] + 0.25
        self.set_action(action)
        self.gazebo.pauseSim()
        obs = self._get_obs()
        done = self._is_done(obs, now)
        reward = self.compute_reward(obs, done, now)
        simplified_obs = self.convert_obs_to_state(obs)
        self.plot_x()  # plot graph of bucket tip position
        plt.ion()
        plt.show()
        return np.array(simplified_obs, dtype=np.float32), reward, done, {}

    def reset(self):
        # Stop actuation, restart the helper ROS node, reset the sim, and
        # re-home the robot before reading the first observation.
        self.gazebo.unpauseSim()
        self.check_all_sensors_ready()
        self.z_collision = 0
        self.depth = 0
        self.command.twist.linear.x = 0
        self.pub.publish(self.command)
        self.pub2.publish(0)
        self.pub3.publish(0)
        self.node_process.terminate()
        self.set_init_pose()
        obs = self._get_obs()
        now = rospy.get_rostime()
        self._is_done(obs, now)
        self.gazebo.pauseSim()
        self.gazebo.resetSim()
        self.node_process = Popen(
            shlex.split('rosrun robil_lihi C_response_loader.py'))
        self.gazebo.unpauseSim()
        self.command.twist.linear.x = 0
        self.pub.publish(self.command)
        self.pub2.publish(0)
        self.pub3.publish(0)
        self.set_init_pose()
        self.init_env_variables()
        self.check_all_sensors_ready()
        rospy.sleep(1)
        self.gazebo.pauseSim()
        self.init_env_variables()
        obs = self._get_obs()
        simplified_obs = self.convert_obs_to_state(obs)
        plt.clf()
        return simplified_obs

    def render(self, mode='human', close=False):
        # Rendering is handled by Gazebo / matplotlib elsewhere; no-op here.
        pass

    def init_env_variables(self):
        """
        Inits variables needed to be initialised each time we reset at the
        start of an episode.
        :return:
        """
        self.depth = 0.0
        self.last_volume = 0.0
        self.total_distance_moved = 0.0
        self.volume_sum = 0
        self.last_z_pile = 0
        self.last_x_step = 0
        self.last_z_collision = 0
        self.z_collision = 0
        self.last_reward = 0
        self.pitch = 0
        self.flag = 0

    def _is_done(self, observations, now):
        # Episode ends when the bucket is (nearly) full, 15 sim-seconds have
        # passed, or contact is lost after collecting some material.
        if self.max_volume - 5 < self.volume_sum or now.secs > 15 or (
                self.depth < 0.001 and self.volume_sum > 5):
            # term to restart the episode
            done = True
        else:
            done = False
        return done

    def set_action(self, action):
        'execute the action choosen in the gazebo environment'
        command = TwistStamped()
        command.twist.linear.x = action[0]
        self.pub.publish(command)
        self.pub2.publish(action[1])
        self.pub3.publish(action[2])

    def _get_obs(self):
        """
        Here we define what sensor data defines our robots observations
        To know which Variables we have acces to, we need to read the
        MyCubeSingleDiskEnv API DOCS
        :return:
        """
        # Bucket tip (x, z): loader link pose plus 0.96 m along the pitch.
        bucket_observations = [
            self.loader_pos.position.x + 0.96 * math.cos(self.pitch),
            self.loader_pos.position.z + 0.96 * math.sin(self.pitch)
        ]
        return bucket_observations

    def get_orientation_euler(self):
        # We convert from quaternions to euler
        orientation_list = [
            self.odom.pose.pose.orientation.x,
            self.odom.pose.pose.orientation.y,
            self.odom.pose.pose.orientation.z,
            self.odom.pose.pose.orientation.w
        ]
        roll, pitch, yaw = euler_from_quaternion(orientation_list)
        return roll, pitch, yaw

    def compute_reward(self, observations, done, now):
        # Base penalty per step; bonuses/penalties track collected volume.
        if not done:
            reward = -10
            self.calc_volume()
            self.pub4.publish(self.volume_sum)
            if self.volume_sum > 0 and self.depth <= 0.01 and self.flag == 0:
                # First moment the bucket leaves the soil: log penetration z.
                self.flag = 1
                self.pub5.publish(self.loader_pos.position.z +
                                  0.96 * math.sin(self.pitch))
            if self.volume_sum > self.max_volume:
                # failed to take the bucket out, too many soil
                reward -= self.volume_sum
                rospy.logwarn("############### Fail=>" + str(self.volume_sum))
            elif self.volume_sum > 0.01:
                reward += 0.3 * self.volume_sum
            if self.volume_sum == 0:
                # Nothing collected: penalize raising the bucket uselessly.
                reward = reward - 10 * (self.loader_pos.position.z +
                                        0.96 * math.sin(self.pitch))
        else:
            # The episode didn't success so we need to give a big negative reward
            reward = 0
            self.depth = 0
        return reward

    def joints_callback(self, data):
        self.joints = data

    def odom_callback(self, data):
        self.odom = data
        roll, pitch, yaw = self.get_orientation_euler()

    def check_all_sensors_ready(self):
        self.check_joint_states_ready()
        self.check_odom_ready()
        rospy.logdebug("ALL SENSORS READY")

    def get_hyd(self, data):  # subscriber for arm vel
        self.hyd = data.data

    def get_bucket_vel(self, data):  # subscriber for bucket vel
        self.bucket_vel = data.data

    def get_vel(self, msg):  # subscriber for bobcat velocity
        self.bobcat_vel = msg.twist.linear.x

    def check_joint_states_ready(self):
        # Block until the first joint-state message arrives (or shutdown).
        self.joints = None
        while self.joints is None and not rospy.is_shutdown():
            try:
                self.joints = rospy.wait_for_message("/Bobby/joint_states",
                                                     JointState,
                                                     timeout=1.0)
                rospy.logdebug("Current Bobby/joint_states READY=>" +
                               str(self.joints))
            except:
                rospy.logerr(
                    "Current Bobby/joint_states not ready yet, retrying for getting Bobby"
                )
        return self.joints

    def check_odom_ready(self):
        # Block until the first odometry message arrives (or shutdown).
        self.odom = None
        while self.odom is None and not rospy.is_shutdown():
            try:
                self.odom = rospy.wait_for_message("/Bobby/odom",
                                                   Odometry,
                                                   timeout=1.0)
                rospy.logdebug("Current /Bobby/odom READY=>" + str(self.odom))
            except:
                rospy.logerr(
                    "Current /Bobby/odom not ready yet, retrying for getting odom"
                )
        return self.odom

    def set_init_pose(self):
        """Sets the Robot in its init pose
        """
        # Drive the arm/bucket until height and pitch fall in the target
        # bands; each loop bails out after 45 sim-seconds.
        now = rospy.get_rostime()
        while not rospy.is_shutdown():
            self.command.twist.linear.x = 0
            self.pub.publish(self.command)
            while self.odom.pose.pose.position.z > 0.13:
                if now.secs > 45:
                    break
                self.pub2.publish(-0.5)
            while self.odom.pose.pose.position.z < 0.03:
                if now.secs > 45:
                    break
                self.pub2.publish(0.5)
            self.pub2.publish(0)
            roll, pitch, yaw = self.get_orientation_euler()
            while pitch > 0.0:
                if now.secs > 45:
                    break
                self.pub3.publish(0.3)
                roll, pitch, yaw = self.get_orientation_euler()
            while pitch < -0.05:
                if now.secs > 45:
                    break
                self.pub3.publish(-0.3)
                roll, pitch, yaw = self.get_orientation_euler()
            self.pub3.publish(0)
            break

    def go_up(self):
        """get the bucket out of soil
        """
        self.gazebo.unpauseSim()  # must in order to get the bucket up
        self.check_all_sensors_ready()
        self.command.twist.linear.x = 0
        while not rospy.is_shutdown():
            self.command.twist.linear.x = 0
            self.pub.publish(self.command)
            self.pub2.publish(0)
            self.pub3.publish(0)
            print("pitch:", self.pitch)
            print("z:", self.loader_pos.position.z)
            # Tilt the bucket back, then lift the arm above 1.0 m.
            while self.pitch > -0.45:
                self.pub3.publish(0.3)
            self.pub3.publish(0)
            while self.loader_pos.position.z < 1.0:
                self.pub2.publish(0.2)
            self.pub2.publish(0)
            print("total volume:", self.volume_sum)
            self.plot_x()  # plot graph of bucket tip position
            plt.ion()
            plt.show()
            break

    def convert_obs_to_state(self, observations):
        """
        Converts the observations used for reward and so on to the essentials
        for the robot state
        In this case we only need the orientation of the cube and the speed
        of the disc.
        The distance doesnt condition at all the actions
        """
        BobcatPos_x = observations[0]
        BobcatPos_z = observations[1]
        state_converted = [BobcatPos_x, BobcatPos_z]
        return state_converted

    def get_depth(self, data):
        # Bumper callback: record penetration depth and contact height for
        # contacts involving the 'box' (pile) model.
        if (ContactsState.states != []):
            i = 0
            for i in range(len(data.states)):
                if ('box' in data.states[i].collision2_name) or (
                        'box' in data.states[i].collision1_name
                ):  # check that the string exist in collision2_name/1
                    self.depth = np.mean(data.states[i].depths)
                    self.z_collision = np.mean(
                        data.states[i].contact_positions[0].z)

    def calc_volume(self):
        # Incremental dug volume: difference of two trapezoid cross-sections
        # (pile surface vs. contact line) swept over the x advance, times
        # tool width and material density.
        z_pile = self.m * (
            self.loader_pos.position.x + 0.96 * math.cos(self.pitch) + 0.2
        )  # 0.96 is the distance between center mass of the loader to the end
        # NOTE(review): H is computed but never used.
        H = z_pile - self.z_collision
        x = self.loader_pos.position.x + 0.96 * math.cos(self.pitch)
        z = self.z_collision
        big_trapezoid = (x - self.last_x_step) * (z_pile +
                                                  self.last_z_pile) / 2
        small_trapezoid = (x - self.last_x_step) * (self.z_collision +
                                                    self.last_z_collision) / 2
        volume = (big_trapezoid - small_trapezoid
                  ) * 1.612 * self.density  # 1.66 is the tool width
        if z_pile > 0 and z > 0 and z_pile > z:
            self.volume_sum = self.volume_sum + volume
        self.last_z_pile = z_pile
        self.last_x_step = x
        self.last_z_collision = self.z_collision
class Process(object):
    """Wraps a process.

    Options:

    - **wid**: the process unique identifier. This value will be used to
      replace the *$WID* string in the command line if present.

    - **cmd**: the command to run. May contain *$WID*, which will be
      replaced by **wid**.

    - **args**: the arguments for the command to run. Can be a list or
      a string. If **args** is a string, it's splitted using
      :func:`shlex.split`. Defaults to None.

    - **executable**: When executable is given, the first item in
      the args sequence obtained from **cmd** is still treated by most
      programs as the command name, which can then be different from the
      actual executable name. It becomes the display name for the
      executing program in utilities such as **ps**.

    - **working_dir**: the working directory to run the command in. If
      not provided, will default to the current working directory.

    - **shell**: if *True*, will run the command in the shell
      environment. *False* by default. **warning: this is a
      security hazard**.

    - **uid**: if given, is the user id or name the command should run
      with. The current uid is the default.

    - **gid**: if given, is the group id or name the command should run
      with. The current gid is the default.

    - **env**: a mapping containing the environment variables the command
      will run with. Optional.

    - **rlimits**: a mapping containing rlimit names and values that will
      be set before the command runs.
    """
    def __init__(self, wid, cmd, args=None, working_dir=None, shell=False,
                 uid=None, gid=None, env=None, rlimits=None, executable=None):
        self.wid = wid
        if working_dir is None:
            self.working_dir = get_working_dir()
        else:
            self.working_dir = working_dir
        self.shell = shell
        self.env = env
        if rlimits is not None:
            self.rlimits = rlimits
        else:
            self.rlimits = {}
        # $WID substitution lets templated command lines embed the worker id.
        self.cmd = cmd.replace('$WID', str(self.wid))
        if uid is None:
            self.uid = None
        else:
            self.uid = to_uid(uid)
        if gid is None:
            self.gid = None
        else:
            self.gid = to_gid(gid)

        def preexec_fn():
            # Runs in the child between fork and exec: new session, rlimits,
            # then privilege drop (gid before uid, so setgid still works).
            os.setsid()
            for limit, value in self.rlimits.items():
                res = getattr(resource, 'RLIMIT_%s' % limit.upper(), None)
                if res is None:
                    raise ValueError('unknown rlimit "%s"' % limit)
                # TODO(petef): support hard/soft limits
                resource.setrlimit(res, (value, value))
            if self.gid:
                try:
                    os.setgid(self.gid)
                except OverflowError:
                    if not ctypes:
                        raise
                    # versions of python < 2.6.2 don't manage unsigned int for
                    # groups like on osx or fedora
                    os.setgid(-ctypes.c_int(-self.gid).value)
            if self.uid:
                os.setuid(self.uid)

        logger.debug('cmd: ' + bytestring(cmd))
        logger.debug('args: ' + str(args))
        # Final argv is always shlex-split cmd, optionally followed by args.
        if args is not None:
            if isinstance(args, string_types):
                args_ = shlex.split(bytestring(args))
            else:
                args_ = [bytestring(arg) for arg in args]
            args_ = shlex.split(bytestring(cmd)) + args_
        else:
            args_ = shlex.split(bytestring(cmd))

        logger.debug("process args: %s", args_)
        logger.debug('Running %r' % ' '.join(args_))
        self._worker = Popen(args_, cwd=self.working_dir,
                             shell=self.shell, preexec_fn=preexec_fn,
                             env=self.env, close_fds=True, stdout=PIPE,
                             stderr=PIPE, executable=executable)
        self.started = time.time()

    @debuglog
    def poll(self):
        # Non-blocking liveness check; None while still running.
        return self._worker.poll()

    @debuglog
    def send_signal(self, sig):
        """Sends a signal **sig** to the process."""
        return self._worker.send_signal(sig)

    @debuglog
    def stop(self):
        """Terminate the process."""
        try:
            if self._worker.poll() is None:
                return self._worker.terminate()
        finally:
            # Close our ends of the pipes even if terminate() raised.
            self._worker.stderr.close()
            self._worker.stdout.close()

    def age(self):
        """Return the age of the process in seconds."""
        return time.time() - self.started

    def info(self):
        """Return process info.

        The info returned is a mapping with these keys:

        - **mem_info1**: Resident Set Size Memory in bytes (RSS)
        - **mem_info2**: Virtual Memory Size in bytes (VMS).
        - **cpu**: % of cpu usage.
        - **mem**: % of memory usage.
        - **ctime**: process CPU (user + system) time in seconds.
        - **pid**: process id.
        - **username**: user name that owns the process.
        - **nice**: process niceness (between -20 and 20)
        - **cmdline**: the command line the process was run with.
        """
        try:
            info = get_info(self._worker)
        except NoSuchProcess:
            # NOTE(review): returns a string here, a dict otherwise — callers
            # must handle both shapes.
            return "No such process (stopped?)"

        info["children"] = []
        for child in self._worker.get_children():
            info["children"].append(get_info(child))
        return info

    def children(self):
        """Return a list of children pids."""
        return [child.pid for child in self._worker.get_children()]

    def is_child(self, pid):
        """Return True is the given *pid* is a child of that process."""
        pids = [child.pid for child in self._worker.get_children()]
        if pid in pids:
            return True
        return False

    @debuglog
    def send_signal_child(self, pid, signum):
        """Send signal *signum* to child *pid*."""
        children = dict([(child.pid, child) \
                for child in self._worker.get_children()])
        children[pid].send_signal(signum)

    @debuglog
    def send_signal_children(self, signum):
        """Send signal *signum* to all children."""
        for child in self._worker.get_children():
            try:
                child.send_signal(signum)
            except OSError as e:
                # Child already gone (ESRCH) is fine; anything else is not.
                if e.errno != errno.ESRCH:
                    raise

    @property
    def status(self):
        """Return the process status as a constant

        - RUNNING
        - DEAD_OR_ZOMBIE
        - OTHER
        """
        try:
            if self._worker.status in (STATUS_ZOMBIE, STATUS_DEAD):
                return DEAD_OR_ZOMBIE
        except NoSuchProcess:
            return OTHER
        if self._worker.is_running():
            return RUNNING
        return OTHER

    @property
    def pid(self):
        """Return the *pid*"""
        return self._worker.pid

    @property
    def stdout(self):
        """Return the *stdout* stream"""
        return self._worker.stdout

    @property
    def stderr(self):
        """Return the *stderr* stream"""
        return self._worker.stderr
class MovingBobcatEnv(gym.Env):
    """Gym environment for a simulated Bobcat loader digging into a soil pile.

    Talks to Gazebo through ROS topics/params. Actions and observations are
    discretized; the reward is driven by the volume of material accumulated
    in the bucket (see ``calc_volume``/``compute_reward``).
    """

    def __init__(self):
        # Discretization sizes come from ROS parameters.
        self.number_actions = rospy.get_param('/moving_cube/n_actions')
        self.field_division = rospy.get_param("/moving_cube/field_division")
        self.action_space = spaces.Discrete(self.number_actions)
        # State bins: x-position * z-position * bobcat-vel(4) * arm-vel(3) * bucket-vel(3)
        self.observation_space = spaces.Discrete((self.field_division+1)*(self.field_division+1)*3*3*4)
        self._seed()

        # get configuration parameters
        self.init_roll_vel = rospy.get_param('/moving_cube/init_roll_vel')

        # Actions
        self.roll_speed_fixed_value = rospy.get_param('/moving_cube/roll_speed_fixed_value')
        self.roll_speed_increment_value = rospy.get_param('/moving_cube/roll_speed_increment_value')

        self.start_point = Point()
        self.start_point.x = rospy.get_param("/moving_cube/init_cube_pose/x")
        self.start_point.y = rospy.get_param("/moving_cube/init_cube_pose/y")
        self.start_point.z = rospy.get_param("/moving_cube/init_cube_pose/z")

        # Latest sensor readings / bookkeeping for the incremental volume integration.
        self.hyd = 0
        self.bucket_vel = 0
        self.depth = 0
        self.m = (2.1213)/(1.9213 + 0.2)  # the slope of the pile
        self.density = 1922  # density of material in kg/m^3
        self.z_collision = 0
        self.last_volume = 0
        self.volume_sum = 0
        self.last_z_pile = 0
        self.last_x_step = 0
        self.last_z_collision = 0
        self.tip_position = Point()
        self.last_reward = 0
        self.pitch = 0
        self.flag = 0  # set once when the bucket leaves the soil (see compute_reward)
        self.max_volume = 860  # max bucket operation load for tool

        # Done
        self.max_pitch_angle = rospy.get_param('/moving_cube/max_pitch_angle')

        # Rewards
        self.move_distance_reward_weight = rospy.get_param("/moving_cube/move_distance_reward_weight")
        self.y_linear_speed_reward_weight = rospy.get_param("/moving_cube/y_linear_speed_reward_weight")
        self.y_axis_angle_reward_weight = rospy.get_param("/moving_cube/y_axis_angle_reward_weight")
        self.end_episode_points = rospy.get_param("/moving_cube/end_episode_points")

        # establishes connection with simulator
        self.gazebo = GazeboConnection()
        self.gazebo.unpauseSim()
        self.check_all_sensors_ready()

        self.pub_bucket_tip = rospy.Publisher('/bobcat/tip_position', Point, queue_size=10)
        rospy.Subscriber('/bobcat/arm/hydraulics', Float64, self.get_hyd)
        rospy.Subscriber('/WPD/Speed', TwistStamped, self.get_vel)
        rospy.Subscriber('/bobcat/arm/loader', Float64, self.get_bucket_vel)
        rospy.Subscriber("/Bobby/joint_states", JointState, self.joints_callback)
        rospy.Subscriber("/Bobby/odom", Odometry, self.odom_callback)
        rospy.Subscriber('/robot_bumper', ContactsState, self.get_depth)
        # rospy.Subscriber('/bobcat/tip_position', Point, self.get_tip_position)
        rospy.Subscriber("/gazebo/link_states", LinkStates, self.cb_link_states)

        self.pub = rospy.Publisher('/WPD/Speed', TwistStamped, queue_size=10)
        self.pub2 = rospy.Publisher('/bobcat/arm/hydraulics', Float64, queue_size=10)
        self.pub3 = rospy.Publisher('/bobcat/arm/loader', Float64, queue_size=10)
        self.pub4 = rospy.Publisher('/volume', Float32, queue_size=10)
        self.pub5 = rospy.Publisher('/penetration_z', Float32, queue_size=10)
        self.gazebo.pauseSim()
        self.command = TwistStamped()
        # External loader-response node; immediately terminated here and
        # re-spawned on every reset().
        self.node_process = Popen(shlex.split('rosrun robil_lihi C_response_loader.py'))
        self.node_process.terminate()

    def cb_link_states(self, msg):
        """Cache the Gazebo pose of the loader link from /gazebo/link_states."""
        self.link_states = msg
        self.loader_index = self.link_states.name.index("Bobby::loader")
        self.loader_pos = self.link_states.pose[self.loader_index]

    def get_angular_vel(self, msg):
        """IMU-style callback: cache angular velocity and roll/pitch/yaw."""
        self.angular_vel = msg.angular_velocity.y
        orientation_q = msg.orientation
        orientation_list = [orientation_q.x, orientation_q.y, orientation_q.z, orientation_q.w]
        (self.roll, self.pitch, self.yaw) = euler_from_quaternion(orientation_list)

    def _seed(self, seed=None):  # overriden function
        self.np_random, seed = seeding.np_random(seed)
        return [seed]

    def step(self, action):  # overriden function
        # Simulation is unpaused only while the action is applied.
        self.gazebo.unpauseSim()
        self.set_action(action)
        self.gazebo.pauseSim()
        obs = self._get_obs()
        done = self._is_done(obs)
        info = {}
        reward = self.compute_reward(obs, done)
        simplified_obs = self.convert_obs_to_state(obs)
        return simplified_obs, reward, done, info

    def reset(self):
        """Stop everything, reset the sim, respawn the loader node and return
        the initial (simplified) observation."""
        self.gazebo.unpauseSim()
        self.check_all_sensors_ready()
        self.z_collision = 0
        self.depth = 0
        # Zero all actuator commands before resetting the world.
        self.command.twist.linear.x = 0
        self.pub.publish(self.command)
        self.pub2.publish(0)
        self.pub3.publish(0)
        self.node_process.terminate()
        self.set_init_pose()
        self.gazebo.pauseSim()
        self.gazebo.resetSim()
        self.node_process = Popen(shlex.split('rosrun robil_lihi C_response_loader.py'))
        self.gazebo.unpauseSim()
        self.command.twist.linear.x = 0
        self.pub.publish(self.command)
        self.pub2.publish(0)
        self.pub3.publish(0)
        self.set_init_pose()
        self.init_env_variables()
        self.check_all_sensors_ready()
        self.gazebo.pauseSim()
        self.init_env_variables()
        obs = self._get_obs()
        simplified_obs = self.convert_obs_to_state(obs)
        return simplified_obs

    def _render(self, mode='human', close=False):
        # Rendering is handled by Gazebo itself; nothing to do here.
        pass

    def init_env_variables(self):
        """
        Inits variables needed to be initialised each time we reset
        at the start of an episode.
        :return:
        """
        self.depth = 0.0
        self.last_volume = 0.0
        self.total_distance_moved = 0.0
        self.volume_sum = 0
        self.last_z_pile = 0
        self.last_x_step = 0
        self.last_z_collision = 0
        self.z_collision = 0
        self.last_reward = 0
        self.pitch = 0
        self.flag = 0

    def _is_done(self, observations):
        # if self.volume_sum > 87 or (self.depth < 0.01 and self.volume_sum > 5):
        now = rospy.get_rostime()
        # Episode ends when the bucket is (nearly) full or after 40 sim-seconds.
        if self.max_volume-5 < self.volume_sum or now.secs > 40:  # term to restart the episode
            done = True
        else:
            done = False
        return done

    def set_action(self, action):
        """Map a discrete action id to randomized actuator commands.

        pub = drive velocity, pub2 = arm (hydraulics), pub3 = bucket (loader).
        """
        command = TwistStamped()
        if action == 0:  # action bobcat velocity
            command.twist.linear.x = round(random.uniform(0.1, 0.5), 3)
            self.pub.publish(command)
            self.pub2.publish(0)
            self.pub3.publish(0)
        elif action == 1:  # action bobcat & arm velocity
            r = round(random.uniform(0.1, 0.5), 3)
            command.twist.linear.x = round(random.uniform(0.1, 0.5), 3)
            self.pub.publish(command)
            self.pub2.publish(r)
            self.pub3.publish(0)
        elif action == 2:  # bucket velocity
            r = np.random.uniform(0.1, 0.5)
            command.twist.linear.x = 0
            self.pub.publish(command)
            self.pub2.publish(0)
            self.pub3.publish(r)
        elif action == 3:  # action arm velocity
            r = round(random.uniform(0.1, 0.5), 3)
            command.twist.linear.x = 0
            self.pub.publish(command)
            self.pub2.publish(r)
            self.pub3.publish(0)

    def _get_obs(self):
        """
        Build the raw observation vector from the cached sensor data.

        Bucket-tip x/z positions are normalized into [0, field_division] bins;
        the three velocities are clamped into small integer bins.
        :return: [BobcatPos_x, BobcatPos_z, BobcatVel, ArmVel, BucketVel]
        """
        # We get the orientation of the bucket in RPY
        roll, pitch, yaw = self.get_orientation_euler()
        # 0.96 is the loader-center-to-bucket-tip offset; the other constants
        # are the workspace bounds used for normalization.
        BobcatPos_x = math.floor((self.loader_pos.position.x + 0.96 * math.cos(self.pitch)+1.495)/(-0.1311+1.495)*self.field_division)
        if BobcatPos_x > self.field_division:
            BobcatPos_x = float(self.field_division)
        elif BobcatPos_x < 0:
            BobcatPos_x = 0.0
        BobcatPos_z = math.floor((self.loader_pos.position.z + 0.96 * math.sin(self.pitch)-0.0924)/(0.4037-0.0924)*self.field_division)
        if BobcatPos_z > self.field_division:
            BobcatPos_z = float(self.field_division)
        elif BobcatPos_z < 0:
            BobcatPos_z = 0.0
        # We get the velocity of the bucket
        BobcatVel = math.floor((self.odom.twist.twist.linear.x-0)/(0.5-0)*3)
        if BobcatVel > 3:
            BobcatVel = 3
        if BobcatVel < 0:
            BobcatVel = 0
        ArmVel = math.floor((float(self.hyd))/(0.5-0.0)*2)
        if ArmVel > 2:
            ArmVel = 2
        if ArmVel < 0:
            ArmVel = 0
        BucketVel = math.floor((float(self.bucket_vel))/(0.5-0.0)*2)
        if BucketVel > 2:
            BucketVel = 2
        if BucketVel < 0:
            BucketVel = 0
        bucket_observations = [BobcatPos_x, BobcatPos_z, BobcatVel, ArmVel, BucketVel]
        return bucket_observations

    def get_orientation_euler(self):
        # We convert from quaternions to euler
        orientation_list = [self.odom.pose.pose.orientation.x,
                            self.odom.pose.pose.orientation.y,
                            self.odom.pose.pose.orientation.z,
                            self.odom.pose.pose.orientation.w]
        roll, pitch, yaw = euler_from_quaternion(orientation_list)
        return roll, pitch, yaw

    def get_roll_velocity(self):
        # We get the current joint roll velocity
        roll_vel = self.joints.velocity[0]
        return roll_vel

    def get_y_linear_speed(self):
        # We get the current y linear speed from odometry
        y_linear_speed = self.odom.twist.twist.linear.y
        return y_linear_speed

    def get_y_dir_distance_from_start_point(self, start_point):
        """
        Calculates the distance from the given point and the current position
        given by odometry. In this case the increase or decrease in y.
        :param start_point:
        :return:
        """
        y_dist_dir = self.odom.pose.pose.position.y - start_point.y
        return y_dist_dir

    def compute_reward(self, observations, done):
        """Reward = -10 per step, plus a volume bonus, minus a penalty for
        lifting the bucket tip; a large negative term on overload."""
        if not done:
            reward = -10
            self.calc_volume()
            self.pub4.publish(self.volume_sum)
            # First moment the bucket carries material while out of the soil:
            # publish the penetration depth once.
            if self.volume_sum > 0 and self.depth <= 0.01 and self.flag == 0:
                self.flag = 1
                self.pub5.publish(self.loader_pos.position.z + 0.96 * math.sin(self.pitch))
            if self.volume_sum > self.max_volume:  # failed to take the bucket out, too many soil
                reward -= self.volume_sum
                rospy.logwarn("############### Fail=>" + str(self.volume_sum))
            elif self.volume_sum > 0.01:
                reward += 0.1 * self.volume_sum
                reward = reward - 20 * (self.loader_pos.position.z + 0.96 * math.sin(self.pitch))
        else:
            # The episode didn't success so we need to give a big negative reward
            # NOTE(review): despite the comment above, the terminal reward is 0
            # here — confirm whether a negative terminal reward was intended.
            reward = 0
            self.depth = 0
        return reward

    def joints_callback(self, data):
        """Cache the latest joint states."""
        self.joints = data

    def odom_callback(self, data):
        """Cache odometry and publish the derived bucket-tip position."""
        self.odom = data
        roll, pitch, yaw = self.get_orientation_euler()
        self.tip_position.x = self.odom.pose.pose.position.x + 0.96 * math.cos(pitch)
        self.tip_position.y = self.odom.pose.pose.position.y
        self.tip_position.z = self.odom.pose.pose.position.z + 0.96 * math.sin(pitch)
        self.pub_bucket_tip.publish(self.tip_position)

    def check_all_sensors_ready(self):
        self.check_joint_states_ready()
        self.check_odom_ready()
        rospy.logdebug("ALL SENSORS READY")

    def get_hyd(self, data):  # subscriber for arm vel
        self.hyd = data.data

    def get_bucket_vel(self, data):  # subscriber for bucket vel
        self.bucket_vel = data.data

    def get_vel(self, msg):  # subscriber for bobcat velocity
        self.bobcat_vel = msg.twist.linear.x

    def check_joint_states_ready(self):
        """Block until a /Bobby/joint_states message arrives (or shutdown)."""
        self.joints = None
        while self.joints is None and not rospy.is_shutdown():
            try:
                self.joints = rospy.wait_for_message("/Bobby/joint_states", JointState, timeout=1.0)
                rospy.logdebug("Current Bobby/joint_states READY=>" + str(self.joints))
            except:
                # wait_for_message raises on timeout; just retry until shutdown.
                rospy.logerr("Current Bobby/joint_states not ready yet, retrying for getting Bobby")
        return self.joints

    def check_odom_ready(self):
        """Block until a /Bobby/odom message arrives (or shutdown)."""
        self.odom = None
        while self.odom is None and not rospy.is_shutdown():
            try:
                self.odom = rospy.wait_for_message("/Bobby/odom", Odometry, timeout=1.0)
                rospy.logdebug("Current /Bobby/odom READY=>" + str(self.odom))
            except:
                rospy.logerr("Current /Bobby/odom not ready yet, retrying for getting odom")
        return self.odom

    def set_init_pose(self):
        """Sets the Robot in its init pose"""
        while not rospy.is_shutdown():
            self.command.twist.linear.x = 0
            self.pub.publish(self.command)
            # Drive the arm until the body height is inside [0.03, 0.13].
            while self.odom.pose.pose.position.z > 0.13:
                self.pub2.publish(-0.5)
            while self.odom.pose.pose.position.z < 0.03:
                self.pub2.publish(0.5)
            self.pub2.publish(0)
            # Level the bucket pitch into [-0.05, 0.0].
            roll, pitch, yaw = self.get_orientation_euler()
            while pitch > 0.0:
                self.pub3.publish(0.3)
                roll, pitch, yaw = self.get_orientation_euler()
            while pitch < -0.05:
                self.pub3.publish(-0.3)
                roll, pitch, yaw = self.get_orientation_euler()
            self.pub3.publish(0)
            break

    def go_up(self):
        """get the bucket out of soil"""
        self.gazebo.unpauseSim()  # must in order to get the bucket up
        self.check_all_sensors_ready()
        self.command.twist.linear.x = 0
        while not rospy.is_shutdown():
            self.command.twist.linear.x = 0
            self.pub.publish(self.command)
            self.pub2.publish(0)
            self.pub3.publish(0)
            roll, pitch, yaw = self.get_orientation_euler()
            print("pitch:", pitch)
            print("z:", self.odom.pose.pose.position.z)
            # Curl the bucket back, then raise the arm above 1.3 m.
            while pitch > -0.45:
                self.pub3.publish(0.3)
                roll, pitch, yaw = self.get_orientation_euler()
            self.pub3.publish(0)
            while self.odom.pose.pose.position.z < 1.3:
                self.pub2.publish(0.2)
            self.pub2.publish(0)
            print("total volume:", self.volume_sum)
            break

    def convert_obs_to_state(self, observations):
        """
        Converts the observations used for reward and so on to the essentials
        for the robot state. In this case we only need the orientation of the
        cube and the speed of the disc; the distance doesn't condition the
        actions at all.
        """
        BobcatPos_x = observations[0]
        BobcatPos_z = observations[1]
        BobcatVel = observations[2]
        ArmVel = observations[3]
        BucketVel = observations[4]
        state_converted = [BobcatPos_x, BobcatPos_z, BobcatVel, ArmVel, BucketVel]
        return state_converted

    def get_depth(self, data):
        """Bumper callback: record penetration depth and contact height for
        contacts involving the soil 'box'."""
        # NOTE(review): this tests the *class attribute* ContactsState.states,
        # which is always truthy — presumably `data.states` was meant. Harmless
        # today because the loop below iterates data.states anyway; confirm.
        if (ContactsState.states != []):
            i = 0
            for i in range(len(data.states)):
                if ('box' in data.states[i].collision2_name) or ('box' in data.states[i].collision1_name):
                    # check that the string exist in collision2_name/1
                    self.depth = np.mean(data.states[i].depths)
                    self.z_collision = np.mean(data.states[i].contact_positions[0].z)

    def calc_volume(self):
        """Integrate the swept cross-section (trapezoid rule) between the pile
        surface and the contact line into self.volume_sum [kg]."""
        z_pile = self.m * (self.loader_pos.position.x + 0.96 * math.cos(
            self.pitch) + 0.2)  # 0.96 is the distance between center mass of the loader to the end
        H = z_pile - self.z_collision
        x = self.loader_pos.position.x + 0.96 * math.cos(self.pitch)
        z = self.z_collision
        big_trapezoid = (x-self.last_x_step)*(z_pile+self.last_z_pile)/2
        small_trapezoid = (x-self.last_x_step)*(self.z_collision+self.last_z_collision)/2
        volume = (big_trapezoid - small_trapezoid) * 1.612 * self.density  # 1.612 is the tool width [m]
        # Only accumulate while the tip is actually below the pile surface.
        if z_pile > 0 and z > 0 and z_pile > z:
            self.volume_sum = self.volume_sum + volume
            self.last_z_pile = z_pile
            self.last_x_step = x
            self.last_z_collision = self.z_collision
        return self.volume_sum
class BackgroundRunner:
    """Runs an encoder command (or a two-command pipe) in the background.

    stdout/stderr are redirected to log files which a reader thread tails,
    forwarding each line to ``log_queue`` and scanning it for configured
    error/success markers (``error_detected`` / ``success_detected``).
    """

    def __init__(self, log_queue):
        # log_queue: queue that receives every output line for the UI/consumer.
        self.process = None
        self.process_two = None  # second stage of a piped run, if any
        self.killed = False
        self.output_file = None
        self.error_output_file = None
        self.log_queue = log_queue
        self.error_detected = False
        self.success_detected = False
        self.error_message = []      # substrings that mark failure in stderr
        self.success_message = []    # substrings that mark success in stdout

    def start_exec(self, command, work_dir: str = None, shell: bool = False, errors=(), successes=()):
        """Kill any previous run, then start *command* with output captured
        to fresh log files and spawn the tailing thread."""
        self.clean()
        logger.info(f"Running command: {command}")
        Path(work_dir).mkdir(exist_ok=True, parents=True)
        # Random suffix so concurrent/repeated runs never share log files.
        self.output_file = Path(
            work_dir) / f"encoder_output_{secrets.token_hex(6)}.log"
        self.error_output_file = Path(
            work_dir) / f"encoder_error_output_{secrets.token_hex(6)}.log"
        self.output_file.touch(exist_ok=True)
        self.error_output_file.touch(exist_ok=True)
        self.error_message = errors
        self.success_message = successes
        self.process = Popen(
            shlex.split(command) if not shell and isinstance(command, str) else command,
            shell=shell,
            cwd=work_dir,
            stdout=open(self.output_file, "w"),
            stderr=open(self.error_output_file, "w"),
            stdin=PIPE,  # FFmpeg can try to read stdin and wrecks havoc on linux
            encoding="utf-8",
        )
        Thread(target=self.read_output).start()

    def start_piped_exec(self, command_two, command_one=None, *args, **kwargs):
        raise NotImplementedError  # placeholder, see real definition below

    def start_piped_exec(self, command_one, command_two, work_dir, errors=(), successes=()):
        """Like start_exec but runs ``command_one | command_two``; only the
        second stage's output is logged and scanned."""
        self.clean()
        logger.info(
            f"Running commands: {' '.join(command_one)} | {' '.join(command_two)}"
        )
        Path(work_dir).mkdir(exist_ok=True, parents=True)
        self.output_file = Path(
            work_dir) / f"encoder_output_{secrets.token_hex(6)}.log"
        self.error_output_file = Path(
            work_dir) / f"encoder_error_output_{secrets.token_hex(6)}.log"
        self.output_file.touch(exist_ok=True)
        self.error_output_file.touch(exist_ok=True)
        self.error_message = errors
        self.success_message = successes
        self.process = Popen(
            command_one,
            cwd=work_dir,
            stdout=PIPE,
            stderr=PIPE,
            stdin=PIPE,  # FFmpeg can try to read stdin and wrecks havoc on linux
        )
        self.process_two = Popen(
            command_two,
            cwd=work_dir,
            stdout=open(self.output_file, "w"),
            stderr=open(self.error_output_file, "w"),
            stdin=self.process.stdout,  # wire stage one into stage two
            encoding="utf-8",
        )
        self.error_detected = False
        Thread(target=self.read_output).start()

    def read_output(self):
        """THREAD: tail both log files until the process dies, pushing lines
        to log_queue and flagging error/success markers; then remove the logs."""
        with open(self.output_file, "r", encoding="utf-8", errors="ignore") as out_file, open(
                self.error_output_file, "r", encoding="utf-8", errors="ignore") as err_file:
            while True:
                if not self.is_alive():
                    # Process finished: drain whatever is left in both files.
                    excess = out_file.read()
                    logger.info(excess)
                    self.log_queue.put(excess)
                    err_excess = err_file.read()
                    logger.info(err_excess)
                    self.log_queue.put(err_excess)
                    break
                line = out_file.readline().rstrip()
                if line:
                    logger.info(line)
                    self.log_queue.put(line)
                    if not self.success_detected:
                        for success in self.success_message:
                            if success in line:
                                self.success_detected = True
                err_line = err_file.readline().rstrip()
                if err_line:
                    logger.info(err_line)
                    self.log_queue.put(err_line)
                    if "Conversion failed!" in err_line:
                        self.error_detected = True
                    if not self.error_detected:
                        for error in self.error_message:
                            if error in err_line:
                                self.error_detected = True
        try:
            self.output_file.unlink()
            self.error_output_file.unlink()
        except OSError:
            pass

    def read(self, limit=None):
        """Read up to *limit* bytes from the worker's stdout, if still alive.

        NOTE(review): start_exec redirects stdout to a file, so process.stdout
        is None there — this appears usable only for externally-wired runs;
        confirm intended callers.
        """
        if not self.is_alive():
            return
        return self.process.stdout.read(limit)

    def is_alive(self):
        """True while the (final stage of the) pipeline is still running."""
        if not self.process:
            return False
        if self.process_two:
            # TODO make sure process 1 dies cleanly
            return True if self.process_two.poll() is None else False
        return True if self.process.poll() is None else False

    def clean(self):
        """Kill any previous run and reset all state flags for a new one."""
        self.kill(log=False)
        self.process = None
        self.process_two = None
        self.error_detected = False
        self.success_detected = False
        self.killed = False

    def kill(self, log=True):
        """Terminate both pipeline stages (best effort) and mark as killed."""
        if self.process_two:
            if log:
                logger.info(f"Killing worker process {self.process_two.pid}")
            try:
                self.process_two.terminate()
                self.process_two.kill()
            except Exception as err:
                if log:
                    logger.exception(f"Couldn't terminate process: {err}")
        if self.process:
            if log:
                logger.info(f"Killing worker process {self.process.pid}")
            try:
                # if reusables.win_based:
                #     os.kill(self.process.pid, signal.CTRL_C_EVENT)
                # else:
                #     os.killpg(os.getpgid(self.process.pid), signal.SIGTERM)
                self.process.terminate()
                self.process.kill()
            except Exception as err:
                if log:
                    logger.exception(f"Couldn't terminate process: {err}")
        self.killed = True

    def pause(self):
        """Suspend a single-process run (unsupported for piped runs)."""
        if self.process_two:
            return False
        if not self.process:
            return False
        self.process.suspend()

    def resume(self):
        """Resume a single-process run (unsupported for piped runs)."""
        if self.process_two:
            return False
        if not self.process:
            return False
        self.process.resume()
class Process(object):
    """Wraps a process.

    Options:

    - **wid**: the process unique identifier. This value will be used to
      replace the *$WID* string in the command line if present.

    - **cmd**: the command to run. May contain any of the variables available
      that are being passed to this class. They will be replaced using the
      python format syntax.

    - **args**: the arguments for the command to run. Can be a list or
      a string. If **args** is a string, it's splitted using
      :func:`shlex.split`. Defaults to None.

    - **executable**: When executable is given, the first item in
      the args sequence obtained from **cmd** is still treated by most
      programs as the command name, which can then be different from the
      actual executable name. It becomes the display name for the executing
      program in utilities such as **ps**.

    - **working_dir**: the working directory to run the command in. If
      not provided, will default to the current working directory.

    - **shell**: if *True*, will run the command in the shell
      environment. *False* by default. **warning: this is a
      security hazard**.

    - **uid**: if given, is the user id or name the command should run
      with. The current uid is the default.

    - **gid**: if given, is the group id or name the command should run
      with. The current gid is the default.

    - **env**: a mapping containing the environment variables the command
      will run with. Optional.

    - **rlimits**: a mapping containing rlimit names and values that will
      be set before the command runs.

    - **use_fds**: if True, will not close the fds in the subprocess.
      default: False.

    - **pipe_stdout**: if True, will open a PIPE on stdout. default: True.

    - **pipe_stderr**: if True, will open a PIPE on stderr. default: True.

    - **close_child_stdout**: If True, redirects the child process' stdout
      to /dev/null after the fork. default: False.

    - **close_child_stderr**: If True, redirects the child process' stdout
      to /dev/null after the fork. default: False.
    """

    def __init__(self, wid, cmd, args=None, working_dir=None, shell=False,
                 uid=None, gid=None, env=None, rlimits=None, executable=None,
                 use_fds=False, watcher=None, spawn=True,
                 pipe_stdout=True, pipe_stderr=True,
                 close_child_stdout=False, close_child_stderr=False):
        self.wid = wid
        self.cmd = cmd
        self.args = args
        self.working_dir = working_dir or get_working_dir()
        self.shell = shell
        self.uid = to_uid(uid) if uid else None
        self.gid = to_gid(gid) if gid else None
        self.env = env or {}
        self.rlimits = rlimits or {}
        self.executable = executable
        self.use_fds = use_fds
        self.watcher = watcher
        self.pipe_stdout = pipe_stdout
        self.pipe_stderr = pipe_stderr
        self.close_child_stdout = close_child_stdout
        self.close_child_stderr = close_child_stderr
        self.stopping = False
        # sockets created before fork, should be let go after.
        self._sockets = []
        if spawn:
            self.spawn()

    def _null_streams(self, streams):
        """Redirect every stream in *streams* to /dev/null (used in the child
        after fork to silence stdin/stdout/stderr)."""
        devnull = os.open(os.devnull, os.O_RDWR)
        try:
            for stream in streams:
                if not hasattr(stream, 'fileno'):
                    # we're probably dealing with a file-like
                    continue
                try:
                    stream.flush()
                    os.dup2(devnull, stream.fileno())
                except IOError:
                    # some streams, like stdin - might be already closed.
                    pass
        finally:
            os.close(devnull)

    def _get_sockets_fds(self):
        """Returns sockets dict. If this worker's cmd indicates use of a
        SO_REUSEPORT socket, a new socket is created and bound. This new
        socket's FD replaces original socket's FD in returned dict. This
        method populates `self._sockets` list. This list should be let go
        after `fork()`.
        """
        sockets_fds = None
        if self.watcher is not None and self.watcher.sockets is not None:
            sockets_fds = self.watcher._get_sockets_fds()
            reuseport_sockets = tuple((sn, s) for (sn, s)
                                      in self.watcher.sockets.items()
                                      if s.so_reuseport)
            for sn, s in reuseport_sockets:
                # watcher.cmd uses this reuseport socket
                if 'circus.sockets.%s' % sn in self.watcher.cmd:
                    sock = CircusSocket.load_from_config(s._cfg)
                    sock.bind_and_listen()
                    # replace original socket's fd
                    sockets_fds[sn] = sock.fileno()
                    # keep new socket until fork returns
                    self._sockets.append(sock)
        return sockets_fds

    def spawn(self):
        """Format the command line and start the subprocess.

        The preexec hook (run in the child between fork and exec) nulls the
        requested streams, creates a new session, applies rlimits and drops
        privileges to the configured gid/uid — in that order.
        """
        sockets_fds = self._get_sockets_fds()
        args = self.format_args(sockets_fds=sockets_fds)

        def preexec_fn():
            streams = [sys.stdin]
            if self.close_child_stdout:
                streams.append(sys.stdout)
            if self.close_child_stderr:
                streams.append(sys.stderr)
            self._null_streams(streams)
            os.setsid()
            for limit, value in self.rlimits.items():
                res = getattr(resource, 'RLIMIT_%s' % limit.upper(), None)
                if res is None:
                    raise ValueError('unknown rlimit "%s"' % limit)
                # TODO(petef): support hard/soft limits
                resource.setrlimit(res, (value, value))
            if self.gid:
                try:
                    os.setgid(self.gid)
                except OverflowError:
                    if not ctypes:
                        raise
                    # versions of python < 2.6.2 don't manage unsigned int for
                    # groups like on osx or fedora
                    os.setgid(-ctypes.c_int(-self.gid).value)
            if self.uid:
                os.setuid(self.uid)

        extra = {}
        if self.pipe_stdout:
            extra['stdout'] = PIPE
        if self.pipe_stderr:
            extra['stderr'] = PIPE
        self._worker = Popen(args, cwd=self.working_dir,
                             shell=self.shell, preexec_fn=preexec_fn,
                             env=self.env, close_fds=not self.use_fds,
                             executable=self.executable, **extra)
        # let go of sockets created only for self._worker to inherit
        self._sockets = []
        self.started = time.time()

    def format_args(self, sockets_fds=None):
        """It's possible to use environment variables and some other
        variables that are available in this context, when spawning the
        processes.
        """
        logger.debug('cmd: ' + bytestring(self.cmd))
        logger.debug('args: ' + str(self.args))
        current_env = ObjectDict(self.env.copy())
        # Variables usable with the $(NAME) format syntax in cmd/args.
        format_kwargs = {
            'wid': self.wid, 'shell': self.shell, 'args': self.args,
            'env': current_env, 'working_dir': self.working_dir,
            'uid': self.uid, 'gid': self.gid, 'rlimits': self.rlimits,
            'executable': self.executable, 'use_fds': self.use_fds}
        if sockets_fds is not None:
            format_kwargs['sockets'] = sockets_fds
        if self.watcher is not None:
            for option in self.watcher.optnames:
                if option not in format_kwargs\
                        and hasattr(self.watcher, option):
                    format_kwargs[option] = getattr(self.watcher, option)
        cmd = replace_gnu_args(self.cmd, **format_kwargs)
        if '$WID' in cmd or (self.args and '$WID' in self.args):
            msg = "Using $WID in the command is deprecated. You should use "\
                  "the python string format instead. In you case, this means "\
                  "replacing the $WID in your command by $(WID)."
            warnings.warn(msg, DeprecationWarning)
            self.cmd = cmd.replace('$WID', str(self.wid))
        if self.args is not None:
            if isinstance(self.args, string_types):
                args = shlex.split(
                    bytestring(replace_gnu_args(self.args, **format_kwargs)))
            else:
                args = [
                    bytestring(replace_gnu_args(arg, **format_kwargs))
                    for arg in self.args
                ]
            args = shlex.split(bytestring(cmd)) + args
        else:
            args = shlex.split(bytestring(cmd))
        if self.shell:
            # subprocess.Popen(shell=True) implies that 1st arg is the
            # requested command, remaining args are applied to sh.
            args = [' '.join(quote(arg) for arg in args)]
            shell_args = format_kwargs.get('shell_args', None)
            if shell_args and is_win():
                logger.warn(
                    "shell_args won't apply for "
                    "windows platforms: %s", shell_args)
            elif isinstance(shell_args, string_types):
                args += shlex.split(
                    bytestring(replace_gnu_args(shell_args, **format_kwargs)))
            elif shell_args:
                args += [
                    bytestring(replace_gnu_args(arg, **format_kwargs))
                    for arg in shell_args
                ]
        elif format_kwargs.get('shell_args', False):
            logger.warn(
                "shell_args is defined but won't be used "
                "in this context: %s", format_kwargs['shell_args'])
        logger.debug("process args: %s", args)
        return args

    def returncode(self):
        # None while the child is still running.
        return self._worker.returncode

    @debuglog
    def poll(self):
        return self._worker.poll()

    @debuglog
    def is_alive(self):
        return self.poll() is None

    @debuglog
    def send_signal(self, sig):
        """Sends a signal **sig** to the process."""
        logger.debug("sending signal %s to %s" % (sig, self.pid))
        return self._worker.send_signal(sig)

    @debuglog
    def stop(self):
        """Stop the process and close stdout/stderr

        If the corresponding process is still here
        (normally it's already killed by the watcher),
        a SIGTERM is sent, then a SIGKILL after 1 second.

        The shutdown process (SIGTERM then SIGKILL) is
        normally taken by the watcher. So if the process
        is still there here, it's a kind of bad behavior
        because the graceful timeout won't be respected here.
        """
        try:
            try:
                if self._worker.poll() is None:
                    return self._worker.terminate()
            finally:
                # Always close the pipes, even if terminate() raises.
                if self._worker.stderr is not None:
                    self._worker.stderr.close()
                if self._worker.stdout is not None:
                    self._worker.stdout.close()
        except NoSuchProcess:
            pass

    def age(self):
        """Return the age of the process in seconds."""
        return time.time() - self.started

    def info(self):
        """Return process info.

        The info returned is a mapping with these keys:

        - **mem_info1**: Resident Set Size Memory in bytes (RSS)
        - **mem_info2**: Virtual Memory Size in bytes (VMS).
        - **cpu**: % of cpu usage.
        - **mem**: % of memory usage.
        - **ctime**: process CPU (user + system) time in seconds.
        - **pid**: process id.
        - **username**: user name that owns the process.
        - **nice**: process niceness (between -20 and 20)
        - **cmdline**: the command line the process was run with.
        """
        try:
            info = get_info(self._worker)
        except NoSuchProcess:
            return "No such process (stopped?)"
        info["age"] = self.age()
        info["started"] = self.started
        info["children"] = []
        info['wid'] = self.wid
        for child in self._worker.get_children():
            info["children"].append(get_info(child))
        return info

    def children(self):
        """Return a list of children pids."""
        return [child.pid for child in self._worker.get_children()]

    def is_child(self, pid):
        """Return True is the given *pid* is a child of that process."""
        pids = [child.pid for child in self._worker.get_children()]
        if pid in pids:
            return True
        return False

    @debuglog
    def send_signal_child(self, pid, signum):
        """Send signal *signum* to child *pid*."""
        children = dict(
            (child.pid, child) for child in self._worker.get_children())
        try:
            children[pid].send_signal(signum)
        except KeyError:
            raise NoSuchProcess(pid)

    @debuglog
    def send_signal_children(self, signum):
        """Send signal *signum* to all children."""
        for child in self._worker.get_children():
            try:
                child.send_signal(signum)
            except OSError as e:
                # ESRCH: the child already exited — ignore, raise otherwise.
                if e.errno != errno.ESRCH:
                    raise

    @property
    def status(self):
        """Return the process status as a constant

        - RUNNING
        - DEAD_OR_ZOMBIE
        - UNEXISTING
        - OTHER
        """
        try:
            if self._worker.status in (STATUS_ZOMBIE, STATUS_DEAD):
                return DEAD_OR_ZOMBIE
        except NoSuchProcess:
            return UNEXISTING
        if self._worker.is_running():
            return RUNNING
        return OTHER

    @property
    def pid(self):
        """Return the *pid*"""
        return self._worker.pid

    @property
    def stdout(self):
        """Return the *stdout* stream"""
        return self._worker.stdout

    @property
    def stderr(self):
        """Return the *stderr* stream"""
        return self._worker.stderr

    def __eq__(self, other):
        # Identity comparison: two wrappers are equal only if they are the
        # same object.
        return self is other

    def __lt__(self, other):
        return self.started < other.started

    def __gt__(self, other):
        return self.started > other.started
class FastDaemon(_PeriodicTaskDaemon):
    """
    Danube Cloud internal fast daemon - runs two threads for monitoring VM status changes.
    """
    label = 'FastDaemon'
    # Compute node UUID, resolved in start() via _set_node_uuid().
    node_uuid = None
    # Queue between the monitor thread (producer) and dispatcher (consumer).
    vm_status_queue = None
    # Popen handle of the external `sysevent` watcher process.
    vm_status_watcher = None
    vm_status_dispatcher_thread = None
    vm_status_monitor_thread = None
    # Command that emits zone status change events as JSON lines.
    SYSEVENT = ('sysevent', '-j', '-c', 'com.sun:zones:status', 'status')
    # Map of sysevent "newstate" values to our VM states; anything else is ignored.
    VM_STATUS = frozendict({'running': 'running', 'uninitialized': 'stopped'})

    def __init__(self, parent, **kwargs):
        hostname = parent.hostname
        self._conf = parent.app.conf
        # Only enabled on "fast" queue workers when switched on in settings.
        self.enabled = self._conf.ERIGONES_FAST_DAEMON_ENABLED and hostname.startswith(
            Q_FAST + '@')
        super(FastDaemon, self).__init__(parent, **kwargs)

        if self.enabled:
            self._periodic_tasks.append(self._vm_status_thread_check)

    def _vm_status_dispatcher(self):
        """THREAD: Reads VM status changes from queue and creates a vm_status_event_cb task for every status change"""
        from que.utils import task_id_from_string, send_task_forever  # Circular imports
        vm_status_task = self._conf.ERIGONES_VM_STATUS_TASK
        task_user = self._conf.ERIGONES_TASK_USER
        queue = self.vm_status_queue
        logger.info('Emitting VM status changes on node %s via %s', self.node_uuid, vm_status_task)

        while True:
            event = queue.get()  # blocks until the monitor thread produces one
            task_id = task_id_from_string(task_user)
            logger.info('Creating task %s for event: "%s"', task_id, event)
            # Create VM status task
            send_task_forever(self.label, vm_status_task, args=(event, task_id), queue=Q_MGMT, expires=None,
                              task_id=task_id)

    def _vm_status_monitor(self, sysevent_stdout):
        """THREAD: Reads line by line from sysevent process and puts relevant VM status changes into queue"""
        vm_status = self.VM_STATUS
        node_uuid = self.node_uuid
        queue = self.vm_status_queue
        logger.info('Monitoring VM status changes on node %s', node_uuid)

        for line in iter(sysevent_stdout.readline, ''):
            line = line.strip()
            try:
                event = json.loads(line)['data']
            except Exception as e:
                logger.critical('Could not parse (%s), sysevent line: "%s"', e, line)
                continue
            try:
                # Only states listed in VM_STATUS are interesting.
                state = vm_status[event.get('newstate')]
            except KeyError:
                logger.debug('Ignoring event "%s"', event)
                continue
            event['node_uuid'] = node_uuid
            event['state'] = state
            logger.info('Got new event: "%s"', event)
            queue.put(event)

    def _vm_status_thread_check(self):
        """Check if both vm_status threads are alive. Run periodically."""
        if not self._stopping:
            if self.vm_status_monitor_thread and not self.vm_status_monitor_thread.is_alive():
                err = 'VM status monitoring thread is not running - terminating %s!' % self.label
                logger.critical(err)
                raise SystemExit(err)
            if self.vm_status_dispatcher_thread and not self.vm_status_dispatcher_thread.is_alive():
                err = 'VM status dispatcher thread is not running - terminating %s!' % self.label
                logger.critical(err)
                raise SystemExit(err)

    def _set_node_uuid(self):
        """Fetch compute node's UUID"""
        from que.utils import fetch_node_uuid  # Circular imports
        from que.exceptions import NodeError

        try:
            self.node_uuid = fetch_node_uuid()
        except NodeError as exc:
            err = str(exc)
            logger.critical(err)
            raise SystemExit(err)

    def start(self, parent):
        """Spawn the sysevent watcher process plus the monitor and dispatcher
        daemon threads."""
        self._set_node_uuid()
        super(FastDaemon, self).start(parent)
        self.vm_status_queue = Queue()
        # bufsize=0 so event lines are delivered unbuffered; own session via setsid.
        self.vm_status_watcher = Popen(self.SYSEVENT, bufsize=0, close_fds=True, stdout=PIPE, stderr=STDOUT,
                                       preexec_fn=os.setsid)
        self.vm_status_monitor_thread = Thread(target=self._vm_status_monitor, name='VMStatusMonitor',
                                               args=(self.vm_status_watcher.stdout, ))
        self.vm_status_monitor_thread.daemon = True
        self.vm_status_monitor_thread.start()
        self.vm_status_dispatcher_thread = Thread(target=self._vm_status_dispatcher, name='VMStatusDispatcher')
        self.vm_status_dispatcher_thread.daemon = True
        self.vm_status_dispatcher_thread.start()

    def stop(self, parent):
        """Terminate the sysevent watcher (and reap it) on daemon shutdown."""
        super(FastDaemon, self).stop(parent)

        if self.vm_status_watcher:
            try:
                self.vm_status_watcher.terminate()
            except NoSuchProcess:
                pass
            else:
                self.vm_status_watcher.wait()
class Fly(object):
    """Wraps a single worker subprocess.

    - **wid**: worker id; replaces any ``$WID`` token in *cmd*.
    - **cmd**: command line to execute (split on whitespace).
    - **working_dir**: directory the command runs in.
    - **shell**: passed straight through to ``Popen(shell=...)``.
    - **uid** / **gid**: optional user/group (name or id) to run as.
    - **env**: optional environment mapping for the child.
    """

    def __init__(self, wid, cmd, working_dir, shell, uid=None, gid=None,
                 env=None):
        self.wid = wid
        self.working_dir = working_dir
        self.shell = shell
        self.env = env
        self.cmd = cmd.replace("$WID", str(self.wid))
        self.uid = to_uid(uid)
        self.gid = to_gid(gid)

        def preexec_fn():
            # Runs in the child between fork and exec: detach into a new
            # session, then drop privileges (gid first, then uid).
            os.setsid()
            if self.gid:
                try:
                    os.setgid(self.gid)
                except OverflowError:
                    if not ctypes:
                        raise
                    # versions of python < 2.6.2 don't manage unsigned int for
                    # groups like on osx or fedora
                    os.setgid(-ctypes.c_int(-self.gid).value)
            if self.uid:
                os.setuid(self.uid)

        self._worker = Popen(self.cmd.split(), cwd=self.working_dir,
                             shell=self.shell, preexec_fn=preexec_fn,
                             env=self.env, close_fds=True)
        self.started = time.time()

    def poll(self):
        """Return the worker's exit code, or None if it is still running."""
        return self._worker.poll()

    def send_signal(self, sig):
        """Send signal *sig* to the worker process."""
        return self._worker.send_signal(sig)

    def stop(self):
        """Terminate the worker if it is still alive."""
        if self._worker.poll() is None:
            return self._worker.terminate()

    def age(self):
        """Return the age of the worker in seconds."""
        return time.time() - self.started

    def info(self):
        """ return process info """
        info = _INFOLINE % get_info(self._worker)
        lines = ["%s: %s" % (self.wid, info)]

        for child in self._worker.get_children():
            info = _INFOLINE % get_info(child)
            lines.append(" %s" % info)

        return "\n".join(lines)

    def children(self):
        """Return a comma-separated string of the worker's children pids."""
        return ",".join(["%s" % child.pid
                         for child in self._worker.get_children()])

    def send_signal_child(self, pid, signum):
        """Send signal *signum* to the child with the given *pid*.

        Returns ``"ok"`` when the child was found and signalled, otherwise
        an error string.
        """
        # Bugfix: the previous implementation only collected the pids and then
        # signalled the (unbound / stale) comprehension variable ``child``
        # instead of the child that actually matched *pid*.
        for child in self._worker.get_children():
            if child.pid == pid:
                child.send_signal(signum)
                return "ok"
        return "error: child not found"

    def send_signal_children(self, signum):
        """Send signal *signum* to every child of the worker."""
        for child in self._worker.get_children():
            child.send_signal(signum)
        return "ok"

    @property
    def pid(self):
        """Return the worker's pid."""
        return self._worker.pid
class Process(object):
    """Wraps a process.

    Options:

    - **wid**: the process unique identifier. This value will be used to
      replace the *$WID* string in the command line if present.

    - **cmd**: the command to run. May contain any of the variables
      available that are being passed to this class. They will be
      replaced using the python format syntax.

    - **args**: the arguments for the command to run. Can be a list or
      a string. If **args** is a string, it's splitted using
      :func:`shlex.split`. Defaults to None.

    - **executable**: When executable is given, the first item in
      the args sequence obtained from **cmd** is still treated by most
      programs as the command name, which can then be different from the
      actual executable name. It becomes the display name for the
      executing program in utilities such as **ps**.

    - **working_dir**: the working directory to run the command in. If
      not provided, will default to the current working directory.

    - **shell**: if *True*, will run the command in the shell
      environment. *False* by default. **warning: this is a
      security hazard**.

    - **uid**: if given, is the user id or name the command should run
      with. The current uid is the default.

    - **gid**: if given, is the group id or name the command should run
      with. The current gid is the default.

    - **env**: a mapping containing the environment variables the command
      will run with. Optional.

    - **rlimits**: a mapping containing rlimit names and values that will
      be set before the command runs.

    - **use_fds**: if True, will not close the fds in the subprocess. Must
      be set to True on Windows if stdout or stderr are redirected.
      default: False.

    - **pipe_stdout**: if True, will open a PIPE on stdout. default: True.

    - **pipe_stderr**: if True, will open a PIPE on stderr. default: True.

    - **close_child_stdin**: If True, redirects the child process' stdin
      to /dev/null after the fork. default: True.

    - **close_child_stdout**: If True, redirects the child process' stdout
      to /dev/null after the fork. default: False.

    - **close_child_stderr**: If True, redirects the child process' stderr
      to /dev/null after the fork. default: False.
    """

    def __init__(self, name, wid, cmd, args=None, working_dir=None,
                 shell=False, uid=None, gid=None, env=None, rlimits=None,
                 executable=None, use_fds=False, watcher=None, spawn=True,
                 pipe_stdout=True, pipe_stderr=True, close_child_stdin=True,
                 close_child_stdout=False, close_child_stderr=False):
        self.name = name
        self.wid = wid
        self.cmd = cmd
        self.args = args
        self.working_dir = working_dir or get_working_dir()
        self.shell = shell
        if uid:
            self.uid = to_uid(uid)
            self.username = get_username_from_uid(self.uid)
        else:
            self.username = None
            self.uid = None
        self.gid = to_gid(gid) if gid else None
        self.env = env or {}
        self.rlimits = rlimits or {}
        self.executable = executable
        self.use_fds = use_fds
        self.watcher = watcher
        self.pipe_stdout = pipe_stdout
        self.pipe_stderr = pipe_stderr
        self.close_child_stdin = close_child_stdin
        self.close_child_stdout = close_child_stdout
        self.close_child_stderr = close_child_stderr
        self.stopping = False
        self._worker = None
        # NOTE(review): set but not read in this class - presumably used by a
        # stream redirector elsewhere; confirm before removing.
        self.redirected = False
        self.started = 0
        # When only a uid is given, run with that user's default group.
        if self.uid is not None and self.gid is None:
            self.gid = get_default_gid(self.uid)
        if IS_WINDOWS:
            if not self.use_fds and (self.pipe_stderr or self.pipe_stdout):
                raise ValueError("On Windows, you can't close the fds if "
                                 "you are redirecting stdout or stderr")
        if spawn:
            self.spawn()

    def _null_streams(self, streams):
        # Redirect each given stream to /dev/null (runs in the child).
        devnull = os.open(os.devnull, os.O_RDWR)
        try:
            for stream in streams:
                if not hasattr(stream, 'fileno'):
                    # we're probably dealing with a file-like
                    continue
                try:
                    stream.flush()
                    os.dup2(devnull, stream.fileno())
                except IOError:
                    # some streams, like stdin - might be already closed.
                    pass
        finally:
            os.close(devnull)

    def spawn(self):
        """Fork/exec the wrapped command, applying rlimits and uid/gid drop."""
        self.started = time.time()
        args = self.format_args()

        def preexec():
            # Runs in the child between fork and exec.
            streams = []
            if self.close_child_stdin:
                streams.append(sys.stdin)
            if self.close_child_stdout:
                streams.append(sys.stdout)
            if self.close_child_stderr:
                streams.append(sys.stderr)
            self._null_streams(streams)
            os.setsid()

            if resource:
                for limit, value in self.rlimits.items():
                    res = getattr(resource, 'RLIMIT_%s' % limit.upper(), None)
                    if res is None:
                        raise ValueError('unknown rlimit "%s"' % limit)
                    # TODO(petef): support hard/soft limits
                    # for the NOFILE limit, if we fail to set an unlimited
                    # value then check the existing hard limit because we
                    # probably can't bypass it due to a kernel limit - so just
                    # assume that the caller means they want to use the kernel
                    # limit when they pass the unlimited value. This is better
                    # than failing to start the process and forcing the caller
                    # to always be aware of what the kernel configuration is.
                    # If they do pass in a real limit value, then we'll just
                    # raise the failure as they should know that their
                    # expectations couldn't be met.
                    # TODO - we can't log here as this occurs in the child
                    # process after the fork but it would be very good to
                    # notify the admin of the situation somehow.
                    retry = False
                    try:
                        resource.setrlimit(res, (value, value))
                    except ValueError:
                        if res == resource.RLIMIT_NOFILE and \
                                value == resource.RLIM_INFINITY:
                            _soft, value = resource.getrlimit(res)
                            retry = True
                        else:
                            raise
                    if retry:
                        resource.setrlimit(res, (value, value))

            if self.gid:
                try:
                    os.setgid(self.gid)
                except OverflowError:
                    if not ctypes:
                        raise
                    # versions of python < 2.6.2 don't manage unsigned int for
                    # groups like on osx or fedora
                    os.setgid(-ctypes.c_int(-self.gid).value)

                if self.username is not None:
                    try:
                        os.initgroups(self.username, self.gid)
                    except (OSError, AttributeError):
                        # not supported on Mac or 2.6
                        pass

            if self.uid:
                os.setuid(self.uid)

        if IS_WINDOWS:
            # On Windows we can't use a pre-exec function
            preexec_fn = None
        else:
            preexec_fn = preexec

        extra = {}
        if self.pipe_stdout:
            extra['stdout'] = PIPE
        if self.pipe_stderr:
            extra['stderr'] = PIPE

        if isinstance(args, list):
            # Resolve relative executables against the working dir and strip
            # surrounding quotes left over from shell-style quoting.
            if args[0].startswith('./') or args[0].startswith('.\\'):
                args[0] = os.path.normpath(
                    os.path.join(self.working_dir, args[0]))
            args = [arg.strip('"') for arg in args]

        self._worker = Popen(args, cwd=self.working_dir, shell=self.shell,
                             preexec_fn=preexec_fn, env=self.env,
                             close_fds=not self.use_fds,
                             executable=self.executable, **extra)

    def format_args(self):
        """ It's possible to use environment variables and some other
        variables that are available in this context, when spawning the
        processes.
        """
        logger.debug('cmd: ' + to_string(self.cmd))
        logger.debug('args: ' + str(self.args))
        current_env = ObjectDict(self.env.copy())
        format_kwargs = {
            'wid': self.wid,
            'shell': self.shell,
            'args': self.args,
            'env': current_env,
            'working_dir': self.working_dir,
            'uid': self.uid,
            'gid': self.gid,
            'rlimits': self.rlimits,
            'executable': self.executable,
            'use_fds': self.use_fds
        }

        # Watcher options are also available as format variables.
        if self.watcher is not None:
            for option in self.watcher.optnames:
                if option not in format_kwargs\
                        and hasattr(self.watcher, option):
                    format_kwargs[option] = getattr(self.watcher, option)

        cmd = replace_gnu_args(self.cmd, **format_kwargs)

        if '$WID' in cmd or (self.args and '$WID' in self.args):
            msg = "Using $WID in the command is deprecated. You should use "\
                  "the python string format instead. In your case, this "\
                  "means replacing the $WID in your command by $(WID)."
            warnings.warn(msg, DeprecationWarning)
            self.cmd = cmd.replace('$WID', str(self.wid))

        if self.args is not None:
            if isinstance(self.args, basestring):
                args = shlex.split(
                    to_string(replace_gnu_args(self.args, **format_kwargs)))
            else:
                args = [
                    to_string(replace_gnu_args(arg, **format_kwargs))
                    for arg in self.args
                ]
            args = shlex.split(to_string(cmd), posix=not IS_WINDOWS) + args
        else:
            args = shlex.split(to_string(cmd), posix=not IS_WINDOWS)

        if self.shell:
            # subprocess.Popen(shell=True) implies that 1st arg is the
            # requested command, remaining args are applied to sh.
            args = [' '.join(quote(arg) for arg in args)]
            shell_args = format_kwargs.get('shell_args', None)
            if shell_args and IS_WINDOWS:
                logger.warn("shell_args won't apply for "
                            "windows platforms: {}".format(shell_args))
            elif isinstance(shell_args, basestring):
                args += shlex.split(
                    to_string(replace_gnu_args(shell_args, **format_kwargs)))
            elif shell_args:
                args += [
                    to_string(replace_gnu_args(arg, **format_kwargs))
                    for arg in shell_args
                ]
        elif format_kwargs.get('shell_args', False):
            logger.warn("shell_args is defined but won't be used "
                        "in this context: {}".format(
                            format_kwargs['shell_args']))
        logger.debug('Process args: {}'.format(args))
        return args

    def returncode(self):
        """Return the child's exit code as recorded by Popen."""
        return self._worker.returncode

    @debuglog
    def poll(self):
        return self._worker.poll()

    @debuglog
    def is_alive(self):
        return self.poll() is None

    @debuglog
    def send_signal(self, sig):
        """Sends a signal **sig** to the process."""
        logger.debug('sending signal {} to {}'.format(sig, self.pid))
        return self._worker.send_signal(sig)

    @debuglog
    def stop(self):
        """Stop the process and close stdout/stderr

        If the corresponding process is still here
        (normally it's already killed by the watcher),
        a SIGTERM is sent, then a SIGKILL after 1 second.

        The shutdown process (SIGTERM then SIGKILL) is
        normally taken by the watcher. So if the process
        is still there here, it's a kind of bad behavior
        because the graceful timeout won't be respected here.
        """
        try:
            try:
                if self.is_alive():
                    try:
                        return self._worker.terminate()
                    except AccessDenied:
                        # It can happen on Windows if the process
                        # dies after poll returns (unlikely)
                        pass
            finally:
                # Always close our ends of the pipes, even if terminate failed.
                self.close_output_channels()
        except NoSuchProcess:
            pass

    def close_output_channels(self):
        # Close our ends of the child's stdout/stderr pipes (if piped).
        if self._worker.stderr is not None:
            self._worker.stderr.close()
        if self._worker.stdout is not None:
            self._worker.stdout.close()

    def wait(self, timeout=None):
        """
        Wait for the process to terminate, in the fashion
        of waitpid.

        Accepts a timeout in seconds.
        """
        self._worker.wait(timeout)

    def age(self):
        """Return the age of the process in seconds."""
        return time.time() - self.started

    def info(self):
        """Return process info.

        The info returned is a mapping with these keys:

        - **mem_info1**: Resident Set Size Memory in bytes (RSS)
        - **mem_info2**: Virtual Memory Size in bytes (VMS).
        - **cpu**: % of cpu usage.
        - **mem**: % of memory usage.
        - **ctime**: process CPU (user + system) time in seconds.
        - **pid**: process id.
        - **username**: user name that owns the process.
        - **nice**: process niceness (between -20 and 20)
        - **cmdline**: the command line the process was run with.
        """
        try:
            info = get_info(self._worker)
        except NoSuchProcess:
            return "No such process (stopped?)"

        info["age"] = self.age()
        info["started"] = self.started
        info["children"] = []
        info['wid'] = self.wid
        for child in get_children(self._worker):
            info["children"].append(get_info(child))

        return info

    def children(self, recursive=False):
        """Return a list of children pids."""
        return [child.pid for child in get_children(self._worker, recursive)]

    def is_child(self, pid):
        """Return True if the given *pid* is a child of that process."""
        pids = [child.pid for child in get_children(self._worker)]
        if pid in pids:
            return True
        return False

    @debuglog
    def send_signal_child(self, pid, signum):
        """Send signal *signum* to child *pid*."""
        children = dict(
            (child.pid, child) for child in get_children(self._worker))
        try:
            children[pid].send_signal(signum)
        except KeyError:
            raise NoSuchProcess(pid)

    @debuglog
    def send_signal_children(self, signum, recursive=False):
        """Send signal *signum* to all children."""
        for child in get_children(self._worker, recursive):
            try:
                child.send_signal(signum)
            except OSError as e:
                # ESRCH: the child already exited - nothing to signal.
                if e.errno != errno.ESRCH:
                    raise

    @property
    def status(self):
        """Return the process status as a constant

        - RUNNING
        - DEAD_OR_ZOMBIE
        - UNEXISTING
        - OTHER
        """
        try:
            if get_status(self._worker) in (STATUS_ZOMBIE, STATUS_DEAD):
                return DEAD_OR_ZOMBIE
        except NoSuchProcess:
            return UNEXISTING

        if self._worker.is_running():
            return RUNNING
        return OTHER

    @property
    def pid(self):
        """Return the *pid*"""
        return self._worker.pid

    @property
    def stdout(self):
        """Return the *stdout* stream"""
        return self._worker.stdout

    @property
    def stderr(self):
        """Return the *stderr* stream"""
        return self._worker.stderr

    def __repr__(self):
        return '<Process ({0})> pid={1}, cmd={2}, args={3}'.format(
            self.name, self.pid, repr(self.cmd), repr(self.args))

    def __eq__(self, other):
        # Identity comparison: two Process objects are equal only if they are
        # the same object.
        return self is other

    def __lt__(self, other):
        return self.started < other.started

    def __gt__(self, other):
        return self.started > other.started
class BackgroundRunner:
    """Runs an encoder command in a background process, tailing its stdout and
    stderr log files into *log_queue* and flagging success/error markers."""

    def __init__(self, log_queue):
        self.process = None
        self.killed = False
        # Paths of the temp files the child's stdout/stderr are redirected to.
        self.output_file = None
        self.error_output_file = None
        self.log_queue = log_queue
        self.error_detected = False
        self.success_detected = False
        # Substrings searched for in stderr/stdout lines, set per start_exec().
        self.error_message = []
        self.success_message = []
        self.started_at = None

    def start_exec(self, command, work_dir: str = None, shell: bool = False, errors=(), successes=()):
        """Start *command* in *work_dir*, redirecting output to fresh log
        files, and spawn a thread that tails them (see read_output)."""
        self.clean()
        logger.debug(f"Using work dir: {work_dir}")
        work_path = Path(work_dir)
        work_path.mkdir(exist_ok=True, parents=True)
        # Random suffix so concurrent runs don't clobber each other's logs.
        self.output_file = work_path / f"encoder_output_{secrets.token_hex(6)}.log"
        self.error_output_file = work_path / f"encoder_error_output_{secrets.token_hex(6)}.log"
        logger.debug(f"command output file set to: {self.output_file}")
        logger.debug(
            f"command error output file set to: {self.error_output_file}")
        self.output_file.touch(exist_ok=True)
        self.error_output_file.touch(exist_ok=True)
        self.error_message = errors
        self.success_message = successes
        logger.info(f"Running command: {command}")
        try:
            # NOTE(review): the open() handles passed as stdout/stderr are
            # never explicitly closed by this class - confirm that is intended.
            self.process = Popen(
                shlex.split(command.replace("\\", "\\\\"))
                if not shell and isinstance(command, str) else command,
                shell=shell,
                cwd=work_dir,
                stdout=open(self.output_file, "w"),
                stderr=open(self.error_output_file, "w"),
                stdin=
                PIPE,  # FFmpeg can try to read stdin and wrecks havoc on linux
                encoding="utf-8",
            )
        except PermissionError:
            logger.error(
                "Could not encode video due to permissions error."
                "Please make sure encoder is executable and you have permissions to run it."
                "Otherwise try running FastFlix as an administrator.")
            self.error_detected = True
            return
        except Exception:
            logger.exception("Could not start worker process")
            self.error_detected = True
            return
        self.started_at = datetime.datetime.now(datetime.timezone.utc)
        Thread(target=self.read_output).start()

    def read_output(self):
        """THREAD: Tail the stdout/stderr log files until the process exits,
        forwarding lines to log_queue and scanning for success/error markers."""
        with open(self.output_file, "r", encoding="utf-8",
                  errors="ignore") as out_file, open(
                      self.error_output_file,
                      "r",
                      encoding="utf-8",
                      errors="ignore") as err_file:
            while True:
                if not self.is_alive():
                    # Process exited: flush whatever is left in both files.
                    excess = out_file.read()
                    logger.info(excess)
                    self.log_queue.put(excess)
                    err_excess = err_file.read()
                    logger.info(err_excess)
                    self.log_queue.put(err_excess)
                    if self.process.returncode is not None and self.process.returncode > 0:
                        self.error_detected = True
                    break
                line = out_file.readline().rstrip()
                if line:
                    logger.info(line)
                    self.log_queue.put(line)
                    if not self.success_detected:
                        for success in self.success_message:
                            if success in line:
                                self.success_detected = True
                err_line = err_file.readline().rstrip()
                if err_line:
                    logger.info(err_line)
                    self.log_queue.put(err_line)
                    # Hard-coded FFmpeg failure markers plus caller-supplied ones.
                    if "Conversion failed!" in err_line or "Error during output" in err_line:
                        self.error_detected = True
                    if not self.error_detected:
                        for error in self.error_message:
                            if error in err_line:
                                self.error_detected = True
        try:
            self.output_file.unlink()
            self.error_output_file.unlink()
        except OSError:
            pass

    def read(self, limit=None):
        # NOTE(review): start_exec() redirects stdout to a file, so
        # process.stdout is None here - verify this path is actually used.
        if not self.is_alive():
            return
        return self.process.stdout.read(limit)

    def is_alive(self):
        """Return True while the child process exists and has not exited."""
        if not self.process:
            return False
        return True if self.process.poll() is None else False

    def clean(self):
        """Reset all state (killing any running process) before a new run."""
        self.kill(log=False)
        self.process = None
        self.error_detected = False
        self.success_detected = False
        self.killed = False
        self.started_at = None

    def kill(self, log=True):
        """Terminate the child process (best effort) and mark it killed."""
        if self.process and self.process.poll() is None:
            if log:
                logger.info(f"Killing worker process {self.process.pid}")
            try:
                # if reusables.win_based:
                #     os.kill(self.process.pid, signal.CTRL_C_EVENT)
                # else:
                #     os.killpg(os.getpgid(self.process.pid), signal.SIGTERM)
                self.process.terminate()
                self.process.kill()
            except Exception as err:
                if log:
                    logger.exception(f"Couldn't terminate process: {err}")
        self.killed = True

    def pause(self):
        # presumably psutil.Popen, which provides suspend() - verify import
        if not self.process:
            return False
        self.process.suspend()

    def resume(self):
        if not self.process:
            return False
        self.process.resume()
class Process(object):
    """Wraps a process.

    Options:

    - **wid**: the process unique identifier. This value will be used to
      replace the *$WID* string in the command line if present.

    - **cmd**: the command to run. May contain any of the variables
      available that are being passed to this class. They will be
      replaced using the python format syntax.

    - **args**: the arguments for the command to run. Can be a list or
      a string. If **args** is a string, it's splitted using
      :func:`shlex.split`. Defaults to None.

    - **executable**: When executable is given, the first item in
      the args sequence obtained from **cmd** is still treated by most
      programs as the command name, which can then be different from the
      actual executable name. It becomes the display name for the
      executing program in utilities such as **ps**.

    - **working_dir**: the working directory to run the command in. If
      not provided, will default to the current working directory.

    - **shell**: if *True*, will run the command in the shell
      environment. *False* by default. **warning: this is a
      security hazard**.

    - **uid**: if given, is the user id or name the command should run
      with. The current uid is the default.

    - **gid**: if given, is the group id or name the command should run
      with. The current gid is the default.

    - **env**: a mapping containing the environment variables the command
      will run with. Optional.

    - **rlimits**: a mapping containing rlimit names and values that will
      be set before the command runs.

    - **use_fds**: if True, will not close the fds in the subprocess. Must
      be set to True on Windows if stdout or stderr are redirected.
      default: False.

    - **pipe_stdout**: if True, will open a PIPE on stdout. default: True.

    - **pipe_stderr**: if True, will open a PIPE on stderr. default: True.

    - **close_child_stdin**: If True, redirects the child process' stdin
      to /dev/null after the fork. default: True.

    - **close_child_stdout**: If True, redirects the child process' stdout
      to /dev/null after the fork. default: False.

    - **close_child_stderr**: If True, redirects the child process' stderr
      to /dev/null after the fork. default: False.
    """

    def __init__(self, name, wid, cmd, args=None, working_dir=None,
                 shell=False, uid=None, gid=None, env=None, rlimits=None,
                 executable=None, use_fds=False, watcher=None, spawn=True,
                 pipe_stdout=True, pipe_stderr=True, close_child_stdin=True,
                 close_child_stdout=False, close_child_stderr=False):
        self.name = name
        self.wid = wid
        self.cmd = cmd
        self.args = args
        self.working_dir = working_dir or get_working_dir()
        self.shell = shell
        if uid:
            self.uid = to_uid(uid)
            self.username = get_username_from_uid(self.uid)
        else:
            self.username = None
            self.uid = None
        self.gid = to_gid(gid) if gid else None
        self.env = env or {}
        self.rlimits = rlimits or {}
        self.executable = executable
        self.use_fds = use_fds
        self.watcher = watcher
        self.pipe_stdout = pipe_stdout
        self.pipe_stderr = pipe_stderr
        self.close_child_stdin = close_child_stdin
        self.close_child_stdout = close_child_stdout
        self.close_child_stderr = close_child_stderr
        self.stopping = False
        # sockets created before fork, should be let go after.
        self._sockets = []
        self._worker = None
        # NOTE(review): set but not read in this class - presumably used by a
        # stream redirector elsewhere; confirm before removing.
        self.redirected = False
        self.started = 0
        # When only a uid is given, run with that user's default group.
        if self.uid is not None and self.gid is None:
            self.gid = get_default_gid(self.uid)
        if IS_WINDOWS:
            if not self.use_fds and (self.pipe_stderr or self.pipe_stdout):
                raise ValueError("On Windows, you can't close the fds if "
                                 "you are redirecting stdout or stderr")
        if spawn:
            self.spawn()

    def _null_streams(self, streams):
        # Redirect each given stream to /dev/null (runs in the child).
        devnull = os.open(os.devnull, os.O_RDWR)
        try:
            for stream in streams:
                if not hasattr(stream, 'fileno'):
                    # we're probably dealing with a file-like
                    continue
                try:
                    stream.flush()
                    os.dup2(devnull, stream.fileno())
                except IOError:
                    # some streams, like stdin - might be already closed.
                    pass
        finally:
            os.close(devnull)

    def _get_sockets_fds(self):
        """Returns sockets dict. If this worker's cmd indicates use of a
        SO_REUSEPORT socket, a new socket is created and bound. This new
        socket's FD replaces original socket's FD in returned dict. This
        method populates `self._sockets` list. This list should be let go
        after `fork()`.
        """
        sockets_fds = None

        if self.watcher is not None and self.watcher.sockets is not None:
            sockets_fds = self.watcher._get_sockets_fds()
            reuseport_sockets = tuple((sn, s) for (sn, s)
                                      in self.watcher.sockets.items()
                                      if s.so_reuseport)

            for sn, s in reuseport_sockets:
                # watcher.cmd uses this reuseport socket
                if 'circus.sockets.%s' % sn in self.watcher.cmd:
                    sock = CircusSocket.load_from_config(s._cfg)
                    sock.bind_and_listen()
                    # replace original socket's fd
                    sockets_fds[sn] = sock.fileno()
                    # keep new socket until fork returns
                    self._sockets.append(sock)

        return sockets_fds

    def spawn(self):
        """Fork/exec the wrapped command, applying rlimits and uid/gid drop."""
        self.started = time.time()
        sockets_fds = self._get_sockets_fds()
        args = self.format_args(sockets_fds=sockets_fds)

        def preexec():
            # Runs in the child between fork and exec.
            streams = []
            if self.close_child_stdin:
                streams.append(sys.stdin)
            if self.close_child_stdout:
                streams.append(sys.stdout)
            if self.close_child_stderr:
                streams.append(sys.stderr)
            self._null_streams(streams)
            os.setsid()

            if resource:
                for limit, value in self.rlimits.items():
                    res = getattr(
                        resource, 'RLIMIT_%s' % limit.upper(), None
                    )
                    if res is None:
                        raise ValueError('unknown rlimit "%s"' % limit)
                    # TODO(petef): support hard/soft limits
                    # for the NOFILE limit, if we fail to set an unlimited
                    # value then check the existing hard limit because we
                    # probably can't bypass it due to a kernel limit - so just
                    # assume that the caller means they want to use the kernel
                    # limit when they pass the unlimited value. This is better
                    # than failing to start the process and forcing the caller
                    # to always be aware of what the kernel configuration is.
                    # If they do pass in a real limit value, then we'll just
                    # raise the failure as they should know that their
                    # expectations couldn't be met.
                    # TODO - we can't log here as this occurs in the child
                    # process after the fork but it would be very good to
                    # notify the admin of the situation somehow.
                    retry = False
                    try:
                        resource.setrlimit(res, (value, value))
                    except ValueError:
                        if res == resource.RLIMIT_NOFILE and \
                                value == resource.RLIM_INFINITY:
                            _soft, value = resource.getrlimit(res)
                            retry = True
                        else:
                            raise
                    if retry:
                        resource.setrlimit(res, (value, value))

            if self.gid:
                try:
                    os.setgid(self.gid)
                except OverflowError:
                    if not ctypes:
                        raise
                    # versions of python < 2.6.2 don't manage unsigned int for
                    # groups like on osx or fedora
                    os.setgid(-ctypes.c_int(-self.gid).value)

                if self.username is not None:
                    try:
                        os.initgroups(self.username, self.gid)
                    except (OSError, AttributeError):
                        # not supported on Mac or 2.6
                        pass

            if self.uid:
                os.setuid(self.uid)

        if IS_WINDOWS:
            # On Windows we can't use a pre-exec function
            preexec_fn = None
        else:
            preexec_fn = preexec

        extra = {}
        if self.pipe_stdout:
            extra['stdout'] = PIPE
        if self.pipe_stderr:
            extra['stderr'] = PIPE

        self._worker = Popen(args, cwd=self.working_dir,
                             shell=self.shell, preexec_fn=preexec_fn,
                             env=self.env, close_fds=not self.use_fds,
                             executable=self.executable, **extra)

        # let go of sockets created only for self._worker to inherit
        self._sockets = []

    def format_args(self, sockets_fds=None):
        """ It's possible to use environment variables and some other
        variables that are available in this context, when spawning the
        processes.
        """
        logger.debug('cmd: ' + bytestring(self.cmd))
        logger.debug('args: ' + str(self.args))
        current_env = ObjectDict(self.env.copy())
        format_kwargs = {
            'wid': self.wid, 'shell': self.shell, 'args': self.args,
            'env': current_env, 'working_dir': self.working_dir,
            'uid': self.uid, 'gid': self.gid, 'rlimits': self.rlimits,
            'executable': self.executable, 'use_fds': self.use_fds}

        if sockets_fds is not None:
            format_kwargs['sockets'] = sockets_fds

        # Watcher options are also available as format variables.
        if self.watcher is not None:
            for option in self.watcher.optnames:
                if option not in format_kwargs\
                        and hasattr(self.watcher, option):
                    format_kwargs[option] = getattr(self.watcher, option)

        cmd = replace_gnu_args(self.cmd, **format_kwargs)

        if '$WID' in cmd or (self.args and '$WID' in self.args):
            msg = "Using $WID in the command is deprecated. You should use "\
                  "the python string format instead. In your case, this "\
                  "means replacing the $WID in your command by $(WID)."
            warnings.warn(msg, DeprecationWarning)
            self.cmd = cmd.replace('$WID', str(self.wid))

        if self.args is not None:
            if isinstance(self.args, string_types):
                args = shlex.split(bytestring(replace_gnu_args(
                    self.args, **format_kwargs)))
            else:
                args = [bytestring(replace_gnu_args(arg, **format_kwargs))
                        for arg in self.args]
            args = shlex.split(bytestring(cmd), posix=not IS_WINDOWS) + args
        else:
            args = shlex.split(bytestring(cmd), posix=not IS_WINDOWS)

        if self.shell:
            # subprocess.Popen(shell=True) implies that 1st arg is the
            # requested command, remaining args are applied to sh.
            args = [' '.join(quote(arg) for arg in args)]
            shell_args = format_kwargs.get('shell_args', None)
            if shell_args and IS_WINDOWS:
                logger.warn("shell_args won't apply for "
                            "windows platforms: %s", shell_args)
            elif isinstance(shell_args, string_types):
                args += shlex.split(bytestring(replace_gnu_args(
                    shell_args, **format_kwargs)))
            elif shell_args:
                args += [bytestring(replace_gnu_args(arg, **format_kwargs))
                         for arg in shell_args]
        elif format_kwargs.get('shell_args', False):
            logger.warn("shell_args is defined but won't be used "
                        "in this context: %s", format_kwargs['shell_args'])
        logger.debug("process args: %s", args)
        return args

    def returncode(self):
        """Return the child's exit code as recorded by Popen."""
        return self._worker.returncode

    @debuglog
    def poll(self):
        return self._worker.poll()

    @debuglog
    def is_alive(self):
        return self.poll() is None

    @debuglog
    def send_signal(self, sig):
        """Sends a signal **sig** to the process."""
        logger.debug("sending signal %s to %s" % (sig, self.pid))
        return self._worker.send_signal(sig)

    @debuglog
    def stop(self):
        """Stop the process and close stdout/stderr

        If the corresponding process is still here
        (normally it's already killed by the watcher),
        a SIGTERM is sent, then a SIGKILL after 1 second.

        The shutdown process (SIGTERM then SIGKILL) is
        normally taken by the watcher. So if the process
        is still there here, it's a kind of bad behavior
        because the graceful timeout won't be respected here.
        """
        try:
            try:
                if self.is_alive():
                    try:
                        return self._worker.terminate()
                    except AccessDenied:
                        # It can happen on Windows if the process
                        # dies after poll returns (unlikely)
                        pass
            finally:
                # Always close our ends of the pipes, even if terminate failed.
                self.close_output_channels()
        except NoSuchProcess:
            pass

    def close_output_channels(self):
        # Close our ends of the child's stdout/stderr pipes (if piped).
        if self._worker.stderr is not None:
            self._worker.stderr.close()
        if self._worker.stdout is not None:
            self._worker.stdout.close()

    def wait(self, timeout=None):
        """
        Wait for the process to terminate, in the fashion
        of waitpid.

        Accepts a timeout in seconds.
        """
        self._worker.wait(timeout)

    def age(self):
        """Return the age of the process in seconds."""
        return time.time() - self.started

    def info(self):
        """Return process info.

        The info returned is a mapping with these keys:

        - **mem_info1**: Resident Set Size Memory in bytes (RSS)
        - **mem_info2**: Virtual Memory Size in bytes (VMS).
        - **cpu**: % of cpu usage.
        - **mem**: % of memory usage.
        - **ctime**: process CPU (user + system) time in seconds.
        - **pid**: process id.
        - **username**: user name that owns the process.
        - **nice**: process niceness (between -20 and 20)
        - **cmdline**: the command line the process was run with.
        """
        try:
            info = get_info(self._worker)
        except NoSuchProcess:
            return "No such process (stopped?)"

        info["age"] = self.age()
        info["started"] = self.started
        info["children"] = []
        info['wid'] = self.wid
        for child in get_children(self._worker):
            info["children"].append(get_info(child))

        return info

    def children(self):
        """Return a list of children pids."""
        return [child.pid for child in get_children(self._worker)]

    def is_child(self, pid):
        """Return True if the given *pid* is a child of that process."""
        pids = [child.pid for child in get_children(self._worker)]
        if pid in pids:
            return True
        return False

    @debuglog
    def send_signal_child(self, pid, signum):
        """Send signal *signum* to child *pid*."""
        children = dict((child.pid, child)
                        for child in get_children(self._worker))
        try:
            children[pid].send_signal(signum)
        except KeyError:
            raise NoSuchProcess(pid)

    @debuglog
    def send_signal_children(self, signum, recursive=False):
        """Send signal *signum* to all children."""
        for child in get_children(self._worker, recursive):
            try:
                child.send_signal(signum)
            except OSError as e:
                # ESRCH: the child already exited - nothing to signal.
                if e.errno != errno.ESRCH:
                    raise

    @property
    def status(self):
        """Return the process status as a constant

        - RUNNING
        - DEAD_OR_ZOMBIE
        - UNEXISTING
        - OTHER
        """
        try:
            if get_status(self._worker) in (STATUS_ZOMBIE, STATUS_DEAD):
                return DEAD_OR_ZOMBIE
        except NoSuchProcess:
            return UNEXISTING

        if self._worker.is_running():
            return RUNNING
        return OTHER

    @property
    def pid(self):
        """Return the *pid*"""
        return self._worker.pid

    @property
    def stdout(self):
        """Return the *stdout* stream"""
        return self._worker.stdout

    @property
    def stderr(self):
        """Return the *stderr* stream"""
        return self._worker.stderr

    def __eq__(self, other):
        # Identity comparison: two Process objects are equal only if they are
        # the same object.
        return self is other

    def __lt__(self, other):
        return self.started < other.started

    def __gt__(self, other):
        return self.started > other.started
def cmd(  # pylint:disable=too-many-branches,too-many-locals,too-many-statements
        command,
        user=None,
        input=None,  # pylint:disable=redefined-builtin
        cli_input=None,
        cli_output=False,
        communicate=True,
        timeout=None,
        fail=True,
        log=None,
        tries=1,
        delay_min=5,
        delay_max=10,
        **kwargs):
    """
    Calls the `command` and returns a dictionary with process, stdout, stderr,
    and the returncode.

    Returned returncode, stdout and stderr will be None if `communicate` is
    set to False.

    :param user: If set, this will use ``sudo -u <user> ...`` to execute
                 `command` as `user`.
    :type user: unicode
    :param input: If set, sent to stdin (only if `communicate` is True).
    :type input: unicode
    :param cli_input: If set, sent to stdin (unconditionally).
    :type cli_input: unicode
    :param cli_output: Set to True to output (in real-time) stdout to stdout
                       and stderr to stderr.
    :type cli_output: bool
    :param fail: Set to False to avoid the exception
                 `subprocess.CalledProcessError`.
    :type fail: bool
    :param log: A function to log/print details about what is executed/any
                failure, can be a logger.
    :type log: callable, logging.Logger
    :param communicate: Set to True to communicate with the process, this is
                        a locking call (if timeout is None).
    :type communicate: bool
    :param timeout: Time-out for the communication with the process, in
                    seconds.
    :type timeout: float
    :param tries: How many times you want the command to be retried ?
    :type tries: int
    :param delay_min: Minimum delay to sleep after every attempt
                      (communicate must be True).
    :type delay_min: float, int
    :param delay_max: Maximum delay to sleep after every attempt
                      (communicate must be True).
    :type delay_max: float, int

    * Delay will be a random number in range (`delay_min`, `delay_max`)
    * Set kwargs with any argument of the :mod:`subprocess`.Popen constructor
      excepting stdin, stdout and stderr.
    """
    # convert log argument to logging functions
    log_debug = log_warning = log_exception = None
    if isinstance(log, logging.Logger):
        log_debug, log_warning, log_exception = log.debug, log.warning, log.exception
    elif hasattr(log, '__call__'):
        # a plain callable is used for every severity level
        log_debug = log_warning = log_exception = log
    # create a list and a string of the arguments
    if isinstance(command, str):
        if user is not None:
            command = f'sudo -u {user} {command}'
        args_list, args_string = shlex.split(command), command
    else:
        if user is not None:
            command = ['sudo', '-u', user] + command
        # None items are silently dropped so callers can build commands with
        # optional pieces
        args_list = [str(a) for a in command if a is not None]
        args_string = ' '.join([str(a) for a in command if a is not None])
    # log the execution
    if log_debug:
        # FIXME simplify this
        log_debug('Execute {0}{1}{2}'.format(
            '' if input is None else 'echo {0}|'.format(repr(input)),
            args_string,
            '' if cli_input is None else ' < {0}'.format(repr(cli_input))))
    for trial in range(tries):  # noqa
        # create the sub-process
        try:
            process = Popen(args_list,
                            stdin=subprocess.PIPE,
                            stdout=None if cli_output else subprocess.PIPE,
                            stderr=None if cli_output else subprocess.PIPE,
                            **kwargs)
        except OSError as e:
            # unable to execute the program (e.g. does not exist)
            if log_exception:
                log_exception(e)
            if fail:
                raise
            return {
                'process': None,
                'stdout': '',
                'stderr': e,
                'returncode': 2
            }
        # write to stdin (answer to questions, ...)
        # NOTE(review): cli_input is written raw to a binary pipe — presumably
        # callers pass bytes (or universal_newlines is set via kwargs); confirm.
        if cli_input is not None:
            process.stdin.write(cli_input)
            process.stdin.flush()
        # interact with the process and wait for the process to terminate
        if communicate:
            data = {}
            # _communicate_with_timeout (defined elsewhere in this module) is
            # expected to populate data['stdout'] and data['stderr']
            thread = threading.Thread(target=_communicate_with_timeout,
                                      kwargs={
                                          'data': data,
                                          'input': input,
                                          'process': process
                                      })
            thread.start()
            thread.join(timeout=timeout)
            if thread.is_alive():
                # communication timed out: kill the process and reap the thread
                try:
                    process.terminate()
                    thread.join()
                except OSError as e:
                    # Manage race condition with process that may terminate
                    # just after the call to thread.is_alive() !
                    if e.errno != errno.ESRCH:
                        raise
            stdout, stderr = data['stdout'], data['stderr']
        else:
            # get a return code that may be None of course ...
            process.poll()
            stdout = stderr = None
        result = {
            'process': process,
            'stdout': stdout,
            'stderr': stderr,
            'returncode': process.returncode
        }
        # NOTE(review): with communicate=False the process may still be
        # running here (returncode is None), so this success test fails and a
        # live process falls through to the retry/raise logic below — confirm
        # this is the intended semantics for communicate=False callers.
        if process.returncode == 0:
            break
        # failed attempt, may retry
        do_retry = trial < tries - 1
        delay = random.uniform(delay_min, delay_max)
        if log_warning:
            # FIXME simplify this
            log_warning('Attempt {0} out of {1}: {2}'.format(
                trial + 1, tries,
                'Will retry in {0} seconds'.format(delay)
                if do_retry else 'Failed'))
        # raise if this is the last try
        if fail and not do_retry:
            raise subprocess.CalledProcessError(process.returncode,
                                                args_string, stderr)
        if do_retry:
            time.sleep(delay)
    return result
class Process(object):
    """Wraps a process.

    Options:

    - **wid**: the process unique identifier. This value will be used to
      replace the *$WID* string in the command line if present.

    - **cmd**: the command to run. May contain any of the variables available
      that are being passed to this class. They will be replaced using the
      python format syntax.

    - **args**: the arguments for the command to run. Can be a list or a
      string. If **args** is a string, it's splitted using
      :func:`shlex.split`. Defaults to None.

    - **executable**: When executable is given, the first item in the args
      sequence obtained from **cmd** is still treated by most programs as the
      command name, which can then be different from the actual executable
      name. It becomes the display name for the executing program in
      utilities such as **ps**.

    - **working_dir**: the working directory to run the command in. If not
      provided, will default to the current working directory.

    - **shell**: if *True*, will run the command in the shell environment.
      *False* by default. **warning: this is a security hazard**.

    - **uid**: if given, is the user id or name the command should run with.
      The current uid is the default.

    - **gid**: if given, is the group id or name the command should run with.
      The current gid is the default.

    - **env**: a mapping containing the environment variables the command
      will run with. Optional.

    - **rlimits**: a mapping containing rlimit names and values that will be
      set before the command runs.

    - **use_fds**: if True, will not close the fds in the subprocess.
      default: False.
    """

    def __init__(self, wid, cmd, args=None, working_dir=None, shell=False,
                 uid=None, gid=None, env=None, rlimits=None, executable=None,
                 use_fds=False, watcher=None, spawn=True):
        self.wid = wid
        self.cmd = cmd
        self.args = args
        self.working_dir = working_dir or get_working_dir()
        self.shell = shell
        # resolve names to numeric ids once, up-front
        self.uid = to_uid(uid) if uid else None
        self.gid = to_gid(gid) if gid else None
        self.env = env or {}
        self.rlimits = rlimits or {}
        self.executable = executable
        self.use_fds = use_fds
        self.watcher = watcher
        if spawn:
            self.spawn()

    def spawn(self):
        """Format the command line and fork/exec the worker process."""
        args = self.format_args()

        def preexec_fn():
            # runs in the child between fork and exec
            os.setsid()
            for limit, value in self.rlimits.items():
                res = getattr(resource, 'RLIMIT_%s' % limit.upper(), None)
                if res is None:
                    raise ValueError('unknown rlimit "%s"' % limit)
                # TODO(petef): support hard/soft limits
                resource.setrlimit(res, (value, value))
            if self.gid:
                try:
                    os.setgid(self.gid)
                except OverflowError:
                    if not ctypes:
                        raise
                    # versions of python < 2.6.2 don't manage unsigned int for
                    # groups like on osx or fedora
                    os.setgid(-ctypes.c_int(-self.gid).value)
            if self.uid:
                # drop privileges last, after the gid switch
                os.setuid(self.uid)

        self._worker = Popen(args, cwd=self.working_dir, shell=self.shell,
                             preexec_fn=preexec_fn, env=self.env,
                             close_fds=not self.use_fds, stdout=PIPE,
                             stderr=PIPE, executable=self.executable)
        self.started = time.time()

    def format_args(self):
        """ It's possible to use environment variables and some other
        variables that are available in this context, when spawning the
        processes.
        """
        logger.debug('cmd: ' + bytestring(self.cmd))
        logger.debug('args: ' + str(self.args))
        current_env = ObjectDict(self.env.copy())
        format_kwargs = {
            'wid': self.wid, 'shell': self.shell, 'args': self.args,
            'env': current_env, 'working_dir': self.working_dir,
            'uid': self.uid, 'gid': self.gid, 'rlimits': self.rlimits,
            'executable': self.executable, 'use_fds': self.use_fds}
        if self.watcher is not None:
            # expose the watcher's options to the format context as well
            for option in self.watcher.optnames:
                if option not in format_kwargs\
                        and hasattr(self.watcher, option):
                    format_kwargs[option] = getattr(self.watcher, option)
        cmd = replace_gnu_args(self.cmd, **format_kwargs)
        if '$WID' in cmd or (self.args and '$WID' in self.args):
            msg = "Using $WID in the command is deprecated. You should use "\
                  "the python string format instead. In you case, this means "\
                  "replacing the $WID in your command by $(WID)."
            warnings.warn(msg, DeprecationWarning)
            self.cmd = cmd.replace('$WID', str(self.wid))
        if self.args is not None:
            if isinstance(self.args, string_types):
                args = shlex.split(bytestring(replace_gnu_args(
                    self.args, **format_kwargs)))
            else:
                args = [bytestring(replace_gnu_args(arg, **format_kwargs))
                        for arg in self.args]
            args = shlex.split(bytestring(cmd)) + args
        else:
            args = shlex.split(bytestring(cmd))
        logger.debug("process args: %s", args)
        return args

    @debuglog
    def poll(self):
        return self._worker.poll()

    @debuglog
    def send_signal(self, sig):
        """Sends a signal **sig** to the process."""
        logger.debug("sending signal %s to %s" % (sig, self.pid))
        return self._worker.send_signal(sig)

    @debuglog
    def stop(self):
        """Terminate the process."""
        try:
            try:
                if self._worker.poll() is None:
                    return self._worker.terminate()
            finally:
                # always release the pipe fds, terminated or not
                self._worker.stderr.close()
                self._worker.stdout.close()
        except NoSuchProcess:
            pass

    def age(self):
        """Return the age of the process in seconds."""
        return time.time() - self.started

    def info(self):
        """Return process info.

        The info returned is a mapping with these keys:

        - **mem_info1**: Resident Set Size Memory in bytes (RSS)
        - **mem_info2**: Virtual Memory Size in bytes (VMS).
        - **cpu**: % of cpu usage.
        - **mem**: % of memory usage.
        - **ctime**: process CPU (user + system) time in seconds.
        - **pid**: process id.
        - **username**: user name that owns the process.
        - **nice**: process niceness (between -20 and 20)
        - **cmdline**: the command line the process was run with.

        Returns the string "No such process (stopped?)" instead of a mapping
        when the underlying process has already disappeared.
        """
        try:
            info = get_info(self._worker)
        except NoSuchProcess:
            return "No such process (stopped?)"
        info["age"] = self.age()
        info["started"] = self.started
        info["children"] = []
        for child in self._worker.get_children():
            info["children"].append(get_info(child))
        return info

    def children(self):
        """Return a list of children pids."""
        return [child.pid for child in self._worker.get_children()]

    def is_child(self, pid):
        """Return True is the given *pid* is a child of that process."""
        pids = [child.pid for child in self._worker.get_children()]
        if pid in pids:
            return True
        return False

    @debuglog
    def send_signal_child(self, pid, signum):
        """Send signal *signum* to child *pid*.

        Raises :class:`NoSuchProcess` when *pid* is not a current child.
        """
        children = dict([(child.pid, child)
                         for child in self._worker.get_children()])
        try:
            children[pid].send_signal(signum)
        except KeyError:
            # FIX: an unknown pid used to leak a bare KeyError to callers;
            # raise the process-level error they actually catch instead.
            raise NoSuchProcess(pid)

    @debuglog
    def send_signal_children(self, signum):
        """Send signal *signum* to all children."""
        for child in self._worker.get_children():
            try:
                child.send_signal(signum)
            except OSError as e:
                # ESRCH: the child died in between; benign race
                if e.errno != errno.ESRCH:
                    raise

    @property
    def status(self):
        """Return the process status as a constant

        - RUNNING
        - DEAD_OR_ZOMBIE
        - UNEXISTING
        - OTHER
        """
        try:
            if self._worker.status in (STATUS_ZOMBIE, STATUS_DEAD):
                return DEAD_OR_ZOMBIE
        except NoSuchProcess:
            return UNEXISTING
        if self._worker.is_running():
            return RUNNING
        return OTHER

    @property
    def pid(self):
        """Return the *pid*"""
        return self._worker.pid

    @property
    def stdout(self):
        """Return the *stdout* stream"""
        return self._worker.stdout

    @property
    def stderr(self):
        """Return the *stderr* stream"""
        return self._worker.stderr

    def __eq__(self, other):
        # identity semantics: each wrapper owns exactly one OS process
        return self is other

    def __lt__(self, other):
        return self.started < other.started

    def __gt__(self, other):
        return self.started > other.started
class Process(object):
    """Wraps a process.

    Options:

    - **wid**: the process unique identifier. This value will be used to
      replace the *$WID* string in the command line if present.

    - **cmd**: the command to run. May contain any of the variables available
      that are being passed to this class. They will be replaced using the
      python format syntax.

    - **args**: the arguments for the command to run. Can be a list or a
      string. If **args** is a string, it's splitted using
      :func:`shlex.split`. Defaults to None.

    - **executable**: When executable is given, the first item in the args
      sequence obtained from **cmd** is still treated by most programs as the
      command name, which can then be different from the actual executable
      name. It becomes the display name for the executing program in
      utilities such as **ps**.

    - **working_dir**: the working directory to run the command in. If not
      provided, will default to the current working directory.

    - **shell**: if *True*, will run the command in the shell environment.
      *False* by default. **warning: this is a security hazard**.

    - **uid**: if given, is the user id or name the command should run with.
      The current uid is the default.

    - **gid**: if given, is the group id or name the command should run with.
      The current gid is the default.

    - **env**: a mapping containing the environment variables the command
      will run with. Optional.

    - **rlimits**: a mapping containing rlimit names and values that will be
      set before the command runs.

    - **use_fds**: if True, will not close the fds in the subprocess.
      default: False.

    - **pipe_stdout**: if True, will open a PIPE on stdout. default: True.

    - **pipe_stderr**: if True, will open a PIPE on stderr. default: True.

    - **close_child_stdout**: If True, redirects the child process' stdout
      to /dev/null after the fork. default: False.

    - **close_child_stderr**: If True, redirects the child process' stderr
      to /dev/null after the fork. default: False.
    """

    def __init__(self, wid, cmd, args=None, working_dir=None, shell=False,
                 uid=None, gid=None, env=None, rlimits=None, executable=None,
                 use_fds=False, watcher=None, spawn=True,
                 pipe_stdout=True, pipe_stderr=True,
                 close_child_stdout=False, close_child_stderr=False):
        self.wid = wid
        self.cmd = cmd
        self.args = args
        self.working_dir = working_dir or get_working_dir()
        self.shell = shell
        # resolve names to numeric ids once, up-front
        self.uid = to_uid(uid) if uid else None
        self.gid = to_gid(gid) if gid else None
        self.env = env or {}
        self.rlimits = rlimits or {}
        self.executable = executable
        self.use_fds = use_fds
        self.watcher = watcher
        self.pipe_stdout = pipe_stdout
        self.pipe_stderr = pipe_stderr
        self.close_child_stdout = close_child_stdout
        self.close_child_stderr = close_child_stderr
        # flag read by external shutdown logic; never set True in this class
        self.stopping = False
        if spawn:
            self.spawn()

    def _null_streams(self, streams):
        """Redirect each of *streams* to /dev/null (runs in the child)."""
        devnull = os.open(os.devnull, os.O_RDWR)
        try:
            for stream in streams:
                if not hasattr(stream, 'fileno'):
                    # we're probably dealing with a file-like
                    continue
                try:
                    stream.flush()
                    os.dup2(devnull, stream.fileno())
                except IOError:
                    # some streams, like stdin - might be already closed.
                    pass
        finally:
            os.close(devnull)

    def spawn(self):
        """Format the command line and fork/exec the worker process."""
        args = self.format_args()

        def preexec_fn():
            # runs in the child between fork and exec
            streams = [sys.stdin]
            if self.close_child_stdout:
                streams.append(sys.stdout)
            if self.close_child_stderr:
                streams.append(sys.stderr)
            self._null_streams(streams)
            os.setsid()
            for limit, value in self.rlimits.items():
                res = getattr(resource, 'RLIMIT_%s' % limit.upper(), None)
                if res is None:
                    raise ValueError('unknown rlimit "%s"' % limit)
                # TODO(petef): support hard/soft limits
                resource.setrlimit(res, (value, value))
            if self.gid:
                try:
                    os.setgid(self.gid)
                except OverflowError:
                    if not ctypes:
                        raise
                    # versions of python < 2.6.2 don't manage unsigned int for
                    # groups like on osx or fedora
                    os.setgid(-ctypes.c_int(-self.gid).value)
            if self.uid:
                # drop privileges last, after the gid switch
                os.setuid(self.uid)

        # only open pipes the caller asked for
        extra = {}
        if self.pipe_stdout:
            extra['stdout'] = PIPE
        if self.pipe_stderr:
            extra['stderr'] = PIPE
        self._worker = Popen(args, cwd=self.working_dir, shell=self.shell,
                             preexec_fn=preexec_fn, env=self.env,
                             close_fds=not self.use_fds,
                             executable=self.executable, **extra)
        self.started = time.time()

    def format_args(self):
        """ It's possible to use environment variables and some other
        variables that are available in this context, when spawning the
        processes.
        """
        logger.debug('cmd: ' + bytestring(self.cmd))
        logger.debug('args: ' + str(self.args))
        current_env = ObjectDict(self.env.copy())
        format_kwargs = {
            'wid': self.wid, 'shell': self.shell, 'args': self.args,
            'env': current_env, 'working_dir': self.working_dir,
            'uid': self.uid, 'gid': self.gid, 'rlimits': self.rlimits,
            'executable': self.executable, 'use_fds': self.use_fds}
        if self.watcher is not None:
            # expose the watcher's socket fds and options to the format
            # context as well
            format_kwargs['sockets'] = self.watcher._get_sockets_fds()
            for option in self.watcher.optnames:
                if option not in format_kwargs\
                        and hasattr(self.watcher, option):
                    format_kwargs[option] = getattr(self.watcher, option)
        cmd = replace_gnu_args(self.cmd, **format_kwargs)
        if '$WID' in cmd or (self.args and '$WID' in self.args):
            msg = "Using $WID in the command is deprecated. You should use "\
                  "the python string format instead. In you case, this means "\
                  "replacing the $WID in your command by $(WID)."
            warnings.warn(msg, DeprecationWarning)
            self.cmd = cmd.replace('$WID', str(self.wid))
        if self.args is not None:
            if isinstance(self.args, string_types):
                args = shlex.split(bytestring(replace_gnu_args(
                    self.args, **format_kwargs)))
            else:
                args = [bytestring(replace_gnu_args(arg, **format_kwargs))
                        for arg in self.args]
            args = shlex.split(bytestring(cmd)) + args
        else:
            args = shlex.split(bytestring(cmd))
        logger.debug("process args: %s", args)
        return args

    @debuglog
    def poll(self):
        return self._worker.poll()

    @debuglog
    def is_alive(self):
        return self.poll() is None

    @debuglog
    def send_signal(self, sig):
        """Sends a signal **sig** to the process."""
        logger.debug("sending signal %s to %s" % (sig, self.pid))
        return self._worker.send_signal(sig)

    @debuglog
    def stop(self):
        """Stop the process and close stdout/stderr

        If the corresponding process is still here
        (normally it's already killed by the watcher),
        a SIGTERM is sent, then a SIGKILL after 1 second.

        The shutdown process (SIGTERM then SIGKILL) is
        normally taken by the watcher. So if the process
        is still there here, it's a kind of bad behavior
        because the graceful timeout won't be respected here.
        """
        try:
            try:
                if self._worker.poll() is None:
                    return self._worker.terminate()
            finally:
                # pipes may not exist when pipe_stdout/pipe_stderr are False
                if self._worker.stderr is not None:
                    self._worker.stderr.close()
                if self._worker.stdout is not None:
                    self._worker.stdout.close()
        except NoSuchProcess:
            pass

    def age(self):
        """Return the age of the process in seconds."""
        return time.time() - self.started

    def info(self):
        """Return process info.

        The info returned is a mapping with these keys:

        - **mem_info1**: Resident Set Size Memory in bytes (RSS)
        - **mem_info2**: Virtual Memory Size in bytes (VMS).
        - **cpu**: % of cpu usage.
        - **mem**: % of memory usage.
        - **ctime**: process CPU (user + system) time in seconds.
        - **pid**: process id.
        - **username**: user name that owns the process.
        - **nice**: process niceness (between -20 and 20)
        - **cmdline**: the command line the process was run with.

        Returns the string "No such process (stopped?)" instead of a mapping
        when the underlying process has already disappeared.
        """
        try:
            info = get_info(self._worker)
        except NoSuchProcess:
            return "No such process (stopped?)"
        info["age"] = self.age()
        info["started"] = self.started
        info["children"] = []
        for child in self._worker.get_children():
            info["children"].append(get_info(child))
        return info

    def children(self):
        """Return a list of children pids."""
        return [child.pid for child in self._worker.get_children()]

    def is_child(self, pid):
        """Return True is the given *pid* is a child of that process."""
        pids = [child.pid for child in self._worker.get_children()]
        if pid in pids:
            return True
        return False

    @debuglog
    def send_signal_child(self, pid, signum):
        """Send signal *signum* to child *pid*."""
        # NOTE(review): an unknown *pid* raises a bare KeyError here; the
        # sibling implementation converts it to NoSuchProcess(pid) — confirm
        # whether callers rely on one or the other.
        children = dict([(child.pid, child)
                         for child in self._worker.get_children()])
        children[pid].send_signal(signum)

    @debuglog
    def send_signal_children(self, signum):
        """Send signal *signum* to all children."""
        for child in self._worker.get_children():
            try:
                child.send_signal(signum)
            except OSError as e:
                # ESRCH: the child died in between; benign race
                if e.errno != errno.ESRCH:
                    raise

    @property
    def status(self):
        """Return the process status as a constant

        - RUNNING
        - DEAD_OR_ZOMBIE
        - UNEXISTING
        - OTHER
        """
        try:
            if self._worker.status in (STATUS_ZOMBIE, STATUS_DEAD):
                return DEAD_OR_ZOMBIE
        except NoSuchProcess:
            return UNEXISTING
        if self._worker.is_running():
            return RUNNING
        return OTHER

    @property
    def pid(self):
        """Return the *pid*"""
        return self._worker.pid

    @property
    def stdout(self):
        """Return the *stdout* stream"""
        return self._worker.stdout

    @property
    def stderr(self):
        """Return the *stderr* stream"""
        return self._worker.stderr

    def __eq__(self, other):
        # identity semantics: each wrapper owns exactly one OS process
        return self is other

    def __lt__(self, other):
        return self.started < other.started

    def __gt__(self, other):
        return self.started > other.started
# Read a precompiled version from a file with open("pr2_description/robots/pr2.urdf.xacro.txt", 'r') as f: import rospy data = f.read().replace('\n', '') rospy.set_param('/robot_description', data) del rospy # Read urdf using xacro # data = " ".join(os.popen("xacro pr2_description/robots/pr2.urdf.xacro").readlines()).replace('\n', '') # rospy.set_param('/robot_description', data) robot_state_publisher = Popen( shlex.split('rosrun robot_state_publisher robot_state_publisher')) #use_gui = Popen(shlex.split('rosparam set use_gui true')) #joint_state_publisher = Popen(shlex.split('rosrun joint_state_publisher joint_state_publisher')) tf2_web_republisher = Popen( shlex.split('rosrun tf2_web_republisher tf2_web_republisher')) rosbridge_server = Popen( shlex.split( 'roslaunch rosbridge_server rosbridge_websocket.launch port:=' + str(ws_port))) except Exception: print("Caught error:") traceback.print_exc() roscore.terminate() robot_state_publisher.terminate() #use_gui.terminate() #joint_state_publisher.terminate() tf2_web_republisher.terminate() rosbridge_server.terminate()