def testPipeline2(self):
    Banner('ls | cut -d . -f 1 | head')

    p = process.Pipeline()
    p.Add(_ExtProc(['ls']))
    p.Add(_ExtProc(['cut', '-d', '.', '-f', '1']))
    p.Add(_ExtProc(['head']))

    print(p.Run(_WAITER))

    ex = InitExecutor()

    # Simulating subshell for each command
    w1 = ast.CompoundWord()
    w1.parts.append(ast.LiteralPart(ast.token(Id.Lit_Chars, 'ls')))
    node1 = ast.SimpleCommand()
    node1.words = [w1]

    w2 = ast.CompoundWord()
    w2.parts.append(ast.LiteralPart(ast.token(Id.Lit_Chars, 'head')))
    node2 = ast.SimpleCommand()
    node2.words = [w2]

    w3 = ast.CompoundWord()
    w3.parts.append(ast.LiteralPart(ast.token(Id.Lit_Chars, 'sort')))
    w4 = ast.CompoundWord()
    w4.parts.append(ast.LiteralPart(ast.token(Id.Lit_Chars, '--reverse')))
    node3 = ast.SimpleCommand()
    node3.words = [w3, w4]

    p = process.Pipeline()
    p.Add(Process(process.SubProgramThunk(ex, node1)))
    p.Add(Process(process.SubProgramThunk(ex, node2)))
    p.Add(Process(process.SubProgramThunk(ex, node3)))

    print(p.Run(_WAITER))

def testPipeline2(self):
    arena = test_lib.MakeArena('testPipeline')
    ex = test_lib.InitExecutor(arena=arena)

    Banner('ls | cut -d . -f 1 | head')
    p = process.Pipeline()
    p.Add(_ExtProc(['ls']))
    p.Add(_ExtProc(['cut', '-d', '.', '-f', '1']))

    node = _CommandNode('head', arena)
    p.AddLast((ex, node))

    fd_state = process.FdState()
    print(p.Run(_WAITER, _FD_STATE))

    # Simulating subshell for each command
    node1 = _CommandNode('ls', arena)
    node2 = _CommandNode('head', arena)
    node3 = _CommandNode('sort --reverse', arena)

    p = process.Pipeline()
    p.Add(Process(process.SubProgramThunk(ex, node1)))
    p.Add(Process(process.SubProgramThunk(ex, node2)))
    p.Add(Process(process.SubProgramThunk(ex, node3)))

    last_thunk = (ex, _CommandNode('cat', arena))
    p.AddLast(last_thunk)

    print(p.Run(_WAITER, _FD_STATE))

def testPipeline2(self):
    cmd_ev = test_lib.InitCommandEvaluator(arena=self.arena,
                                           ext_prog=self.ext_prog)

    Banner('ls | cut -d . -f 1 | head')
    p = process.Pipeline()
    p.Add(self._ExtProc(['ls']))
    p.Add(self._ExtProc(['cut', '-d', '.', '-f', '1']))

    node = _CommandNode('head', self.arena)
    p.AddLast((cmd_ev, node))

    print(p.Run(self.waiter, self.fd_state))

    # Simulating subshell for each command
    node1 = _CommandNode('ls', self.arena)
    node2 = _CommandNode('head', self.arena)
    node3 = _CommandNode('sort --reverse', self.arena)

    p = process.Pipeline()
    p.Add(
        Process(process.SubProgramThunk(cmd_ev, node1), self.job_state,
                self.tracer))
    p.Add(
        Process(process.SubProgramThunk(cmd_ev, node2), self.job_state,
                self.tracer))
    p.Add(
        Process(process.SubProgramThunk(cmd_ev, node3), self.job_state,
                self.tracer))

    last_thunk = (cmd_ev, _CommandNode('cat', self.arena))
    p.AddLast(last_thunk)

    print(p.Run(self.waiter, self.fd_state))

def testPipeline2(self):
    cmd_ev = test_lib.InitCommandEvaluator(arena=_ARENA, ext_prog=_EXT_PROG)

    Banner('ls | cut -d . -f 1 | head')
    p = process.Pipeline()
    p.Add(_ExtProc(['ls']))
    p.Add(_ExtProc(['cut', '-d', '.', '-f', '1']))

    node = _CommandNode('head', _ARENA)
    p.AddLast((cmd_ev, node))

    fd_state = process.FdState(_ERRFMT, _JOB_STATE)
    print(p.Run(_WAITER, _FD_STATE))

    # Simulating subshell for each command
    node1 = _CommandNode('ls', _ARENA)
    node2 = _CommandNode('head', _ARENA)
    node3 = _CommandNode('sort --reverse', _ARENA)

    p = process.Pipeline()
    p.Add(Process(process.SubProgramThunk(cmd_ev, node1), _JOB_STATE))
    p.Add(Process(process.SubProgramThunk(cmd_ev, node2), _JOB_STATE))
    p.Add(Process(process.SubProgramThunk(cmd_ev, node3), _JOB_STATE))

    last_thunk = (cmd_ev, _CommandNode('cat', _ARENA))
    p.AddLast(last_thunk)

    print(p.Run(_WAITER, _FD_STATE))

def _RunPipeline(self, node):
    # TODO: Also check for "echo" and "read".  Turn them into
    # HereDocRedirect() and p.CaptureOutput()

    # NOTE: First or last one can use the "main" shell thread.  Doesn't have
    # to run in subshell.  Although I guess it's simpler if it always does.
    pi = process.Pipeline()

    for child in node.children:
        p = self._GetProcessForNode(child)
        pi.Add(p)
    #print(pi)

    # TODO: Set PipeStatus() in self.mem
    pipe_status = pi.Run()
    #log('pipe_status %s', pipe_status)

    if self.exec_opts.pipefail:
        # If any process failed, the status of the entire pipeline is 1.
        status = 0
        for st in pipe_status:
            if st != 0:
                status = 1
    else:
        status = pipe_status[-1]  # last one determines status

    if node.negated:
        if status == 0:
            return 1
        else:
            return 0

    return status

def _RunPipeline(self, node):
    pi = process.Pipeline()

    # First n-1 processes (which is empty when n == 1)
    n = len(node.children)
    for i in xrange(n - 1):
        p = self._MakeProcess(node.children[i])
        pi.Add(p)

    # Last piece of code is in THIS PROCESS.  'echo foo | read line; echo $line'
    pi.AddLast((self, node.children[n - 1]))

    pipe_status = pi.Run(self.waiter, self.fd_state)
    state.SetGlobalArray(self.mem, 'PIPESTATUS', [str(p) for p in pipe_status])

    if self.exec_opts.pipefail:
        # The status is that of the last command that is non-zero.
        status = 0
        for st in pipe_status:
            if st != 0:
                status = st
    else:
        status = pipe_status[-1]  # status of last one is pipeline status

    return status

def RunPipeline(self, node):
    # type: (command__Pipeline) -> int
    pi = process.Pipeline()

    # First n-1 processes (which is empty when n == 1)
    n = len(node.children)
    for i in xrange(n - 1):
        p = self._MakeProcess(node.children[i], parent_pipeline=pi)
        pi.Add(p)

    # Last piece of code is in THIS PROCESS.  'echo foo | read line; echo $line'
    pi.AddLast((self.cmd_ev, node.children[n - 1]))

    pipe_status = pi.Run(self.waiter, self.fd_state)
    self.mem.SetPipeStatus(pipe_status)

    if self.exec_opts.pipefail():
        # The status is that of the last command that is non-zero.
        status = 0
        for st in pipe_status:
            if st != 0:
                status = st
    else:
        status = pipe_status[-1]  # status of last one is pipeline status

    return status

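# The RunPipeline variants above derive the pipeline's exit status from the
# per-process statuses.  Below is a minimal standalone sketch of that rule in
# plain Python, not Oil's actual API; the helper name is made up for
# illustration.
def PipelineStatus(pipe_status, pipefail):
    # type: (list, bool) -> int
    """Exit status of a pipeline, given each child's exit status."""
    if pipefail:
        # With pipefail, the result is the status of the LAST command that
        # exited non-zero, or 0 if every command succeeded.
        status = 0
        for st in pipe_status:
            if st != 0:
                status = st
        return status
    # Without pipefail, only the last command's status matters.
    return pipe_status[-1]


# Example: 'false | true' is 0 by default, but 1 under 'set -o pipefail'.
assert PipelineStatus([1, 0], pipefail=False) == 0
assert PipelineStatus([1, 0], pipefail=True) == 1
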
def _RunJobInBackground(self, node):
    # Special case for pipeline.  There is some evidence here:
    # https://www.gnu.org/software/libc/manual/html_node/Launching-Jobs.html#Launching-Jobs
    #
    # "You can either make all the processes in the process group be children
    # of the shell process, or you can make one process in group be the
    # ancestor of all the other processes in that group. The sample shell
    # program presented in this chapter uses the first approach because it
    # makes bookkeeping somewhat simpler."
    if node.tag == command_e.Pipeline:
        pi = process.Pipeline()
        for child in node.children:
            pi.Add(self._MakeProcess(child, job_state=self.job_state))

        job_id = pi.StartInBackground(self.waiter, self.job_state)

        self.mem.last_job_id = job_id  # for $!
        self.job_state.Register(job_id, pi)
        log('Started background pipeline with job ID %d', job_id)

    else:
        # Problem: to get the 'set -b' behavior of immediate notifications, we
        # have to register SIGCHLD.  But then that introduces race conditions.
        # If we haven't called Register yet, then we won't know who to notify.
        #log('job state %s', self.job_state)
        p = self._MakeProcess(node, job_state=self.job_state)
        pid = p.Start()
        self.mem.last_job_id = pid  # for $!
        self.job_state.Register(pid, p)
        self.waiter.Register(pid, p.WhenDone)
        log('Started background job with pid %d', pid)

    return 0

def _MakePipeline(self, node, job_state=None):
    # NOTE: First or last one could use the "main" shell thread.  Doesn't have
    # to run in subshell.  Although I guess it's simpler if it always does.
    # I think bash has an option to control this?  echo hi | read x; should
    # test it.
    pi = process.Pipeline(job_state=job_state)

    for child in node.children:
        p = self._MakeProcess(child)  # NOTE: evaluates, does errexit guard
        pi.Add(p)

    return pi

def testPipeline(self):
    print('BEFORE', os.listdir('/dev/fd'))

    p = process.Pipeline()
    p.Add(Process(process.ExternalThunk(['ls'])))
    p.Add(Process(process.ExternalThunk(['cut', '-d', '.', '-f', '2'])))
    p.Add(Process(process.ExternalThunk(['sort'])))
    p.Add(Process(process.ExternalThunk(['uniq', '-c'])))

    p.Run()

    print('AFTER', os.listdir('/dev/fd'))

def testPipeline(self):
    print('BEFORE', os.listdir('/dev/fd'))

    p = process.Pipeline()
    p.Add(_ExtProc(['ls']))
    p.Add(_ExtProc(['cut', '-d', '.', '-f', '2']))
    p.Add(_ExtProc(['sort']))
    p.Add(_ExtProc(['uniq', '-c']))

    pipe_status = p.Run(_WAITER)
    log('pipe_status: %s', pipe_status)

    print('AFTER', os.listdir('/dev/fd'))

def testPipeline(self):
    node = _CommandNode('uniq -c', _ARENA)
    cmd_ev = test_lib.InitCommandEvaluator(arena=_ARENA, ext_prog=_EXT_PROG)

    print('BEFORE', os.listdir('/dev/fd'))

    p = process.Pipeline()
    p.Add(_ExtProc(['ls']))
    p.Add(_ExtProc(['cut', '-d', '.', '-f', '2']))
    p.Add(_ExtProc(['sort']))
    p.AddLast((cmd_ev, node))

    pipe_status = p.Run(_WAITER, _FD_STATE)
    log('pipe_status: %s', pipe_status)

    print('AFTER', os.listdir('/dev/fd'))

def testPipeline(self):
    node = _CommandNode('uniq -c', self.arena)
    cmd_ev = test_lib.InitCommandEvaluator(arena=self.arena,
                                           ext_prog=self.ext_prog)

    print('BEFORE', os.listdir('/dev/fd'))

    p = process.Pipeline()
    p.Add(self._ExtProc(['ls']))
    p.Add(self._ExtProc(['cut', '-d', '.', '-f', '2']))
    p.Add(self._ExtProc(['sort']))
    p.AddLast((cmd_ev, node))

    pipe_status = p.Run(self.waiter, self.fd_state)
    log('pipe_status: %s', pipe_status)

    print('AFTER', os.listdir('/dev/fd'))

def __init__(self, images):
    self.hbox = Gtk.Box(Gtk.Orientation.HORIZONTAL)

    self.menu_revealer = self._build_revealer()

    self.video_monitor = Gtk.DrawingArea()
    self.video_monitor.set_margin_left(6)
    self.video_monitor.set_margin_right(6)
    self.video_monitor.set_margin_bottom(6)
    self.video_monitor.set_halign(Gtk.Align.FILL)
    self.video_monitor.set_valign(Gtk.Align.FILL)
    self.video_monitor.set_size_request(700, 400)

    self.placeholder_pipeline = process.PlaceholderPipeline()
    self.placeholder_bus = self.create_gstreamer_bus(
        self.placeholder_pipeline.pipeline)
    self.pipeline = process.Pipeline()
    self.bus = self.create_gstreamer_bus(self.pipeline.pipeline)
    self.xid = None

    self.video_menu = menus.VideoMenu(self.pipeline, self.menu_revealer,
                                      self.placeholder_pipeline)
    self.audio_menu = menus.AudioMenu(self.pipeline, self.menu_revealer,
                                      self.placeholder_pipeline)
    self.stream_menu = menus.StreamMenu(self.pipeline, self.menu_revealer)
    self.store_menu = menus.StoreMenu(self.pipeline, self.menu_revealer)
    self.settings_menu = menus.SettingsMenu(self.pipeline, self.menu_revealer)

    self.images = images
    self.controls = ControlBar(self.pipeline, self.menu_revealer, self.images,
                               self.video_menu, self.audio_menu,
                               self.stream_menu, self.store_menu,
                               self.settings_menu, self.placeholder_pipeline)
    self.controls.overlay_container.add(self.video_monitor)
    self.controls.display_controls()

    self.audio_level_display = audio_displays.AudioLevelDisplay(
        Gtk.DrawingArea())
    self.audio_level_box = self._build_audio_level_box()
    self.controls.overlay_container.add_overlay(self.audio_level_box)

    self.hbox.pack_start(self.controls.overlay_container, True, True, 0)
    self.hbox.pack_start(self.menu_revealer, False, False, 0)

def testPipeline(self):
    arena = test_lib.MakeArena('testPipeline')
    node = _CommandNode('uniq -c', arena)
    ex = test_lib.InitExecutor(arena=arena)

    print('BEFORE', os.listdir('/dev/fd'))

    p = process.Pipeline()
    p.Add(_ExtProc(['ls']))
    p.Add(_ExtProc(['cut', '-d', '.', '-f', '2']))
    p.Add(_ExtProc(['sort']))
    p.AddLast((ex, node))

    pipe_status = p.Run(_WAITER, _FD_STATE)
    log('pipe_status: %s', pipe_status)

    print('AFTER', os.listdir('/dev/fd'))

def RunBackgroundJob(self, node):
    # type: (command_t) -> int
    """For & etc."""
    # Special case for pipeline.  There is some evidence here:
    # https://www.gnu.org/software/libc/manual/html_node/Launching-Jobs.html#Launching-Jobs
    #
    # "You can either make all the processes in the process group be children
    # of the shell process, or you can make one process in group be the
    # ancestor of all the other processes in that group. The sample shell
    # program presented in this chapter uses the first approach because it
    # makes bookkeeping somewhat simpler."
    UP_node = node

    if UP_node.tag_() == command_e.Pipeline:
        node = cast(command__Pipeline, UP_node)
        pi = process.Pipeline()
        for child in node.children:
            p = self._MakeProcess(child)
            p.Init_ParentPipeline(pi)
            pi.Add(p)

        pi.Start(self.waiter)
        last_pid = pi.LastPid()
        self.mem.last_bg_pid = last_pid  # for $!

        job_id = self.job_state.AddJob(pi)  # show in 'jobs' list
        # TODO: Put in tracer
        #log('[%%%d] Started Pipeline with PID %d', job_id, last_pid)

    else:
        # Problem: to get the 'set -b' behavior of immediate notifications, we
        # have to register SIGCHLD.  But then that introduces race conditions.
        # If we haven't called Register yet, then we won't know who to notify.
        #log('job state %s', self.job_state)
        p = self._MakeProcess(node)
        pid = p.Start(trace.Fork())
        self.mem.last_bg_pid = pid  # for $!

        job_id = self.job_state.AddJob(p)  # show in 'jobs' list
        # TODO: Put in tracer
        #log('[%%%d] Started PID %d', job_id, pid)

    return 0

def RunPipeline(self, node, status_out):
    # type: (command__Pipeline, CompoundStatus) -> None
    pi = process.Pipeline()

    # First n-1 processes (which is empty when n == 1)
    n = len(node.children)
    for i in xrange(n - 1):
        child = node.children[i]

        # TODO: maybe determine these at parse time?
        status_out.spids.append(location.SpanForCommand(child))

        p = self._MakeProcess(child)
        p.Init_ParentPipeline(pi)
        pi.Add(p)

    last_child = node.children[n - 1]
    # Last piece of code is in THIS PROCESS.  'echo foo | read line; echo $line'
    pi.AddLast((self.cmd_ev, last_child))
    status_out.spids.append(location.SpanForCommand(last_child))

    status_out.codes = pi.Run(self.waiter, self.fd_state)