def test_check_output_keyword_error(self): try: subprocess.check_output([sys.executable, '-c', 'import sys; sys.exit(44)']) except subprocess.CalledProcessError as e: self.assertEqual(e.returncode, 44) else: raise AssertionError('must fail with CalledProcessError')
def run(name):
    """Convert ``<LASDIR>/<name>.las`` into a tileset under ``<TILEDIR>/<name>``.

    :param name: basename of the .las file (without extension).
    :raises subprocess.CalledProcessError: if the converter exits non-zero.
    """
    cmd = [
        POTREECONVERTOR,
        '--overwrite',
        # os.path.join builds the paths portably instead of hand-gluing
        # os.path.sep into format strings (also avoids doubled separators).
        '--outdir', os.path.join(TILEDIR, name),
        os.path.join(LASDIR, '{0}.las'.format(name)),
    ]
    subprocess.check_output(cmd)
def cmd_camera_stream_start_stop(aConfig, start_stop):
    """Start or stop the uv4l camera stream via its janus endpoint.

    :param aConfig: global configuration dict (expects gConfig/wsgi/applications).
    :param start_stop: 'start' or 'stop' (any case).

    No-op unless the active application configures both 'uv4l' and 'janus'.
    """
    if start_stop.lower() == 'start':
        start_stop = 'Start'
    if start_stop.lower() == 'stop':
        start_stop = 'Stop'
    app = aConfig['gConfig']['wsgi']['application']
    app_conf = aConfig['gConfig']['applications'][app]
    if 'uv4l' in app_conf and 'janus' in app_conf:
        uv4l = app_conf['uv4l']
        janus = app_conf['janus']
        url = ''
        if start_stop == 'Start':
            url = '%s://%s:%s/janus?gateway_url=%s://%s:%s&gateway_root=%s&room=%s&room_pin=%s&username=%s&reconnect=%s&action=%s' % \
                (uv4l['protocol'], uv4l['host'], uv4l['port'],
                 janus['protocol'], janus['host'], janus['port'],
                 janus['base_path'], janus['room'], janus['room_pin'],
                 janus['username'], janus['reconnect'], start_stop)
        elif start_stop == 'Stop':
            url = '%s://%s:%s/janus?action=%s' % \
                (uv4l['protocol'], uv4l['host'], uv4l['port'], start_stop)
        if url:
            print(url)
            # BUG FIX: '>/dev/null' was passed as a literal argv element; with
            # no shell involved, curl treats it as a second URL and fails.
            # check_output already captures stdout, so no redirect is needed.
            check_output(['curl', '-s', url])
def test_check_output_keyword_error(self): try: subprocess.check_output([sys.executable, '-c', 'import sys; sys.exit(44)']) except subprocess.CalledProcessError as e: self.assertEqual(e.returncode, 44) else: raise AssertionError('must fail with CalledProcessError')
def run_simple(tender_file_path):
    """Plan a 'simple' auction for the tender described in *tender_file_path*."""
    with open(tender_file_path) as _file:
        tender = json.load(_file)
    auction_id = tender.get('data', {}).get('id')
    if not auction_id:
        return
    with update_auctionPeriod(tender_file_path,
                              auction_type='simple') as auction_file:
        worker_cmd = TESTS['simple']['worker_cmd'].format(
            CWD, auction_id, auction_file)
        check_output(worker_cmd.split())
    # Give the worker time to schedule the auction.
    sleep(30)
def run_dutch(tender_file_path, auction_id):
    """Plan a 'dutch' esco auction using the worker binary under CWD."""
    with update_auctionPeriod(tender_file_path,
                              auction_type='dutch') as auction_file:
        command = (
            '{0}/bin/auction_esco planning {1}'
            ' {0}/etc/auction_worker_dutch.yaml --planning_procerude partial_db --auction_info {2}'
        ).format(CWD, auction_id, auction_file)
        check_output(command.split())
    # Give the worker time to schedule the auction.
    sleep(30)
def run_multilot(worker_cmd, tender_file_path, auction_id):
    """Plan a multilot auction using the first lot id from the tender file."""
    with open(tender_file_path) as _file:
        data = json.load(_file).get('data', {})
    first_lot = data.get('lots', [])[0]
    lot_id = first_lot.get('id')
    with update_auctionPeriod(tender_file_path,
                              auction_type='multilot') as auction_file:
        command = worker_cmd.format(CWD, auction_id, auction_file, lot_id)
        check_output(command.split())
    sleep(10)
def switch_video(self, onoff):
    """Toggle the x11vnc framebuffer on display :1 ('nofb' disables it)."""
    xenvs = {'DISPLAY': ':1'}
    remote_cmd = 'nofb' if onoff else 'fb'
    try:
        gsp.check_output(['x11vnc', '-remote', remote_cmd], env=xenvs)
    except gsp.CalledProcessError as exc:
        log.warn('failed to set x11vnc fb: ' + str(exc))
def switch_video(self, onoff):
    """Enable (onoff falsy) or disable (onoff truthy) the x11vnc framebuffer."""
    try:
        gsp.check_output(
            ['x11vnc', '-remote', 'nofb' if onoff else 'fb'],
            env={'DISPLAY': ':1'})
    except gsp.CalledProcessError as err:
        log.warn('failed to set x11vnc fb: ' + str(err))
def run_dutch(tender_file_path):
    """Plan a 'dutch' insider auction for the tender in *tender_file_path*."""
    with open(tender_file_path) as _file:
        # NOTE(review): .get('data') without a default raises AttributeError
        # when 'data' is absent -- presumably the fixture always carries it.
        auction_id = json.load(_file).get('data').get('id')
    with update_auctionPeriod(tender_file_path,
                              auction_type='dutch') as auction_file:
        command = (
            '{0}/bin/auction_insider planning {1}'
            ' {0}/etc/auction_worker_insider.yaml --planning_procerude partial_db --auction_info {2}'
        ).format(CWD, auction_id, auction_file)
        check_output(command.split())
    sleep(30)
def test_admin_setSolc(web3, skip_if_testrpc):
    """admin_setSolc should echo back both the solc path and its version string."""
    skip_if_testrpc(web3)
    try:
        solc_path = subprocess.check_output(['which', 'solc']).strip()
    except subprocess.CalledProcessError:
        pytest.skip('solc binary not found')
    solc_version = subprocess.check_output(['solc', '--version']).strip()
    result = web3.admin.setSolc(solc_path)
    # The node's response must mention both version and path.
    assert force_text(solc_version) in result
    assert force_text(solc_path) in result
def createinitialrules():
    """Install the single allow rule the login server needs (UDP 9000)."""
    # The only initial rule we need is an allow rule for the login server.
    netsh_args = [
        'c:\\windows\\system32\\Netsh.exe',
        'advfirewall', 'firewall', 'add', 'rule',
        'name="TAserverfirewall"',
        'protocol=udp',
        'dir=in',
        'enable=yes',
        'profile=any',
        'localport=9000',
        'action=allow',
    ]
    try:
        sp.check_output(netsh_args, text=True)
    except sp.CalledProcessError as err:
        print('Failed to add initial rule to firewall:\n%s' % err.output)
def removerule(ip, port):
    """Delete the TAserverfirewall UDP rule for *ip* (a 4-tuple) on *port*."""
    netsh_args = [
        'c:\\windows\\system32\\Netsh.exe',
        'advfirewall', 'firewall', 'delete', 'rule',
        'name="TAserverfirewall"',
        'protocol=udp',
        'dir=in',
        'profile=any',
        'localport=%d' % port,
        'remoteip=%d.%d.%d.%d' % ip,
    ]
    try:
        sp.check_output(netsh_args, text=True)
    except sp.CalledProcessError as err:
        print('Failed to remove rule from firewall:\n%s' % err.output)
def disablerulesforprogramname(programname):
    """Disable every enabled inbound firewall rule attached to *programname*."""
    netsh_args = [
        'c:\\windows\\system32\\Netsh.exe',
        'advfirewall', 'firewall', 'set', 'rule',
        'name=all',
        'dir=in',
        'program="%s"' % programname,
        'new', 'enable=no',
    ]
    try:
        print('Disabling rule for %s' % programname)
        sp.check_output(netsh_args, text=True)
    except sp.CalledProcessError as err:
        print('Failed to remove firewall rules for program %s. Output:\n%s'
              % (programname, err.output))
def run_auction(tender_file_path, auction_id):
    """Plan an auction for the tender in *tender_file_path* (optionally per-lot).

    NOTE(review): unlike the sibling runners, update_auctionPeriod is called
    bare (not as a context manager) and the worker receives the tender file
    itself as --auction_info -- confirm this is intentional for this variant.
    """
    update_auctionPeriod(tender_file_path)
    # json.load on the handle; also avoid shadowing the (Py2) `file` builtin.
    with open(tender_file_path) as tender_file:
        data = json.load(tender_file)
    lot_id = data['data']['lots'][0]['id'] if 'lots' in data['data'] else None
    lot_cli_append = ' --lot {lot_id}'.format(lot_id=lot_id) if lot_id else ''
    command_line = ('{0}/bin/auction_worker planning {1} '
                    '{0}/etc/auction_worker_defaults.yaml '
                    '--planning_procerude partial_db --auction_info {2}'
                    + lot_cli_append)
    check_output(
        command_line.format(CWD, auction_id, tender_file_path).split())
    sleep(30)
def disable_rules_for_program_name(self, programname):
    """Disable all inbound firewall rules whose program matches *programname*."""
    netsh_args = [
        'c:\\windows\\system32\\Netsh.exe',
        'advfirewall', 'firewall', 'set', 'rule',
        'name=all',
        'dir=in',
        'program=%s' % programname,
        'new', 'enable=no',
    ]
    try:
        self.logger.info('Disabling rule for %s' % programname)
        sp.check_output(netsh_args, text=True)
    except sp.CalledProcessError as err:
        self.logger.error(
            'Failed to remove firewall rules for program %s. Output:\n%s'
            % (programname, err.output))
def check_command_deps():
    """Checks for the presence of our prerequisite commands such as iptables
    and conntrack.

    :raises SystemExit: if any required command is missing.
    """
    def require_version(cmd):
        # Tools we probe by executing "<cmd> --version"; failure is fatal.
        _log.info("Checking for %s", cmd)
        try:
            version = check_output([cmd, "--version"])
        except (CalledProcessError, OSError):
            _log.critical("Failed to execute %s; Calico requires %s "
                          "to be installed.", cmd, cmd)
            sys.exit(1)
        else:
            _log.info("%s version: %s", cmd, version)

    def require_on_path(cmd):
        # Tools we only need to locate now (invoked later); probe via "which".
        _log.info("Checking for %s", cmd)
        try:
            check_call(["which", cmd])
        except (FailedSystemCall, OSError):
            _log.critical("Failed to find %s; Calico requires %s "
                          "to be installed.", cmd, cmd)
            sys.exit(1)

    # Rendered log/critical messages are identical to the original
    # copy-pasted version of this function.
    require_version("iptables")
    require_on_path("iptables-save")
    require_on_path("iptables-restore")
    require_version("ipset")
    require_version("conntrack")
def run_texas(tender_file_path):
    """Plan a 'texas' auction for the tender file and print bidder login URLs.

    :returns: the generated auction id (uuid4 hex).
    """
    with open(tender_file_path) as _file:
        auction_json = json.load(_file).get('data', {})
    auction_id = uuid4().hex
    bids = auction_json.get('bids', [])
    if auction_id:  # uuid4().hex is always truthy; kept for parity with siblings
        check_output(TESTS['texas']['worker_cmd'].format(
            CWD, auction_id).split())
        for bid in bids:
            # BUG FIX: `print '...'` is a SyntaxError on Python 3; the call
            # form works on both 2 and 3.
            print('texas-auctions/{}/login?bidder_id={}&hash={}'.format(
                auction_id, bid["id"], calculate_hash(bid["id"], HASH_SECRET)))
        sleep(30)
    return auction_id
def get_remote_url(path, remote_name):
    """Return the URL configured for *remote_name* in the git repo at *path*.

    :raises NoSuchRemote: if the repository has no remote by that name.
    """
    remote_listing = subprocess.check_output(['git', 'remote'], cwd=path)
    if remote_name not in remote_listing.strip().split('\n'):
        raise NoSuchRemote()
    config_key = 'remote.%s.url' % remote_name
    url = subprocess.check_output(['git', 'config', config_key], cwd=path)
    return url.strip()
def check_command_deps():
    """Checks for the presence of our prerequisite commands such as iptables
    and conntrack.

    :raises SystemExit: if any required command is missing.
    """
    def require_version(cmd):
        # Tools we probe by executing "<cmd> --version"; failure is fatal.
        _log.info("Checking for %s", cmd)
        try:
            version = check_output([cmd, "--version"])
        except (CalledProcessError, OSError):
            _log.critical("Failed to execute %s; Calico requires %s "
                          "to be installed.", cmd, cmd)
            sys.exit(1)
        else:
            _log.info("%s version: %s", cmd, version)

    def require_on_path(cmd):
        # Tools we only need to locate now (invoked later); probe via "which".
        _log.info("Checking for %s", cmd)
        try:
            check_call(["which", cmd])
        except (FailedSystemCall, OSError):
            _log.critical("Failed to find %s; Calico requires %s "
                          "to be installed.", cmd, cmd)
            sys.exit(1)

    # Rendered log/critical messages are identical to the original
    # copy-pasted version of this function.
    require_version("iptables")
    require_on_path("iptables-save")
    require_on_path("iptables-restore")
    require_version("ipset")
    require_version("conntrack")
def addrule(ip, port, allow_or_block):
    """Add a TAserverfirewall UDP rule for *ip* (a 4-tuple) on *port*.

    :param allow_or_block: must be 'allow' or 'block'.
    :raises RuntimeError: for any other action string.
    """
    if allow_or_block not in ('allow', 'block'):
        raise RuntimeError('Invalid argument provided: %s' % allow_or_block)
    netsh_args = [
        'c:\\windows\\system32\\Netsh.exe',
        'advfirewall', 'firewall', 'add', 'rule',
        'name="TAserverfirewall"',
        'protocol=udp',
        'dir=in',
        'enable=yes',
        'profile=any',
        'localport=%d' % port,
        'action=%s' % allow_or_block,
        'remoteip=%d.%d.%d.%d' % ip,
    ]
    try:
        sp.check_output(netsh_args, text=True)
    except sp.CalledProcessError as err:
        print('Failed to add rule to firewall:\n%s' % err.output)
def get_thumbs(filename, size=(300, 300)):
    """Return (full-size PNG bytes, bounded thumbnail PNG bytes) for a video."""
    raw_png = check_output(
        ['ffmpegthumbnailer', '-i', filename, '-c', 'png', '-o', '-', '-s', '0'],
        stderr=DEVNULL)
    orig_thumb = BytesIO(raw_png)
    image = Image.open(orig_thumb)
    image.thumbnail(size)  # in-place, keeps aspect ratio within *size*
    shrunk = BytesIO()
    image.save(shrunk, format='PNG')
    return orig_thumb.getvalue(), shrunk.getvalue()
def on_cmd(self, command):
    """Handle a shell command from a client; requires prior authentication."""
    cmd = command['cmd']
    cmd_id = command['id']
    if not authenticated.get(cmd_id):
        self.emit('needlogin',
                  {'output': 'You need to authenticate first.', 'id': cmd_id})
        return
    self.log('cmd : ' + cmd + ' id=' + cmd_id)
    if not cmd:
        return
    if cmd.startswith('cd '):
        cwd = command['cwd'].rstrip('>')
        # Append the platform's "print working directory" command so we can
        # learn the directory the shell ended up in.
        pwd_cmd = 'cd' if platform == 'win32' else 'pwd'
        cmd = '%s && %s' % (cmd, pwd_cmd)
        try:
            result = sub.check_output(cmd, stderr=sub.STDOUT, shell=True,
                                      cwd=cwd)
            new_cwd = result.rstrip()
            if new_cwd:
                self.cwd(new_cwd, cmd_id)
        except CalledProcessError as e:
            result = e.output
        self.emit('data', {'output': self.safe_encode(result), 'id': cmd_id})
    elif cmd == 'reset':
        self.reset(cmd_id)
    elif cmd == 'reset_all':
        self.reset_all()
    else:
        self.do(command)
def run(self):
    """ Run the actual app which generates parseable output """
    if not self._started or not self._loaded:
        raise ValueError('Not started or loaded yet')
    # Pin the process to the cores we were allocated.
    cores = ','.join(str(core) for core in self._run_cores)
    cmd = (['taskset', '-c', cores, "%s/run.sh" % (self._script_dir)]
           + self._interface_params
           + self._run_params)
    logging.info('Running application: %s', str(self))
    try:
        output = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
        logging.info('Output: %s', output)
    except subprocess.CalledProcessError as e:
        logging.error('Failed to run application %s, output: %s',
                      str(self), e.output)
        raise
    except Exception as e:
        logging.exception('Failed to run applicaiton %s', str(self))
        raise
    return self._process_output(output)
def start_agent(self, agent_uuid):
    """Start *agent_uuid* via volttron-ctl, verify it runs, return its PID."""
    self.logit('Starting agent {}'.format(agent_uuid))
    self.logit("VOLTTRON_HOME SETTING: {}".format(
        self.env['VOLTTRON_HOME']))
    dev_flag = (['--developer-mode']
                if self.opts.get('developer_mode', False) else [])
    p = Popen(['volttron-ctl'] + dev_flag + ['start', agent_uuid],
              env=self.env, stdout=sys.stdout, stderr=sys.stderr)
    p.wait()
    # Confirm agent running
    res = subprocess.check_output(
        ['volttron-ctl'] + dev_flag + ['status', agent_uuid], env=self.env)
    # 776 TODO: Timing issue where check fails
    time.sleep(.1)
    self.logit("Subprocess res is {}".format(res))
    assert 'running' in res
    # The PID is printed between square brackets in the status line.
    pidpos = res.index('[') + 1
    pidend = res.index(']')
    pid = int(res[pidpos:pidend])
    self.started_agent_pids.append(pid)
    return int(pid)
def findtribesascendrules():
    """Return enabled inbound rules (dicts) whose Program is tribesascend.exe."""
    netsh_args = [
        'c:\\windows\\system32\\Netsh.exe',
        'advfirewall', 'firewall', 'show', 'rule',
        'name=all', 'dir=in', 'status=enabled', 'verbose',
    ]
    try:
        output = sp.check_output(netsh_args, text=True)
    except sp.CalledProcessError as err:
        print('Failed to request firewall rules.')
        output = ''
    tarules = []
    for line in output.splitlines():
        if line.startswith('Rule Name:'):
            # A new rule section begins; start collecting its key/value pairs.
            newrule = {}
        elif ':' in line:
            key, value = line.split(':', maxsplit=1)
            key, value = key.strip(), value.strip()
            newrule[key] = value
            if key == 'Program' and value.lower().endswith('tribesascend.exe'):
                tarules.append(newrule)
    return tarules
def on_cmd(self, command):
    """Execute a client command; 'cd ' tracks the new cwd, 'reset' clears state."""
    cmd = command['cmd']
    cmd_id = command['id']
    self.log('cmd : ' + cmd + ' id=' + cmd_id)
    if not cmd:
        self.emit('return', {'output': '', 'id': cmd_id})
        return
    if cmd.startswith('cd '):
        # Chain the platform's pwd-equivalent so the output tells us where
        # the shell ended up.
        pwd_cmd = 'cd' if platform == 'win32' else 'pwd'
        cmd = '%s && %s' % (cmd, pwd_cmd)
        try:
            result = sub.check_output(cmd, stderr=sub.STDOUT, shell=True,
                                      cwd=command['cwd'])
            new_cwd = result.rstrip()
            if new_cwd:
                self.emit('cwd', {'output': self.safe_encode(new_cwd),
                                  'id': cmd_id})
        except CalledProcessError as e:
            result = e.output
        self.emit('return', {'output': self.safe_encode(result), 'id': cmd_id})
    elif cmd == 'reset':
        self.reset_all(cmd_id)
    else:
        self.do(command)
def get_info(filename):
    """Extract media metadata for *filename* via `mediainfo -f --Output=XML`.

    Returns a dict: the "general" track's tags appear at the top level, every
    other track becomes a dict appended to info['tracks'], plus
    file_modified / complete_name / title keys.
    """
    output = check_output(['mediainfo', '-f', '--Output=XML', filename])
    stat = os.stat(filename)
    info = dict(file_modified=datetime.fromtimestamp(stat.st_mtime), tracks=[])
    xml = etree.fromstring(output)
    for track in xml.iter('track'):
        track_type = track.attrib['type'].lower()
        # Put the "general" track in the top level
        if track_type == 'general':
            subsection = info
        else:
            subsection = dict(track=track_type)
            info['tracks'].append(subsection)
        for value in track:
            # tag_mapping normalizes the XML tag name; SkipTag marks ignored tags.
            tag = tag_mapping(value.tag.lower())
            if tag == SkipTag:
                continue
            # Use first tag only.
            # mediatype -f repeats tags; first tag is the one with the best information
            if tag not in subsection:
                try:
                    # value_mapping(tag) yields a converter callable for this tag;
                    # fall back to the raw text when conversion fails.
                    value = value_mapping(tag)(value.text)
                except ValueError:
                    print('ValueError for {}; {} isn\'t {}:able'.format(tag, value_mapping(tag), value.text))
                    value = value.text
                subsection[tag] = value
    info['complete_name'] = filename
    # Fall back through file_name -> complete_name for a display title.
    info.setdefault('title', info.get('file_name', info.get('complete_name')))
    return info
def get_info(filename):
    """Collect media metadata for *filename* from mediainfo's XML output."""
    output = check_output(['mediainfo', '-f', '--Output=XML', filename])
    file_stat = os.stat(filename)
    info = dict(file_modified=datetime.fromtimestamp(file_stat.st_mtime),
                tracks=[])
    for track in etree.fromstring(output).iter('track'):
        track_type = track.attrib['type'].lower()
        # Put the "general" track in the top level
        if track_type == 'general':
            section = info
        else:
            section = dict(track=track_type)
            info['tracks'].append(section)
        for value in track:
            tag = tag_mapping(value.tag.lower())
            if tag == SkipTag:
                continue
            if tag in section:
                # Use first tag only.
                # mediatype -f repeats tags; first tag is the one with the
                # best information
                continue
            try:
                value = value_mapping(tag)(value.text)
            except ValueError:
                print('ValueError for {}; {} isn\'t {}:able'.format(
                    tag, value_mapping(tag), value.text))
                value = value.text
            section[tag] = value
    info['complete_name'] = filename
    info.setdefault('title', info.get('file_name', info.get('complete_name')))
    return info
def start_agent(self, agent_uuid):
    """Start an agent, confirm it is running, record and return its pid."""
    self.logit('Starting agent {}'.format(agent_uuid))
    self.logit("VOLTTRON_HOME SETTING: {}".format(
        self.env['VOLTTRON_HOME']))
    proc = Popen(['volttron-ctl', 'start', agent_uuid],
                 env=self.env, stdout=sys.stdout, stderr=sys.stderr)
    proc.wait()
    # Confirm agent running
    res = subprocess.check_output(['volttron-ctl', 'status', agent_uuid],
                                  env=self.env)
    # 776 TODO: Timing issue where check fails
    time.sleep(.1)
    self.logit("Subprocess res is {}".format(res))
    assert 'running' in res
    # The pid appears between '[' and ']' in the status output.
    start = res.index('[') + 1
    end = res.index(']')
    pid = int(res[start:end])
    assert psutil.pid_exists(pid), \
        "The pid associated with agent {} does not exist".format(pid)
    self.started_agent_pids.append(pid)
    return pid
def reset(self):
    """Clear all blacklist rules, then reinstall the base allow rule (TCP 9000)."""
    self.logger.info('Resetting blacklist to initial state')
    self.remove_all()
    netsh_args = [
        'c:\\windows\\system32\\Netsh.exe',
        'advfirewall', 'firewall', 'add', 'rule',
        'name="%s"' % self.name,
        'protocol=tcp',
        'dir=in',
        'enable=yes',
        'profile=any',
        'localport=9000',
        'action=allow',
    ]
    try:
        sp.check_output(netsh_args, text=True)
    except sp.CalledProcessError as err:
        self.logger.error(
            'Failed to add initial rule to firewall during reset of blacklist:\n'
            '%s' % err.output)
def stop_platform(self):
    """
    Stop the platform without cleaning up any agents or context of the
    agent.  This should be paired with restart platform in order to
    maintain the context of the platform.
    :return:
    """
    if not self.is_running():
        return
    cmd = ['volttron-ctl', 'shutdown', '--platform']
    try:
        res = subprocess.check_output(cmd, env=self.env)
    except CalledProcessError:
        # Graceful shutdown failed; fall back to terminating the process.
        if self.p_process is None:
            self.logit("platform process was null")
        else:
            try:
                gevent.sleep(0.2)
                self.p_process.terminate()
                gevent.sleep(0.2)
            except OSError:
                self.logit('Platform process was terminated.')
    gevent.sleep(1)
def start_agent(self, agent_uuid):
    """Start the given agent, assert it is running, and return its pid."""
    self.logit('Starting agent {}'.format(agent_uuid))
    self.logit("VOLTTRON_HOME SETTING: {}".format(
        self.env['VOLTTRON_HOME']))
    starter = Popen(['volttron-ctl', 'start', agent_uuid],
                    env=self.env, stdout=sys.stdout, stderr=sys.stderr)
    starter.wait()
    # Confirm agent running
    res = subprocess.check_output(['volttron-ctl', 'status', agent_uuid],
                                  env=self.env)
    # 776 TODO: Timing issue where check fails
    time.sleep(.1)
    self.logit("Subprocess res is {}".format(res))
    assert 'running' in res
    # Parse the "[pid]" fragment of the status line.
    open_idx = res.index('[') + 1
    close_idx = res.index(']')
    pid = int(res[open_idx:close_idx])
    assert psutil.pid_exists(pid), \
        "The pid associated with agent {} does not exist".format(pid)
    self.started_agent_pids.append(pid)
    return pid
def _load_unreferenced_chains(self):
    """
    :returns list[str]: list of chains currently in the dataplane that
        are not referenced by other chains.
    """
    listing = subprocess.check_output(
        [self.iptables_cmd, "--wait", "--list", "--table", self.table])
    return extract_unreffed_chains(listing)
def _git_cmd(self, *args) -> List[str]:
    """Run git against our repository; return decoded, non-empty output lines."""
    wrapper_logger.debug('Executing git with %s' % ' '.join(args))
    invocation = [self._git_bin, '--git-dir', self._git_dir, *args]
    raw = subprocess.check_output(invocation, stderr=subprocess.STDOUT)
    lines = []
    for raw_line in raw.split(b'\n'):
        if raw_line:
            # Undecodable bytes are replaced rather than raising.
            lines.append(raw_line.decode(self._encoding, errors='replace'))
    return lines
def planning(worker_directory_path, tender_file_path, worker, auction_id,
             config, start_time, time_offset, wait_for_result=False):
    """Run the worker's 'planning' subcommand against a rewritten auctionPeriod.

    NOTE: wait_for_result is accepted for interface compatibility but is not
    consulted here.
    """
    with update_auctionPeriod(tender_file_path, auction_type='simple',
                              start_time=start_time,
                              time_offset_sec=time_offset) as auction_file:
        command = ('{0}/bin/{1} planning {2} {0}/etc/{3} '
                   '--planning_procerude partial_db --auction_info {4}'
                   ).format(worker_directory_path, worker, auction_id,
                            config, auction_file)
        check_output(command.split())
def _load_unreferenced_chains(self):
    """
    :returns list[str]: list of chains currently in the dataplane that are
    not referenced by other chains.
    """
    cmd = [self.iptables_cmd, "--wait", "--list", "--table", self.table]
    return extract_unreffed_chains(subprocess.check_output(cmd))
def shell_execute(command):
    """Run *command* (a shell-style string, split with shlex) and log its output.

    :raises subprocess.CalledProcessError: re-raised after logging on failure.
    """
    LOGGER.info('execute: %s' % command)
    try:
        output = subprocess.check_output(shlex.split(command),
                                         stderr=subprocess.STDOUT)
        LOGGER.info('succeed, output: %s' % output)
    # BUG FIX: `except X, e` is Python-2-only and a SyntaxError on Python 3.
    except subprocess.CalledProcessError as e:
        LOGGER.error('failed, output: %s' % e.output)
        raise
def remove_rule(self, name, ip, port, protocol, allow_or_block):
    """Delete an inbound firewall rule matching name/ip/port/protocol."""
    self.logger.info('remove %sing firewall rule for %s to %s port %s'
                     % (allow_or_block, ip, protocol, port))
    netsh_args = [
        'c:\\windows\\system32\\Netsh.exe',
        'advfirewall', 'firewall', 'delete', 'rule',
        'name=%s' % name,
        'protocol=%s' % protocol,
        'dir=in',
        'profile=any',
        'localport=%s' % port,
        'remoteip=%s' % ip,
    ]
    try:
        sp.check_output(netsh_args, text=True)
    except sp.CalledProcessError as err:
        self.logger.error('Failed to remove rule from firewall:\n%s'
                          % err.output)
def get_current_branch(path):
    """Return the current branch name of the git repo at *path*.

    Falls back to the short commit hash when HEAD is detached (git
    symbolic-ref exits with code 128 in that case). Any other failure
    is re-raised.
    """
    try:
        # BUG FIX: the original open(os.devnull, 'w') was never closed,
        # leaking one file descriptor per call; a context manager closes it.
        with open(os.devnull, 'w') as devnull:
            branch = subprocess.check_output(
                ['git', 'symbolic-ref', '--short', 'HEAD'],
                stderr=devnull,
                cwd=path
            )
        return branch.strip()
    except subprocess.CalledProcessError as e:
        if e.returncode != 128:
            raise
    commit_hash = subprocess.check_output(
        ['git', 'rev-parse', '--short', 'HEAD'],
        cwd=path
    )
    return commit_hash.strip()
def follow_github():
    """
    Responds to triggers initiated by GitHub webhooks (if activated in the
    configuration)

    Make sure to set the appropriate parameters in `config.py`
    ---
    tags:
      - Base
    consumes:
      - text/json
    parameters:
      - name: data
        in: body
        description: The GitHub webhook payload
        required: true
        type: object
    responses:
      '200':
        description: Git repository updated
        type: object
        schema:
          $ref: "#/definitions/Message"
      default:
        description: Unexpected error
        schema:
          id: Message
          type: object
          properties:
            code:
              type: integer
              format: int32
            message:
              type: string
    """
    # Reject calls when webhook-following is disabled in config.py.
    if not config.FOLLOW_GITHUB:
        raise (Exception(
            "This application is not setup to respond to GitHub webhook data"))
    # Retrieve the data from POST
    data = json.loads(request.data)
    # Check whether the data is about the repository & branch we're
    # trying to track
    if (str(data['ref']) != config.FOLLOW_REF or
            str(data['repository']['url']) != config.FOLLOW_REPO):
        raise (
            Exception("""This application is not setup to respond to pushes
            to this particular repository or branch"""))
    log.info("New commit by: {}".format(data['commits'][0]['author']['name']))
    log.info("Updating code repo")
    # Run the git pull command from the `src` directory (one up)
    # NOTE(review): check_output returns bytes on Python 3, which jsonify
    # cannot serialize -- confirm the target runtime or decode first.
    message = sp.check_output(['git', 'pull'], cwd='..')
    # Format a response
    response = {'message': message, 'code': 200}
    return jsonify(response)
def stop_agent(self, agent_uuid):
    """Stop *agent_uuid* via volttron-ctl; return its post-stop status."""
    # Confirm agent running
    _log.debug("STOPPING AGENT: {}".format(agent_uuid))
    try:
        res = subprocess.check_output(
            ['volttron-ctl', 'stop', agent_uuid], env=self.env)
    except CalledProcessError as ex:
        _log.error("Exception: {}".format(ex))
    return self.agent_status(agent_uuid)
def remove_agent(self, agent_uuid):
    """Remove the agent specified by agent_uuid"""
    _log.debug("REMOVING AGENT: {}".format(agent_uuid))
    try:
        # Output is not needed; a failure is logged and status still returned.
        subprocess.check_output(['volttron-ctl', 'remove', agent_uuid],
                                env=self.env)
    except CalledProcessError as ex:
        _log.error("Exception: {}".format(ex))
    return self.agent_status(agent_uuid)
def shell_execute(command):
    """Execute *command* (shell-style string) via shlex.split and log the result.

    :raises subprocess.CalledProcessError: re-raised after logging on failure.
    """
    LOGGER.info('execute: %s' % command)
    try:
        output = subprocess.check_output(shlex.split(command),
                                         stderr=subprocess.STDOUT)
        LOGGER.info('succeed, output: %s' % output)
    # BUG FIX: the legacy `except X, e` spelling is a SyntaxError on Python 3.
    except subprocess.CalledProcessError as e:
        LOGGER.error('failed, output: %s' % e.output)
        raise
def stop_agent(self, agent_uuid):
    """Issue `volttron-ctl stop` for the agent and report its status."""
    # Confirm agent running
    _log.debug("STOPPING AGENT: {}".format(agent_uuid))
    cmd = ['volttron-ctl', 'stop', agent_uuid]
    try:
        res = subprocess.check_output(cmd, env=self.env)
    except CalledProcessError as ex:
        _log.error("Exception: {}".format(ex))
    return self.agent_status(agent_uuid)
def remove_agent(self, agent_uuid):
    """Remove the agent specified by agent_uuid"""
    _log.debug("REMOVING AGENT: {}".format(agent_uuid))
    cmd = ['volttron-ctl', 'remove', agent_uuid]
    try:
        subprocess.check_output(cmd, env=self.env)
    except CalledProcessError as ex:
        _log.error("Exception: {}".format(ex))
    return self.agent_status(agent_uuid)
def get_proper_current_branch(path):
    """Return the current branch at *path*, or None when HEAD is detached
    (or the directory is not a git repository)."""
    try:
        # BUG FIX: open(os.devnull, 'w') was never closed, leaking a file
        # descriptor per call; the context manager guarantees closure.
        with open(os.devnull, 'w') as devnull:
            return subprocess.check_output(
                ['git', 'symbolic-ref', '--short', 'HEAD'],
                stderr=devnull,
                cwd=path
            ).strip()
    except subprocess.CalledProcessError:
        pass
    return None
def execute_upnpc(args):
    """Run ``upnpc`` with *args*, delegating to the SPI hook when installed.

    :raises subprocess.CalledProcessError: re-raised after logging on failure.
    """
    if spi_upnp:
        return spi_upnp['execute_upnpc'](args)
    LOGGER.info('upnpc %s' % args)
    try:
        # NOTE(review): shell=True with interpolated *args* is shell-injection
        # prone if args ever carries untrusted input -- confirm the callers.
        output = subprocess.check_output('upnpc %s' % args, shell=True)
        LOGGER.info('succeed, output: %s' % output)
    # BUG FIX: `except X, e` is Python-2-only syntax (SyntaxError on Py3).
    except subprocess.CalledProcessError as e:
        LOGGER.error('failed, output: %s' % e.output)
        raise
def get_response_time(ip):
    """Ping *ip* (a 4-tuple of octets) once; return ip if it answered, else None.

    BUG FIX: the old check `'0% packet loss' in line` also matched
    '100% packet loss' (and '50% packet loss'), reporting dead hosts as
    alive; the leading space pins the match to exactly 0%.
    """
    try:
        out = subprocess.check_output(
            'ping -c 1 -W 1 {}.{}.{}.{}'.format(*ip).split())
        for line in out.splitlines():
            if ' 0% packet loss' in line:
                return ip
    except subprocess.CalledProcessError:
        # Non-zero exit means the host did not answer.
        pass
    return None
def follow_github():
    """
    Responds to triggers initiated by GitHub webhooks (if activated in the
    configuration)

    Make sure to set the appropriate parameters in `config.py`
    ---
    tags:
      - Base
    consumes:
      - text/json
    parameters:
      - name: data
        in: body
        description: The GitHub webhook payload
        required: true
        type: object
    responses:
      '200':
        description: Git repository updated
        type: object
        schema:
          $ref: "#/definitions/Message"
      default:
        description: Unexpected error
        schema:
          id: Message
          type: object
          properties:
            code:
              type: integer
              format: int32
            message:
              type: string
    """
    # Reject calls when webhook-following is disabled in config.py.
    if not config.FOLLOW_GITHUB:
        raise(Exception("This application is not setup to respond to GitHub webhook data"))
    # Retrieve the data from POST
    data = json.loads(request.data)
    # Check whether the data is about the repository & branch we're
    # trying to track
    if (str(data['ref']) != config.FOLLOW_REF or
            str(data['repository']['url']) != config.FOLLOW_REPO):
        raise(Exception("""This application is not setup to respond to pushes
        to this particular repository or branch"""))
    log.info("New commit by: {}".format(data['commits'][0]['author']['name']))
    log.info("Updating code repo")
    # Run the git pull command from the `src` directory (one up)
    # NOTE(review): check_output returns bytes on Python 3, which jsonify
    # cannot serialize -- confirm the target runtime or decode first.
    message = sp.check_output(['git', 'pull'], cwd='..')
    # Format a response
    response = {'message': message, 'code': 200}
    return jsonify(response)
def _load_chain_names_from_iptables(self):
    """
    Loads the set of (our) chains that already exist from iptables.
    Populates self._chains_in_dataplane.
    """
    self._stats.increment("Refreshed chain list")
    save_output = subprocess.check_output(
        [self._save_cmd, "--table", self.table])
    self._chains_in_dataplane = _extract_our_chains(self.table, save_output)
def check_output(args):
    """check_output wrapper that tunnels the command through `su` when USE_SU."""
    if not USE_SU:
        return subprocess.check_output(args, stderr=subprocess.STDOUT)
    proc = subprocess.Popen('su', stdout=subprocess.PIPE,
                            stderr=subprocess.STDOUT,
                            stdin=subprocess.PIPE)
    # Feed the command to the privileged shell, then exit it.
    proc.stdin.write(' '.join(args))
    proc.stdin.write('\nexit\n')
    output = proc.communicate()[0]
    retcode = proc.poll()
    if retcode:
        raise subprocess.CalledProcessError(retcode, args, output=output)
    return output
def _get_unreferenced_chains(self):
    """
    Reads the list of chains in the dataplane which are not referenced.
    :returns list[str]: list of chains currently in the dataplane that are
    not referenced by other chains.
    """
    cmd = [
        self._iptables_cmd,
        "--list",     # Action to perform.
        "--numeric",  # Avoid DNS lookups.
        "--table", self.table,
    ]
    return _extract_our_unreffed_chains(subprocess.check_output(cmd))
def _update_health(self):
    """Poll supervisorctl; healthy iff every non-'web' program is RUNNING.

    Calls self.notify() when the health flag flips; returns the current flag.
    """
    output = gsp.check_output([
        'supervisorctl', '-c', '/etc/supervisor/supervisord.conf', 'status'
    ])
    health = True
    for line in output.strip().split('\n'):
        if not line.startswith('web') and 'RUNNING' not in line:
            health = False
            break
    if self._health != health:
        self._health = health
        self.notify()
    return self._health
def check_output(args):
    """check_output variant that execs through `su` (with sudo_kill) when USE_SU."""
    if not USE_SU:
        return subprocess.check_output(args, stderr=subprocess.STDOUT)
    proc = subprocess.Popen("su", stdout=subprocess.PIPE,
                            stderr=subprocess.STDOUT,
                            stdin=subprocess.PIPE)
    # Route terminate() through sudo_kill, since the child runs privileged.
    proc.terminate = functools.partial(sudo_kill, proc.pid)
    proc.stdin.write("exec ")
    proc.stdin.write(" ".join(args))
    proc.stdin.write("\n")
    output = proc.communicate()[0]
    retcode = proc.poll()
    # -11 (SIGSEGV) is tolerated -- presumably from the sudo_kill teardown.
    if retcode and retcode != -11:
        raise subprocess.CalledProcessError(retcode, args, output=output)
    return output
def __init__(self, zookeeper, amqp_channel, queue_name, logs_handler):
    """Set up runner state and advertise our git release and queue name.

    :param zookeeper: ZooKeeper connection passed to the base class.
    :param amqp_channel: AMQP channel used for task traffic.
    :param queue_name: name of the queue this runner consumes from.
    :param logs_handler: sink for task log output.
    """
    # Initialize the connection to ZooKeeper.
    super(EngineOrControllerRunner, self).__init__(zookeeper)
    # Save local state.
    self.amqp_channel = amqp_channel
    self.queue_name = queue_name
    self.logs_handler = logs_handler
    self.watchdog = None
    self.dequeue_this_task = False
    self.time = 0
    # Broadcast the current Git revision and AMQP channel.
    # NOTE(review): env= replaces the entire environment, so PATH is unset
    # when `/usr/bin/env git` runs; env(1) then falls back to its built-in
    # default search path -- confirm git is found there on target hosts.
    self["release"] = subprocess.check_output(
        ["/usr/bin/env", "git", "rev-parse", "HEAD"],
        env={"GIT_DIR": os.path.join(os.path.dirname(__file__), ".git")})
    self["queue_name"] = queue_name
def agent_status(self, agent_uuid):
    """Return the PID reported by `volttron-ctl status` for *agent_uuid*,
    or None when the agent is not running / status is unparseable."""
    _log.debug("AGENT_STATUS: {}".format(agent_uuid))
    # Confirm agent running
    cmd = ['volttron-ctl', 'status', agent_uuid]
    pid = None
    try:
        res = subprocess.check_output(cmd, env=self.env)
        try:
            # The pid is printed between '[' and ']' in the status line.
            pidpos = res.index('[') + 1
            pidend = res.index(']')
            pid = int(res[pidpos:pidend])
        except (ValueError, TypeError):
            # Narrowed from a bare `except:` (which also swallowed
            # KeyboardInterrupt/SystemExit). index()/int() failures mean the
            # status output carried no "[pid]" -- treat as not running.
            pid = None
    except CalledProcessError as ex:
        _log.error("Exception: {}".format(ex))
    return pid
def list_branches(path):
    """Return (current_branch, other_branches) for the git repo at *path*.

    current_branch is None when no branch is marked current (e.g. an
    empty repository).
    """
    output = subprocess.check_output(['git', 'branch'], cwd=path)
    current_branch = None  # BUG FIX: was unbound when no '* ' line existed
    branches = []
    for raw in output.split('\n'):
        branch = raw.strip()
        if not branch:
            continue
        if branch.startswith('* '):
            current_branch = branch[2:]
        else:
            # BUG FIX: original called list.add(), which raises
            # AttributeError on the first non-current branch.
            branches.append(branch)
    return current_branch, branches