def settings_page():
    """Render the settings page; on POST, persist the submitted settings.

    After a successful save the viewer service is signalled (SIGUSR2) so
    it reloads its configuration.  The outcome is reported through a
    'flash' entry in the template context.
    """
    context = {'flash': None}
    if request.method == "POST":
        for field, default in CONFIGURABLE_SETTINGS.items():
            value = request.POST.get(field, default)
            # Checkboxes post the string 'on' when ticked; coerce to bool.
            if isinstance(default, bool):
                value = value == 'on'
            settings[field] = value
        try:
            settings.save()
            sh.sudo('systemctl', 'kill', '--signal=SIGUSR2', 'screenly-viewer.service')
            context['flash'] = {
                'class': "success",
                'message': "Settings were successfully saved."
            }
        except IOError as e:
            # Store a string, not the exception object, so the template
            # always renders a readable message.
            context['flash'] = {'class': "error", 'message': str(e)}
        except sh.ErrorReturnCode_1 as e:
            context['flash'] = {'class': "error", 'message': str(e)}
    else:
        # GET: re-read settings from disk before rendering.
        settings.load()
    for field, default in DEFAULTS['viewer'].items():
        context[field] = settings[field]
    return template('settings', **context)
def main():
    """Viewer entry point: on first boot wait for connectivity via the
    hotspot flow, then start the browser and run the asset scheduling
    loop forever."""
    setup()
    # First boot: if wifi was never configured and there is no default
    # route, show the hotspot page until a connection appears.
    if not path.isfile(path.join(HOME, '.screenly/wifi_set')):
        if not gateways().get('default'):
            url = 'http://{0}:{1}/hotspot'.format(LISTEN, PORT)
            load_browser(url=url)
            while not gateways().get('default'):
                sleep(2)
            if LISTEN == '127.0.0.1':
                sh.sudo('nginx')
        # Touch the marker file so the hotspot flow is skipped next boot.
        with open(path.join(HOME, '.screenly/wifi_set'), 'a'):
            pass
    url = 'http://{0}:{1}/splash_page'.format(
        LISTEN, PORT) if settings['show_splash'] else 'file://' + BLACK_PAGE
    browser_url(url=url)
    if settings['show_splash']:
        sleep(SPLASH_DELAY)
    global scheduler
    scheduler = Scheduler()
    # Background subscriber for control messages; daemon so it dies with us.
    subscriber = ZmqSubscriber()
    subscriber.daemon = True
    subscriber.start()
    logging.debug('Entering infinite loop.')
    while True:
        asset_loop(scheduler)
def down():
    """Tear down the local environment: clear ghost host entries, kill
    helper processes, remove the generated haproxy config, delete the
    minikube service and remove the scratch directory."""
    print("Cleaning up")
    sh.sudo("ghost", "empty")
    kill()
    sh.rm(haproxy_config)
    manage_minikube_svc('delete')
    shutil.rmtree(svctmp)
def refreshrepo(url, path, branch="master", local=False):
    """Clone or refresh the git repo at *path* and check out *branch*.

    Falls back to the matching master branch ("rpm-master" for rpm-*
    branches) when *branch* does not exist.  Returns a list of
    [branch, commit_hash, commit_timestamp].
    """
    logger.info("Getting %s to %s" % (url, path))
    if not os.path.exists(path):
        sh.git.clone(url, path)
    git = sh.git.bake(_cwd=path, _tty_out=False, _timeout=3600)
    if local is False:
        try:
            git.fetch("origin")
        except Exception:
            # Sometimes hg repositories get into a invalid state leaving them
            # unusable, to avoid a looping error just remove it so it will be
            # recloned.
            logger.error("Error fetching into %s, deleting." % (path))
            sh.sudo("rm", "-rf", path)
            raise
    try:
        git.checkout(branch)
    except sh.ErrorReturnCode_1:
        if "master" in branch:
            # Do not try fallback if already on master branch
            raise
        else:
            # Fallback to master
            if branch.startswith("rpm-"):
                # TODO(apevec) general distro branch detection
                branch = "rpm-master"
            else:
                branch = "master"
            logger.info("Falling back to %s" % branch)
            git.checkout(branch)
    # Pin the working tree to the remote state of the chosen branch.
    git.reset("--hard", "origin/%s" % branch)
    repoinfo = str(git.log("--pretty=format:%H %ct", "-1")).strip().split(" ")
    repoinfo.insert(0, branch)
    return repoinfo
def start_all():
    """Best-effort start of the Hadoop MapReduce and DFS daemons."""
    print('starting hadoop...')
    try:
        sh.sudo('start-mapred.sh')
        sh.sudo('start-dfs.sh')
    except sh.ErrorReturnCode:
        # Deliberate best-effort: the daemons may already be running.
        # Narrowed from a bare except so real errors (KeyboardInterrupt,
        # missing sh binary) are not silently swallowed.
        pass
def main():
    """Viewer entry point: if offline mode is disabled and there is no
    default route, run the hotspot flow; then start the browser and run
    the asset scheduling loop forever."""
    setup()
    if not gateways().get('default') and not settings['enable_offline_mode']:
        url = 'http://{0}:{1}/hotspot'.format(LISTEN, PORT)
        load_browser(url=url)
        # Wait until the user configured a connection (default route appears).
        while not gateways().get('default'):
            sleep(2)
        if LISTEN == '127.0.0.1':
            sh.sudo('nginx')
    url = 'http://{0}:{1}/splash_page'.format(LISTEN, PORT) if settings['show_splash'] else 'file://' + BLACK_PAGE
    browser_url(url=url)
    if settings['show_splash']:
        sleep(SPLASH_DELAY)
    global scheduler
    scheduler = Scheduler()
    # Background subscriber for control messages; daemon so it dies with us.
    subscriber = ZmqSubscriber()
    subscriber.daemon = True
    subscriber.start()
    logging.debug('Entering infinite loop.')
    while True:
        asset_loop(scheduler)
def restart_connection(self):
    """Wait until we are associated with an AP; if the SSID changed since
    the last check, renew the DHCP lease on the camera interface.

    Returns True when connected and no DHCP restart was needed,
    False on timeout or after triggering a DHCP restart.
    """
    print("restart_connection")
    print("Entering a while loop until I'm connected to an SSID")
    i = 0
    while not self.get_current_ssid():
        print("waiting... {}/{}".format(i, 20))
        time.sleep(1)
        # BUG FIX: the counter was never incremented, so the timeout
        # below could never trigger and this loop could spin forever.
        i += 1
        if i > 20:
            print("Timed out waiting for confirmation I was connected to any AP")
            return False
    current_ssid = self.get_current_ssid()
    if current_ssid:
        print("Connected with {}. Previous was {}".format(
            current_ssid, self.cached_ssid))
        if current_ssid != self.cached_ssid:
            print("Detected an SSID change. restarting DHCP")
            self.write_cached_ssid(current_ssid)
            try:
                sh.sudo('killall', 'dhclient')
            except Exception as e:
                # e.message was removed in Python 3; print the exception.
                print(e)
            try:
                sh.sudo('dhclient', self.camera_interface)
            except Exception as e:
                print(e)
            return False
    return True
def _mount_device(device):
    """Mount a Panono USB device over MTP, synchronize its files to the
    configured destination, then unmount it.

    Devices whose product name is missing or does not start with
    "Panono" are ignored.
    """
    try:
        product_name = device.getProduct()
    except Exception:
        # Narrowed from a bare except: some devices raise while being
        # queried; they are simply not of interest.
        log.info("Unable to get product name of USB device, ignoring")
        return
    if not product_name or not product_name.startswith("Panono"):
        return
    log.info("Panono detected: %s", product_name)
    tmpdirname = MOUNT_POINT
    # Create mount directories if needed
    tmpdir = pathlib.Path(tmpdirname)
    tmpdir.mkdir(parents=True, exist_ok=True)
    # Mount the device in the background and give it a moment to settle.
    mtp_process = sh.go_mtpfs("-android=false", tmpdirname, _bg=True)
    log.info("Device mounted on: %s", tmpdirname)
    time.sleep(1.5)
    # Synchronize the files
    synchronize(tmpdirname, args.destination, args.email, args.password)
    # Unmount the device; always umount even if terminate() raises.
    time.sleep(1.0)
    try:
        mtp_process.terminate()
    finally:
        sh.sudo("umount", tmpdirname)
        log.info("Device unmounted")
def stop_all():
    """Best-effort stop of the Hadoop MapReduce and DFS daemons."""
    print('stopping hadoop...')
    try:
        sh.sudo('stop-mapred.sh')
        sh.sudo('stop-dfs.sh')
    except sh.ErrorReturnCode:
        # Deliberate best-effort: the daemons may already be stopped.
        # Narrowed from a bare except so unrelated errors still surface.
        pass
def update():
    """ Updates the Osso source code from GitHub. """
    report('Updating Osso\'s source code.', report_type='JUST TRYNA MAINTAIN')
    # Pull from inside the build checkout; sudo because the checkout is
    # presumably not owned by the current user — confirm with deployment.
    os.chdir(BUILD_PATH)
    sudo('git', 'pull', 'origin', 'master')
def test_sudo_rights(self: object, sudo_password) -> bool:
    """Return True (and cache the password) if *sudo_password* grants
    sudo rights, False otherwise."""
    # Test for sudo rights if some password was specified:
    # `sudo -S true` reads the password from stdin and exits non-zero
    # when it is wrong or the user has no sudo rights.
    try:
        sh.sudo("-S", "true", _in=sudo_password)
        self.sudo_password = sudo_password
        return True
    except sh.ErrorReturnCode:
        # Narrowed from a bare except: only a failing sudo means "no".
        return False
def _start(self):
    """Start minikube with the 'none' VM driver via sudo, streaming its
    output to our stdout; -S makes sudo read the password from stdin."""
    logger.info('Starting minikube without a VM...')
    sh.sudo('-S', *self._start_env_as_arguments,
            'minikube', 'start',
            '--vm-driver', 'none',
            # Widen the NodePort range and point kubelet at systemd-resolved.
            '--extra-config', 'apiserver.service-node-port-range=1-32767',
            '--extra-config', 'kubelet.resolv-conf=/run/systemd/resolve/resolv.conf',
            _in=self._sudo_password,
            _out=sys.stdout.buffer,
            _err=sys.stdout.buffer)
def restart(interface='wlan0'):
    """
    Utility function to restart the network interface.

    :param interface: name of the interface to bring up (default 'wlan0')
    """
    logging.error('Internet connection problem')
    # BUG FIX: the interface parameter was previously ignored and
    # 'wlan0' was hard-coded in both the log message and the command.
    logging.info('Bringing %s up', interface)
    try:
        sudo('ifup', '--force', interface)
        logging.info('Restart succeeded')
    except ErrorReturnCode:
        logging.error('Restart failed')
def recover(file_path):
    """Validate a backup tarball and hand it to the recovery helper.

    Raises when the archive does not contain every expected directory;
    the archive file is removed after a successful recovery.
    """
    with tarfile.open(file_path, "r:gz") as tar:
        # `directories` is a module-level list of required top entries.
        for directory in directories:
            if directory not in tar.getnames():
                raise Exception("Archive is wrong.")
    sh.sudo('/usr/local/bin/screenly_utils.sh', 'recover', path.abspath(file_path))
    remove(file_path)
def apt_get_install(software):
    """Install software with apt.

    Accepts either a single package name (str) or a list of strings,
    each of which may contain several space-separated package names.
    """
    if isinstance(software, list):
        print_msg("安装软件")
        for entry in software:
            print(' install: ' + entry)
            for package in entry.split(' '):
                sudo("apt", "install", "-y", package)
    elif isinstance(software, str):
        print(' install: ' + software)
        sudo("apt", "install", "-y", software)
def apt_get_remove(software):
    """Remove (purge) software with apt.

    Accepts either a single package name (str) or a list of strings,
    each of which may contain several space-separated package names.
    """
    if isinstance(software, list):
        # BUG FIX: this banner previously said "安装软件" ("install
        # software"), copied from apt_get_install; this is the remove path.
        print_msg("卸载软件")
        for items in software:
            print(' remove: ' + items)
            for item in items.split(' '):
                sudo("apt", "purge", "-y", item)
    elif isinstance(software, str):
        print(' remove: ' + software)
        sudo("apt", "purge", "-y", software)
def clean(self):
    """Remove build artefacts: run `sudo python setup.py clean`, delete
    dist/ and build/, editor backups and bytecode files, and any
    *.egg-info directories."""
    # sh.sudo.python resolves to the `sudo python` command; getattr is
    # needed because "setup.py" is not a valid attribute name.
    sudo_setup = getattr(sh.sudo.python, "setup.py")
    sudo_setup.clean()
    sh.sudo("rm", "-rf", "dist", "build")
    # `path` here is the path.py package (path.path), not os.path.
    cwd = path.path(".")
    tmp_files = cwd.files("*~") + cwd.files("*.pyc")
    sh.sudo("rm", "-rf", *tmp_files)
    egg_infos = cwd.dirs("*.egg-info")
    if egg_infos:
        sh.sudo.rm("-rf", *egg_infos)
def run_update_hosts(self):
    """Append a hosts-file entry (minikube IP -> domain) for each custom
    domain using `sudo tee --append`."""
    for domain in self.custom_domains_to_be_configured:
        hosts_entry = self.host_format.format(minikube_ip=self.minikube_ip, domain=domain)
        sh.sudo(
            '-S',
            'tee', '--append', self.hosts_filename,
            # stdin carries the sudo password (consumed by -S) followed by
            # the text tee appends — assumes sudo reads only the password
            # line from stdin; TODO confirm.
            _in=self._sudo_password + self.hosts_watermark + hosts_entry,
        )
def tearDownClass(cls):
    """Restore the site.pp backup created by setUpClass."""
    cls.log.debug("\n" + "#" * 90)
    cls.log.debug("Restoring original site.pp ...")
    destination = cls.manifest_path + "/site.pp"
    if os.geteuid() == 0:
        # Running as root: move the backup back directly.
        sh.mv(cls.bck_manifest_name, destination)
    else:
        # Not root: escalate through sudo.
        sh.sudo('/bin/mv', cls.bck_manifest_name, destination)
    return
def restart_wpasupplicant(self):
    """Kill any running wpa_supplicant and start a fresh instance on the
    camera interface, then give it time to associate."""
    print("Restarting wpa_supplicant")
    try:
        sh.sudo('killall', 'wpa_supplicant')
    except Exception as e:
        # No running instance is fine; e.message was removed in Python 3,
        # so print the exception itself.
        print(e)
    sleep(1)
    # -B: daemonize; -P: pid file per interface; -D: driver list.
    sh.sudo('wpa_supplicant', '-s', '-B',
            '-P', '/run/wpa_supplicant.{}.pid'.format(self.camera_interface),
            '-i', self.camera_interface,
            '-D', 'nl80211,wext',
            '-c', '/etc/wpa_supplicant/wpa_supplicant.conf')
    sleep(10)
def _handle_exception(e, section=None):
    """Log exception *e* (optionally tagged with *section*), cache the
    current URL/orientation, restart the display manager and flag that
    a reboot is required."""
    global should_reboot, cache
    # str(e) instead of e.message: the .message attribute was removed in
    # Python 3 (and deprecated since 2.6), so e.message raised here.
    if section is not None:
        logging.info("Exception found in {section}: {ex}".format(
            section=section, ex=str(e)
        ))
    else:
        logging.info("Exception found: {ex}".format(ex=str(e)))
    cache['url'] = piurl['url']
    cache['landscape'] = piurl['landscape']
    sudo('service', 'lightdm', 'restart')
    should_reboot = True
def command(*args, require=True):
    """Run *args* under sudo.

    On failure, exit the program when *require* is True (after echoing
    the command's stdout/stderr); otherwise return the exit code.
    Returns 0 on success.
    """
    try:
        sudo(*args)
    except sh.ErrorReturnCode as exc:
        # Guard clause: non-required commands just report their code.
        if not require:
            return exc.exit_code
        error(f"Command failed: {exc.full_cmd}")
        click.secho("STDOUT", fg="green")
        click.echo(exc.stdout)
        click.secho("STDERR", fg="red")
        click.echo(exc.stderr, file=sys.stderr)
        sys.exit(exc.exit_code)
    return 0
def update_now(action_id=None, **args):
    """Run the self-update script; when *action_id* is given, first update
    that action's label so the UI warns about the coming restart."""
    if action_id:
        serverboards.rpc.call(
            "action.update",
            action_id, {
                "label": """Serverboards is updating. It may restart but should reconnect shortly. Page reload is highly encouraged."""
            })
    # -n: never prompt for a password; stream output to the logger.
    sh.sudo(
        "-n", "./serverboards-updater.sh",
        _out=serverboards.info, _err=serverboards.error)
    return True
def update_now(action_id=None, **args):
    """Run the self-update script; when *action_id* is given, first update
    that action's label so the UI warns about the coming restart."""
    if action_id:
        serverboards.rpc.call(
            "action.update",
            action_id, {
                "label": """Serverboards is updating. It may restart but should reconnect shortly. Page reload is highly encouraged."""
            })
    # -n: never prompt for a password; stream output to the logger.
    sh.sudo("-n", "./serverboards-updater.sh",
            _out=serverboards.info, _err=serverboards.error)
    return True
def post(self, *args, **kwargs):
    """Handle the config-form submission: persist valid data to
    config.json and restart the chute-client service so it picks up the
    new configuration."""
    form = ConfigForm(request.form, csrf_enabled=False)
    if form.validate():
        with open('config.json', 'w') as config_file:
            config_file.write(json.dumps(form.data))
        flash('Success, updated config')
        # Restart the service
        # this will kill the process
        # BUG FIX: each word must be a separate argument; the previous
        # single-string call made sudo look for an executable literally
        # named "supervisorctl restart chute-client".
        sh.sudo('supervisorctl', 'restart', 'chute-client')
        return redirect('/config/')
    return render_template('config.html', form=form)
def cmd(goal, cmd, *args, root=True, side_effects=True, dbg_result=None, dbg_func=None):
    """Announce *goal* and run `sudo cmd *args` (skipped when DEBUG).

    VERBOSITY >= 0 prints the goal, >= 1 also echoes the command line,
    >= 2 also prints the command's output.  `root`, `side_effects`,
    `dbg_result` and `dbg_func` are currently unused — see the TODO.
    """
    # TODO: 1.x: Better debug mode:
    # - run command as-is if side_effects is False
    # - run dbg_func if side_effects is True
    # - blindly return return dbg_result if dbg_func is None
    VERBOSITY >= 0 and msg("Doing task: %s." % goal)
    # TODO: '\e[33m' formatting
    VERBOSITY >= 1 and msg("sudo %s %s" % (cmd, ' '.join(args)))
    if not DEBUG:
        if VERBOSITY >= 2:
            print(sudo(cmd, *args))
        else:
            sudo(cmd, *args)
def setUpClass(cls):
    """Prepare logging, validate the Puppet agent configuration, read the
    test configuration and back up the current site.pp."""
    # Prepare logger
    cls.log = logging.getLogger('netapp_puppet_module_tests')
    cls.log.setLevel(logging.DEBUG)
    cls.ch = logging.StreamHandler()
    cls.ch.setLevel(logging.DEBUG)
    cls.log.addHandler(cls.ch)
    cls.log.debug("\n"+"-"*45 + " Tests is starting " + "-"*45 + '\n')
    # Check if 'puppet agent --configprint usecacheonfailure' is false.
    # Run the (slow) puppet query once instead of twice as before.
    usecacheonfailure = sh.puppet('agent', '--configprint', 'usecacheonfailure').upper().strip()
    cls.log.debug("Puppet agent option 'usecacheonfailure' is set to: " + usecacheonfailure)
    if usecacheonfailure != 'FALSE':
        raise Exception("You need to set Puppet agent option 'usecacheonfailure' on 'false'!")
    # Read config
    cls.log.debug("Reading configuration...")
    cls.url = configuration.server_root_url
    cls.manifest_path = configuration.manifest_path
    cls.first_system_id = configuration.first_system_id
    cls.first_system_ip1 = configuration.first_system_ip1
    cls.first_system_ip2 = configuration.first_system_ip2
    cls.first_system_pass = configuration.first_system_pass
    cls.first_system_test_pass = configuration.first_system_test_pass
    cls.first_system_test_ip = configuration.first_system_test_ip
    cls.second_system_id = configuration.second_system_id
    cls.second_system_ip1 = configuration.second_system_ip1
    cls.second_system_ip2 = configuration.second_system_ip2
    cls.second_system_pass = configuration.second_system_pass
    # Save current site.pp under a timestamped backup name
    cls.bck_manifest_name = cls.manifest_path + \
        '/site.pp.' + \
        datetime.datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d_%H:%M:%S') + \
        '.bck'
    cls.log.debug("Saving original site.pp to {0}...".format(cls.bck_manifest_name))
    # Hack for local running: escalate via sudo when not root
    if os.geteuid() != 0:
        sh.sudo('/bin/cp', cls.manifest_path + "/site.pp", cls.bck_manifest_name)
        sh.sudo('/bin/chmod', '664', cls.bck_manifest_name)
    else:
        sh.cp(cls.manifest_path + "/site.pp", cls.bck_manifest_name)
        sh.chmod('664', cls.bck_manifest_name)
    return
class SignatureHandler(SocketServer.BaseRequestHandler):
    # Handles signing requests: mounts the client's package directory over
    # NFS and runs the signing script for the requested action.
    # NOTE: Python 2 syntax (`except X, e`); request payload format is
    # "action,path,packname".
    def do_signature(self):
        """Mount the client's NFS export and run the signing script on the
        named package, reporting failures back over the socket."""
        # 1 -- mount nfs
        action, path, packname = self.data.split(",")
        nfs_dir = self.ipaddr + ":" + path
        work_dir = "/home/android/signature/" + self.ipaddr + '/' + packname
        exit_code = 0
        try:
            sh.sudo.mkdir(work_dir, "-p")
        except sh.ErrorReturnCode_1:
            # Directory already exists.
            logging.warning("mkdir failed, just ignore it")
        try:
            sh.sudo.mount("-t", "nfs", "-o", "hard,intr", nfs_dir, work_dir)
        except sh.ErrorReturnCode, e:
            logging.warning("mount failed: cmd=%s, exit_code = %d" % (e.full_cmd, e.exit_code))
            # Report the mount failure to the client and stop.
            self.request.sendall(str(e.exit_code))
            return
        logging.warning("start sign....")
        # 2 -- run the signing script chosen by the requested action.
        try:
            sign_ret = sh.sudo("/bin/bash", sign_action[action], work_dir, packname)
            exit_code = sign_ret.exit_code
            logging.warning("--------------sign success, stdout-----------------")
            logging.warning(sign_ret.stdout)
            logging.warning("--------------sign success, stdout end-----------------")
        except sh.ErrorReturnCode, e:
            logging.warning("--------------sign failed-----------------")
            logging.warning(e.stdout)
            logging.warning("~~~~~~~~~~~~~~~~~~~~~~~~")
            logging.warning(e.stderr)
            logging.warning("--------------sign failed end-----------------")
            exit_code = e.exit_code
def give_user_ztanesh(unix_user):
    """
    Make sure our UNIX user runs the ZtaneSH shell — it is more
    productive for working with Plone sites.

    https://github.com/miohtama/ztanesh
    """
    from sh import git
    from sh import chsh
    home = get_unix_user_home(unix_user)
    # Install ZtaneSH (skip if the tools checkout already exists)
    if not os.path.exists("%s/tools" % home):
        print "Installing ZtaneSH for user %s" % unix_user
        # Run the whole install as the target user via a sudo login shell.
        with sudo(i=True, u=unix_user, _with=True):
            cd(home)
            git("clone", "git://github.com/miohtama/ztanesh.git", "tools")
            setup = "%s/tools/zsh-scripts/setup.zsh" % home
            run = Command(setup)
            run()
    # Set user default shell
    with sudo:
        chsh("-s", "/bin/zsh", unix_user)
def restart_all(root_folder):
    """
    Restart all sites installed on the server.

    If sites are in ZEO front end clusters try to do soft restarts so
    that there is at least one client up all the time.
    """
    for folder, zeo_type, name in find_plone_sites(root_folder):
        processes = get_plone_processes(folder, zeo_type)
        # Restart processes one by one
        # so that there should be always at least one client serving.
        for p in processes:
            unix_user = find_owner(p)
            print "Restarting process %s for user %s" % (p, unix_user)
            # Run the control script as the owning user via sudo login shell.
            with sudo(H=True, i=True, u=unix_user, _with=True):
                cmd = Command(p)
                cmd("stop")
                if not p.endswith("zeoserver"):
                    # Don't mess with database server too long
                    time.sleep(2)
                cmd("start")
def ppcoind(self, argv):
    """Return ppcoind's getinfo (plus proof-of-stake difficulty) as a
    dict, or a status string when the daemon is down or still starting.

    With argv == "private", the IP and balance fields are stripped so the
    result is safe to post publicly.
    """
    get = sh.ppcoind("getinfo", _ok_code=[0,3,5,87]).stdout
    pos_diff = sh.ppcoind("getdifficulty", _ok_code=[0,3,5,87]).stdout
    pid = sh.sudo("pidof", "ppcoind").stdout.strip()
    try:
        getinfo = json.loads(get)
        pos = json.loads(pos_diff)['proof-of-stake']
        getinfo["difficulty proof-of-stake"] = pos
    except (ValueError, KeyError):
        # Daemon did not answer with usable JSON yet.  BUG FIX: the old
        # check `pid != None` was always true (strip() never returns
        # None), so "ppcoind inactive" was unreachable; test truthiness
        # of the pidof output instead (empty when no process exists).
        if pid:
            return "ppcoind is running but not ready"
        else:
            return "ppcoind inactive"
    ## When posting in public, hide IP and balance.
    if argv == "private":
        del getinfo['balance']
        del getinfo['ip']
        return getinfo
    else:
        return getinfo
def ppcoind(self, argv):
    """Return ppcoind's getinfo (plus proof-of-stake difficulty) as a
    dict, or a status string when the daemon is down or still starting.

    With argv == "private", the IP and balance fields are stripped so the
    result is safe to post publicly.
    """
    get = sh.ppcoind("getinfo", _ok_code=[0, 3, 5, 87]).stdout
    pos_diff = sh.ppcoind("getdifficulty", _ok_code=[0, 3, 5, 87]).stdout
    pid = sh.sudo("pidof", "ppcoind").stdout.strip()
    try:
        getinfo = json.loads(get)
        pos = json.loads(pos_diff)['proof-of-stake']
        getinfo["difficulty proof-of-stake"] = pos
    except (ValueError, KeyError):
        # Daemon did not answer with usable JSON yet.  BUG FIX: the old
        # check `pid != None` was always true (strip() never returns
        # None), so "ppcoind inactive" was unreachable; test truthiness
        # of the pidof output instead (empty when no process exists).
        if pid:
            return "ppcoind is running but not ready"
        else:
            return "ppcoind inactive"
    ## When posting in public, hide IP and balance.
    if argv == "private":
        del getinfo['balance']
        del getinfo['ip']
        return getinfo
    else:
        return getinfo
def check_cpu_temp(self):
    """Read the SoC temperature via vcgencmd; return (False, 4004) when
    it exceeds 70 degrees, otherwise (True, 3004)."""
    # vcgencmd lives under /opt/vc/bin on Raspberry Pi OS.
    os.environ['PATH'] += ':/opt/vc/bin'
    raw = sh.sudo('vcgencmd', 'measure_temp').stdout.rstrip()
    # Output looks like: temp=48.3'C — take the number between '=' and "'".
    value = raw.split('=')[1].split("'")[0]
    temp = float(value)
    if temp > 70:
        return (False, 4004)
    return (True, 3004)
def findif(iface):
    """Ensure *iface* is present: if it does not appear in the cached
    `ifaces` listing, add it as an alias of lo0 (BSD/macOS style).

    Returns the ifconfig exit code on success, an error string on
    failure, or None when the interface already exists.
    """
    if not re.search(re.escape(iface), str(ifaces), re.IGNORECASE):
        try:
            output = sh.sudo("ifconfig", "lo0", "alias", iface)
            return (output.exit_code)
        except:
            # NOTE(review): bare except hides the real cause; callers must
            # check for the string return value to detect failure.
            return ("# ERROR: Failed to add lo0 alias!")
def hotspot_page():
    """Stop nginx (it holds the port), launch wifi-connect in the
    background, wait for its HTTP server, and render the hotspot page
    with the advertised access-point name."""
    if LISTEN == '127.0.0.1':
        sh.sudo('nginx', '-s', 'stop')
    wifi_connect = sh.sudo('wifi-connect', _bg=True, _err_to_out=True)
    # Block until wifi-connect reports that its captive portal is up.
    while 'Starting HTTP server' not in wifi_connect.process.stdout:
        sleep(1)
    network = None
    # Scrape the access-point name (quoted) from wifi-connect's output.
    for line in wifi_connect.process.stdout.split('\n'):
        if 'Access point ' in line:
            network = line.split("'")[-2]
            break
    return template('hotspot.html', network=network, address='screenly.io/wifi')
def build_rip(self: object) -> None:
    """Build the checked-out sources: run autogen.sh and configure, make
    with one job per core, then `sudo make setuid` using the cached
    sudo password (fed via stdin, -S)."""
    src_directory = "{0}/src".format(self.normalized_path)
    number_of_cores_string = sh.nproc(_tty_out=False).strip()
    autogen_path = "{0}/autogen.sh".format(src_directory)
    sh.Command(autogen_path)(_cwd=src_directory, _out=sys.stdout.buffer)
    configure_path = "{0}/configure".format(src_directory)
    sh.Command(configure_path)(_cwd=src_directory, _out=sys.stdout.buffer)
    sh.make("-j", number_of_cores_string, _cwd=src_directory, _out=sys.stdout.buffer)
    # The setuid step needs root; -S reads the password from stdin.
    sh.sudo("-S", "make", "setuid", _in=self.sudo_password, _cwd=src_directory, _out=sys.stdout.buffer)
def unmount(self, devices, sudo = False):
    """Unmount the first mount whose spec appears in *devices*.

    Returns the mount record that was unmounted (also remembered in
    self._orig_mount), or None when no matching mount exists.  Raises
    when more than one mount matches.
    """
    log.debug('unmount(%r, %r)', devices, sudo)
    # BUG FIX: on Python 3 filter() returns a lazy iterator, which is
    # always truthy and has neither len() nor pop(); materialise the
    # matches as a list instead.
    mounted = [mount for mount in self._mounts if mount.spec in devices]
    if mounted:
        if len(mounted) > 1:
            for m in mounted:
                log.critical(str(m))
            # BUG FIX: Exception() does not %-format extra arguments the
            # way logging does; format the message explicitly.
            raise Exception('Found more than one mount for device "%s"'
                            % devices[0])
        mount = mounted.pop()
        log.info('Unmounting %s mounted at %s', mount.spec, mount.file)
        if sudo:
            # -n: fail rather than prompt for a password.
            sh.sudo('-n', 'umount', mount.spec)
        else:
            sh.umount(mount.spec)
        self._orig_mount = mount
        return mount
    log.info('Device %s not found in mounted filesystems', devices[0])
    return
def format(device, fs_type, fs_options, sudo=False):
    """Run mkfs for *fs_type* (with optional extra options split from
    *fs_options*), through sudo when requested, and return the result.

    NOTE(review): *device* is not passed to mkfs here — presumably the
    target comes from elsewhere; confirm with callers.
    """
    mkfs_args = ['--type=%s' % fs_type]
    if fs_options:
        mkfs_args += fs_options.split()
    if sudo:
        # -n: fail rather than prompt for a password.
        return sh.sudo('-n', 'mkfs', *mkfs_args)
    return sh.mkfs(*mkfs_args)
def refreshrepo(url, path, branch="master", local=False):
    """Clone (directly on *branch*) or refresh the git repo at *path* and
    return [commit_hash, commit_timestamp] of the resulting HEAD."""
    logger.info("Getting %s to %s" % (url, path))
    if not os.path.exists(path):
        sh.git.clone(url, path, "-b", branch)
    git = sh.git.bake(_cwd=path, _tty_out=False, _timeout=3600)
    if local is False:
        try:
            git.fetch("origin")
        except Exception:
            # Sometimes hg repositories get into a invalid state leaving them
            # unusable, to avoid a looping error just remove it so it will be
            # recloned.
            logger.error("Error fetching into %s, deleting." % (path))
            sh.sudo("rm", "-rf", path)
            raise
    # Pin the working tree to the remote state of the requested branch.
    git.checkout(branch)
    git.reset("--hard", "origin/%s" % branch)
    return str(git.log("--pretty=format:%H %ct", "-1")).strip().split(" ")
def switch_to_custom_manifest(cls, manifest_body):
    """
    Helper to overwrite original manifest by custom manifest
    :param manifest_body: puppet manifest text to install as site.pp
    :return: None
    """
    with open("/var/tmp/netapp_test_suite_tmp_site.pp", 'w') as temp_site_pp:
        temp_site_pp.write(manifest_body)
    # Hack for local running: escalate via sudo when not root.
    if os.geteuid() != 0:
        sh.sudo('/bin/mv', '/var/tmp/netapp_test_suite_tmp_site.pp', cls.manifest_path + "/site.pp")
        sh.sudo('/bin/chmod', '664', cls.manifest_path + "/site.pp")
    else:
        sh.mv('/var/tmp/netapp_test_suite_tmp_site.pp', cls.manifest_path + "/site.pp")
        sh.chmod('664', cls.manifest_path + "/site.pp")
    # Show how looks like site.pp for now
    cls.log.debug("How looks site.pp for now (by 'cat {0}'):".format(cls.manifest_path + "/site.pp"))
    cls.log.debug(sh.cat(cls.manifest_path + "/site.pp"))
def run_trial(fs, trial_num, run_type):
    """Run one TestDFSIO trial (*run_type* is 'read' or 'write') on
    filesystem *fs*, capturing blktrace on every server and collecting
    the traces afterwards.  Python 2 code."""
    print "%s: trial %d, %s" % (fs, trial_num, run_type)
    trace_filename = fs + '.' + run_type + '.' + str(trial_num)
    if run_type == 'write':
        # Clear previous benchmark output; retry while HDFS settles.
        while True:
            try:
                sh.sudo('hadoop fs -rmr -skipTrash /benchmarks'.split())
                break
            except sh.ErrorReturnCode_255:
                print "failed to delete benchmark data; retrying in 5 seconds..."
                sh.sleep(5)
    for server in servers:
        server.start_blktrace(server.hostname + '.' + trace_filename)
        #server.start_blktrace('data/blktrace/%s/%s.%d' % (fs, server.hostname, trial_num))
    sh.sleep(1)
    try:
        # The triple-quoted string below is disabled teragen/terasort code
        # kept for reference.
        """
        sh.time( 'sudo hadoop jar /usr/share/hadoop/hadoop-examples-1.0.4.jar teragen -Dmapred.map.tasks=10 1000000000 /tera'.split(), _err = 'data/%s.gen.%d' % (fs, trial_num) )
        sh.time( 'sudo hadoop jar /usr/share/hadoop/hadoop-examples-1.0.4.jar terasort -Dmapred.map.tasks=10 -Dmapred.reduce.tasks=10 /tera /sorted'.split(), _err = 'data/%s.sort.%d' % (fs, trial_num) )
        """
        sh.time(
            ('sudo hadoop jar /usr/share/hadoop/hadoop-test-1.0.4.jar TestDFSIO -%s -fileSize 100 -nrFiles 100' % run_type).split(),
            _err = 'data/%s/%s.%d' % (run_type, fs, trial_num)
        )
    except sh.ErrorReturnCode_255:
        print "WARNING: test failed"
    for server in servers:
        server.end_blktrace()
        # Pull the per-host blktrace output back to the local data dir.
        sh.scp(('root@%s:%s.* %s' % (server.hostname, server.hostname + '.' + trace_filename, 'data/blktrace/%s/' % fs)).split())
def settings_page():
    """Render the settings page; on POST, persist the submitted settings.

    After a successful save the viewer service is signalled (SIGUSR2) so
    it reloads its configuration.  The outcome is reported through a
    'flash' entry in the template context.
    """
    context = {'flash': None}
    if request.method == "POST":
        for field, default in CONFIGURABLE_SETTINGS.items():
            value = request.POST.get(field, default)
            # Checkboxes post the string 'on' when ticked; coerce to bool.
            if isinstance(default, bool):
                value = value == 'on'
            settings[field] = value
        try:
            settings.save()
            sh.sudo('systemctl', 'kill', '--signal=SIGUSR2', 'screenly-viewer.service')
            context['flash'] = {'class': "success", 'message': "Settings were successfully saved."}
        except IOError as e:
            # Store a string, not the exception object, so the template
            # always renders a readable message.
            context['flash'] = {'class': "error", 'message': str(e)}
        except sh.ErrorReturnCode_1 as e:
            context['flash'] = {'class': "error", 'message': str(e)}
    else:
        # GET: re-read settings from disk before rendering.
        settings.load()
    for field, default in DEFAULTS['viewer'].items():
        context[field] = settings[field]
    return template('settings', **context)
def migrate_site(name, folder, unix_user, source, python):
    """ Migrate a Plone site from another server.

    :param name: New site installation id

    :param source: SSH source path

    :param python: Python interpreter used for the new installation
    """
    require_ssh_agent()
    allow_ssh_agent_thru_sudo()
    create_site_base(name, folder, unix_user)
    allow_non_root_user_to_share_ssh_agent_forwarding(unix_user)
    # Everything below runs as the site's own user via a sudo login shell.
    with sudo(H=True, i=True, u=unix_user, _with=True):
        # rsync may have bad permission settings leftover
        # from the previous run which was interruped
        reset_permissions(unix_user, folder)
        copy_site_files(source, folder)
        # rsync may have bad permission settings leftover
        # from the previous server
        reset_permissions(unix_user, folder)
        # Apply automatic buildout fixes.
        # Must be done before boostrapping so that egg cache
        # e.g. does not point to previous server
        fix_buildout(folder)
        # Reinstall bootstrap which might have been worn out by time
        fix_bootstrap_py(folder)
        print "Rebootstrapping site %s" % folder
        rebootstrap_site(name, folder, python, mr_developer=True)
        # Make sure all file permissions are sane after migration
        reset_permissions(unix_user, folder)
    check_startup(name, folder, unix_user)
    print "Migrated site %s and it appears to be working" % name
def ui_command_iozone(self, atime='off', compress='on', size='1M', pool=None):
    """Run iozone as user 'nobody' inside a freshly created test
    filesystem, restore the working directory, clean up the filesystem
    and return the iozone output iterator."""
    if pool:
        self.test_pool = pool
    fs = self._create_test_filesystem(atime=atime, compress=compress)
    # BUG FIX: os.curdir is the literal string '.', so restoring it later
    # with os.chdir was a no-op; capture the real working directory.
    cwd = os.getcwd()
    os.chdir(str(fs.properties['mountpoint']))
    try:
        with sh.sudo('-u', 'nobody', _with=True):
            ret = sh.iozone('-a', '-g', size, _iter=True, _err_to_out=True)
    finally:
        os.chdir(cwd)
    time.sleep(1)
    self._cleanup_test_filesystem()
    return ret
def arp_dump():
    '''[Internal] Uses arp -a to dump mac adresses
    Returns a dict {mac:ip,...} for entries on INTERFACE'''
    # k=True -> `sudo -k`: invalidate cached credentials first.
    with sh.sudo(k=True, _with=True):
        dump = sh.arp('-a')
    lines = dump.strip().split('\n')
    ret = {}
    for line in lines:
        # Typical BSD-style line:
        #   host (10.0.0.2) at aa:bb:cc:dd:ee:ff on en0 ifscope [ethernet]
        # Parsing is positional — assumes this exact layout; TODO confirm
        # against the local arp implementation.
        split = line.strip().split(' ')
        ip = split[1][1:-1]   # strip the surrounding parentheses
        mac = split[3]
        # The interface field shifts by one depending on whether field 4
        # is the literal 'on'.
        if split[4] == 'on':
            iface = split[5]
        else:
            iface = split[6]
        if iface == INTERFACE:
            ret[mac] = ip
    return ret
def _reqcmd():
    """Execute the commands requested by the server (via sudo) and return
    their stdout/stderr as a JSON payload under 'data', or an 'err' dict
    when the request is malformed.

    SECURITY NOTE: this runs server-supplied commands with sudo; the
    server must be fully trusted.
    """
    logging.info("Found arbitrary commands to run.")
    try:
        commands = json.loads(piurl['requested_commands'])
    except ValueError:
        # Narrowed from a bare except to JSON decode errors only; also
        # fixed the "jsonnot" typo in the log message.
        logging.info("json not acceptable for requested commands.")
        return {'err': "pifm_client.py: malformed json for commands"}
    output = []
    for command in commands:
        tmpout = sudo(command['cmd'], *command['args'])
        output.append({'stdout': tmpout.stdout, 'stderr': tmpout.stderr})
    output = json.dumps(output)
    return {'data': output}
def push(name):
    """Deploy webhook: when origin/master moved for site *name*, pull,
    run ./deploy.sh, restart the service, record the deploy in MongoDB
    and return it as JSON (201).  Returns 200/'same' when up to date."""
    # a terrible CIDR check
    if not any([request.remote_addr.startswith(x) for x in ALLOWED_RANGES]):
        return jsonify({'status': 'access denied'}), 401
    site = mongo.db.sites.find_one({'_id': name})
    # Identity comparison with None instead of `== None` (PEP 8).
    if site is None:
        return jsonify({'status': 'invalid repo spec'}), 404
    rev = str(git('rev-parse', 'origin/master', _cwd=site['path']))
    sudo.git.fetch(_cwd=site['path'])
    after_rev = str(git('rev-parse', 'origin/master', _cwd=site['path']))
    if rev != after_rev:
        git_output, deploy, restart, err = [''] * 4
        try:
            git_output = str(sudo.git.pull(_cwd=site['path'])).strip()
            deploy = str(sh('./deploy.sh', _cwd=site['path'])).strip()
            restart = str(sudo('service', name, 'restart'))
        except ErrorReturnCode as e:
            err = str(e)
            print('weird error', err)
        output = {
            'site': name,
            'git_revs': [rev, after_rev],
            'git_output': git_output,
            'deploy': deploy,
            'restart': restart,
            'err': err,
        }
        mongo.db.deploys.insert(output)
        # ObjectId is not JSON-serialisable; stringify before jsonify.
        output['_id'] = str(output['_id'])
        return jsonify(output), 201
    return jsonify({
        'status': 'same',
        'rev': rev or '...',
    }), 200
def worker(username):
    """Run `id` as *username* (inside an `sh` sudo context) after a short
    delay.  Python 2 code (print statement)."""
    with sh.sudo('-u', username, _with=True):
        sleep(3)
        print username, sh.id()
def test_sub_commands(): from sh import sudo # resolves to "sudo /bin/ls /root print sudo.ls('/root') print sudo('/bin/ls', '/root') # the same command
from os import getenv, path
from pwgen import pwgen
import sh


def generate_page(ssid, pswd, address):
    """Render the hotspot template with the AP credentials into
    /tmp/hotspot.html.

    NOTE(review): `Template` (jinja2?) and `gateways` (netifaces?) are
    not imported in this chunk — presumably imported elsewhere in the
    file; confirm.
    """
    home = getenv('HOME')
    template_path = path.join(home, 'screenly/templates/hotspot.html')
    with open(template_path) as f:
        template = Template(f.read())
    context = {
        'network': ssid,
        'ssid_pswd': pswd,
        'address': address
    }
    with open('/tmp/hotspot.html', 'w') as out_file:
        out_file.write(template.render(context=context))


if __name__ == "__main__":
    # Only start an access point when there is no default route yet.
    if not gateways().get('default'):
        ssid = 'ScreenlyOSE-{}'.format(pwgen(4, symbols=False))
        ssid_password = pwgen(8, symbols=False)
        generate_page(ssid, ssid_password, 'screenly.io/wifi')
        wifi_connect = sh.sudo('wifi-connect', '-s', ssid, '-p', ssid_password, '-o', '9090')
    else:
        pass
import sh
from sh import sudo

### TCP Slow Start Optimization
### TCP Increase Segment size for Kernel <= 2.6.39
### Optimize Slow Start Restart

# Read the current kernel setting; spaces are stripped so the value
# compares cleanly against the "key=value" strings below.
sysctl = sh.Command('sysctl')
tcp_slow_start = sysctl('net.ipv4.tcp_slow_start_after_idle').strip().replace(' ','')
enabled_tcp_slow_start = 'net.ipv4.tcp_slow_start_after_idle=1'
disabled_tcp_slow_start = 'net.ipv4.tcp_slow_start_after_idle=0'
# Disable slow-start-after-idle when it is currently enabled.
if tcp_slow_start == enabled_tcp_slow_start:
    print 'Optimizing TCP Slow Start'
    sudo('sysctl', '-w', disabled_tcp_slow_start)
elif tcp_slow_start == disabled_tcp_slow_start:
    print 'TCP Slow Start already optimized'
else:
    # Unexpected sysctl output (e.g. unknown key).
    print 'Error Occurred with Slow Start Optimization'
def hadoop_mkdir():
    """Create the /benchmarks directory in HDFS (used by TestDFSIO)."""
    print 'making hadoop directory...'
    # NOTE(review): relies on sh expanding the list from split() into
    # separate arguments — confirm with the installed sh version.
    sh.sudo('hadoop fs -mkdir /benchmarks'.split())
def run(arg):
    """Plugin entry point: add an iptables rule admitting the client IP
    (optionally keyed on its MAC address, resolved with arping).

    Returns {'error': <message or None>, 'failed': <bool>} to the
    dispatcher; the error text may be shown to end users.
    """
    # Some info from the plugin dispatcher.
    environ = arg['environ']
    plugin_config = arg['config']
    config = RawConfigParser(defaults=plugin_config)
    config.add_section('iptables')
    config._sections['iptables'] = plugin_config
    # Setup plugin logging
    l = getLogger('plugin_iptables')
    l.addHandler(logHandler)
    if config.getboolean('iptables', 'debug'):
        l.setLevel(DEBUG)
        l.debug('debug logging enabled')
    # Get client IP from webapp, try HTTP_X_FORWARDED_FOR and fallback on
    # REMOTE_ADDR.
    client_ip = environ.get(
        'HTTP_X_FORWARDED_FOR',
        environ.get('REMOTE_ADDR')
    )
    client_mac = None
    error_msg = None
    iptables_failed = False
    # Verify client IP
    try:
        socket.inet_aton(client_ip)
    except socket.error as e:
        # BUG FIX: the exception was not bound ('as e' was missing), so
        # the str(e) below raised NameError instead of reporting.
        l.error('Client ip:{ip} is invalid'.format(
            ip=repr(client_ip)
        ))
        return {
            'error': str(e),
            'failed': True
        }
    # Attempt to get client HW address with arping
    if use_arping:
        try:
            client_mac = mac_from_ip(
                l,
                config.get('iptables', 'arping'),
                client_ip
            )
        except Exception as e:
            # Non-fatal: fall through with client_mac=None.
            l.info('Failed to get client HW address: {error}'.format(
                error=str(e)
            ))
            error_msg = str(e)
    if client_ip:
        iptables_cmd = config.get('iptables', 'iptables_cmd').format(
            ip_address=client_ip,
            mac_address=client_mac
        )
        output = BytesIO()
        error = BytesIO()
        try:
            # The two arguments must not contain spaces of course.
            rc = sudo(tuple(iptables_cmd.split(' ')), _out=output, _err=error)
        except ErrorReturnCode:
            error.seek(0)
            error_msg = error.read()
            l.warn('{cmd}: exited badly: {error}'.format(
                cmd=('iptables', iptables_cmd),
                error=error_msg
            ))
            iptables_failed = True
            raise
        except Exception as e:
            l.warn('{cmd}: failed: {error}'.format(
                cmd=('iptables', iptables_cmd),
                error=str(e)
            ))
            error_msg = str(e)
            iptables_failed = True
            raise
        if rc.exit_code == 0:
            l.debug('Created iptables rule for client:{ip}'.format(
                ip=client_ip
            ))
    # If all else fails, error! This will be shown to end users.
    return {
        'error': error_msg,
        'failed': iptables_failed
    }
|- Number of jail: 9 `- Jail list: nginx-noscript, postfix, nginx-login, nginx-proxy, nginx-auth, nginx-badbots, fail2ban, sasl, ssh """ t1.action.ie = InfoExtractor('get names') p1 = re.compile(r'([a-zA-Z0-9_-]+(?=, |\n$))+') match = re.findall(p1, t1.action.get_output()) """ >>> match ['nginx-noscript', 'postfix', 'nginx-login', 'nginx-proxy', 'nginx-auth', 'nginx-badbots', 'fail2ban', 'sasl', 'ssh'] """ t2 = Task("Get the banned ips for each jail") d = {} ips_line = [] for jail in match: o = sudo("fail2ban-client", "status", jail) # TODO: eliminate sh module, but implement 'sudo' with subprocess """ >>> o Status for the jail: postfix |- filter | |- File list: /var/log/mail.log | |- Currently failed: 17 | `- Total failed: 140 `- action |- Currently banned: 7 | `- IP list: 217.196.2.132 212.235.31.158 80.174.199.161 74.164.14.171 178.15.66.18 217.92.137.209 121.212.240.175 `- Total banned: 24 """ o | grep('IP list:') > ips_line # TODO: eliminate pyxshell module, replace grep with str.find('IP list:')