def initSvn(self, file_content):
    """Initialise the user's SVN-backed web root.

    Installs a post-commit hook that runs `svn up` on the deployed working
    copy, checks the repository out into the user's home, writes an initial
    index.html and commits it as the user.
    """
    from sh import svn, chmod, chown
    repourl = Config.SVN_ADDR + self.name
    # post-commit hook: refresh the deployed working copy after every commit
    hook_content = r'''#!/bin/bash
export LC_CTYPE=en_US.UTF-8
export LANG=en_US.UTF-8
svn up %s
''' % self.userhome
    #auto update with hook
    os.chdir(os.path.join(self.user_repo, 'hooks'))
    with open('post-commit', 'w') as f:
        f.write(hook_content)
    # NOTE(review): 777 makes the whole repository world-writable — confirm intended
    chmod('-R', 777, self.user_repo)
    #checkout add ci for init
    svn('co', '--non-interactive', '--trust-server-cert', repourl, self.userhome)
    # web server must own the deployed tree so PHP/HTTPD can serve/write it
    chown('-R', Config.HTTPD_USER + ':' + Config.HTTPD_GROUP, self.userhome)
    os.chdir(self.userhome)
    with open('index.html', 'w') as f:
        f.write(file_content)
    svn('add', 'index.html')
    svn('ci', '--username', self.name, '--password', self.passwd,
        '--non-interactive', '--trust-server-cert', '-m', 'init commit',
        '--force-log')
def virtualtap(ictx, dev, ip):
    """Create a virtual tap device

    Verifies that openvpn is installed, then (under sudo, prompting once for
    the password) creates the tap device *dev*, brings it up with address
    *ip*, and makes /dev/net/tun world read/writable.

    Fix: removed the dead local `lCmds` (a command string that was built but
    never executed) and the stale commented-out sudo call.
    """
    # -------------------------------------------------------------------------
    if not which('openvpn'):
        raise click.ClickException(
            'OpenVPN (openvpn) not found. Please install it and execute the command again.'
        )
    # -------------------------------------------------------------------------
    pwd = getpass.getpass("[sudo] password for %s: " % getpass.getuser())
    with sh.contrib.sudo(password=pwd, _with=True):
        sh.openvpn('--mktun', '--dev', dev, _out=sys.stdout)
        sh.ifconfig(dev, 'up', ip, _out=sys.stdout)
        sh.chmod('a+rw', '/dev/net/tun', _out=sys.stdout)
def put_file(self, path):
    """Store *path* in the content-addressed store; return the stored path.

    The file is streamed once through `dd | tee | sha1sum` so the copy and
    the SHA-1 are computed in a single pass. The temporary copy is then moved
    to <store>/<prefix dirs>/<sha1> and made read-only, or discarded when the
    hash is already stored.

    Raises UnableToHashFile if the pipeline exits non-zero.
    """
    temp_file = str(sh.mktemp("-p", self.tmp).stdout,'utf8').strip()
    path = path.strip()
    # Quote the source path for the shell: double quotes when the path itself
    # contains a single quote, single quotes otherwise.
    # NOTE(review): a path containing BOTH quote kinds, or a double quote plus
    # shell metacharacters, would still break/inject here — confirm inputs are trusted.
    if "'" in path:
        returncode, stdout, stderr = launch_command(
            "dd if=\"{0}\" iflag=nofollow bs=4k | tee {1} | sha1sum".format(
                path, temp_file
            )
        )
    else:
        returncode, stdout, stderr = launch_command(
            "dd if='{0}' iflag=nofollow bs=4k | tee {1} | sha1sum".format(
                path, temp_file
            )
        )
    if returncode != 0:
        print(stdout)
        print(stderr)
        raise UnableToHashFile("File : {0}".format(path))
    # sha1sum output begins with the 40-hex-digit digest
    hash_str = re.search("^[0-9a-f]*", str(stdout,'utf8')).group(0)
    destination_folder = self.create_destination_folder(hash_str)
    destination_path = os.path.join(destination_folder, hash_str)
    if not self.is_stored(hash_str):
        sh.mv(temp_file, destination_path)
        sh.chmod("444", destination_path)  # stored blobs are immutable
    else:
        sh.rm(temp_file)  # duplicate content: keep the existing blob
    return destination_path
def _install_php(self):
    """Set up per-vhost PHP: merged php.ini plus FastCGI config and starter.

    Python 2 code (octal literals such as 0755). conf/ and fcgi/ end up
    root-owned; the starter script is made read/execute-only and immutable
    (chattr +i) so the vhost user cannot alter what the web server executes.
    """
    for x in ['conf', 'fcgi']:
        os.makedirs(os.path.join(self.vhost_dir, x), 0755)
    global_updates = os.path.join(self.globals_dir, 'vhost.ini')
    vhost_updates = os.path.join(self.vhost_dir, 'conf/vhost.ini')
    vhost_ini = os.path.join(self.vhost_dir, 'conf/php.ini')
    if os.path.isfile(global_updates):
        shutil.copy(global_updates, vhost_updates)
    #merge(None, vhost_updates, vhost_ini, vhost_dir=self.vhost_dir)
    mergeini("-v", self.vhost_dir, "-o", vhost_ini, global_updates, vhost_updates)
    d = {'vhost_dir': self.vhost_dir}
    with open(os.path.join(self.vhost_dir, 'fcgi/fcgi.conf'), 'w') as f:
        f.write(fcgi_conf_template % d)
    starter_file = os.path.join(self.vhost_dir, 'fcgi/fcgi-starter')
    with open(starter_file, 'w') as f:
        f.write(fcgi_starter_template % d)
    for x in ['conf', 'fcgi']:
        chown("root:root", os.path.join(self.vhost_dir, x), '-R')
    # NOTE(review): the int 555 is stringified to "555" for /bin/chmod, which
    # reads it as octal 0555 — works, but the string '555' would be clearer.
    chmod(555, starter_file)
    owner = "%s:%s" % (self.user, self.group)
    chown(owner, os.path.join(self.vhost_dir, 'fcgi'))
    chown(owner, starter_file)
    chattr('+i', starter_file)
def load(self, mode="ro"):
    """Mount this volume at self.output_path.

    First mounts read-write to scrub OS junk directories (Trash, Spotlight,
    Recycle Bin, ...), unmounts, then remounts with the requested *mode*
    (default read-only).

    :return: True when the final mount succeeded, False otherwise.
    """
    try:
        self.create_output_path()
        sh.chmod("700", self.output_path)
    except sh.ErrorReturnCode as e:
        ## Already mounted readonly.
        pass
    try:
        log.debug("Loading {0}".format(self))
        self.loaded = self.mount_compat("rw")
        if self.loaded:
            # best-effort removal of indexing/trash artifacts left by
            # macOS / Windows / Linux
            try:
                sh.rm(
                    "-rf",
                    os.path.join(self.output_path, '._.Trashes'),
                    os.path.join(self.output_path, '.Spotlight-V100'),
                    os.path.join(self.output_path, 'lost+found'),
                    os.path.join(self.output_path, '$RECYCLE.BIN'),
                    os.path.join(self.output_path, 'System Volume Information'))
            except:
                pass  # the junk may simply not exist
            try:
                sh.umount(self.output_path)
            except:
                self.loaded = False  # could not unmount before remounting
            # remount with the caller's requested mode
            self.loaded = self.mount_compat(mode)
            return self.loaded
        else:
            return False
    except sh.ErrorReturnCode as e:
        self.unload()
        log.exception(e)
        return False
def update_file(fname, who):
    """Write the ClickHouse ODBC DSN definition to *fname*, then hand the
    file to *who*:staff with mode 644."""
    cfg = configparser.ConfigParser()
    # register the DSN, then describe it (key order is preserved on write)
    cfg['ODBC Data Sources'] = {CH_DSN: CH_DRIVER}
    cfg[CH_DSN] = {
        'driver': f'/usr/local/opt/clickhouse-odbc/lib/libclickhouseodbc{CH_VARIANT}.dylib',
        'description': 'Connection to criminals ClickHouse DB',
        'url': f'http://{CH_HOST}:{CH_PORT}/?database={CH_DB}&user={CH_USER}&password={CH_PASSWORD}',
        'server': CH_HOST,
        'password': CH_PASSWORD,
        'port': CH_PORT,
        'database': CH_DB,
        'uid': CH_USER,
        'sslmode': 'no',
    }
    with open(fname, 'w') as fh:
        cfg.write(fh)
    sh.chown(f"{who}:staff", fname)
    sh.chmod("644", fname)
def install(installDir, domain, db_server, db_name, db_user, db_password):
    """Run the PrestaShop CLI installer for *domain*, then remove the install
    directory, clear the prod cache, and fix ownership/permissions."""
    log( "Installing from index_cli.php ... " )
    installer = installDir + 'install/index_cli.php'
    options = [
        "--domain={}".format(domain),
        "--db_server={}".format(db_server),
        "--db_name={}".format(db_name),
        "--db_user={}".format(db_user),
        "--db_password={}".format(db_password),
        "--db_create=1",
        "--ssl=0",
        "[email protected]",
        "--password=admin",
        "--language=fr",
        "--country=fr",
    ]
    result = php(installer, *options)
    print( result )
    log( "Removing install dir ... " )
    rm("-rf", installDir + 'install')
    log( "Removing var/cache/prod ...")
    rm("-rf", installDir + 'var/cache/prod')
    # hand the tree to the app owner; var/ must remain writable at runtime
    chown("-R", APP_OWNER, installDir)
    chmod("-R", "777", installDir + 'var/')
def create_sh_script(filename, commands=None, outfile=None):
    """
    # Not strictly a phasing function, but so commonly used, may as well put here!
    Write an executable bash script that runs *commands* (with `set -e` and
    `set -o pipefail`) and touches an '<outfile>.ok' marker at the end.

    :param filename: name of file to write
    :param commands: list of strings that will be system executed
        (None is treated as an empty list; it previously raised TypeError)
    :param outfile: optional, creates an 'ok' version of this file
    :return: None
    """
    if outfile is None:
        outfile = filename
    if commands is None:
        commands = []  # fix: iterating None crashed with TypeError
    touch_cmd = "touch {FILE}"
    # write inside a context manager so content is flushed/closed before the
    # permission change (the old code chmod-ed before close())
    with open(filename, 'w') as script:
        script.write("#! /bin/bash" + "\n")
        script.write("set -e" + "\n")
        script.write("set -o pipefail" + "\n")
        for cmd in commands:
            script.write(cmd + "\n")
        script.write(touch_cmd.format(FILE=outfile + '.ok') + "\n")
    # stdlib os.chmod replaces the third-party sh.chmod('+x', ...) call
    mode = os.stat(filename).st_mode
    os.chmod(filename, mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH)
def check_config():
    """
    A function that checks an existence of configuration files.

    In case the file does not exist, it is created. In case of ipf file,
    backup configuration is copied from backup file. Also points the
    ipfilter SMF service at the custom ipf.conf location.
    """
    # remember the directory's current mode so it can be restored at the end
    # NOTE(review): '-c %a' is passed as ONE argv element; stat still parses
    # it, but '-c', '%a' would be cleaner — confirm on the target platform.
    mod = sh.stat('-c %a', CONF_DIR).strip()
    # NOTE(review): mode 666 on a DIRECTORY drops the execute bit — confirm.
    sh.chmod('666', CONF_DIR)
    print('Checking configuration files.')
    path = ''.join([CONF_DIR, 'ipf.conf'])
    add_file_to_db('ipf', path)
    # set different boot ipf.conf location
    sh.svccfg('-s', 'ipfilter:default', 'setprop',
              'firewall_config_default/policy = astring: "custom"')
    sh.svccfg('-s', 'ipfilter:default', 'setprop',
              'firewall_config_default/custom_policy_file = astring: "{}"'\
              .format(path))
    sh.svcadm('refresh', 'ipfilter')
    if exists(path):
        print('ipf.conf.............................................OK')
    else:
        copyfile(''.join([BCK_DIR, '.ipf.bck']), path)
        print('ipf.conf has been created............................OK')
    path = ''.join([CONF_DIR, 'ipf6.conf'])
    add_file_to_db('ipf6', path)
    if exists(path):
        print('ipf6.conf............................................OK')
    else:
        copyfile(''.join([BCK_DIR, '.ipf6.bck']), path)
        print('ipf6.conf has been created...........................OK')
    path = ''.join([CONF_DIR, 'ipnat.conf'])
    add_file_to_db('ipnat', path)
    if exists(path):
        print('ipnat.conf...........................................OK')
    else:
        copyfile(''.join([BCK_DIR, '.ipnat.bck']), path)
        print('ipnat.conf has been created..........................OK')
    path = ''.join([CONF_DIR, 'ippool.conf'])
    add_file_to_db('ippool', path)
    if exists(path):
        print('ippool.conf..........................................OK')
    else:
        copyfile(''.join([BCK_DIR, '.ippool.bck']), path)
        # ippool additionally gets a warning banner appended
        with open(path, 'a') as f:
            f.write('\n\n{}'.format(CONF_WARN))
        print('ippool.conf has been created.........................OK')
    # restore the directory mode recorded above
    sh.chmod(mod, CONF_DIR)
    print('Startup configuration done.\n')
def chmod(mode, path, recursive=False):
    ''' Change permissions of path.

        mode is a string or octal integer for the chmod command.

        Examples::

            'o+rw,g+rw'
            0o660
    '''
    # arg order used to be chmod(path, mode, ...), so check types
    # delete this assert if no assertion errors 2015-01-01
    # after we remove this assert, mode can be a string
    assert is_string(path)
    if isinstance(mode, int):
        # /bin/chmod wants an octal mode string, not a decimal int.
        # Fix: the previous '0' + oct(mode) produced '00o660' on Python 3,
        # which chmod rejects; format(mode, 'o') is correct on 2 and 3.
        mode = format(mode, 'o')
    try:
        if recursive:
            sh.chmod('--recursive', mode, path)
        else:
            sh.chmod(mode, path)
    except sh.ErrorReturnCode as sh_exception:
        log.error('unable to chmod: path={}, mode={}'.format(path, mode))
        log.error(sh_exception)
        raise
def upload_script(name, data):
    """Persist an uploaded script under SCRIPTS_PATH and mark it
    user-executable; return a success response."""
    target = conf.SCRIPTS_PATH + name
    sh.touch(target)
    shu.write_file(target, data)
    sh.chmod('u+x', target)
    return conf.mk_succ({'status': 'success'})
def fix_binfile(src, dest=None):
    """Rewrite the generated launcher script *src*, replacing its
    `declare -r app_classpath=` line with LINE_REPLACEMENT.

    When *dest* is not given, *src* is overwritten in place after a backup
    copy is saved as `<src>.orig` (exits with status 1 if the backup fails).
    The resulting file is made user/group executable.
    """
    import os
    # Fix: mkstemp() returns an OPEN file descriptor — the original code
    # discarded it ('_, outfile = mkstemp()') and leaked it; close it here.
    fd, outfile = mkstemp()
    os.close(fd)
    logging.info("Updating {} (writing temporary file to {}).".format(src, outfile))
    with open(outfile, 'w') as outf:
        with open(src) as inf:
            for line in inf:
                if line.startswith('declare -r app_classpath='):
                    outf.write(LINE_REPLACEMENT)
                else:
                    outf.write(line)
    if not dest:
        infile_bak = '.'.join([src, 'orig'])
        logging.warning("Overwriting original file {} (backup copy kept in {})".format(
            src, infile_bak))
        try:
            cp(src, infile_bak)
            dest = src
        except ErrorReturnCode as error:
            logging.error("Failed to make backup copy of {}; did you have the necessary "
                          "permissions? (Error: {})".format(src, error.stderr))
            exit(1)
    mv(outfile, dest)
    chmod('ug+x', dest)
def cleanup_and_restore_files(process): timestamp = datetime.datetime.utcnow().strftime('%Y-%m-%dT%H%M%S') # Compress and timestamp the existing files minus the repos with sh.pushd('/opt/anaconda'): sh.tar( "--exclude=storage/object/anaconda-repository", "-czvf", f"git_pgdata.snapshot_{timestamp}.tar.gz", "storage" ) # Cleanup directories as things will get restored sh.rm('-Rf', '/opt/anaconda/storage/git') sh.rm('-Rf', '/opt/anaconda/storage/pgdata') sh.rm('-Rf', '/opt/anaconda/storage/object/anaconda-objects') sh.rm('-Rf', '/opt/anaconda/storage/object/anaconda-projects') # Restore the files file_backup_restore(process, 'restore') # Recreate the postgres directory and set permissions sh.mkdir(process.postgres_system_backup) sh.chown('999:root', f'{process.postgres_system_backup}') sh.chmod('700', f'{process.postgres_system_backup}') return
def initGit(self, file_content):
    """Initialise the user's git-backed web root.

    Clones the user's repository into SDP_USER_DATA_HOME, commits and pushes
    an initial index.html, then installs a post-update hook that pulls the
    deployed copy after each push (re-cloning from scratch if the pull fails).
    """
    from sh import git,chmod,chown
    git_repourl = 'git@' + Config.GIT_SVR + ':' + self.user_gitrepo
    os.chdir(Config.SDP_USER_DATA_HOME)
    git('clone', git_repourl)
    #git of add ci push
    os.chdir(self.userhome)
    with open('index.html', 'w') as f:
        f.write(file_content)
    git('add', 'index.html')
    git('commit', '-m', 'init commit')
    git('push', 'origin', 'master')
    #git of hooks, for update code
    post_update_content = """#!/bin/bash
#Automatically update the project code, if there is an automatic update error, will re deploy the code.
unset $(git rev-parse --local-env-vars)
DeployPath=%s
echo -e "\033[33mDeploy user is => %s\033[0m"
[ ! -d $DeployPath ] && echo -e "\033[31mDirectory $DeployPath does not exist!\033[0m" && exit 1
cd $DeployPath
git pull
if test $? -ne 0;then
echo -e "\033[31mAutomatic pull fail, try to re deploy!\033[0m"
cd ~
rm -rf ${DeployPath}/*
rm -rf ${DeployPath}/.git
git clone %s $DeployPath
[ $? -ne 0 ] && echo -e "\033[31mRedeploy fail, quit!\033[0m" && exit 1
fi
echo -e "\033[32mAutomatic deployment complete.\033[0m"
exit 0""" %(self.userhome, self.name, git_repourl)
    with open(os.path.join(self.user_gitrepo, 'hooks/post-update'), 'w') as f:
        f.write(post_update_content)
    # the hook must be executable for git to run it
    chmod('a+x', os.path.join(self.user_gitrepo, 'hooks/post-update'))
    chown('-R', Config.GIT_USER, self.userhome)
def check_config():
    """
    A function that checks an existence of configuration files.

    In case the file does not exist, it is created. In case of ipf file,
    backup configuration is copied from backup file. Also points the
    ipfilter SMF service at the custom ipf.conf location.
    """
    # remember the directory's current mode so it can be restored at the end
    # NOTE(review): '-c %a' is passed as ONE argv element; stat still parses
    # it, but '-c', '%a' would be cleaner — confirm on the target platform.
    mod = sh.stat('-c %a', CONF_DIR).strip()
    # NOTE(review): mode 666 on a DIRECTORY drops the execute bit — confirm.
    sh.chmod('666', CONF_DIR)
    print('Checking configuration files.')
    path = ''.join([CONF_DIR, 'ipf.conf'])
    add_file_to_db('ipf', path)
    # set different boot ipf.conf location
    sh.svccfg('-s', 'ipfilter:default', 'setprop',
              'firewall_config_default/policy = astring: "custom"')
    sh.svccfg('-s', 'ipfilter:default', 'setprop',
              'firewall_config_default/custom_policy_file = astring: "{}"'\
              .format(path))
    sh.svcadm('refresh', 'ipfilter')
    if exists(path):
        print('ipf.conf.............................................OK')
    else:
        copyfile(''.join([BCK_DIR, '.ipf.bck']), path)
        print('ipf.conf has been created............................OK')
    path = ''.join([CONF_DIR, 'ipf6.conf'])
    add_file_to_db('ipf6', path)
    if exists(path):
        print('ipf6.conf............................................OK')
    else:
        copyfile(''.join([BCK_DIR, '.ipf6.bck']), path)
        print('ipf6.conf has been created...........................OK')
    path = ''.join([CONF_DIR, 'ipnat.conf'])
    add_file_to_db('ipnat', path)
    if exists(path):
        print('ipnat.conf...........................................OK')
    else:
        copyfile(''.join([BCK_DIR, '.ipnat.bck']), path)
        print('ipnat.conf has been created..........................OK')
    path = ''.join([CONF_DIR, 'ippool.conf'])
    add_file_to_db('ippool', path)
    if exists(path):
        print('ippool.conf..........................................OK')
    else:
        copyfile(''.join([BCK_DIR, '.ippool.bck']), path)
        # ippool additionally gets a warning banner appended
        with open(path, 'a') as f:
            f.write('\n\n{}'.format(CONF_WARNING))
        print('ippool.conf has been created.........................OK')
    # restore the directory mode recorded above
    sh.chmod(mod, CONF_DIR)
    print('Startup configuration done.\n')
def _install_php(self):
    """Set up per-vhost PHP: merged php.ini plus FastCGI config and starter.

    Python 2 code (octal literals such as 0755). conf/ and fcgi/ end up
    root-owned; the starter script is made read/execute-only and immutable
    (chattr +i) so the vhost user cannot alter what the web server executes.
    """
    for x in ['conf', 'fcgi']:
        os.makedirs(os.path.join(self.vhost_dir, x), 0755)
    global_updates = os.path.join(self.globals_dir, 'vhost.ini')
    vhost_updates = os.path.join(self.vhost_dir, 'conf/vhost.ini')
    vhost_ini = os.path.join(self.vhost_dir, 'conf/php.ini')
    if os.path.isfile(global_updates):
        shutil.copy(global_updates, vhost_updates)
    #merge(None, vhost_updates, vhost_ini, vhost_dir=self.vhost_dir)
    mergeini("-v", self.vhost_dir, "-o", vhost_ini, global_updates, vhost_updates)
    d = { 'vhost_dir': self.vhost_dir }
    with open(os.path.join(self.vhost_dir, 'fcgi/fcgi.conf'), 'w') as f:
        f.write(fcgi_conf_template % d)
    starter_file = os.path.join(self.vhost_dir, 'fcgi/fcgi-starter')
    with open(starter_file, 'w') as f:
        f.write(fcgi_starter_template % d)
    for x in ['conf', 'fcgi']:
        chown("root:root", os.path.join(self.vhost_dir, x), '-R')
    # NOTE(review): the int 555 is stringified to "555" for /bin/chmod, which
    # reads it as octal 0555 — works, but the string '555' would be clearer.
    chmod(555, starter_file)
    owner = "%s:%s" % (self.user, self.group)
    chown(owner, os.path.join(self.vhost_dir, 'fcgi'))
    chown(owner, starter_file)
    chattr('+i', starter_file)
def add_sudoers_option(line):
    """
    Adds a option to /etc/sudoers file in safe manner.

    Generate a bash script which will be invoke itself as visudo EDITOR.

    http://stackoverflow.com/a/3706774/315168

    Python 2 code (print statements).
    """
    from sh import chmod, rm
    with sudo:
        if not has_line("/etc/sudoers", line):
            print "Updating /etc/sudoers to enable %s" % line
            # delete=False: the script must survive close() so visudo can run it
            tmp = tempfile.NamedTemporaryFile(mode="wt", delete=False)
            # Generate visudo EDITOR which adds the line
            # https://www.ibm.com/developerworks/mydeveloperworks/blogs/brian/entry/edit_sudoers_file_from_a_script4?lang=en
            script = ADD_LINE_VISUDO.format(line=line)
            tmp.write(script)
            tmp.close()
            chmod("u+x", tmp.name)
            # run the generated script; it re-invokes itself as visudo's EDITOR
            Command(tmp.name)()
            rm(tmp.name)
def write_url_sh(repo_url, *, verbose: int):
    """Generate url.sh from *repo_url* and mark it executable.

    Opens url.sh in exclusive-creation mode, so an existing file raises
    FileExistsError rather than being overwritten.
    """
    template = generate_url_template(url=repo_url)
    with open("url.sh", 'x', encoding='utf8') as fh:
        fh.write(template)
    sh.chmod('+x', 'url.sh')
def create_output_path(self):
    """Lazily provision a private (mode 700) temp directory under WORK_DIR
    and record it as self.output_path."""
    if self.output_path:
        return  # already provisioned
    sh.mkdir("-p", WORK_DIR)
    sh.chmod("700", WORK_DIR)
    raw = sh.mktemp("-p", WORK_DIR, "-d").stdout
    self.output_path = str(raw, 'utf8').split("\n")[0]
    log.debug("Output path -> {0}".format(self.output_path))
    sh.chmod("700", self.output_path)
def get_env(self):
    """Build the compiler environment (CC/AR/LD, CFLAGS/LDFLAGS, ccache
    wrapping) for building against this arch's iOS SDK.

    :return: dict of environment variables for the build.
    """
    include_dirs = [
        "-I{}/{}".format(
            self.ctx.include_dir,
            d.format(arch=self))
        for d in self.ctx.include_dirs]
    env = {}
    ccache = sh.which('ccache')
    # locate clang for the target SDK via xcrun
    cc = sh.xcrun("-find", "-sdk", self.sdk, "clang").strip()
    if ccache:
        ccache = ccache.strip()
        use_ccache = environ.get("USE_CCACHE", "1")
        if use_ccache != '1':
            env["CC"] = cc
        else:
            # wrap clang in a one-line shell script so 'CC' stays a single
            # executable path while still going through ccache; the script is
            # generated once and cached on self._ccsh
            if not self._ccsh:
                self._ccsh = ccsh = sh.mktemp().strip()
                with open(ccsh, 'w') as f:
                    f.write('#!/bin/sh\n')
                    f.write(ccache + ' ' + cc + ' "$@"\n')
                sh.chmod('+x', ccsh)
            else:
                ccsh = self._ccsh
            env["USE_CCACHE"] = '1'
            env["CCACHE"] = ccache
            env["CC"] = ccsh
            # forward any user-provided CCACHE_* settings, then fill defaults
            env.update({k: v for k, v in environ.items() if k.startswith('CCACHE_')})
            env.setdefault('CCACHE_MAXSIZE', '10G')
            env.setdefault('CCACHE_HARDLINK', 'true')
            env.setdefault('CCACHE_SLOPPINESS',
                           ('file_macro,time_macros,'
                            'include_file_mtime,include_file_ctime,file_stat_matches'))
    else:
        env["CC"] = cc
    env["AR"] = sh.xcrun("-find", "-sdk", self.sdk, "ar").strip()
    env["LD"] = sh.xcrun("-find", "-sdk", self.sdk, "ld").strip()
    env["OTHER_CFLAGS"] = " ".join(include_dirs)
    env["OTHER_LDFLAGS"] = " ".join([
        "-L{}/{}".format(self.ctx.dist_dir, "lib"),
    ])
    env["CFLAGS"] = " ".join([
        "-arch", self.arch,
        "-pipe", "-no-cpp-precomp",
        "--sysroot", self.sysroot,
        #"-I{}/common".format(self.ctx.include_dir),
        #"-I{}/{}".format(self.ctx.include_dir, self.arch),
        "-O3",
        self.version_min
    ] + include_dirs)
    env["LDFLAGS"] = " ".join([
        "-arch", self.arch,
        "--sysroot", self.sysroot,
        "-L{}/{}".format(self.ctx.dist_dir, "lib"),
        "-lsqlite3",
        self.version_min
    ])
    return env
def run(self, cache, args=[]):
    """Fetch the program named by the URL fragment into *cache*, mark it
    executable, and run it with *args*.

    Raises Invalid when the URL carries no fragment (nothing to execute).
    """
    fragment = self.url.fragment
    if fragment is None:
        raise Invalid('Arx can not execute tarball URLs that have no '
                      'fragment.')
    program = cache.join('program', fragment.split('/')[-1])
    self.place(cache, program)
    chmod('a+rx', str(program))
    Command(str(program))(*args)
def run(self, cache, args=[]):
    """Run the resolved file with *args*, making it executable first when
    possible.

    If the file is not executable: for dotted (local) resolutions it is
    chmod-ed a+rx; otherwise we only log an error and still attempt to run.

    Fix: removed an unreachable `if not True: raise Invalid(...)` guard
    (dead code — the condition could never be true).
    """
    if not os.access(str(self.resolved), os.X_OK):
        if self.dot:
            chmod('a+rx', str(self.resolved))
        else:
            log.error('Not able to mark `%s` as executable :/' % self.resolved)
    cmd = Command(str(self.resolved))
    cmd(*args)
def create_key_pair(name):
    """
    Create the key pair for the instances

    Creates an EC2 key pair named *name*, writes the private key material to
    the local key path, and restricts it to owner-read-only (mode 400) as
    required by ssh.
    """
    ec2 = boto3.client('ec2')
    response = ec2.create_key_pair(KeyName=name)
    path = Security.get_key_path(name)
    # Fix: use a context manager so the handle is closed (and content
    # flushed) even if the write fails, instead of open/write/close.
    with open(path, "w+") as key_file:
        key_file.write(response["KeyMaterial"])
    print("Modify the right on the local key : ", path)
    sh.chmod("400", path)
def main():
    """Prepare persistent data directories and permissions for the services
    (gpg homedir modes, postgres config ownership, persistent dirs).

    :return: 0, suitable for use as a process exit code.
    """
    data_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), '../../..', 'data'))
    log('data dir: {}'.format(data_dir))
    # gpg insists on these permissions
    oce_gpg_dir = '{}/oce/.gnupg'.format(data_dir)
    if os.path.exists(oce_gpg_dir):
        sh.chmod('--recursive', 'go-rwx', oce_gpg_dir)
        log('prepared oce')
    if USE_POSTGRESQL:
        # postgres must own its config dir
        sh.chown('-h', '--recursive', 'postgres:postgres', '/etc/postgresql')
    # we need a dir for other persistent data that is not normally in data_dir)
    persistent_dir = '{}/persistent'.format(data_dir)
    if not os.path.exists(persistent_dir):
        sh.mkdir('--parents', persistent_dir)
        sh.chown('goodcrypto:goodcrypto', persistent_dir)
        sh.chmod('g+rx', persistent_dir)
        sh.chmod('o-rwx', persistent_dir)
        log('prepared {}'.format(persistent_dir))
    # root should own these subdirectories
    django_dir = '{}/django'.format(persistent_dir)
    if not os.path.exists(django_dir):
        sh.mkdir(django_dir)
        sh.chown('goodcrypto:goodcrypto', django_dir)
        sh.chmod('go-rwx', django_dir)
    return 0
def setupNginx(domain, username, confTemplate, sitePublic, siteLogs):
    """Render the nginx vhost template for *domain* and enable the site.

    Reads *confTemplate*, substitutes the @@...@@ placeholders, writes the
    result to sites-available with owner-only permissions, symlinks it into
    sites-enabled, and restarts nginx.

    Fix: both files are now handled with context managers — the template
    handle used to be left open (open(...).read()).
    """
    nginxSitesAvailable = '%s/%s' % (NGINX_CONFIG, domain)
    nginxEnabled = '%s/%s' % (NGINX_SITES_ENABLED, domain)
    with open(confTemplate) as template_file:
        nginxTemplate = template_file.read()
    nginxTemplate = nginxTemplate.replace("@@HOSTNAME@@", domain)
    nginxTemplate = nginxTemplate.replace("@@PATH@@", sitePublic)
    nginxTemplate = nginxTemplate.replace("@@LOG_PATH@@", siteLogs)
    nginxTemplate = nginxTemplate.replace("@@SOCKET@@", username)
    with open(nginxSitesAvailable, "w") as nginxConf:
        nginxConf.write(nginxTemplate)
    # rendered config may reference private socket paths: owner-only access
    sh.chmod('600', nginxSitesAvailable)
    # raises FileExistsError if the site is already enabled
    os.symlink(nginxSitesAvailable, nginxEnabled)
    sh.service("nginx", "restart")
def test_executable_bit(git_dir, hg_repo):
    """Round-trip test: hg→git conversion preserves the executable bit across
    history, and a git-side chmod propagates back to hg on push."""
    sh.cd(hg_repo)
    # build three hg commits: non-exec -> exec -> non-exec
    write_to_test_file("b")
    sh.chmod('644', 'test_file')
    sh.hg.add('test_file')
    sh.hg.commit(message='add file')
    sh.chmod('755', 'test_file')
    sh.hg.commit(message='make executable')
    sh.chmod('644', 'test_file')
    sh.hg.commit(message='make unexecutable')
    git_repo = clone_repo(git_dir, hg_repo)
    sh.cd(git_repo)
    # walk the converted history backwards: tip non-exec, parent exec, root non-exec
    assert git_repo.joinpath('test_file').access(os.X_OK) == False
    sh.git.checkout('HEAD^')
    assert git_repo.joinpath('test_file').access(os.X_OK) == True
    sh.git.checkout('HEAD^')
    assert git_repo.joinpath('test_file').access(os.X_OK) == False
    # flip the bit on the git side and push back to hg
    sh.git.checkout('master')
    sh.chmod('755', 'test_file')
    sh.git.add('test_file')
    sh.git.commit(message="make executable again")
    sh.git.push()
    sh.cd(hg_repo)
    sh.update()
    assert git_repo.joinpath('test_file').access(os.X_OK) == True
def setupNginx(domain,username,confTemplate,sitePublic,siteLogs):
    """Render the nginx vhost template for *domain*, install it under
    sites-available, enable it via symlink, and restart nginx."""
    nginxSitesAvailable = '%s/%s'%(NGINX_CONFIG,domain)
    nginxEnabled = '%s/%s'%(NGINX_SITES_ENABLED,domain)
    # substitute the @@...@@ placeholders in the template
    nginxTemplate = open(confTemplate).read()
    nginxTemplate = nginxTemplate.replace("@@HOSTNAME@@", domain)
    nginxTemplate = nginxTemplate.replace("@@PATH@@", sitePublic)
    nginxTemplate = nginxTemplate.replace("@@LOG_PATH@@", siteLogs)
    nginxTemplate = nginxTemplate.replace("@@SOCKET@@", username)
    nginxConf = open(nginxSitesAvailable, "w")
    nginxConf.write(nginxTemplate)
    nginxConf.close()
    # rendered config may reference private socket paths: owner-only access
    sh.chmod('600', nginxSitesAvailable)
    # os.symlink raises if the site is already enabled
    os.symlink(nginxSitesAvailable, nginxEnabled)
    sh.service("nginx","restart")
def run(self, input_paths, desired_outputs, output_names=None): """ Given a list of paths to input files (possibly dummies), desired output files and desired names of these output files, the method constructs a shell script which runs SAMMY and answers the prompt such that it is fed with the input, and then obtains the desired output files at their respective paths. """ # Set up and move to temporary directory # tempdir = temp_file_gen('Sammy_run', directory=True) if output_names == None: output_names = desired_outputs assert len(desired_outputs) == len(output_names) for k in range(len(input_paths)): # SAMMY doesn't accept too long variable names # try: shutil.copy(input_paths[k], tempdir) except IOError: pass # may be dummy file input_paths[k] = re.sub('.*/', '', input_paths[k]) # for k in range(len(output_names)): output_names[k] = absolute_path(output_names[k]) origdir = os.getcwd() os.chdir(tempdir) # try: # Combined w. finally to ensure moving back to original directory # # Construct shell script 'run_file' # run_file = 'run_file' with open(run_file, 'w') as f: f.write('#! /bin/sh\n\nsammy<<EOF\n') for path in input_paths + ['', 'EOF']: f.write(path + '\n') # # Change permissions # sh.chmod('755', run_file) # # Run shell script running SAMMY and answering prompts # call('./%s > terminal.out 2>&1' % run_file, shell=True) # # Rename the desired outputs # for k in range(len(desired_outputs)): shutil.move(desired_outputs[k], output_names[k]) # # Move back to original directory even if error has been raised # finally: os.chdir(origdir) # # Clean up # if self.cleanup: sh.rm('-rf', tempdir)
def setupWP(domain,username,password):
    """Provision a WordPress site: dedicated system user, logs/public dirs,
    nginx vhost and a PHP-FPM pool running as that user."""
    site_root = '/webapps/' + domain
    site_logs = site_root + '/logs'
    site_public = site_root + '/public'
    template = 'wp.nginx.vhost.conf.template'
    # the site root doubles as the user's home directory
    sh.useradd('-m', '-d', site_root, username, '-s', '/bin/bash', '-p', password)
    sh.usermod('-aG', username, WEB_SERVER_GROUP)
    for directory in (site_logs, site_public):
        sh.mkdir('-p', directory)
    sh.cp('index.php', site_public)
    sh.chmod('-R', '750', site_root)
    sh.chmod('-R', '770', site_logs)
    sh.chown('-R', "%s:%s" % (username, username), site_root)
    setupNginx(domain, username, template, site_public, site_logs)
    setupPhpFpm(username)
def setUpClass(cls):
    """Configure logging, verify the puppet agent's cache setting, load test
    configuration, and back up the current site.pp manifest."""
    # Prepare logger
    cls.log = logging.getLogger('netapp_puppet_module_tests')
    cls.log.setLevel(logging.DEBUG)
    cls.ch = logging.StreamHandler()
    cls.ch.setLevel(logging.DEBUG)
    cls.log.addHandler(cls.ch)
    cls.log.debug("\n"+"-"*45 +" Tests is starting "+"-"*45 + '\n')
    # Check if 'puppet agent --configprint usecacheonfailure' if false
    # (a cached catalog would mask manifest errors these tests rely on)
    cls.log.debug("Puppet agent option 'usecacheonfailure' is set to: " +
                  sh.puppet('agent','--configprint','usecacheonfailure').upper().strip())
    if sh.puppet('agent','--configprint','usecacheonfailure').upper().strip()!='FALSE':
        raise Exception("You need to set Puppet agent option 'usecacheonfailure' on 'false'!")
    # Read config
    cls.log.debug("Reading configuration...")
    cls.url = configuration.server_root_url
    cls.manifest_path = configuration.manifest_path
    cls.first_system_id = configuration.first_system_id
    cls.first_system_ip1 = configuration.first_system_ip1
    cls.first_system_ip2 = configuration.first_system_ip2
    cls.first_system_pass = configuration.first_system_pass
    cls.first_system_test_pass = configuration.first_system_test_pass
    cls.first_system_test_ip = configuration.first_system_test_ip
    cls.second_system_id = configuration.second_system_id
    cls.second_system_ip1 = configuration.second_system_ip1
    cls.second_system_ip2 = configuration.second_system_ip2
    cls.second_system_pass = configuration.second_system_pass
    # Save current site.pp (timestamped backup alongside the original)
    cls.bck_manifest_name = cls.manifest_path + \
        '/site.pp.' + \
        datetime.datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d_%H:%M:%S') + \
        '.bck'
    cls.log.debug("Saving original site.pp to {0}...".format(cls.bck_manifest_name))
    # Hack for local running: go through sudo when not running as root
    if os.geteuid() != 0:
        sh.sudo('/bin/cp', cls.manifest_path + "/site.pp", cls.bck_manifest_name)
        sh.sudo('/bin/chmod', '664', cls.bck_manifest_name)
    else:
        sh.cp(cls.manifest_path + "/site.pp", cls.bck_manifest_name)
        sh.chmod('664', cls.bck_manifest_name)
    return
def setupWP(domain, username, password):
    """Provision a WordPress site: dedicated system user, logs/public dirs,
    nginx vhost and a PHP-FPM pool running as that user."""
    siteRoot = '/webapps/%s' % (domain)
    siteLogs = '/webapps/%s/logs' % (domain)
    sitePublic = '/webapps/%s/public' % (domain)
    wpConfTemplate = 'wp.nginx.vhost.conf.template'
    # the site root doubles as the user's home directory
    # NOTE(review): useradd -p expects an already-encrypted password — confirm
    # callers hash it before passing it in.
    sh.useradd('-m', '-d', siteRoot, username, '-s', '/bin/bash', '-p', password)
    sh.usermod('-aG', username, WEB_SERVER_GROUP)
    sh.mkdir('-p', siteLogs)
    sh.mkdir('-p', sitePublic)
    sh.cp('index.php', sitePublic)
    # owner full access, group (web server) read/traverse; logs group-writable
    sh.chmod('-R', '750', siteRoot)
    sh.chmod('-R', '770', siteLogs)
    sh.chown('-R', "%s:%s" % (username, username), siteRoot)
    setupNginx(domain, username, wpConfTemplate, sitePublic, siteLogs)
    setupPhpFpm(username)
def reset_permissions(username, folder): """ Reset UNIX file permissions on a Plone installation folder. We set files readable only by the owner. """ from sh import chmod print "Re(setting) file permissions on %s" % folder # Disable read access for other UNIX users chown("-R", "%s:%s" % (username, username), folder) chmod("-R", "o-rwx", folder) # In the case someone has run the buildout as root and # left badly owned, non-writable files around chmod("-R", "u+rwx", folder)
def write_pgpass(self, db_settings, old_db):
    """Write a ~/.pgpass containing credentials for the new and old
    databases (mode 0600, as libpq requires); return the file's path."""
    entries = [
        "{0}:{1}:{2}:{3}:{4}".format(
            db_settings['HOST'],
            db_settings['PORT'],
            dbname,
            db_settings['USER'],
            db_settings['PASSWORD'],
        )
        for dbname in (db_settings['NAME'], old_db)
    ]
    pgpass_file = '{0}/.pgpass'.format(home)
    with open(pgpass_file, 'w') as fh:
        fh.write('\n'.join(entries))
    sh.chmod('0600', pgpass_file)
    return pgpass_file
def _create_docroot(self):
    """Create the vhost directory tree (logs, htdocs, temp) and hand it to
    the vhost user; install PHP support when enabled.

    Python 2 code (print statement, 0755 octal literals). Raises if the
    vhost directory already exists.
    """
    if os.path.exists(self.vhost_dir):
        raise Exception("Directory %s already exist!" % self.vhost_dir)
    print "Creating vhost %s in %s" % (self.fqdn, self.vhost_dir)
    os.makedirs(self.vhost_dir, 0755)
    for x in ['logs', 'htdocs', 'temp/uploads', 'temp/sessions']:
        os.makedirs(os.path.join(self.vhost_dir, x), 0755)
    # NOTE(review): the int 550 is stringified to "550" for /bin/chmod, which
    # reads it as octal 0550 (r-x owner/group) — works, but '550' is clearer.
    chmod(550, os.path.join(self.vhost_dir, 'temp/'))
    owner = "%s:%s" % (self.user, self.group)
    chown(owner, self.vhost_dir, '-R')
    if self.enable_php:
        self._install_php()
def setup():
    """Freshly clone pandoc-types, install the GHCI helper script, and
    tighten permissions the way stack requires."""
    # start from a clean clone of the pandoc-types repository
    sh.rm("-rf", "pandoc-types")
    sh.git("clone", "https://github.com/jgm/pandoc-types.git")
    sh.cd("pandoc-types")
    # install the GHCI script
    with open(".ghci", "w") as script:
        script.write(GHCI_SCRIPT)
    # conform to stack requirements: no group/world write on the project
    # directory or the .ghci file
    sh.chmod("go-w", "../pandoc-types")
    sh.chmod("go-w", ".ghci")
    sh.cd("..")
def installfile(f_, dest_):
    """Install a file in a destination directory.

    Python 2 code (print statements). Copies via sudo, then sets mode 755;
    exits the program (cleanexit(1)) on any failure.
    """
    dstf_ = "{0}/{1}".format(dest_, f_)
    try:
        sudo.cp(f_, dstf_)
    except Exception as exception_:
        print u"[!] Exception: {0}".format(exception_)
        print "[!] Failed to install {0} as {1}".format(f_, dstf_)
        cleanexit(1)
    if mem.options.verbose:
        print "[+] Installed {0} to {1}".format(f_, dstf_)
    try:
        # world-readable/executable, owner-writable (octal 755)
        chmod("755", dstf_)
    except Exception as exception_:
        print u"[!] Exception: {0}".format(exception_)
        print "[!] Failed to set permissions on {0}".format(dstf_)
        cleanexit(1)
def _create_bootstrap_img(self):
    """Build an NTFS loopback image containing bootstrap.py and config.json
    for attachment to a guest VM; return the image path."""
    self._bootstrap_dir = tempfile.mkdtemp()
    sh.chmod("o+rwx", self._bootstrap_dir)
    self._bootstrap_files = os.path.join(self._bootstrap_dir, "files")
    os.makedirs(self._bootstrap_files)
    with open(os.path.join(os.path.dirname(__file__), "bootstrap.py"), "r") as f:
        bootstrap_contents = f.read()
    # NOTE(review): files opened "wb" but written str — valid on Python 2
    # only; under Python 3 these writes raise TypeError. Confirm target.
    with open(os.path.join(self._bootstrap_files, "bootstrap.py"), "wb") as f:
        f.write(bootstrap_contents)
    with open(os.path.join(self._bootstrap_files, "config.json"), "wb") as f:
        f.write(self._make_config())
    self._bootstrap_img = os.path.join(self._bootstrap_dir, "bootstrap.img")
    # sparse ~2MB file (seek past end), then format it NTFS
    sh.dd("if=/dev/null", "bs=1K", "of={}".format(self._bootstrap_img), "seek=2040")
    sh.Command("mkfs.ntfs")("-F", self._bootstrap_img)
    #sh.Command("mkfs.vfat")(self._bootstrap_img)
    mounted_dir = os.path.join(self._bootstrap_dir, "mounted")
    os.makedirs(mounted_dir)
    # loop-mount the image and copy the payload in
    output = sh.mount("-t", "ntfs", "-o", "loop", self._bootstrap_img, mounted_dir)
    #output = sh.mount("-t", "vfat", "-o", "loop", self._bootstrap_img, mounted_dir)
    #self._log.debug("mount output: " + str(output))
    shutil.copy(os.path.join(self._bootstrap_files, "bootstrap.py"), mounted_dir)
    shutil.copy(os.path.join(self._bootstrap_files, "config.json"), mounted_dir)
    try:
        sh.umount(mounted_dir)
    except:
        pass  # best effort; the image is already populated
    return self._bootstrap_img
def start1(usrID, proID):
    """Create and start a per-user Docker container from image *proID* with
    the user's home directory bind-mounted, start sshd inside it, and return
    its id and IP (or the shared `error` dict on failure)."""
    try:
        sh.mkdir("/home/" + usrID)
        sh.chmod("777", "/home/" + usrID)
        sh.docker("run", "-itd", "-v", "/home/" + usrID + ":/home/" + usrID,
                  "--name", proID + usrID, proID)  # create the container
        sh.docker("exec", proID + usrID, 'service', 'ssh', 'start')  # start the ssh service inside the container
        # Below: return the container id, user id and container IP
        containerID = str(
            sh.docker("inspect", "-f", "{{.Config.Hostname}}", proID + usrID)).replace('\n', '').replace('\r', '')
        containerIP = str(
            sh.docker("inspect", "-f", "{{.NetworkSettings.IPAddress}}", proID + usrID)).replace('\n', '').replace('\r', '')
        return {'containerID': containerID, 'containerIP': containerIP}
    except Exception as e:
        # report the failure through the module-level error dict
        error["error"] = str(e)
        return error
def _hotplug_empty_disk(self):
    """Create a small empty NTFS disk image and hot-attach it to the running
    libvirt domain as a USB disk."""
    self._tmpdir = tempfile.mkdtemp()
    sh.chmod("o+rwx", self._tmpdir)
    self._tmpdisk = os.path.join(self._tmpdir, "tmpdisk.img")
    # sparse ~1MB file (seek past end), then format it NTFS
    sh.dd("if=/dev/null", "bs=1K", "of={}".format(self._tmpdisk), "seek=1030")
    sh.Command("mkfs.ntfs")("-F", self._tmpdisk)
    disk_file = os.path.join(self._tmpdir, "disk.xml")
    # NOTE(review): opened "wb" but written a str — valid on Python 2 only;
    # under Python 3 this write raises TypeError. Confirm the target.
    with open(disk_file, "wb") as f:
        f.write("""
<disk type="file" device="disk">
  <driver name="qemu" type="raw" cache="none" io="native"/>
  <source file="{}"/>
  <target dev="sda" bus="usb"/>
</disk>
""".format(self._tmpdisk))
    sh.virsh("attach-device", self._domain, disk_file)
def setup(self, path): ''' Sets up a component repository so it can be served This method is used by `self.init()` and `self.clone()` when a new component is created using either of these methods. ''' # Copy post-receive into .git/hooks so necessary operations can # be run on the repository after pushes # This script must be executable, that is done below cp( '-f', os.path.join(HOME_DIR, 'git-post-receive.sh'), os.path.join(path, '.git/hooks/post-receive') ) # Allow the (non-bare) repostory to be pushed to by setting # receive.denyCurrentBranch to ignore # By default Git does not allow pushing to a repo with a working dir # because the working tree can become out of sync. # Our post-receive hook (above) deals with that self.git(path, 'config', 'receive.denyCurrentBranch', 'ignore') # Set the default user for the repository # If you don't do this, then when doing a git commit as user www-data, # Git will complain that these config values are not set. That could be done globally # for the www-data user, but doing this seems cleaner self.git(path, 'config', 'user.name', 'Stencila Hub') self.git(path, 'config', 'user.email', '*****@*****.**') # These filesystem permissions fail when `/srv/stencila/store` is a NFS mount # So currently commented out until we work out best way to deal with permissions # on the store # Change the group of the new component recursively so that # users including www-data can modify it chgrp('-R', 'stencila', path) # Set permissions on the repository so that post-receive.sh is executable # and content can be read by Nginx for serving chmod('-R', '775', path)
def test_custom_yaml():
    """Check that ldap2pg honours a custom YAML config and its file mode."""
    from sh import ErrorReturnCode, chmod, ldap2pg, rm

    LDAP2PG_CONFIG = 'my-test-ldap2pg.yml'
    rm('-f', LDAP2PG_CONFIG)

    # Pointing LDAP2PG_CONFIG at a missing file must fail.
    with pytest.raises(ErrorReturnCode):
        ldap2pg(_env=dict(os.environ, LDAP2PG_CONFIG=LDAP2PG_CONFIG))

    rendered = YAML_FMT % os.environ
    with open(LDAP2PG_CONFIG, 'w') as fh:
        fh.write(rendered)

    # Purge env from value set in file. Other are reads from ldaprc.
    # Ensure world readable password is denied
    with pytest.raises(ErrorReturnCode):
        ldap2pg(config=LDAP2PG_CONFIG, _env=ldapfree_env())

    # And that fixing file mode do the trick.
    chmod('0600', LDAP2PG_CONFIG)
    ldap2pg('--config', LDAP2PG_CONFIG, _env=ldapfree_env())
def switch_to_custom_manifest(cls, manifest_body):
    """
    Helper to overwrite original manifest by custom manifest
    :param manifest_body:
    :return: None
    """
    staging_path = "/var/tmp/netapp_test_suite_tmp_site.pp"
    site_pp = cls.manifest_path + "/site.pp"
    # Stage the manifest in a world-writable location first, then move it
    # into place (with sudo when not running as root).
    with open(staging_path, 'w') as temp_site_pp:
        temp_site_pp.write(manifest_body)
    if os.geteuid() == 0:
        sh.mv(staging_path, site_pp)
        sh.chmod('664', site_pp)
    else:
        sh.sudo('/bin/mv', staging_path, site_pp)
        sh.sudo('/bin/chmod', '664', site_pp)
    # Show how looks like site.pp for now
    cls.log.debug("How looks site.pp for now (by 'cat {0}'):".format(site_pp))
    cls.log.debug(sh.cat(site_pp))
def _copy_files(self):
    """Copy each file listed under setup_dict['copy'] from root into home.

    Already-installed identical files are skipped; a differing file at the
    destination is an error. Files whose source path contains 'ssh' get
    their mode tightened to 600 after copying.
    """
    for rel_path in self.setup_dict['copy']:
        source = os.path.join(self.root, rel_path)
        target = os.path.join(self.home, rel_path)
        if not os.path.exists(target):
            self.debug("Copying {} to {}".format(source, target))
            parent_dir = os.path.dirname(target)
            if not os.path.exists(parent_dir):
                self.debug("Creating parent directories for copy")
                os.makedirs(parent_dir)
            shutil.copy(source, target)
            # ssh config files must not be group/world readable
            if 'ssh' in source:
                self.debug("Updating permissions for ssh config file {}".format(target))
                sh.chmod('600', target)
        elif filecmp.cmp(source, target, shallow=False):
            self.debug("Skipping {}; it was already installed".format(rel_path))
        else:
            raise Exception("File exists at destination {} but doesn't match".format(target))
def gen_cert(domain, dirname, private_key_name, public_cert_name, days):
    ''' Generate the public certificate. '''
    log('generating certificate')
    key_path = os.path.join(dirname, 'private', private_key_name)
    cert_path = os.path.join(dirname, public_cert_name)
    csr_path = os.path.join(dirname, '{}.csr'.format(domain))
    # Self-sign the CSR with the private key to produce the certificate.
    sh.openssl(
        'x509', '-req',
        '-days', days,
        '-in', csr_path,
        '-signkey', key_path,
        '-out', cert_path)
    assert os.path.exists(cert_path), 'could not generate {}'.format(cert_path)
    # The CSR is only an intermediate artifact.
    os.remove(csr_path)
    # only the owner should be able to read the private key
    sh.chmod('u+r', key_path)
    sh.chmod('u-wx', key_path)
    sh.chmod('go-rwx', key_path)
    # everyone can read the public certificate
    sh.chmod('ugo+r', cert_path)
    sh.chmod('ugo-wx', cert_path)
def IsConfigured(self):
    """Open up device-node permissions and unload ds2490 when the device
    is connected and installed; record and return the configured state."""
    if not (self.IsConnected() and self.IsInstalled()):
        self._configured = False
        return self._configured
    dev_path = '/dev/bus/usb/%s/%s' % (self.usb_bus_num, self.usb_dev_num)
    sh.ls(dev_path, '-l')
    # NOTE(review): assumes modprobe also ran under sudo in the original
    # (removing a kernel module needs root) — confirm intended nesting.
    with sh.sudo:
        sh.chmod('777', dev_path)
        sh.modprobe('-r', 'ds2490')
    self._configured = True
    return self._configured
def generate_certificate(
        domain, dirname, private_key_name=PRIVATE_KEY,
        public_cert_name=PUBLIC_CERT, name=None, days=365):
    ''' Generate a self-signed SSL certficate.

        Writes the public cert to the file dirname/public_cert_name.
        Creates a dir dirname/private. Writes the private key to
        dirname/private/private_key_name.

        >>> generate_certificate('test.domain.com', '/tmp')
    '''
    if name is None:
        name = domain
    log('starting to generate certificate for {}'.format(name))

    if not os.path.exists(dirname):
        os.mkdir(dirname)
        log('created {}'.format(dirname))
    private_dirname = os.path.join(dirname, 'private')
    if not os.path.exists(private_dirname):
        os.mkdir(private_dirname)
        log('created {}'.format(private_dirname))

    # Best-effort ownership: prefer the ssl-cert group, fall back to
    # root:root, and tolerate failure entirely (e.g. not running as root).
    # BUG FIX: the bare `except:` clauses also swallowed KeyboardInterrupt
    # and SystemExit; narrowed to Exception while keeping the fallback.
    try:
        sh.chown('root:ssl-cert', private_dirname)
    except Exception:
        try:
            sh.chown('root:root', private_dirname)
        except Exception:
            # Ownership is not critical; the chmod below still locks the
            # private dir down to its owner.
            pass
    sh.chmod('go-rwx', private_dirname)

    delete_old_cert(domain, dirname, private_key_name, public_cert_name)
    gen_private_key(domain, dirname, private_key_name)
    gen_csr(domain, dirname, name, private_key_name)
    gen_cert(domain, dirname, private_key_name, public_cert_name, days)
    log('created certificate for {}'.format(domain))
def create_site_initd_script(name, folder, username):
    """
    Install /etc/init.d boot script for a Plone site.

    We do this Ubuntu style, not sure if works 100% on Debian.

    http://wiki.debian.org/LSBInitScripts
    http://developer.plone.org/hosting/restarts.html#lsbinitscripts-starting-with-debian-6-0
    """
    from sh import chmod

    updaterc = Command("/usr/sbin/update-rc.d")
    script_body = DEBIAN_BOOT_TEMPLATE % dict(user=username, folder=folder, name=name)
    initd_script = "/etc/init.d/%s" % name
    # BUG FIX: was a Python-2-only `print` statement; the parenthesized
    # single-argument form behaves identically on Python 2 and 3.
    print("Creating start/stop script %s" % initd_script)
    with sudo:
        # Write the script, make it executable, and register it for boot.
        echo(script_body, _out=initd_script)
        chmod("u+x", initd_script)
        updaterc(name, "defaults")
def log(message, filename=None, mode=None):
    ''' Log message that syr.log can't. '''
    filename = '/tmp/_log.{}.log'.format(whoami()) if filename is None else filename
    mode = '0666' if mode is None else mode

    # Make sure the log file exists and is broadly writable.
    sh.touch(filename)
    try:
        sh.chmod(mode, filename)
    except sh.ErrorReturnCode_1:
        # hopefully the perms are already ok
        pass

    with open(filename, 'a') as logfile:
        try:
            logfile.write('{} {}\n'.format(timestamp(), message))
        except UnicodeDecodeError:
            from syr.python import is_string
            # Record the failure, then retry with a lossy decode for bytes.
            logfile.write('unable to write message because it is a type: {}'.format(type(message)))
            if not is_string(message):
                logfile.write('{} {}\n'.format(timestamp(), message.decode(errors='replace')))
def run(self, cache, args=None):
    """Place the cached payload, make it executable, and run it.

    :param cache: cache directory object; ``cache.join('data')`` yields
        the payload path
    :param args: optional sequence of command-line arguments for the
        payload (default: none)
    """
    # BUG FIX: the default was the mutable ``args=[]``, which is shared
    # across calls; use a None sentinel instead.
    if args is None:
        args = []
    f = cache.join('data')
    self.place(cache, f)
    # The payload must be executable before it can be invoked.
    chmod('a+rx', str(f))
    cmd = Command(str(f))
    cmd(*args)
def get_words(output_file):
    """Copy the system dictionary to *output_file* and make it user-writable."""
    words_source = "/usr/share/dict/words"
    sh.cp(words_source, output_file)
    # The dictionary ships read-only; allow the owner to edit the copy.
    sh.chmod("u+w", output_file)