def check(self):
    """Make sure Percona's repository and prerequisites are configured,
    then delegate to the base executable check."""
    os_family = linux.os['family']
    if os_family in ('RedHat', 'Oracle') and linux.os['version'] >= (6, 0):
        # Avoid "Can't locate Time/HiRes.pm in @INC"
        # with InnoDB Backup Utility v1.5.1-xtrabackup
        pkgmgr.installed('perl-Time-HiRes')
    mgr = pkgmgr.package_mgr()
    if 'percona' not in mgr.repos():
        if os_family in ('RedHat', 'Oracle'):
            release_rpm = ('http://www.percona.com/downloads/percona-release/'
                           'percona-release-0.0-1.%s.rpm' % linux.os['arch'])
            pkgmgr.YumPackageMgr().localinstall(release_rpm)
        else:
            try:
                codename = linux.os['lsb_codename']
            except KeyError:
                codename = linux.ubuntu_release_to_codename[linux.os['lsb_release']]
            pkgmgr.apt_source(
                'percona.list',
                ['deb http://repo.percona.com/apt %s main' % codename],
                gpg_keyserver='hkp://keys.gnupg.net',
                gpg_keyid='CD2EFD2A')
        mgr.updatedb()
    return super(PerconaExec, self).check()
def enabled():
    """Return True when the iptables service is enabled in any runlevel.

    NOTE(review): for non-RedHat families this falls off the end and
    returns None -- confirm callers only rely on truthiness.
    """
    # amazon linux doesn't have iptables service installed by default,
    # which makes "chkconfig --list iptables" fail
    # update: amzn >= 6.4 doesn't allow installing iptables-services;
    # however, the latest version of iptables itself suits all our needs
    if linux.os["name"] == "Amazon":
        if linux.os["release"] == "6.3":
            pkgmgr.installed("iptables-services")
        else:
            # 6.4 and higher
            # Upgrading iptables-1.4.18-1.16 to iptables-1.4.18-1.19 did
            # the job
            # Reverted: cannot reproduce the error
            # # We need to remove iptables first because it can be i686 version
            # # installed and installing x86_64 over it causes error
            # pkgmgr.remove("iptables")
            pkgmgr.latest("iptables")
    if linux.os['family'] in ('RedHat', 'Oracle'):
        try:
            out = redhat.chkconfig(list="iptables")[0]
            # a runlevel entry like "3:on" means the service is enabled
            return bool(re.search(r"iptables.*?\s\d:on", out))
        except linux.LinuxError, e:
            # chkconfig errors out when the service has no runlevel entries;
            # treat that as "disabled" rather than a failure
            if 'not referenced in any runlevel' in str(e):
                return False
            else:
                raise
def __init__(self):
    """Prepare mdadm-based RAID management.

    Installs mdadm when missing, loads the md kernel module, comments out
    the distro's 85-mdadm.rules udev files (so udev does not auto-assemble
    arrays behind our back), and pre-compiles the regexes used to parse
    `mdadm --detail` output.
    """
    if not os.path.exists(MDADM_EXEC):
        if linux.os.redhat_family:
            # -x exim: some RedHat flavors pull in the exim MTA as an
            # mdadm dependency; we don't want it
            system2(('/usr/bin/yum', '-d0', '-y', 'install', 'mdadm',
                     '-x', 'exim'), raise_exc=False)
        else:
            pkgmgr.installed('mdadm')
    if not os.path.exists('/proc/mdstat'):
        coreutils.modprobe('md_mod')

    # Fix: the first entry used to be '/etc ' (trailing space), so the
    # /etc/udev/rules.d copy of the rules file was never found and thus
    # never disabled.
    for location in ['/etc', '/lib']:
        path = os.path.join(location, 'udev/rules.d/85-mdadm.rules')
        if os.path.exists(path):
            rule = None
            with open(path, 'r') as fp:
                rule = fp.read()
            if rule:
                # comment out every non-comment line
                rule = re.sub(re.compile('^([^#])', re.M), '#\\1', rule)
                with open(path, 'w') as fp:
                    fp.write(rule)

    # Raw strings: avoid deprecated/invalid escape sequences for \s and \d
    self._raid_devices_re = re.compile(r'Raid\s+Devices\s+:\s+(?P<count>\d+)')
    self._total_devices_re = re.compile(r'Total\s+Devices\s+:\s+(?P<count>\d+)')
    self._state_re = re.compile(r'State\s+:\s+(?P<state>.+)')
    self._rebuild_re = re.compile(r'Rebuild\s+Status\s+:\s+(?P<percent>\d+)%')
    self._level_re = re.compile(r'Raid Level : (?P<level>.+)')
def check(self):
    """Configure the Percona repository, prerequisites and the correct
    xtrabackup package name, then delegate to the base check."""
    if linux.os['family'] in ('RedHat', 'Oracle') and linux.os['version'] >= (6, 0):
        # Avoid "Can't locate Time/HiRes.pm in @INC"
        # with InnoDB Backup Utility v1.5.1-xtrabackup
        pkgmgr.installed('perl-Time-HiRes')
    mgr = pkgmgr.package_mgr()
    if not 'percona' in mgr.repos():
        if linux.os['family'] in ('RedHat', 'Oracle'):
            url = 'http://www.percona.com/downloads/percona-release/percona-release-0.0-1.%s.rpm' % linux.os['arch']
            pkgmgr.YumPackageMgr().localinstall(url)
            if linux.os.amazon:
                # Amazon Linux expands $releasever to its own version,
                # which Percona's repo layout doesn't have; use "latest"
                linux.system(("sed -i 's/\$releasever/latest/g' "
                              "/etc/yum.repos.d/Percona.repo"), shell=True)
        else:
            try:
                codename = linux.os['lsb_codename']
            except KeyError:
                codename = linux.ubuntu_release_to_codename[linux.os['lsb_release']]
            pkgmgr.apt_source(
                'percona.list',
                ['deb http://repo.percona.com/apt %s main' % codename],
                gpg_keyserver='hkp://keys.gnupg.net',
                gpg_keyid='CD2EFD2A')
        mgr.updatedb()
    # MySQL < 5.5 requires the older xtrabackup 2.1 line
    if software.mysql_software_info().version < (5, 5):
        self.package = 'percona-xtrabackup-21'
    else:
        self.package = 'percona-xtrabackup'
    return super(PerconaExec, self).check()
def check(self):
    """Ensure the Percona package repository is present before running
    the base executable check."""
    family = linux.os["family"]
    is_rpm_based = family in ("RedHat", "Oracle")
    if is_rpm_based and linux.os["version"] >= (6, 0):
        # Avoid "Can't locate Time/HiRes.pm in @INC"
        # with InnoDB Backup Utility v1.5.1-xtrabackup
        pkgmgr.installed("perl-Time-HiRes")
    package_manager = pkgmgr.package_mgr()
    if "percona" not in package_manager.repos():
        if is_rpm_based:
            release_rpm = (
                "http://www.percona.com/downloads/percona-release/"
                "percona-release-0.0-1.%s.rpm" % linux.os["arch"])
            pkgmgr.YumPackageMgr().localinstall(release_rpm)
        else:
            try:
                codename = linux.os["lsb_codename"]
            except KeyError:
                codename = linux.ubuntu_release_to_codename[linux.os["lsb_release"]]
            pkgmgr.apt_source(
                "percona.list",
                ["deb http://repo.percona.com/apt %s main" % codename],
                gpg_keyserver="hkp://keys.gnupg.net",
                gpg_keyid="CD2EFD2A",
            )
        package_manager.updatedb()
    return super(PerconaExec, self).check()
def innobackupex(*params, **long_kwds):
    """Run /usr/bin/innobackupex, installing percona-xtrabackup first
    if the binary is not present."""
    executable = '/usr/bin/innobackupex'
    if not os.path.exists(executable):
        pkgmgr.installed('percona-xtrabackup')
    cmd = linux.build_cmd_args(
        executable=executable,
        long=long_kwds,
        params=params)
    return linux.system(cmd)
def create(self, volume, snapshot, tranzit_path, complete_cb=None):
    """Start asynchronous creation of an LVM-backed snapshot.

    Takes an LVM snapshot of *volume* and spawns a worker thread that
    archives/uploads it; blocks only until the worker signals startup via
    self._return_ev.  *complete_cb* is called when creation completes, or
    immediately if setup fails.

    Raises:
        StorageError: if a snapshot with the same id is already in progress.
    """
    try:
        if snapshot.id in self._state_map:
            raise StorageError('Snapshot %s is already %s. Cannot create it again' % (
                snapshot.id, self._state_map[snapshot.id]))
        clear_queue(self._upload_queue)
        # pigz = parallel gzip; only available on recent Debian directly,
        # and via EPEL on RedHat >= 6
        if not os.path.exists(self._pigz_bin):
            if linux.os['family'] == 'Debian' and linux.os['release'] >= (10, 4):
                pkgmgr.installed('pigz')
            elif linux.os['family'] == 'RedHat' and linux.os['release'] >= (6, 0):
                pkgmgr.epel_repository()
                pkgmgr.installed('pigz')
        self._chunks_md5 = {}
        self._state_map[snapshot.id] = Snapshot.CREATING
        #self.prepare_tranzit_vol(volume.tranzit_vol)
        snap_lv = self._lvm.create_lv_snapshot(volume.device,
            self.SNAPSHOT_LV_NAME, extents='100%FREE')
        self._logger.info('Created LVM snapshot %s for volume %s',
            snap_lv, volume.device)
        self._return_ev.clear()
        t = threading.Thread(name='%s creator' % snapshot.id,
            target=self._create,
            args=(volume, snapshot, snap_lv, tranzit_path, complete_cb))
        t.start()
        # wait until the worker has taken over before returning
        self._return_ev.wait()
    except:
        # on any setup failure, still notify the caller before re-raising
        if complete_cb:
            complete_cb()
        raise
    snapshot.snap_strategy = 'data'
    snapshot.path = os.path.join(volume.snap_backend['path'],
        '%s.%s' % (snapshot.id, self.MANIFEST_NAME))
    return snapshot
def __init__(self):
    """Load the filesystem's kernel module when its mkfs helper is
    missing, and install any OS packages this filesystem requires."""
    mkfs_path = '/sbin/mkfs.%s' % self.type
    if not os.path.exists(mkfs_path):
        coreutils.modprobe(self.type)
    if self.os_packages:
        LOG.debug('Installing OS packages')
        for pkg in self.os_packages:
            pkgmgr.installed(pkg)
def _prepare_software(self):
    """Install the tooling required for image bundling."""
    # windows has no ami tools. Bundle is made by scalr
    if linux.os['family'] == 'Windows':
        return
    pkgmgr.updatedb()
    self._install_sg3_utils()
    self._install_ami_tools()
    if linux.os['family'] == 'RedHat':
        for pkg in ('parted', 'kpartx'):
            pkgmgr.installed(pkg)
def check(self):
    """Install `self.package` when the executable is not runnable.

    Raises:
        linux.LinuxError: when the executable is missing and no package
            attribute is configured.
    """
    if os.access(self.executable, os.X_OK):
        return
    if not self.package:
        raise linux.LinuxError(
            'Executable %s is not found, you should either '
            'specify `package` attribute or install the software '
            'manually' % (self.executable))
    pkgmgr.installed(self.package)
def rsync(src, dst, **long_kwds):
    """Copy *src* to *dst* with rsync (installed on demand), flushing
    disk buffers before and after the transfer."""
    if not os.path.exists('/usr/bin/rsync'):
        pkgmgr.installed('rsync')
    system(['sync'])  # flush pending writes before copying
    cmd = build_cmd_args(
        executable='/usr/bin/rsync',
        long=long_kwds,
        params=[src, dst],
        duplicate_keys=True)
    result = system(cmd)
    system(['sync'])  # make sure the copied data hits the disk
    return result
def on_host_init_response(self, hir_message):
    """Install Tomcat (with its admin webapps) when absent, plus the
    augeas tooling used for configuration editing."""
    if not os.path.exists(self.service.initd_script):
        tomcat = 'tomcat{0}'.format(self.tomcat_version)
        packages = [tomcat]
        if linux.os.debian_family:
            packages.append('{0}-admin'.format(tomcat))
        elif linux.os.redhat_family or linux.os.oracle_family:
            packages.append('{0}-admin-webapps'.format(tomcat))
        for name in packages:
            pkgmgr.installed(name)
    augeas_pkg = 'augeas-tools' if linux.os.debian_family else 'augeas'
    pkgmgr.installed(augeas_pkg)
def __init__(self):
    """Ensure the filesystem's kernel module is loaded when its mkfs
    helper is missing, then install required OS packages.

    Raises:
        Exception: if the kernel module cannot be loaded.
    """
    if not os.path.exists("/sbin/mkfs.%s" % self.name):
        try:
            coreutils.modprobe(self.name)
        except Exception:
            # Narrowed from a bare `except:` so KeyboardInterrupt and
            # SystemExit are no longer swallowed and re-wrapped.
            e = sys.exc_info()[1]
            error_text = "Cannot load '%s' kernel module: %s" % (self.name, e)
            raise Exception(error_text)
    if self.os_packages:
        for package in self.os_packages:
            pkgmgr.installed(package)
def update(self, workdir):
    """Clone or pull `self.url` into *workdir* using Git.

    Installs git on demand, optionally wires a deploy key through a
    temporary GIT_SSH wrapper script, and re-clones from scratch when
    *workdir* tracks a different origin.  Progress is reported to the
    init-operation logger when one is active.
    """
    log = bus.init_op.logger if bus.init_op else self._logger
    if not os.access(self.executable, os.X_OK):
        log.info('Installing Git SCM...')
        if linux.os['family'] == 'Debian':
            package = 'git-core'  # historical Debian package name
        else:
            package = 'git'
        pkgmgr.installed(package)
    #if not os.path.exists(workdir):
    #   self._logger.info('Creating destination directory')
    #   os.makedirs(workdir)
    tmpdir = tempfile.mkdtemp()
    env = {}
    try:
        if self.private_key:
            # write the deploy key and a GIT_SSH shim that uses it
            pk_path = os.path.join(tmpdir, 'pk.pem')
            with open(pk_path, 'w') as fp:
                fp.write(self.private_key)
            os.chmod(pk_path, 0400)  # ssh refuses group/world-readable keys
            git_ssh_path = os.path.join(tmpdir, 'git_ssh.sh')
            with open(git_ssh_path, 'w') as fp:
                fp.write(self.ssh_tpl % pk_path)
            os.chmod(git_ssh_path, 0755)
            env.update(dict(GIT_SSH=git_ssh_path))
        if os.path.exists(os.path.join(workdir, '.git')):
            origin_url = system2(('git', 'config', '--get', 'remote.origin.url'),
                cwd=workdir, raise_exc=False)[0]
            if origin_url.strip() != self.url.strip():
                # working copy tracks another repo: wipe and clone afresh
                self._logger.info('%s is not origin of %s (%s is)',
                    self.url, workdir, origin_url)
                self._logger.info('Remove all files in %s and checkout from %s',
                    workdir, self.url)
                shutil.rmtree(workdir)
                os.mkdir(workdir)
                out, err, ret_code = system2(('git', 'clone', self.url, workdir),
                    env=env)
            else:
                log.info('Updating directory %s (git-pull)', workdir)
                out, err, ret_code = system2(('git', 'pull'), env=env, cwd=workdir)
        else:
            log.info('Checkout from %s', self.url)
            out, err, ret_code = system2(('git', 'clone', '--recursive',
                self.url, workdir), env=env)
        if ret_code:
            raise Exception('Git failed to clone repository. %s' % out)
        log.info('Successfully deployed %s from %s', workdir, self.url)
    finally:
        shutil.rmtree(tmpdir)
def dmsetup(action, *params, **long_kwds):
    """Run `dmsetup <action> ...`, installing the package that provides
    it (dmsetup on Debian, device-mapper elsewhere) on first use."""
    if not os.path.exists('/sbin/dmsetup'):
        pkg = 'dmsetup' if linux.os.debian_family else 'device-mapper'
        pkgmgr.installed(pkg)
    cmd = linux.build_cmd_args(
        executable='/sbin/dmsetup',
        short=[action],
        long=long_kwds,
        params=params)
    return linux.system(cmd)
def dmsetup(action, *params, **long_kwds):
    """Execute `dmsetup <action> ...`, lazily installing the owning
    package on first use."""
    if not os.path.exists('/sbin/dmsetup'):
        # imported lazily to keep the module import lightweight
        from scalarizr.linux import pkgmgr
        pkg = 'dmsetup' if linux.os.debian_family else 'device-mapper'
        pkgmgr.installed(pkg)
    cmd = linux.build_cmd_args(
        executable='/sbin/dmsetup',
        short=[action],
        long=long_kwds,
        params=params)
    return linux.system(cmd)
def _configure(self):
    """Turn the host into a NAT/proxy for Scalr traffic.

    Enables IPv4 forwarding via Augeas-managed sysctl.conf, adds a
    MASQUERADE rule for the configured CIDR, then applies the
    scalarizr_proxy chef-solo cookbook.
    """
    # augtool (below) edits /etc/sysctl.conf through the Augeas tree
    pkgmgr.installed(
        'augeas-tools' if linux.os['family'] == 'Debian' else 'augeas',
        updatedb=True)
    augscript = '\n'.join([
        'set /files/etc/sysctl.conf/net.ipv4.ip_forward 1',
        # bridge-nf keys break `sysctl -p` when the bridge module is absent
        'rm /files/etc/sysctl.conf/net.bridge.bridge-nf-call-ip6tables',
        'rm /files/etc/sysctl.conf/net.bridge.bridge-nf-call-iptables',
        'rm /files/etc/sysctl.conf/net.bridge.bridge-nf-call-arptables',
        'save'
    ])
    linux.system(('augtool', ), stdin=augscript)
    linux.system(('sysctl', '-p'))
    if self._data.get('cidr'):
        # masquerade traffic from the subnet to everywhere but itself
        iptables.ensure({
            'POSTROUTING': [{
                'table': 'nat',
                'source': self._data['cidr'],
                'not_destination': self._data['cidr'],
                'jump': 'MASQUERADE'
            }]
        })
    solo_home = '/tmp/chef'
    solo_rb = '%s/solo.rb' % solo_home
    solo_attr = '%s/attr.json' % solo_home
    pkgmgr.installed('git')
    if os.path.exists(solo_home):
        shutil.rmtree(solo_home)
    linux.system('git clone https://github.com/Scalr/cookbooks.git %s' % solo_home,
        shell=True)
    with open(solo_attr, 'w+') as fp:
        json.dump(
            {
                'run_list': ['recipe[scalarizr_proxy]'],
                'scalarizr_proxy': {
                    'scalr_addr': self._data['scalr_addr'],
                    'whitelist': self._data['whitelist']
                }
            }, fp)
    with open(solo_rb, 'w+') as fp:
        fp.write('file_cache_path "%s"\n'
                 'cookbook_path "%s/cookbooks"' % (solo_home, solo_home))
    # setsid detaches chef-solo from our controlling terminal
    linux.system(('chef-solo', '-c', solo_rb, '-j', solo_attr),
        close_fds=True, preexec_fn=os.setsid, log_level=logging.INFO)
def _install_ruby(self):
    """Build Ruby 1.9.3-p547 from source into self._ruby_dir.

    Installs the distro toolchain packages, refreshes the CA bundle on
    CentOS, runs configure/make/make install, and extends PATH and
    MY_RUBY_HOME in self.environ.
    """
    packages = None
    if linux.os['family'] == 'RedHat':
        packages = ['unzip', 'gcc-c++', 'patch', 'readline', 'readline-devel',
                    'zlib', 'zlib-devel', 'libyaml-devel', 'libffi-devel',
                    'openssl-devel', 'make', 'bzip2', 'autoconf', 'automake',
                    'libtool', 'bison']
        if linux.os['name'] == 'CentOS' and linux.os['release'] < (5, 4):
            packages.append('iconv-devel')
    else:
        packages = ['unzip', 'build-essential', 'zlib1g-dev', 'libssl-dev',
                    'libreadline6-dev', 'libyaml-dev']
    for package in packages:
        pkgmgr.installed(package)
    # update curl certificate on centos 5
    # NOTE(review): the guard matches every CentOS release, not just 5 --
    # confirm whether newer releases also need the refreshed CA bundle
    if linux.os['name'] == 'CentOS':
        system2(('curl', '-L', 'http://curl.haxx.se/ca/cacert.pem',
                 '-o', '/etc/pki/tls/certs/ca-bundle.crt'))
    system2(('wget', '-P', '/tmp',
             'http://cache.ruby-lang.org/pub/ruby/1.9/ruby-1.9.3-p547.tar.gz'))
    system2(('tar', '-xzvf', 'ruby-1.9.3-p547.tar.gz'), cwd='/tmp')
    sources_dir = '/tmp/ruby-1.9.3-p547'
    # NOTE(review): single-dash '-prefix' relies on configure option
    # abbreviation; '--prefix' is the canonical spelling -- confirm
    system2(('./configure', '-prefix=%s' % self._ruby_dir), cwd=sources_dir)
    system2(('make',), cwd=sources_dir)
    system2(('make', 'install'), cwd=sources_dir)
    self.environ['PATH'] = self.environ['PATH'] + (':%s/bin' % self._ruby_dir)
    self.environ['MY_RUBY_HOME'] = self._ruby_dir
def on_host_init_response(self, hir_message):
    """Install the augeas tooling.

    The triple-quoted string below is intentionally dead code (a disabled
    Tomcat package-installation step) kept for reference.
    """
    '''
    if not os.path.exists(self.service.initd_script):
        tomcat = 'tomcat{0}'.format(self.tomcat_version)
        pkgs = [tomcat]
        if linux.os.debian_family:
            pkgs.append('{0}-admin'.format(tomcat))
        elif linux.os.redhat_family or linux.os.oracle_family:
            pkgs.append('{0}-admin-webapps'.format(tomcat))
        for pkg in pkgs:
            pkgmgr.installed(pkg)
    '''
    pkgmgr.installed(
        'augeas-tools' if linux.os.debian_family else 'augeas')
def _configure(self):
    """Enable IPv4 forwarding, NAT the configured CIDR, and apply the
    scalarizr_proxy chef-solo cookbook."""
    # augtool (below) edits /etc/sysctl.conf through the Augeas tree
    pkgmgr.installed("augeas-tools" if linux.os["family"] == "Debian" else "augeas")
    augscript = "\n".join(
        [
            "set /files/etc/sysctl.conf/net.ipv4.ip_forward 1",
            # bridge-nf keys break `sysctl -p` when the bridge module is absent
            "rm /files/etc/sysctl.conf/net.bridge.bridge-nf-call-ip6tables",
            "rm /files/etc/sysctl.conf/net.bridge.bridge-nf-call-iptables",
            "rm /files/etc/sysctl.conf/net.bridge.bridge-nf-call-arptables",
            "save",
        ]
    )
    linux.system(("augtool",), stdin=augscript)
    linux.system(("sysctl", "-p"))
    if self._data.get("cidr"):
        # masquerade subnet traffic headed outside the subnet
        iptables.ensure(
            {
                "POSTROUTING": [
                    {
                        "table": "nat",
                        "source": self._data["cidr"],
                        "not_destination": self._data["cidr"],
                        "jump": "MASQUERADE",
                    }
                ]
            }
        )
    solo_home = "/tmp/chef"
    solo_rb = "%s/solo.rb" % solo_home
    solo_attr = "%s/attr.json" % solo_home
    pkgmgr.installed("git")
    if os.path.exists(solo_home):
        shutil.rmtree(solo_home)
    linux.system("git clone https://github.com/Scalr/cookbooks.git %s" % solo_home, shell=True)
    with open(solo_attr, "w+") as fp:
        json.dump(
            {
                "run_list": ["recipe[scalarizr_proxy]"],
                "scalarizr_proxy": {"scalr_addr": self._data["scalr_addr"], "whitelist": self._data["whitelist"]},
            },
            fp,
        )
    with open(solo_rb, "w+") as fp:
        fp.write('file_cache_path "%s"\n' 'cookbook_path "%s/cookbooks"' % (solo_home, solo_home))
    # setsid detaches chef-solo from our controlling terminal
    linux.system(
        ("chef-solo", "-c", solo_rb, "-j", solo_attr), close_fds=True, preexec_fn=os.setsid, log_level=logging.INFO
    )
def check(self):
    """Resolve the executable (PATH lookup when relative) and install
    `self.package` when it isn't runnable.

    Raises:
        linux.LinuxError: when the executable is missing and no package
            attribute is configured.
    """
    if self.executable.startswith('/'):
        exec_path = self.executable
    else:
        candidates = software.whereis(self.executable)
        exec_path = candidates[0] if candidates else None
    if exec_path and os.access(exec_path, os.X_OK):
        return
    if not self.package:
        raise linux.LinuxError(
            'Executable %s is not found, you should either '
            'specify `package` attribute or install the software '
            'manually' % self.executable)
    pkgmgr.installed(self.package)
def update(self, workdir):
    """Checkout or update `self.url` into *workdir* with Subversion.

    Installs the svn client on demand.  When *workdir* is a working copy
    of a different repository it is wiped and checked out afresh.

    Raises:
        SourceError: when the origin URL cannot be extracted from
            `svn info` output.
    """
    if not os.access(self.executable, os.X_OK):
        self._logger.info('Installing Subversion SCM...')
        pkgmgr.installed('subversion')

    do_update = False
    if os.path.exists(os.path.join(workdir, '.svn')):
        out = system2(('svn', 'info', workdir))[0]
        try:
            svn_url = filter(lambda line: line.startswith('URL:'),
                             out.split('\n'))[0].split(':', 1)[1].strip()
        except IndexError:
            # Fix: `out` used to be passed as a second constructor argument,
            # so it was never interpolated into the message.
            raise SourceError('Cannot extract Subversion URL. Text:\n %s' % out)
        if svn_url != self.url:
            # working copy tracks another repo: wipe and checkout afresh
            self._logger.info('%s is not origin of %s (%s is)',
                              self.url, workdir, svn_url)
            self._logger.info('Remove all files in %s and checkout from %s',
                              workdir, self.url)
            shutil.rmtree(workdir)
            os.mkdir(workdir)
        else:
            do_update = True

    args = ['svn', 'update' if do_update else 'co']
    if self.login and self.password:
        args += [
            '--username', self.login,
            '--password', self.password,
            '--non-interactive'
        ]
        if self.client_version >= (1, 5, 0):
            # --trust-server-cert appeared in svn 1.5
            args += ['--trust-server-cert']
    if args[1] == 'co':
        args += [self.url]
    args += [workdir]

    self._logger.info('Updating source from %s into working dir %s',
                      self.url, workdir)
    out = system2(args)[0]
    self._logger.info(out)
    self._logger.info('Deploying %s to %s has been completed successfully.',
                      self.url, workdir)
def enabled():
    """Return True when the iptables service is enabled on RedHat-family
    systems; elsewhere, True when the iptables binary is executable."""
    # amazon linux doesn't have iptables service installed by default,
    # which makes "chkconfig --list iptables" fail
    # update: amzn >= 6.4 doesn't allow installing iptables-services;
    # however, the latest version of iptables itself suits all our needs
    if linux.os["name"] == "Amazon":
        if linux.os["release"] == "6.3":
            pkgmgr.installed("iptables-services")
        else:
            # 6.4 and higher
            # Upgrading iptables-1.4.18-1.16 to iptables-1.4.18-1.19 did
            # the job
            pkgmgr.latest("iptables")
    if linux.os['family'] in ('RedHat', 'Oracle'):
        # NOTE(review): chkconfig raises LinuxError when the service is not
        # referenced in any runlevel; a sibling implementation catches that
        # and returns False -- consider doing the same here.
        out = redhat.chkconfig(list="iptables")[0]
        # a runlevel entry like "3:on" means the service is enabled
        return bool(re.search(r"iptables.*?\s\d:on", out))
    else:
        return os.access(IPTABLES_BIN, os.X_OK)
def update(self, workdir):
    """Checkout or update `self.url` into *workdir* with Subversion.

    Installs the svn client on demand; wipes and re-checkouts when the
    working copy points at a different repository.  Progress goes to the
    init-operation logger when one is active.
    """
    log = bus.init_op.logger if bus.init_op else self._logger
    if not os.access(self.executable, os.X_OK):
        log.info('Installing Subversion SCM...')
        pkgmgr.installed('subversion')
    do_update = False
    if os.path.exists(os.path.join(workdir, '.svn')):
        out = system2(('svn', 'info', workdir))[0]
        try:
            svn_url = filter(lambda line: line.startswith('URL:'),
                            out.split('\n'))[0].split(':', 1)[1].strip()
        except IndexError:
            # NOTE(review): `out` is passed as an extra constructor argument,
            # so it never gets interpolated into the message -- likely meant
            # to be `% out`
            raise SourceError('Cannot extract Subversion URL. Text:\n %s', out)
        if svn_url != self.url:
            #raise SourceError('Working copy %s is checkouted from different repository %s' % (workdir, svn_url))
            self._logger.info('%s is not origin of %s (%s is)',
                self.url, workdir, svn_url)
            self._logger.info('Remove all files in %s and checkout from %s',
                workdir, self.url)
            shutil.rmtree(workdir)
            os.mkdir(workdir)
        else:
            do_update = True
    args = [
        'svn',
        'update' if do_update else 'co'
    ]
    if self.login and self.password:
        args += [
            '--username', self.login,
            '--password', self.password,
            '--non-interactive'
        ]
        if self.client_version >= (1, 5, 0):
            # --trust-server-cert appeared in svn 1.5
            args += ['--trust-server-cert']
    if args[1] == 'co':
        args += [self.url]
    args += [workdir]
    log.info('Updating source from %s into working dir %s', self.url, workdir)
    out = system2(args)[0]
    self._logger.info(out)
    log.info('Deploying %s to %s has been completed successfully.',
        self.url, workdir)
def _configure(self):
    """Enable IPv4 forwarding, NAT the configured CIDR, and run the
    scalarizr_proxy cookbook via chef-solo."""
    # augtool (below) edits /etc/sysctl.conf through the Augeas tree
    pkgmgr.installed('augeas-tools' if linux.os['family'] == 'Debian' else 'augeas')
    augscript = '\n'.join([
        'set /files/etc/sysctl.conf/net.ipv4.ip_forward 1',
        # bridge-nf keys break `sysctl -p` when the bridge module is absent
        'rm /files/etc/sysctl.conf/net.bridge.bridge-nf-call-ip6tables',
        'rm /files/etc/sysctl.conf/net.bridge.bridge-nf-call-iptables',
        'rm /files/etc/sysctl.conf/net.bridge.bridge-nf-call-arptables',
        'save'
    ])
    linux.system(('augtool',), stdin=augscript)
    linux.system(('sysctl', '-p'))
    # NOTE(review): direct indexing raises KeyError when 'cidr' is absent;
    # a sibling implementation uses self._data.get('cidr') -- confirm intent
    if self._data['cidr']:
        iptables.ensure({'POSTROUTING': [{
            'table': 'nat',
            'source': self._data['cidr'],
            'not_destination': self._data['cidr'],
            'jump': 'MASQUERADE'
        }]})
    solo_home = '/tmp/chef'
    solo_rb = '%s/solo.rb' % solo_home
    solo_attr = '%s/attr.json' % solo_home
    pkgmgr.installed('git')
    if os.path.exists(solo_home):
        shutil.rmtree(solo_home)
    linux.system('git clone https://github.com/Scalr/cookbooks.git %s' % solo_home,
        shell=True)
    with open(solo_attr, 'w+') as fp:
        json.dump({
            'run_list': ['recipe[scalarizr_proxy]'],
            'normal': {
                'scalr_addr': self._data['scalr_addr'],
                'whitelist': self._data['whitelist']
            }
        }, fp)
    with open(solo_rb, 'w+') as fp:
        fp.write(
            'file_cache_path "%s"\n'
            'cookbook_path "%s/cookbooks"' % (solo_home, solo_home)
        )
    # setsid detaches chef-solo from our controlling terminal
    linux.system(('chef-solo', '-c', solo_rb, '-j', solo_attr),
        close_fds=True, preexec_fn=os.setsid)
def create(self, volume, snapshot, tranzit_path, complete_cb=None):
    """Start asynchronous creation of an LVM-backed snapshot.

    Takes an LVM snapshot of *volume* and spawns a worker thread that
    archives/uploads it; blocks only until the worker signals startup via
    self._return_ev.  *complete_cb* is called when creation completes, or
    immediately if setup fails.

    Raises:
        StorageError: if a snapshot with the same id is already in progress.
    """
    try:
        if snapshot.id in self._state_map:
            raise StorageError(
                'Snapshot %s is already %s. Cannot create it again'
                % (snapshot.id, self._state_map[snapshot.id]))
        clear_queue(self._upload_queue)
        # pigz = parallel gzip; only on recent Debian directly, and via
        # EPEL on RedHat >= 6
        if not os.path.exists(self._pigz_bin):
            if linux.os['family'] == 'Debian' and linux.os['release'] >= (
                    10, 4):
                pkgmgr.installed('pigz')
            elif linux.os['family'] == 'RedHat' and linux.os[
                    'release'] >= (6, 0):
                pkgmgr.epel_repository()
                pkgmgr.installed('pigz')
        self._chunks_md5 = {}
        self._state_map[snapshot.id] = Snapshot.CREATING
        #self.prepare_tranzit_vol(volume.tranzit_vol)
        snap_lv = self._lvm.create_lv_snapshot(volume.device,
            self.SNAPSHOT_LV_NAME, extents='100%FREE')
        self._logger.info('Created LVM snapshot %s for volume %s',
            snap_lv, volume.device)
        self._return_ev.clear()
        t = threading.Thread(name='%s creator' % snapshot.id,
            target=self._create,
            args=(volume, snapshot, snap_lv, tranzit_path, complete_cb))
        t.start()
        # wait until the worker has taken over before returning
        self._return_ev.wait()
    except:
        # on any setup failure, still notify the caller before re-raising
        if complete_cb:
            complete_cb()
        raise
    snapshot.snap_strategy = 'data'
    snapshot.path = os.path.join(
        volume.snap_backend['path'],
        '%s.%s' % (snapshot.id, self.MANIFEST_NAME))
    return snapshot
def check(self):
    """Set up the Percona repository (RedHat: release RPM; Debian: apt
    source) before delegating to the base check."""
    rpm_families = ('RedHat', 'Oracle')
    if linux.os['family'] in rpm_families:
        # Avoid "Can't locate Time/HiRes.pm in @INC"
        # with InnoDB Backup Utility v1.5.1-xtrabackup
        pkgmgr.installed('perl-Time-HiRes')
    mgr = pkgmgr.package_mgr()
    if 'percona' not in mgr.repos():
        if linux.os['family'] in rpm_families:
            release_rpm = ('http://www.percona.com/downloads/percona-release/'
                           'percona-release-0.0-1.%s.rpm' % linux.os['arch'])
            pkgmgr.RpmPackageMgr().install(release_rpm)
        else:
            codename = linux.ubuntu_release_to_codename[linux.os['lsb_release']]
            pkgmgr.apt_source(
                'percona.list',
                ['deb http://repo.percona.com/apt %s main' % codename],
                gpg_keyserver='hkp://keys.gnupg.net',
                gpg_keyid='CD2EFD2A')
        mgr.updatedb()
    return super(PerconaExec, self).check()
def _install_sg3_utils(self):
    """Make sgp_dd available: distro package on RedHat (except Amazon),
    otherwise download 1.39 builds from sg.danny.cz and install them."""
    # Installs sg3_utils package for fast sgp_dd command
    if linux.os['family'] == 'RedHat' and linux.os['name'] != 'Amazon':
        pkgmgr.installed('sg3_utils')
        return
    arch = None
    lib_package = None
    utils_package = None
    pkg_mgr_cmd = None
    if linux.os['name'] == 'Amazon':
        arch = linux.os['arch']
        lib_package = 'sg3_utils-libs-1.39-1.%s.rpm' % arch
        utils_package = 'sg3_utils-1.39-1.%s.rpm' % arch
        pkg_mgr_cmd = 'rpm'
    else:
        # Debian-family package/arch naming
        arch = 'i386' if linux.os['arch'] == 'i386' else 'amd64'
        lib_package = 'libsgutils2-2_1.39-0.1_%s.deb' % arch
        utils_package = 'sg3-utils_1.39-0.1_%s.deb' % arch
        pkg_mgr_cmd = 'dpkg'
    # library first: the utils package depends on it
    system2(('wget', 'http://sg.danny.cz/sg/p/'+lib_package, '-P', '/tmp'),)
    system2((pkg_mgr_cmd, '-i', '/tmp/'+lib_package))
    system2(('wget', 'http://sg.danny.cz/sg/p/'+utils_package, '-P', '/tmp'),)
    system2((pkg_mgr_cmd, '-i', '/tmp/'+utils_package))
    os.remove('/tmp/'+lib_package)
    os.remove('/tmp/'+utils_package)
from scalarizr.externals.collections import namedtuple from scalarizr.util import system2, firstmatched, PopenError from scalarizr.util.software import which from scalarizr.linux import coreutils, pkgmgr from scalarizr.storage import StorageError logger = logging.getLogger(__name__) class Lvm2Error(PopenError): pass if not os.path.exists('/sbin/pvs'): pkgmgr.installed('lvm2') try: PVS = which('pvs') VGS = which('vgs') LVS = which('lvs') PVSCAN = which('pvscan') PVCREATE = which('pvcreate') VGCREATE = which('vgcreate') LVCREATE = which('lvcreate') LVCHANGE = which('lvchange') VGCHANGE = which('vgchange') VGEXTEND = which('vgextend') VGREDUCE = which('vgreduce')
def rebundle(self):
    """Bundle the root filesystem into a GCE image and register it.

    Creates a tarball of the root device with gcimagebundle, uploads it
    to a temporary GCS bucket, registers it as a RAW image and finally
    removes the temporary bucket/object.  Returns the image identifier
    as '<project>/images/<name>'.
    """
    rebundle_dir = tempfile.mkdtemp()
    try:
        pl = bus.platform
        proj_id = pl.get_numeric_project_id()
        proj_name = pl.get_project_id()
        cloudstorage = pl.get_storage_conn()
        # Determine the root filesystem size
        devices = coreutils.df()
        root_disk = firstmatched(lambda x: x.mpoint == '/', devices)
        if not root_disk:
            raise HandlerError("Can't find root device")
        # in bytes adjusted to 512 block device size
        fssize = (root_disk.size * 1000 / 512) * 512
        # Old code. Should be reworked
        if os.path.exists('/dev/root'):
            root_part_path = os.path.realpath('/dev/root')
        else:
            # resolve the block device backing '/' via its major:minor pair
            rootfs_stat = os.stat('/')
            root_device_minor = os.minor(rootfs_stat.st_dev)
            root_device_major = os.major(rootfs_stat.st_dev)
            root_part_path = os.path.realpath('/dev/block/{0}:{1}'.format(
                root_device_major, root_device_minor))
        root_part_sysblock_path = glob.glob(
            '/sys/block/*/%s' % os.path.basename(root_part_path))[0]
        root_device = '/dev/%s' % os.path.basename(
            os.path.dirname(root_part_sysblock_path))
        arch_name = '%s.tar.gz' % self._role_name.lower()
        arch_path = os.path.join(rebundle_dir, arch_name)
        # update gcimagebundle; failure here is non-fatal
        try:
            pkgmgr.latest(self.gcimagebundle_pkg_name)
        except:
            e = sys.exc_info()[1]
            LOG.warn('Gcimagebundle update failed: %s' % e)
        if os_dist.redhat_family:
            # allow rsync through SELinux during bundling
            semanage = software.which('semanage')
            if not semanage:
                pkgmgr.installed('policycoreutils-python')
                semanage = software.which('semanage')
            util.system2((semanage, 'permissive', '-a', 'rsync_t'))
        gc_img_bundle_bin = software.which('gcimagebundle')
        o, e, p = util.system2(
            (gc_img_bundle_bin, '-d', root_device,
             '-e', ','.join(self.exclude_dirs),
             '--fssize', str(fssize),
             '-o', rebundle_dir,
             '--output_file_name', arch_name),
            raise_exc=False)
        if p:
            raise HandlerError(
                'Gcimagebundle util returned non-zero code %s. Stderr: %s' % (p, e))
        try:
            LOG.info('Uploading compressed image to cloud storage')
            tmp_bucket_name = 'scalr-images-%s-%s' % (
                random.randint(1, 1000000), int(time.time()))
            remote_dir = 'gcs://%s' % tmp_bucket_name

            def progress_cb(progress):
                LOG.debug('Uploading {perc}%'.format(
                    perc=progress / os.path.getsize(arch_path)))

            uploader = largetransfer.Upload(arch_path, remote_dir, simple=True,
                progress_cb=progress_cb)
            uploader.apply_async()
            try:
                try:
                    uploader.join()
                except:
                    if uploader.error:
                        error = uploader.error[1]
                    else:
                        error = sys.exc_info()[1]
                    msg = 'Image upload failed. Error:\n{error}'
                    msg = msg.format(error=error)
                    raise HandlerError(msg)
            except:
                # upload failed: best-effort cleanup of the temporary bucket
                with util.capture_exception(LOG):
                    objs = cloudstorage.objects()
                    objs.delete(bucket=tmp_bucket_name,
                        object=arch_name).execute()
                    cloudstorage.buckets().delete(
                        bucket=tmp_bucket_name).execute()
        finally:
            os.unlink(arch_path)
    finally:
        shutil.rmtree(rebundle_dir)

    goog_image_name = self._role_name.lower().replace(
        '_', '-') + '-' + str(int(time.time()))
    try:
        LOG.info('Registering new image %s' % goog_image_name)
        compute = pl.get_compute_conn()
        image_url = 'http://storage.googleapis.com/%s/%s' % (
            tmp_bucket_name, arch_name)
        req_body = dict(name=goog_image_name, sourceType='RAW',
            rawDisk=dict(source=image_url))
        req = compute.images().insert(project=proj_id, body=req_body)
        operation = req.execute()['name']
        LOG.info('Waiting for image to register')

        def image_is_ready():
            # poll the global operation until DONE; surface any errors
            req = compute.globalOperations().get(project=proj_id,
                operation=operation)
            res = req.execute()
            if res['status'] == 'DONE':
                if res.get('error'):
                    errors = []
                    for e in res['error']['errors']:
                        err_text = '%s: %s' % (e['code'], e['message'])
                        errors.append(err_text)
                    raise Exception('\n'.join(errors))
                return True
            return False
        util.wait_until(image_is_ready, logger=LOG, timeout=600)
    finally:
        # the image has been registered (or failed); the source archive
        # in GCS is no longer needed either way
        try:
            objs = cloudstorage.objects()
            objs.delete(bucket=tmp_bucket_name, object=arch_name).execute()
            cloudstorage.buckets().delete(bucket=tmp_bucket_name).execute()
        except:
            e = sys.exc_info()[1]
            LOG.error('Failed to remove image compressed source: %s' % e)

    return '%s/images/%s' % (proj_name, goog_image_name)
@author: marat ''' from __future__ import with_statement import os import logging import base64 import collections from scalarizr import linux if not linux.which('lvs'): from scalarizr.linux import pkgmgr pkgmgr.installed('lvm2') LOG = logging.getLogger(__name__) class NotFound(linux.LinuxError): pass def system(*args, **kwargs): kwargs['logger'] = LOG kwargs['close_fds'] = True ''' To prevent this garbage in stderr (Fedora/CentOS): File descriptor 6 (/tmp/ffik4yjng (deleted)) leaked on lv* invocation. Parent PID 29542: /usr/bin/python '''
def update(self, workdir):
    """Clone or pull `self.url` into *workdir* using Git.

    Installs git on demand, optionally wires a deploy key through a
    temporary GIT_SSH wrapper script, and re-clones from scratch when
    *workdir* tracks a different origin.
    """
    if not os.access(self.executable, os.X_OK):
        self._logger.info('Installing Git SCM...')
        if linux.os['family'] == 'Debian':
            package = 'git-core'  # historical Debian package name
        else:
            package = 'git'
        pkgmgr.installed(package)
    #if not os.path.exists(workdir):
    #   self._logger.info('Creating destination directory')
    #   os.makedirs(workdir)
    tmpdir = tempfile.mkdtemp()
    env = {}
    try:
        if self.private_key:
            # write the deploy key and a GIT_SSH shim that uses it
            pk_path = os.path.join(tmpdir, 'pk.pem')
            with open(pk_path, 'w') as fp:
                fp.write(self.private_key)
            os.chmod(pk_path, 0400)  # ssh refuses group/world-readable keys
            git_ssh_path = os.path.join(tmpdir, 'git_ssh.sh')
            with open(git_ssh_path, 'w') as fp:
                fp.write(self.ssh_tpl % pk_path)
            os.chmod(git_ssh_path, 0755)
            env.update(dict(GIT_SSH=git_ssh_path))
        if os.path.exists(os.path.join(workdir, '.git')):
            origin_url = system2(
                ('git', 'config', '--get', 'remote.origin.url'),
                cwd=workdir, raise_exc=False)[0]
            if origin_url.strip() != self.url.strip():
                # working copy tracks another repo: wipe and clone afresh
                self._logger.info('%s is not origin of %s (%s is)',
                    self.url, workdir, origin_url)
                self._logger.info(
                    'Remove all files in %s and checkout from %s',
                    workdir, self.url)
                shutil.rmtree(workdir)
                os.mkdir(workdir)
                out, err, ret_code = system2(
                    ('git', 'clone', self.url, workdir), env=env)
            else:
                self._logger.info('Updating directory %s (git-pull)', workdir)
                out, err, ret_code = system2(('git', 'pull'), env=env,
                    cwd=workdir)
        else:
            self._logger.info('Checkout from %s', self.url)
            out, err, ret_code = system2(
                ('git', 'clone', self.url, workdir), env=env)
        if ret_code:
            raise Exception('Git failed to clone repository. %s' % out)
        self._logger.info('Successfully deployed %s from %s',
            workdir, self.url)
    finally:
        shutil.rmtree(tmpdir)
from __future__ import with_statement import logging import re import os from scalarizr import linux from scalarizr.linux import coreutils from scalarizr.storage2 import StorageError if not linux.which('mdadm'): from scalarizr.linux import pkgmgr pkgmgr.installed('mdadm', updatedb=True) mdadm_binary = linux.which('mdadm') if not os.path.exists('/proc/mdstat'): coreutils.modprobe('md_mod') LOG = logging.getLogger(__name__) def mdadm(mode, md_device=None, *devices, **long_kwds): """ Example: mdadm.mdadm('create', '/dev/md0', '/dev/loop0', '/dev/loop1', level=0, metadata='default', assume_clean=True, raid_devices=2) """ raise_exc = long_kwds.pop('raise_exc', True) return linux.system(linux.build_cmd_args( mdadm_binary,
def _check_pigz(self):
    """Ensure pigz (parallel gzip) is installed; on RedHat-family
    systems it ships in EPEL, so enable that repository on demand."""
    try:
        pkgmgr.check_software(['pigz'])
    except pkgmgr.SoftwareError:
        # not installed yet: add EPEL and refresh the package database
        pkgmgr.epel_repository()
        pkgmgr.installed('pigz', updatedb=True)
def _install_mod_ssl(self):
    """Install mod_ssl when the Apache SSL module file is absent."""
    mod_ssl_path = __apache__["mod_ssl_file"]
    if os.path.exists(mod_ssl_path):
        return
    LOG.info("%s does not exist. Trying to install mod_ssl." % mod_ssl_path)
    pkgmgr.installed("mod_ssl")
def _install_ami_tools(self): if linux.os['name'] == 'Amazon': pkgmgr.installed('aws-amitools-ec2-1.5.3') self.bundle_vol_cmd = '/opt/aws/bin/ec2-bundle-vol' return if not os.path.exists(self._tools_dir): if not os.path.exists(os.path.dirname(self._tools_dir)): os.mkdir(os.path.dirname(self._tools_dir)) os.mkdir(self._tools_dir) if not os.path.exists(self._ruby_dir): os.mkdir(self._ruby_dir) self._remove_old_versions() self._install_ruby() ami_tools_src = HttpSource( 'http://s3.amazonaws.com/ec2-downloads/ec2-ami-tools.zip') ami_tools_src.update(self._tools_dir) directory_contents = os.listdir(self._tools_dir) self.ami_bin_dir = None for item in directory_contents: if self.ami_bin_dir: break elif item.startswith('ec2-ami-tools'): self.ami_bin_dir = os.path.join(self._tools_dir, os.path.join(item, 'bin')) if linux.os['name'] == 'CentOS' and linux.os['release'] < (6, 0): # patching ami tools so /dev/root is determinated as valid device ami_tools_dir = os.path.dirname(self.ami_bin_dir) file_to_patch = os.path.join(ami_tools_dir, 'lib/ec2/platform/linux/image.rb') for line in fileinput.input(file_to_patch, inplace=True): if 'ROOT_DEVICE_REGEX = ' in line: definition_part = line.split('=')[0] fixed_regex = '/^(\/dev\/(?:root|(?:xvd|sd)(?:[a-z]|[a-c][a-z]|d[a-x])))[1]?$/' print '%s=%s' % (definition_part, fixed_regex) else: print line, # updating mkfs cause of filesystem option setting bug pkgmgr.installed('texinfo') e2fsprogs_src = HttpSource( 'https://www.kernel.org/pub/linux/kernel/' 'people/tytso/e2fsprogs/v1.42.5/e2fsprogs-1.42.5.tar.gz') e2fsprogs_src.update('/tmp') e2fs_dir = '/tmp/e2fsprogs-1.42.5' build_dir = os.path.join(e2fs_dir, 'build') os.mkdir(build_dir) system2(('../configure'), cwd=build_dir) system2(('make'), cwd=build_dir) system2(('make', 'install'), cwd=build_dir) self.bundle_vol_cmd = os.path.join(self.ami_bin_dir, 'ec2-bundle-vol') system2(('chmod', '-R', '0755', os.path.dirname(self._tools_dir))) system2(('export', 'EC2_AMITOOL_HOME=%s' % 
os.path.dirname(self.ami_bin_dir)), shell=True)
def rebundle(self): rebundle_dir = tempfile.mkdtemp() try: pl = bus.platform proj_id = pl.get_numeric_project_id() proj_name = pl.get_project_id() cloudstorage = pl.new_storage_client() root_part_path = os.path.realpath('/dev/root') root_part_sysblock_path = glob.glob( '/sys/block/*/%s' % os.path.basename(root_part_path))[0] root_device = '/dev/%s' % os.path.basename( os.path.dirname(root_part_sysblock_path)) arch_name = '%s.tar.gz' % self._role_name.lower() arch_path = os.path.join(rebundle_dir, arch_name) # update gcimagebundle try: pkgmgr.latest(self.gcimagebundle_pkg_name) except: e = sys.exc_info()[1] LOG.warn('Gcimagebundle update failed: %s' % e) if os_dist.redhat_family: semanage = software.which('semanage') if not semanage: pkgmgr.installed('policycoreutils-python') semanage = software.which('semanage') util.system2((semanage, 'permissive', '-a', 'rsync_t')) gc_img_bundle_bin = software.which('gcimagebundle') o, e, p = util.system2( (gc_img_bundle_bin, '-d', root_device, '-e', ','.join( self.exclude_dirs), '-o', rebundle_dir, '--output_file_name', arch_name), raise_exc=False) if p: raise HandlerError( 'Gcimagebundle util returned non-zero code %s. Stderr: %s' % (p, e)) try: LOG.info('Uploading compressed image to cloud storage') tmp_bucket_name = 'scalr-images-%s-%s' % (random.randint( 1, 1000000), int(time.time())) remote_path = 'gcs://%s/%s' % (tmp_bucket_name, arch_name) arch_size = os.stat(arch_path).st_size uploader = FileTransfer(src=arch_path, dst=remote_path) try: upload_result = uploader.run() if upload_result['failed']: errors = [ str(failed['exc_info'][1]) for failed in upload_result['failed'] ] raise HandlerError('Image upload failed. 
Errors:\n%s' % '\n'.join(errors)) assert arch_size == upload_result['completed'][0]['size'] except: with util.capture_exception(LOG): objs = cloudstorage.objects() objs.delete(bucket=tmp_bucket_name, object=arch_name).execute() cloudstorage.buckets().delete( bucket=tmp_bucket_name).execute() finally: os.unlink(arch_path) finally: shutil.rmtree(rebundle_dir) goog_image_name = self._role_name.lower().replace( '_', '-') + '-' + str(int(time.time())) try: LOG.info('Registering new image %s' % goog_image_name) compute = pl.new_compute_client() image_url = 'http://storage.googleapis.com/%s/%s' % ( tmp_bucket_name, arch_name) req_body = dict(name=goog_image_name, sourceType='RAW', rawDisk=dict(source=image_url)) req = compute.images().insert(project=proj_id, body=req_body) operation = req.execute()['name'] LOG.info('Waiting for image to register') def image_is_ready(): req = compute.globalOperations().get(project=proj_id, operation=operation) res = req.execute() if res['status'] == 'DONE': if res.get('error'): errors = [] for e in res['error']['errors']: err_text = '%s: %s' % (e['code'], e['message']) errors.append(err_text) raise Exception('\n'.join(errors)) return True return False util.wait_until(image_is_ready, logger=LOG, timeout=600) finally: try: objs = cloudstorage.objects() objs.delete(bucket=tmp_bucket_name, object=arch_name).execute() cloudstorage.buckets().delete(bucket=tmp_bucket_name).execute() except: e = sys.exc_info()[1] LOG.error('Faled to remove image compressed source: %s' % e) return '%s/images/%s' % (proj_name, goog_image_name)
def test_installed(mgr):
    """A package reported as not installed gets installed at the requested version.

    `mgr` is a mocked package-manager factory: its info() result says
    nothing is installed, so pkgmgr.installed() must call install()
    with the package name and pinned version.
    """
    mgr().info.return_value = {'installed': None}
    pkgmgr.installed('thing', '1.0', True)
    mgr().install.assert_called_once_with('thing', '1.0')
finally: try: self._remove_bucket(bucket_name, archive_name, cloudstorage) except (Exception, BaseException), e: LOG.error('Faled to remove image compressed source: %s' % e) def _prepare_software(self): try: pkgmgr.latest(self.gcimagebundle_pkg_name) except (Exception, BaseException), e: LOG.warn('Gcimagebundle update failed: %s' % e) if os_dist.redhat_family: semanage = software.which('semanage') if not semanage: pkgmgr.installed('policycoreutils-python') semanage = software.which('semanage') util.system2((semanage, 'permissive', '-a', 'rsync_t')) def snapshot(self, op, name): rebundle_dir = tempfile.mkdtemp() archive_path = '' try: pl = __node__['platform'] proj_id = pl.get_numeric_project_id() proj_name = pl.get_project_id() cloudstorage = pl.new_storage_client() root_part_path = os.path.realpath('/dev/root') root_part_sysblock_path = glob.glob('/sys/block/*/%s' % os.path.basename(root_part_path))[0]
def rsync(src, dst, **long_kwds):
    """Placeholder rsync wrapper.

    Ensures the rsync binary is installed, but the actual transfer
    logic has not been implemented yet.

    :raises NotImplementedError: always.
    """
    if not os.path.exists("/usr/bin/rsync"):
        pkgmgr.installed("rsync")
    raise NotImplementedError()
class HttpSource(Source): def __init__(self, url=None): self._logger = logging.getLogger(__name__) self.url = url def update(self, workdir): log = bus.init_op.logger if bus.init_op else self._logger if not os.path.exists(workdir): os.makedirs(workdir) purl = urlparse(self.url) log.info('Downloading %s', self.url) try: hdlrs = [urllib2.HTTPRedirectHandler()] if purl.scheme == 'https': hdlrs.append(urllib2.HTTPSHandler()) opener = urllib2.build_opener(*hdlrs) resp = opener.open(self.url) except urllib2.URLError, e: raise SourceError('Downloading %s failed. %s' % (self.url, e)) tmpdir = tempfile.mkdtemp(dir='/tmp/') tmpdst = os.path.join(tmpdir, os.path.basename(purl.path)) fp = open(tmpdst, 'w+') num_read = 0 while True: buf = resp.read(8192) if not buf: break num_read += len(buf) self._logger.debug('%d bytes downloaded', num_read) fp.write(buf) fp.close() log.info('File saved as %s', tmpdst) try: mime = mimetypes.guess_type(tmpdst) if mime[0] in ('application/x-tar', 'application/zip'): unar = None if mime[0] == 'application/x-tar': unar = ['tar'] if mime[1] == 'gzip': unar += ['-xzf'] elif mime[1] in ('bzip', 'bzip2'): unar += ['-xjf'] else: raise UndefinedSourceError() unar += [tmpdst, '-C', workdir] elif mime[0] == 'application/zip': if not linux.which('unzip'): log.info('Installing unzip de-archiver') pkgmgr.installed('unzip') unar = ['unzip', tmpdst, '-d', workdir] else: raise UndefinedSourceError('Unexpected archive format %s' % str(mime)) log.info('Extracting source from %s into %s', tmpdst, workdir) out = system2(unar)[0] self._logger.info(out) else: log.info('Moving source from %s to %s', tmpdst, workdir) dst = os.path.join(workdir, os.path.basename(tmpdst)) if os.path.isfile(dst): self._logger.debug('Removing already existed file %s', dst) os.remove(dst) shutil.move(tmpdst, workdir) self._logger.info( 'Deploying %s to %s has been completed successfully.', self.url, dst) except: exc = sys.exc_info() if isinstance(exc[0], SourceError): raise raise 
SourceError, exc[1], exc[2] finally: if os.path.exists(tmpdst): os.remove(tmpdst) if os.path.exists(tmpdir): shutil.rmtree(tmpdir)
from __future__ import with_statement import logging import re import os from scalarizr import linux from scalarizr.linux import coreutils from scalarizr.storage2 import StorageError if not linux.which('mdadm'): from scalarizr.linux import pkgmgr pkgmgr.installed('mdadm') mdadm_binary = linux.which('mdadm') if not os.path.exists('/proc/mdstat'): coreutils.modprobe('md_mod') LOG = logging.getLogger(__name__) def mdadm(mode, md_device=None, *devices, **long_kwds): """ Example: mdadm.mdadm('create', '/dev/md0', '/dev/loop0', '/dev/loop1', level=0, metadata='default', assume_clean=True, raid_devices=2) """ raise_exc = long_kwds.pop('raise_exc', True) return linux.system(linux.build_cmd_args( mdadm_binary, ['--%s' % mode] + ([md_device] if md_device else []),
def rebundle(self): rebundle_dir = tempfile.mkdtemp() try: pl = bus.platform proj_id = pl.get_numeric_project_id() proj_name = pl.get_project_id() cloudstorage = pl.new_storage_client() root_part_path = os.path.realpath('/dev/root') root_part_sysblock_path = glob.glob('/sys/block/*/%s' % os.path.basename(root_part_path))[0] root_device = '/dev/%s' % os.path.basename(os.path.dirname(root_part_sysblock_path)) arch_name = '%s.tar.gz' % self._role_name.lower() arch_path = os.path.join(rebundle_dir, arch_name) # update gcimagebundle try: pkgmgr.latest(self.gcimagebundle_pkg_name) except: e = sys.exc_info()[1] LOG.warn('Gcimagebundle update failed: %s' % e) if os_dist.redhat_family: semanage = software.which('semanage') if not semanage: pkgmgr.installed('policycoreutils-python') semanage = software.which('semanage') util.system2((semanage, 'permissive', '-a', 'rsync_t')) gc_img_bundle_bin = software.which('gcimagebundle') o, e, p = util.system2((gc_img_bundle_bin, '-d', root_device, '-e', ','.join(self.exclude_dirs), '-o', rebundle_dir, '--output_file_name', arch_name), raise_exc=False) if p: raise HandlerError('Gcimagebundle util returned non-zero code %s. Stderr: %s' % (p, e)) try: LOG.info('Uploading compressed image to cloud storage') tmp_bucket_name = 'scalr-images-%s-%s' % (random.randint(1, 1000000), int(time.time())) remote_path = 'gcs://%s/%s' % (tmp_bucket_name, arch_name) arch_size = os.stat(arch_path).st_size uploader = FileTransfer(src=arch_path, dst=remote_path) try: upload_result = uploader.run() if upload_result['failed']: errors = [str(failed['exc_info'][1]) for failed in upload_result['failed']] raise HandlerError('Image upload failed. 
Errors:\n%s' % '\n'.join(errors)) assert arch_size == upload_result['completed'][0]['size'] except: with util.capture_exception(LOG): objs = cloudstorage.objects() objs.delete(bucket=tmp_bucket_name, object=arch_name).execute() cloudstorage.buckets().delete(bucket=tmp_bucket_name).execute() finally: os.unlink(arch_path) finally: shutil.rmtree(rebundle_dir) goog_image_name = self._role_name.lower().replace('_', '-') + '-' + str(int(time.time())) try: LOG.info('Registering new image %s' % goog_image_name) compute = pl.new_compute_client() image_url = 'http://storage.googleapis.com/%s/%s' % (tmp_bucket_name, arch_name) req_body = dict( name=goog_image_name, sourceType='RAW', rawDisk=dict( source=image_url ) ) req = compute.images().insert(project=proj_id, body=req_body) operation = req.execute()['name'] LOG.info('Waiting for image to register') def image_is_ready(): req = compute.globalOperations().get(project=proj_id, operation=operation) res = req.execute() if res['status'] == 'DONE': if res.get('error'): errors = [] for e in res['error']['errors']: err_text = '%s: %s' % (e['code'], e['message']) errors.append(err_text) raise Exception('\n'.join(errors)) return True return False util.wait_until(image_is_ready, logger=LOG, timeout=600) finally: try: objs = cloudstorage.objects() objs.delete(bucket=tmp_bucket_name, object=arch_name).execute() cloudstorage.buckets().delete(bucket=tmp_bucket_name).execute() except: e = sys.exc_info()[1] LOG.error('Faled to remove image compressed source: %s' % e) return '%s/images/%s' % (proj_name, goog_image_name)
from __future__ import with_statement import logging import re import os from scalarizr import linux from scalarizr.linux import coreutils from scalarizr.storage2 import StorageError if not linux.which('mdadm'): from scalarizr.linux import pkgmgr pkgmgr.installed('mdadm') mdadm_binary = linux.which('mdadm') if not os.path.exists('/proc/mdstat'): coreutils.modprobe('md_mod') LOG = logging.getLogger(__name__) def mdadm(mode, md_device=None, *devices, **long_kwds): """ Example: mdadm.mdadm('create', '/dev/md0', '/dev/loop0', '/dev/loop1', level=0, metadata='default', assume_clean=True, raid_devices=2) """ raise_exc = long_kwds.pop('raise_exc', True) return linux.system(linux.build_cmd_args( mdadm_binary,
finally: try: self._remove_bucket(bucket_name, archive_name, cloudstorage) except (Exception, BaseException), e: LOG.error('Faled to remove image compressed source: %s' % e) def _prepare_software(self): try: pkgmgr.latest(self.gcimagebundle_pkg_name) except (Exception, BaseException), e: LOG.warn('Gcimagebundle update failed: %s' % e) if os_dist.redhat_family: semanage = software.which('semanage') if not semanage: pkgmgr.installed('policycoreutils-python') semanage = software.which('semanage') util.system2((semanage, 'permissive', '-a', 'rsync_t')) def snapshot(self, op, name): rebundle_dir = tempfile.mkdtemp() archive_path = '' try: pl = __node__['platform'] proj_id = pl.get_numeric_project_id() proj_name = pl.get_project_id() cloudstorage = pl.get_storage_conn() root_part_path = None for d in coreutils.df():
from __future__ import with_statement import os import logging import base64 import collections import time from scalarizr import linux if not linux.which('lvs'): from scalarizr.linux import pkgmgr # set updatedb=True to work over problem on GCE: # E: Problem renaming the file /var/cache/apt/pkgcache.bin.fsF22K to /var/cache/apt/pkgcache.bin pkgmgr.installed('lvm2', updatedb=True) LOG = logging.getLogger(__name__) class NotFound(linux.LinuxError): pass def system(*args, **kwargs): kwargs['logger'] = LOG kwargs['close_fds'] = True ''' To prevent this garbage in stderr (Fedora/CentOS): File descriptor 6 (/tmp/ffik4yjng (deleted)) leaked on lv* invocation. Parent PID 29542: /usr/bin/python '''
from __future__ import with_statement import logging import re import os from scalarizr import linux from scalarizr.linux import coreutils from scalarizr.storage2 import StorageError if not linux.which('mdadm'): from scalarizr.linux import pkgmgr pkgmgr.installed('mdadm', updatedb=True) mdadm_binary = linux.which('mdadm') if not os.path.exists('/proc/mdstat'): coreutils.modprobe('md_mod') LOG = logging.getLogger(__name__) def mdadm(mode, md_device=None, *devices, **long_kwds): """ Example: mdadm.mdadm('create', '/dev/md0', '/dev/loop0', '/dev/loop1', level=0, metadata='default', assume_clean=True, raid_devices=2) """ raise_exc = long_kwds.pop('raise_exc', True) return linux.system(linux.build_cmd_args( mdadm_binary, ['--%s' % mode] + ([md_device] if md_device else []),